-rw-r--r--   plumbing/format/idxfile/decoder.go               2
-rw-r--r--   plumbing/format/idxfile/decoder_test.go         11
-rw-r--r--   plumbing/format/idxfile/encoder.go               2
-rw-r--r--   plumbing/format/idxfile/encoder_test.go          3
-rw-r--r--   plumbing/format/idxfile/idxfile.go               6
-rw-r--r--   plumbing/format/packfile/decoder.go            104
-rw-r--r--   plumbing/format/packfile/decoder_test.go        53
-rw-r--r--   plumbing/format/packfile/index.go               82
-rw-r--r--   plumbing/format/packfile/index_test.go         122
-rw-r--r--   storage/filesystem/internal/dotgit/writers.go   20
-rw-r--r--   storage/filesystem/object.go                    51
11 files changed, 318 insertions, 138 deletions
diff --git a/plumbing/format/idxfile/decoder.go b/plumbing/format/idxfile/decoder.go
index fea5f0b..4243f76 100644
--- a/plumbing/format/idxfile/decoder.go
+++ b/plumbing/format/idxfile/decoder.go
@@ -104,7 +104,7 @@ func readObjectNames(idx *Idxfile, r io.Reader) error {
return err
}
- idx.Entries = append(idx.Entries, Entry{Hash: ref})
+ idx.Entries = append(idx.Entries, &Entry{Hash: ref})
}
return nil
diff --git a/plumbing/format/idxfile/decoder_test.go b/plumbing/format/idxfile/decoder_test.go
index 609f4e3..991232d 100644
--- a/plumbing/format/idxfile/decoder_test.go
+++ b/plumbing/format/idxfile/decoder_test.go
@@ -1,4 +1,4 @@
-package idxfile
+package idxfile_test
import (
"bytes"
@@ -6,6 +6,7 @@ import (
"testing"
"github.com/src-d/go-git-fixtures"
+ . "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
"gopkg.in/src-d/go-git.v4/storage/memory"
@@ -48,12 +49,8 @@ func (s *IdxfileSuite) TestDecodeCRCs(c *C) {
_, err = pd.Decode()
c.Assert(err, IsNil)
- i := &Idxfile{Version: VersionSupported}
-
- offsets := pd.Offsets()
- for h, crc := range pd.CRCs() {
- i.Add(h, uint64(offsets[h]), crc)
- }
+ i := pd.Index().ToIdxFile()
+ i.Version = VersionSupported
buf := bytes.NewBuffer(nil)
e := NewEncoder(buf)
diff --git a/plumbing/format/idxfile/encoder.go b/plumbing/format/idxfile/encoder.go
index 71e1b3f..d8f4d94 100644
--- a/plumbing/format/idxfile/encoder.go
+++ b/plumbing/format/idxfile/encoder.go
@@ -124,7 +124,7 @@ func (e *Encoder) encodeChecksums(idx *Idxfile) (int, error) {
}
// EntryList implements sort.Interface allowing sorting in increasing order.
-type EntryList []Entry
+type EntryList []*Entry
func (p EntryList) Len() int { return len(p) }
func (p EntryList) Less(i, j int) bool { return p[i].Hash.String() < p[j].Hash.String() }
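Since EntryList now holds pointers, the *Entry values placed in it can be shared with other structures instead of copied. A minimal caller-side sketch, assuming EntryList also defines Swap to complete sort.Interface (the helper name sortByHash is illustrative, not part of the package):

package example

import (
	"sort"

	"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
)

// sortByHash sorts a pointer-based EntryList into the increasing hash order
// the idx format expects; the *Entry values themselves are shared, not copied.
func sortByHash(entries idxfile.EntryList) idxfile.EntryList {
	sort.Sort(entries)
	return entries
}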
diff --git a/plumbing/format/idxfile/encoder_test.go b/plumbing/format/idxfile/encoder_test.go
index 1fc4e9c..d566b0d 100644
--- a/plumbing/format/idxfile/encoder_test.go
+++ b/plumbing/format/idxfile/encoder_test.go
@@ -1,4 +1,4 @@
-package idxfile
+package idxfile_test
import (
"bytes"
@@ -6,6 +6,7 @@ import (
"github.com/src-d/go-git-fixtures"
"gopkg.in/src-d/go-git.v4/plumbing"
+ . "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
. "gopkg.in/check.v1"
)
diff --git a/plumbing/format/idxfile/idxfile.go b/plumbing/format/idxfile/idxfile.go
index 5a718f3..b9bb1c2 100644
--- a/plumbing/format/idxfile/idxfile.go
+++ b/plumbing/format/idxfile/idxfile.go
@@ -21,6 +21,10 @@ type Idxfile struct {
IdxChecksum [20]byte
}
+func NewIdxfile() *Idxfile {
+ return &Idxfile{}
+}
+
// Entry is the in memory representation of an object entry in the idx file.
type Entry struct {
Hash plumbing.Hash
@@ -30,7 +34,7 @@ type Entry struct {
// Add adds a new Entry with the given values to the Idxfile.
func (idx *Idxfile) Add(h plumbing.Hash, offset uint64, crc32 uint32) {
- idx.Entries = append(idx.Entries, Entry{
+ idx.Entries = append(idx.Entries, &Entry{
Hash: h,
Offset: offset,
CRC32: crc32,
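With Add now appending *Entry values, a short sketch of building an Idxfile by hand (buildIdxfile is an illustrative helper; the hash and offset are made up):

package example

import (
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
)

// buildIdxfile creates an empty Idxfile with the new constructor and adds a
// single entry to it.
func buildIdxfile() *idxfile.Idxfile {
	idx := idxfile.NewIdxfile()

	h := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
	idx.Add(h, 186, 0) // hash, offset in the packfile, CRC-32

	// Entries now holds pointers, so the same *Entry values can be shared
	// with a packfile.Index without copying.
	return idx
}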
diff --git a/plumbing/format/packfile/decoder.go b/plumbing/format/packfile/decoder.go
index 21ddbbf..39680a3 100644
--- a/plumbing/format/packfile/decoder.go
+++ b/plumbing/format/packfile/decoder.go
@@ -56,10 +56,12 @@ type Decoder struct {
o storer.EncodedObjectStorer
tx storer.Transaction
- isDecoded bool
- offsetToHash map[int64]plumbing.Hash
- hashToOffset map[plumbing.Hash]int64
- crcs map[plumbing.Hash]uint32
+ isDecoded bool
+
+ // hasBuiltIndex indicates whether the index is fully built. If it is not,
+ // it will be built incrementally while decoding.
+ hasBuiltIndex bool
+ idx *Index
offsetToType map[int64]plumbing.ObjectType
decoderType plumbing.ObjectType
@@ -102,10 +104,7 @@ func NewDecoderForType(s *Scanner, o storer.EncodedObjectStorer,
s: s,
o: o,
- offsetToHash: make(map[int64]plumbing.Hash, 0),
- hashToOffset: make(map[plumbing.Hash]int64, 0),
- crcs: make(map[plumbing.Hash]uint32, 0),
-
+ idx: NewIndex(0),
offsetToType: make(map[int64]plumbing.ObjectType, 0),
decoderType: t,
@@ -139,6 +138,11 @@ func (d *Decoder) doDecode() error {
return err
}
+ if !d.hasBuiltIndex {
+ d.idx = NewIndex(int(count))
+ }
+ defer func() { d.hasBuiltIndex = true }()
+
_, isTxStorer := d.o.(storer.Transactioner)
switch {
case d.o == nil:
@@ -218,13 +222,22 @@ func (d *Decoder) DecodeObject() (plumbing.EncodedObject, error) {
}
func (d *Decoder) decodeIfSpecificType(h *ObjectHeader) (plumbing.EncodedObject, error) {
- var realType plumbing.ObjectType
- var err error
+ var (
+ obj plumbing.EncodedObject
+ realType plumbing.ObjectType
+ err error
+ )
switch h.Type {
case plumbing.OFSDeltaObject:
realType, err = d.ofsDeltaType(h.OffsetReference)
case plumbing.REFDeltaObject:
realType, err = d.refDeltaType(h.Reference)
+ if err == plumbing.ErrObjectNotFound {
+ obj, err = d.decodeByHeader(h)
+ if err == nil {
+ realType = obj.Type()
+ }
+ }
default:
realType = h.Type
}
@@ -236,6 +249,10 @@ func (d *Decoder) decodeIfSpecificType(h *ObjectHeader) (plumbing.EncodedObject,
d.offsetToType[h.Offset] = realType
if d.decoderType == realType {
+ if obj != nil {
+ return obj, nil
+ }
+
return d.decodeByHeader(h)
}
@@ -252,16 +269,12 @@ func (d *Decoder) ofsDeltaType(offset int64) (plumbing.ObjectType, error) {
}
func (d *Decoder) refDeltaType(ref plumbing.Hash) (plumbing.ObjectType, error) {
- if o, ok := d.hashToOffset[ref]; ok {
- return d.ofsDeltaType(o)
- }
-
- obj, err := d.o.EncodedObject(plumbing.AnyObject, ref)
- if err != nil {
- return plumbing.InvalidObject, err
+ e, ok := d.idx.LookupHash(ref)
+ if !ok {
+ return plumbing.InvalidObject, plumbing.ErrObjectNotFound
}
- return obj.Type(), nil
+ return d.ofsDeltaType(int64(e.Offset))
}
func (d *Decoder) decodeByHeader(h *ObjectHeader) (plumbing.EncodedObject, error) {
@@ -285,9 +298,9 @@ func (d *Decoder) decodeByHeader(h *ObjectHeader) (plumbing.EncodedObject, error
return obj, err
}
- hash := obj.Hash()
- d.setOffset(hash, h.Offset)
- d.setCRC(hash, crc)
+ if !d.hasBuiltIndex {
+ d.idx.Add(obj.Hash(), uint64(h.Offset), crc)
+ }
return obj, nil
}
@@ -365,10 +378,10 @@ func (d *Decoder) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset i
return 0, err
}
- h := d.offsetToHash[offset]
+ e, ok := d.idx.LookupOffset(uint64(offset))
var base plumbing.EncodedObject
- if h != plumbing.ZeroHash {
- base = d.cache.Get(h)
+ if ok {
+ base = d.cache.Get(e.Hash)
}
if base == nil {
@@ -385,22 +398,13 @@ func (d *Decoder) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset i
return crc, err
}
-func (d *Decoder) setOffset(h plumbing.Hash, offset int64) {
- d.offsetToHash[offset] = h
- d.hashToOffset[h] = offset
-}
-
-func (d *Decoder) setCRC(h plumbing.Hash, crc uint32) {
- d.crcs[h] = crc
-}
-
func (d *Decoder) recallByOffset(o int64) (plumbing.EncodedObject, error) {
if d.s.IsSeekable {
return d.DecodeObjectAt(o)
}
- if h, ok := d.offsetToHash[o]; ok {
- return d.recallByHashNonSeekable(h)
+ if e, ok := d.idx.LookupOffset(uint64(o)); ok {
+ return d.recallByHashNonSeekable(e.Hash)
}
return nil, plumbing.ErrObjectNotFound
@@ -408,8 +412,8 @@ func (d *Decoder) recallByOffset(o int64) (plumbing.EncodedObject, error) {
func (d *Decoder) recallByHash(h plumbing.Hash) (plumbing.EncodedObject, error) {
if d.s.IsSeekable {
- if o, ok := d.hashToOffset[h]; ok {
- return d.DecodeObjectAt(o)
+ if e, ok := d.idx.LookupHash(h); ok {
+ return d.DecodeObjectAt(int64(e.Offset))
}
}
@@ -432,22 +436,20 @@ func (d *Decoder) recallByHashNonSeekable(h plumbing.Hash) (obj plumbing.Encoded
return nil, plumbing.ErrObjectNotFound
}
-// SetOffsets sets the offsets, required when using the method DecodeObjectAt,
-// without decoding the full packfile
-func (d *Decoder) SetOffsets(offsets map[plumbing.Hash]int64) {
- d.hashToOffset = offsets
-}
-
-// Offsets returns the objects read offset, Decode method should be called
-// before to calculate the Offsets
-func (d *Decoder) Offsets() map[plumbing.Hash]int64 {
- return d.hashToOffset
+// SetIndex sets an index for the packfile being decoded, so it does not have
+// to be built again while decoding. The index might be read from an idx file
+// or reused from a previous Decoder usage (see the Index method).
+func (d *Decoder) SetIndex(idx *Index) {
+ d.hasBuiltIndex = true
+ d.idx = idx
}
-// CRCs returns the CRC-32 for each read object. Decode method should be called
-// before to calculate the CRCs
-func (d *Decoder) CRCs() map[plumbing.Hash]uint32 {
- return d.crcs
+// Index returns the index for the packfile. If an index was set with SetIndex,
+// Index returns it. Otherwise, it returns the index built while decoding. If
+// SetIndex was not called with a full index and Decode was not called for the
+// whole packfile, the returned index will be incomplete.
+func (d *Decoder) Index() *Index {
+ return d.idx
}
// Close closes the Scanner. Usually this means that the whole reader is read and
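As a rough sketch of the new flow (decodeWithIndex is an illustrative helper, not part of the package): decoding a whole packfile builds the index as a side effect, and Index then returns it for reuse by a later Decoder via SetIndex.

package example

import (
	"io"

	"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
	"gopkg.in/src-d/go-git.v4/plumbing/storer"
)

// decodeWithIndex decodes a complete packfile into s and returns the index
// the Decoder built while doing so; a later Decoder over the same packfile
// can receive it through SetIndex instead of rebuilding it.
func decodeWithIndex(pack io.Reader, s storer.EncodedObjectStorer) (*packfile.Index, error) {
	d, err := packfile.NewDecoder(packfile.NewScanner(pack), s)
	if err != nil {
		return nil, err
	}
	defer d.Close()

	if _, err := d.Decode(); err != nil {
		return nil, err
	}

	// The index is complete here because Decode walked the whole packfile.
	return d.Index(), nil
}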
diff --git a/plumbing/format/packfile/decoder_test.go b/plumbing/format/packfile/decoder_test.go
index f1e2ed7..ecf7c81 100644
--- a/plumbing/format/packfile/decoder_test.go
+++ b/plumbing/format/packfile/decoder_test.go
@@ -55,14 +55,8 @@ func (s *ReaderSuite) TestDecodeByTypeRefDelta(c *C) {
d, err := packfile.NewDecoderForType(scanner, storage, plumbing.CommitObject)
c.Assert(err, IsNil)
- // Specific offset elements needed to decode correctly the ref-delta
- offsets := map[plumbing.Hash]int64{
- plumbing.NewHash("a8d315b2b1c615d43042c3a62402b8a54288cf5c"): 84880,
- plumbing.NewHash("fb72698cab7617ac416264415f13224dfd7a165e"): 85141,
- plumbing.NewHash("eba74343e2f15d62adedfd8c883ee0262b5c8021"): 85300,
- }
-
- d.SetOffsets(offsets)
+ // An index is required to decode ref-delta objects.
+ d.SetIndex(getIndexFromIdxFile(f.Idx()))
defer d.Close()
@@ -123,7 +117,7 @@ func (s *ReaderSuite) TestDecodeByType(c *C) {
// when the packfile is ref-delta based, the offsets are required
if f.Is("ref-delta") {
- d.SetOffsets(getOffsetsFromIdx(f.Idx()))
+ d.SetIndex(getIndexFromIdxFile(f.Idx()))
}
defer d.Close()
@@ -291,8 +285,9 @@ func (s *ReaderSuite) TestDecodeCRCs(c *C) {
c.Assert(err, IsNil)
var sum uint64
- for _, crc := range d.CRCs() {
- sum += uint64(crc)
+ idx := d.Index().ToIdxFile()
+ for _, e := range idx.Entries {
+ sum += uint64(e.CRC32)
}
c.Assert(int(sum), Equals, 78022211966)
@@ -306,8 +301,7 @@ func (s *ReaderSuite) TestReadObjectAt(c *C) {
// when the packfile is ref-delta based, the offsets are required
if f.Is("ref-delta") {
- offsets := getOffsetsFromIdx(f.Idx())
- d.SetOffsets(offsets)
+ d.SetIndex(getIndexFromIdxFile(f.Idx()))
}
// the object at reference 186 is a delta, so it should be recalled,
@@ -317,32 +311,34 @@ func (s *ReaderSuite) TestReadObjectAt(c *C) {
c.Assert(obj.Hash().String(), Equals, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
}
-func (s *ReaderSuite) TestOffsets(c *C) {
+func (s *ReaderSuite) TestIndex(c *C) {
f := fixtures.Basic().One()
scanner := packfile.NewScanner(f.Packfile())
d, err := packfile.NewDecoder(scanner, nil)
c.Assert(err, IsNil)
- c.Assert(d.Offsets(), HasLen, 0)
+ c.Assert(d.Index().ToIdxFile().Entries, HasLen, 0)
_, err = d.Decode()
c.Assert(err, IsNil)
- c.Assert(d.Offsets(), HasLen, 31)
+ c.Assert(len(d.Index().ToIdxFile().Entries), Equals, 31)
}
-func (s *ReaderSuite) TestSetOffsets(c *C) {
+func (s *ReaderSuite) TestSetIndex(c *C) {
f := fixtures.Basic().One()
scanner := packfile.NewScanner(f.Packfile())
d, err := packfile.NewDecoder(scanner, nil)
c.Assert(err, IsNil)
+ idx := packfile.NewIndex(1)
h := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
- d.SetOffsets(map[plumbing.Hash]int64{h: 42})
+ idx.Add(h, uint64(42), 0)
+ d.SetIndex(idx)
- o := d.Offsets()
- c.Assert(o, HasLen, 1)
- c.Assert(o[h], Equals, int64(42))
+ idxf := d.Index().ToIdxFile()
+ c.Assert(idxf.Entries, HasLen, 1)
+ c.Assert(idxf.Entries[0].Offset, Equals, uint64(42))
}
func assertObjects(c *C, s storer.EncodedObjectStorer, expects []string) {
@@ -362,17 +358,12 @@ func assertObjects(c *C, s storer.EncodedObjectStorer, expects []string) {
}
}
-func getOffsetsFromIdx(r io.Reader) map[plumbing.Hash]int64 {
- idx := &idxfile.Idxfile{}
- err := idxfile.NewDecoder(r).Decode(idx)
- if err != nil {
+func getIndexFromIdxFile(r io.Reader) *packfile.Index {
+ idxf := idxfile.NewIdxfile()
+ d := idxfile.NewDecoder(r)
+ if err := d.Decode(idxf); err != nil {
panic(err)
}
- offsets := make(map[plumbing.Hash]int64)
- for _, e := range idx.Entries {
- offsets[e.Hash] = int64(e.Offset)
- }
-
- return offsets
+ return packfile.NewIndexFromIdxFile(idxf)
}
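Outside the tests, the same conversion enables random access through SetIndex and DecodeObjectAt. A hedged sketch (objectAt is an illustrative helper; the storer and readers are supplied by the caller):

package example

import (
	"io"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
	"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
	"gopkg.in/src-d/go-git.v4/plumbing/storer"
)

// objectAt loads a packfile.Index from an .idx stream, hands it to the
// Decoder through SetIndex and then decodes a single object at a known
// offset, without decoding the rest of the packfile.
func objectAt(pack io.ReadSeeker, idx io.Reader, offset int64,
	s storer.EncodedObjectStorer) (plumbing.EncodedObject, error) {

	idxf := idxfile.NewIdxfile()
	if err := idxfile.NewDecoder(idx).Decode(idxf); err != nil {
		return nil, err
	}

	d, err := packfile.NewDecoder(packfile.NewScanner(pack), s)
	if err != nil {
		return nil, err
	}
	defer d.Close()

	// A full index lets ref-delta objects be resolved by hash.
	d.SetIndex(packfile.NewIndexFromIdxFile(idxf))
	return d.DecodeObjectAt(offset)
}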
diff --git a/plumbing/format/packfile/index.go b/plumbing/format/packfile/index.go
new file mode 100644
index 0000000..2c5f98f
--- /dev/null
+++ b/plumbing/format/packfile/index.go
@@ -0,0 +1,82 @@
+package packfile
+
+import (
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
+)
+
+// Index is an in-memory representation of a packfile index.
+// It can be built from an idxfile.Idxfile read from an .idx file (see
+// NewIndexFromIdxFile) or converted back to one for storage (see ToIdxFile).
+type Index struct {
+ byHash map[plumbing.Hash]*idxfile.Entry
+ byOffset map[uint64]*idxfile.Entry
+}
+
+// NewIndex creates a new empty index with the given size. Size is a hint and
+// can be 0. It is recommended to set it to the number of objects to be indexed
+// if it is known beforehand (e.g. reading from a packfile).
+func NewIndex(size int) *Index {
+ return &Index{
+ byHash: make(map[plumbing.Hash]*idxfile.Entry, size),
+ byOffset: make(map[uint64]*idxfile.Entry, size),
+ }
+}
+
+// NewIndexFromIdxFile creates a new Index from an idxfile.Idxfile.
+func NewIndexFromIdxFile(idxf *idxfile.Idxfile) *Index {
+ idx := &Index{
+ byHash: make(map[plumbing.Hash]*idxfile.Entry, idxf.ObjectCount),
+ byOffset: make(map[uint64]*idxfile.Entry, idxf.ObjectCount),
+ }
+ for _, e := range idxf.Entries {
+ idx.add(e)
+ }
+
+ return idx
+}
+
+// Add adds a new Entry with the given values to the index.
+func (idx *Index) Add(h plumbing.Hash, offset uint64, crc32 uint32) {
+ e := idxfile.Entry{
+ Hash: h,
+ Offset: offset,
+ CRC32: crc32,
+ }
+ idx.add(&e)
+}
+
+func (idx *Index) add(e *idxfile.Entry) {
+ idx.byHash[e.Hash] = e
+ idx.byOffset[e.Offset] = e
+}
+
+// LookupHash looks up an entry by its hash. It returns the idxfile.Entry and
+// a bool reporting whether the entry was found.
+func (idx *Index) LookupHash(h plumbing.Hash) (*idxfile.Entry, bool) {
+ e, ok := idx.byHash[h]
+ return e, ok
+}
+
+// LookupOffset looks up an entry by its offset in the packfile. It returns
+// the idxfile.Entry and a bool reporting whether the entry was found.
+func (idx *Index) LookupOffset(offset uint64) (*idxfile.Entry, bool) {
+ e, ok := idx.byOffset[offset]
+ return e, ok
+}
+
+// Size returns the number of entries in the index.
+func (idx *Index) Size() int {
+ return len(idx.byHash)
+}
+
+// ToIdxFile converts the index to an idxfile.Idxfile, which can then be
+// serialized.
+func (idx *Index) ToIdxFile() *idxfile.Idxfile {
+ idxf := idxfile.NewIdxfile()
+ for _, e := range idx.byHash {
+ idxf.Entries = append(idxf.Entries, e)
+ }
+
+ return idxf
+}
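A small usage sketch of the Index API above (indexUsage is illustrative; the hash, offset and CRC values are made up):

package example

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
)

// indexUsage adds one entry, shows that both lookup maps point at the same
// *idxfile.Entry, and then converts the index back to an Idxfile.
func indexUsage() {
	idx := packfile.NewIndex(1) // the size is only a capacity hint

	h := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
	idx.Add(h, 186, 0x1234)

	if e, ok := idx.LookupHash(h); ok {
		fmt.Println(e.Offset, e.CRC32) // 186 4660
	}
	if e, ok := idx.LookupOffset(186); ok {
		fmt.Println(e.Hash == h) // true
	}

	idxf := idx.ToIdxFile() // entry order is not guaranteed here
	fmt.Println(len(idxf.Entries) == idx.Size())
}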
diff --git a/plumbing/format/packfile/index_test.go b/plumbing/format/packfile/index_test.go
new file mode 100644
index 0000000..6714704
--- /dev/null
+++ b/plumbing/format/packfile/index_test.go
@@ -0,0 +1,122 @@
+package packfile
+
+import (
+ "strconv"
+ "strings"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+
+ . "gopkg.in/check.v1"
+)
+
+type IndexSuite struct{}
+
+var _ = Suite(&IndexSuite{})
+
+func (s *IndexSuite) TestLookupOffset(c *C) {
+ idx := NewIndex(0)
+
+ for o1 := 0; o1 < 10000; o1 += 100 {
+ for o2 := 0; o2 < 10000; o2 += 100 {
+ if o2 >= o1 {
+ e, ok := idx.LookupOffset(uint64(o2))
+ c.Assert(ok, Equals, false)
+ c.Assert(e, IsNil)
+ } else {
+ e, ok := idx.LookupOffset(uint64(o2))
+ c.Assert(ok, Equals, true)
+ c.Assert(e, NotNil)
+ c.Assert(e.Hash, Equals, s.toHash(o2))
+ c.Assert(e.Offset, Equals, uint64(o2))
+ }
+ }
+
+ h1 := s.toHash(o1)
+ idx.Add(h1, uint64(o1), 0)
+
+ for o2 := 0; o2 < 10000; o2 += 100 {
+ if o2 > o1 {
+ e, ok := idx.LookupOffset(uint64(o2))
+ c.Assert(ok, Equals, false)
+ c.Assert(e, IsNil)
+ } else {
+ e, ok := idx.LookupOffset(uint64(o2))
+ c.Assert(ok, Equals, true)
+ c.Assert(e, NotNil)
+ c.Assert(e.Hash, Equals, s.toHash(o2))
+ c.Assert(e.Offset, Equals, uint64(o2))
+ }
+ }
+ }
+}
+
+func (s *IndexSuite) TestLookupHash(c *C) {
+ idx := NewIndex(0)
+
+ for o1 := 0; o1 < 10000; o1 += 100 {
+ for o2 := 0; o2 < 10000; o2 += 100 {
+ if o2 >= o1 {
+ e, ok := idx.LookupHash(s.toHash(o2))
+ c.Assert(ok, Equals, false)
+ c.Assert(e, IsNil)
+ } else {
+ e, ok := idx.LookupHash(s.toHash(o2))
+ c.Assert(ok, Equals, true)
+ c.Assert(e, NotNil)
+ c.Assert(e.Hash, Equals, s.toHash(o2))
+ c.Assert(e.Offset, Equals, uint64(o2))
+ }
+ }
+
+ h1 := s.toHash(o1)
+ idx.Add(h1, uint64(o1), 0)
+
+ for o2 := 0; o2 < 10000; o2 += 100 {
+ if o2 > o1 {
+ e, ok := idx.LookupHash(s.toHash(o2))
+ c.Assert(ok, Equals, false)
+ c.Assert(e, IsNil)
+ } else {
+ e, ok := idx.LookupHash(s.toHash(o2))
+ c.Assert(ok, Equals, true)
+ c.Assert(e, NotNil)
+ c.Assert(e.Hash, Equals, s.toHash(o2))
+ c.Assert(e.Offset, Equals, uint64(o2))
+ }
+ }
+ }
+}
+
+func (s *IndexSuite) TestSize(c *C) {
+ idx := NewIndex(0)
+
+ for o1 := 0; o1 < 1000; o1++ {
+ c.Assert(idx.Size(), Equals, o1)
+ h1 := s.toHash(o1)
+ idx.Add(h1, uint64(o1), 0)
+ }
+}
+
+func (s *IndexSuite) TestIdxFileEmpty(c *C) {
+ idx := NewIndex(0)
+ idxf := idx.ToIdxFile()
+ idx2 := NewIndexFromIdxFile(idxf)
+ c.Assert(idx, DeepEquals, idx2)
+}
+
+func (s *IndexSuite) TestIdxFile(c *C) {
+ idx := NewIndex(0)
+ for o1 := 0; o1 < 1000; o1++ {
+ h1 := s.toHash(o1)
+ idx.Add(h1, uint64(o1), 0)
+ }
+
+ idx2 := NewIndexFromIdxFile(idx.ToIdxFile())
+ c.Assert(idx, DeepEquals, idx2)
+}
+
+func (s *IndexSuite) toHash(i int) plumbing.Hash {
+ is := strconv.Itoa(i)
+ padding := strings.Repeat("a", 40-len(is))
+ return plumbing.NewHash(padding + is)
+}
diff --git a/storage/filesystem/internal/dotgit/writers.go b/storage/filesystem/internal/dotgit/writers.go
index 531259b..a7525d4 100644
--- a/storage/filesystem/internal/dotgit/writers.go
+++ b/storage/filesystem/internal/dotgit/writers.go
@@ -20,13 +20,13 @@ import (
// is renamed/moved (depends on the Filesystem implementation) to the final
// location, if the PackWriter is not used, nothing is written
type PackWriter struct {
- Notify func(h plumbing.Hash, i idxfile.Idxfile)
+ Notify func(plumbing.Hash, *packfile.Index)
fs billy.Filesystem
fr, fw billy.File
synced *syncedReader
checksum plumbing.Hash
- index idxfile.Idxfile
+ index *packfile.Index
result chan error
}
@@ -68,14 +68,7 @@ func (w *PackWriter) buildIndex() {
}
w.checksum = checksum
- w.index.PackfileChecksum = checksum
- w.index.Version = idxfile.VersionSupported
-
- offsets := d.Offsets()
- for h, crc := range d.CRCs() {
- w.index.Add(h, uint64(offsets[h]), crc)
- }
-
+ w.index = d.Index()
w.result <- err
}
@@ -122,7 +115,7 @@ func (w *PackWriter) Close() error {
return err
}
- if len(w.index.Entries) == 0 {
+ if w.index == nil || w.index.Size() == 0 {
return w.clean()
}
@@ -152,8 +145,11 @@ func (w *PackWriter) save() error {
}
func (w *PackWriter) encodeIdx(writer io.Writer) error {
+ idx := w.index.ToIdxFile()
+ idx.PackfileChecksum = w.checksum
+ idx.Version = idxfile.VersionSupported
e := idxfile.NewEncoder(writer)
- _, err := e.Encode(&w.index)
+ _, err := e.Encode(idx)
return err
}
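For reference, the conversion done by encodeIdx above can also be written standalone; a sketch (writeIdx is an illustrative helper) assuming the checksum passed in is the one of the pack the index was built from:

package example

import (
	"io"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
	"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
)

// writeIdx serializes a packfile.Index as an idx file: the in-memory index
// is converted to an idxfile.Idxfile, stamped with the packfile checksum and
// the supported version, and then encoded to w.
func writeIdx(w io.Writer, idx *packfile.Index, checksum plumbing.Hash) error {
	idxf := idx.ToIdxFile()
	idxf.PackfileChecksum = checksum
	idxf.Version = idxfile.VersionSupported

	_, err := idxfile.NewEncoder(w).Encode(idxf)
	return err
}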
diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go
index 8bf84f2..e235b33 100644
--- a/storage/filesystem/object.go
+++ b/storage/filesystem/object.go
@@ -18,7 +18,7 @@ import (
type ObjectStorage struct {
dir *dotgit.DotGit
- index map[plumbing.Hash]idx
+ index map[plumbing.Hash]*packfile.Index
}
func newObjectStorage(dir *dotgit.DotGit) (ObjectStorage, error) {
@@ -34,7 +34,7 @@ func (s *ObjectStorage) requireIndex() error {
return nil
}
- s.index = make(map[plumbing.Hash]idx, 0)
+ s.index = make(map[plumbing.Hash]*packfile.Index, 0)
packs, err := s.dir.ObjectPacks()
if err != nil {
return err
@@ -50,14 +50,19 @@ func (s *ObjectStorage) requireIndex() error {
}
func (s *ObjectStorage) loadIdxFile(h plumbing.Hash) error {
- idxfile, err := s.dir.ObjectPackIdx(h)
+ f, err := s.dir.ObjectPackIdx(h)
if err != nil {
return err
}
- defer ioutil.CheckClose(idxfile, &err)
- s.index[h] = make(idx)
- err = s.index[h].Decode(idxfile)
+ defer ioutil.CheckClose(f, &err)
+ idxf := idxfile.NewIdxfile()
+ d := idxfile.NewDecoder(f)
+ if err = d.Decode(idxf); err != nil {
+ return err
+ }
+
+ s.index[h] = packfile.NewIndexFromIdxFile(idxf)
return err
}
@@ -75,11 +80,8 @@ func (s *ObjectStorage) PackfileWriter() (io.WriteCloser, error) {
return nil, err
}
- w.Notify = func(h plumbing.Hash, idxfile idxfile.Idxfile) {
- s.index[h] = make(idx)
- for _, e := range idxfile.Entries {
- s.index[h][e.Hash] = int64(e.Offset)
- }
+ w.Notify = func(h plumbing.Hash, idx *packfile.Index) {
+ s.index[h] = idx
}
return w, nil
@@ -196,15 +198,15 @@ func (s *ObjectStorage) getFromPackfile(h plumbing.Hash) (plumbing.EncodedObject
return nil, err
}
- d.SetOffsets(s.index[pack])
+ d.SetIndex(s.index[pack])
obj, err := d.DecodeObjectAt(offset)
return obj, err
}
func (s *ObjectStorage) findObjectInPackfile(h plumbing.Hash) (plumbing.Hash, int64) {
for packfile, index := range s.index {
- if offset, ok := index[h]; ok {
- return packfile, offset
+ if e, ok := index.LookupHash(h); ok {
+ return packfile, int64(e.Offset)
}
}
@@ -263,23 +265,6 @@ func (s *ObjectStorage) buildPackfileIters(t plumbing.ObjectType, seen map[plumb
return iters, nil
}
-type idx map[plumbing.Hash]int64
-
-func (i idx) Decode(r io.Reader) error {
- idx := &idxfile.Idxfile{}
-
- d := idxfile.NewDecoder(r)
- if err := d.Decode(idx); err != nil {
- return err
- }
-
- for _, e := range idx.Entries {
- i[e.Hash] = int64(e.Offset)
- }
-
- return nil
-}
-
type packfileIter struct {
f billy.File
d *packfile.Decoder
@@ -295,7 +280,7 @@ func NewPackfileIter(f billy.File, t plumbing.ObjectType) (storer.EncodedObjectI
}
func newPackfileIter(f billy.File, t plumbing.ObjectType, seen map[plumbing.Hash]bool,
- index idx) (storer.EncodedObjectIter, error) {
+ index *packfile.Index) (storer.EncodedObjectIter, error) {
s := packfile.NewScanner(f)
_, total, err := s.Header()
if err != nil {
@@ -307,7 +292,7 @@ func newPackfileIter(f billy.File, t plumbing.ObjectType, seen map[plumbing.Hash
return nil, err
}
- d.SetOffsets(index)
+ d.SetIndex(index)
return &packfileIter{
f: f,