Diffstat (limited to 'plumbing')
-rw-r--r--  plumbing/format/commitgraph/commitgraph.go       |  35
-rw-r--r--  plumbing/format/commitgraph/commitgraph_test.go  | 135
-rw-r--r--  plumbing/format/commitgraph/encoder.go           | 189
-rw-r--r--  plumbing/format/commitgraph/file.go               | 259
-rw-r--r--  plumbing/format/commitgraph/memory.go             |  71
-rw-r--r--  plumbing/format/packfile/common.go                |  10
-rw-r--r--  plumbing/format/packfile/packfile.go              | 182
-rw-r--r--  plumbing/format/packfile/scanner.go               | 189
-rw-r--r--  plumbing/format/packfile/scanner_test.go          |  49
9 files changed, 958 insertions(+), 161 deletions(-)
diff --git a/plumbing/format/commitgraph/commitgraph.go b/plumbing/format/commitgraph/commitgraph.go
new file mode 100644
index 0000000..9bf7149
--- /dev/null
+++ b/plumbing/format/commitgraph/commitgraph.go
@@ -0,0 +1,35 @@
+package commitgraph
+
+import (
+ "time"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+)
+
+// Node is a reduced representation of Commit as presented in the commit graph
+// file. It is useful only as an optimization for walking the commit graph.
+type Node struct {
+ // TreeHash is the hash of the root tree of the commit.
+ TreeHash plumbing.Hash
+ // ParentIndexes are the indexes of the parent commits of the commit.
+ ParentIndexes []int
+ // ParentHashes are the hashes of the parent commits of the commit.
+ ParentHashes []plumbing.Hash
+ // Generation is the pre-computed generation number of the commit in the
+ // commit graph, or zero if not available.
+ Generation int
+ // When is the timestamp of the commit.
+ When time.Time
+}
+
+// Index represents a commit graph that allows indexed access to its nodes
+// by commit object hash.
+type Index interface {
+ // GetIndexByHash gets the index in the commit graph from a commit hash, if available.
+ GetIndexByHash(h plumbing.Hash) (int, error)
+ // GetNodeByIndex gets the commit node from the commit graph using an index
+ // obtained from a child node, if available.
+ GetNodeByIndex(i int) (*Node, error)
+ // Hashes returns all the hashes that are available in the index
+ Hashes() []plumbing.Hash
+}
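
For orientation, here is a small standalone sketch (not part of the patch) showing how this interface is meant to be consumed. It opens a commit-graph file the same way the tests below do and walks a commit's first-parent chain; the file path and start hash are placeholders.

package main

import (
    "fmt"
    "os"

    "golang.org/x/exp/mmap"
    "gopkg.in/src-d/go-git.v4/plumbing"
    "gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph"
)

func main() {
    // Placeholder path; Git keeps the file at .git/objects/info/commit-graph.
    reader, err := mmap.Open(".git/objects/info/commit-graph")
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    defer reader.Close()

    index, err := commitgraph.OpenFileIndex(reader)
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }

    // Placeholder hash of the commit to start from.
    hash := plumbing.NewHash("0000000000000000000000000000000000000000")
    i, err := index.GetIndexByHash(hash)
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    for {
        node, err := index.GetNodeByIndex(i)
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        fmt.Println(hash, node.When.UTC())
        if len(node.ParentIndexes) == 0 {
            break // reached a root commit
        }
        // Follow the first parent using the pre-resolved indexes.
        i, hash = node.ParentIndexes[0], node.ParentHashes[0]
    }
}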
diff --git a/plumbing/format/commitgraph/commitgraph_test.go b/plumbing/format/commitgraph/commitgraph_test.go
new file mode 100644
index 0000000..b984142
--- /dev/null
+++ b/plumbing/format/commitgraph/commitgraph_test.go
@@ -0,0 +1,135 @@
+package commitgraph_test
+
+import (
+ "io/ioutil"
+ "os"
+ "path"
+ "testing"
+
+ "golang.org/x/exp/mmap"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/src-d/go-git-fixtures.v3"
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type CommitgraphSuite struct {
+ fixtures.Suite
+}
+
+var _ = Suite(&CommitgraphSuite{})
+
+func testDecodeHelper(c *C, path string) {
+ reader, err := mmap.Open(path)
+ c.Assert(err, IsNil)
+ defer reader.Close()
+ index, err := commitgraph.OpenFileIndex(reader)
+ c.Assert(err, IsNil)
+
+ // Root commit
+ nodeIndex, err := index.GetIndexByHash(plumbing.NewHash("347c91919944a68e9413581a1bc15519550a3afe"))
+ c.Assert(err, IsNil)
+ node, err := index.GetNodeByIndex(nodeIndex)
+ c.Assert(err, IsNil)
+ c.Assert(len(node.ParentIndexes), Equals, 0)
+ c.Assert(len(node.ParentHashes), Equals, 0)
+
+ // Regular commit
+ nodeIndex, err = index.GetIndexByHash(plumbing.NewHash("e713b52d7e13807e87a002e812041f248db3f643"))
+ c.Assert(err, IsNil)
+ node, err = index.GetNodeByIndex(nodeIndex)
+ c.Assert(err, IsNil)
+ c.Assert(len(node.ParentIndexes), Equals, 1)
+ c.Assert(len(node.ParentHashes), Equals, 1)
+ c.Assert(node.ParentHashes[0].String(), Equals, "347c91919944a68e9413581a1bc15519550a3afe")
+
+ // Merge commit
+ nodeIndex, err = index.GetIndexByHash(plumbing.NewHash("b29328491a0682c259bcce28741eac71f3499f7d"))
+ c.Assert(err, IsNil)
+ node, err = index.GetNodeByIndex(nodeIndex)
+ c.Assert(err, IsNil)
+ c.Assert(len(node.ParentIndexes), Equals, 2)
+ c.Assert(len(node.ParentHashes), Equals, 2)
+ c.Assert(node.ParentHashes[0].String(), Equals, "e713b52d7e13807e87a002e812041f248db3f643")
+ c.Assert(node.ParentHashes[1].String(), Equals, "03d2c021ff68954cf3ef0a36825e194a4b98f981")
+
+ // Octopus merge commit
+ nodeIndex, err = index.GetIndexByHash(plumbing.NewHash("6f6c5d2be7852c782be1dd13e36496dd7ad39560"))
+ c.Assert(err, IsNil)
+ node, err = index.GetNodeByIndex(nodeIndex)
+ c.Assert(err, IsNil)
+ c.Assert(len(node.ParentIndexes), Equals, 3)
+ c.Assert(len(node.ParentHashes), Equals, 3)
+ c.Assert(node.ParentHashes[0].String(), Equals, "ce275064ad67d51e99f026084e20827901a8361c")
+ c.Assert(node.ParentHashes[1].String(), Equals, "bb13916df33ed23004c3ce9ed3b8487528e655c1")
+ c.Assert(node.ParentHashes[2].String(), Equals, "a45273fe2d63300e1962a9e26a6b15c276cd7082")
+
+ // Check all hashes
+ hashes := index.Hashes()
+ c.Assert(len(hashes), Equals, 11)
+ c.Assert(hashes[0].String(), Equals, "03d2c021ff68954cf3ef0a36825e194a4b98f981")
+ c.Assert(hashes[10].String(), Equals, "e713b52d7e13807e87a002e812041f248db3f643")
+}
+
+func (s *CommitgraphSuite) TestDecode(c *C) {
+ fixtures.ByTag("commit-graph").Test(c, func(f *fixtures.Fixture) {
+ dotgit := f.DotGit()
+ testDecodeHelper(c, path.Join(dotgit.Root(), "objects", "info", "commit-graph"))
+ })
+}
+
+func (s *CommitgraphSuite) TestReencode(c *C) {
+ fixtures.ByTag("commit-graph").Test(c, func(f *fixtures.Fixture) {
+ dotgit := f.DotGit()
+
+ reader, err := mmap.Open(path.Join(dotgit.Root(), "objects", "info", "commit-graph"))
+ c.Assert(err, IsNil)
+ defer reader.Close()
+ index, err := commitgraph.OpenFileIndex(reader)
+ c.Assert(err, IsNil)
+
+ writer, err := ioutil.TempFile(dotgit.Root(), "commit-graph")
+ c.Assert(err, IsNil)
+ tmpName := writer.Name()
+ defer os.Remove(tmpName)
+ encoder := commitgraph.NewEncoder(writer)
+ err = encoder.Encode(index)
+ c.Assert(err, IsNil)
+ writer.Close()
+
+ testDecodeHelper(c, tmpName)
+ })
+}
+
+func (s *CommitgraphSuite) TestReencodeInMemory(c *C) {
+ fixtures.ByTag("commit-graph").Test(c, func(f *fixtures.Fixture) {
+ dotgit := f.DotGit()
+
+ reader, err := mmap.Open(path.Join(dotgit.Root(), "objects", "info", "commit-graph"))
+ c.Assert(err, IsNil)
+ index, err := commitgraph.OpenFileIndex(reader)
+ c.Assert(err, IsNil)
+ memoryIndex := commitgraph.NewMemoryIndex()
+ for i, hash := range index.Hashes() {
+ node, err := index.GetNodeByIndex(i)
+ c.Assert(err, IsNil)
+ err = memoryIndex.Add(hash, node)
+ c.Assert(err, IsNil)
+ }
+ reader.Close()
+
+ writer, err := ioutil.TempFile(dotgit.Root(), "commit-graph")
+ c.Assert(err, IsNil)
+ tmpName := writer.Name()
+ defer os.Remove(tmpName)
+ encoder := commitgraph.NewEncoder(writer)
+ err = encoder.Encode(memoryIndex)
+ c.Assert(err, IsNil)
+ writer.Close()
+
+ testDecodeHelper(c, tmpName)
+ })
+}
diff --git a/plumbing/format/commitgraph/encoder.go b/plumbing/format/commitgraph/encoder.go
new file mode 100644
index 0000000..501b09e
--- /dev/null
+++ b/plumbing/format/commitgraph/encoder.go
@@ -0,0 +1,189 @@
+package commitgraph
+
+import (
+ "crypto/sha1"
+ "hash"
+ "io"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/utils/binary"
+)
+
+// Encoder writes MemoryIndex structs to an output stream.
+type Encoder struct {
+ io.Writer
+ hash hash.Hash
+}
+
+// NewEncoder returns a new stream encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+ h := sha1.New()
+ mw := io.MultiWriter(w, h)
+ return &Encoder{mw, h}
+}
+
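+// Encode writes the given index into the writer in the commit-graph file format.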
+func (e *Encoder) Encode(idx Index) error {
+ var err error
+
+ // Get all the hashes in the input index
+ hashes := idx.Hashes()
+
+ // Sort the input and prepare the helper structures we'll need for encoding
+ hashToIndex, fanout, largeEdgesCount := e.prepare(idx, hashes)
+
+ chunkSignatures := [][]byte{oidFanoutSignature, oidLookupSignature, commitDataSignature}
+ chunkSizes := []uint64{4 * 256, uint64(len(hashes)) * 20, uint64(len(hashes)) * 36}
+ if largeEdgesCount > 0 {
+ chunkSignatures = append(chunkSignatures, largeEdgeListSignature)
+ chunkSizes = append(chunkSizes, uint64(largeEdgesCount)*4)
+ }
+
+ if err = e.encodeFileHeader(len(chunkSignatures)); err != nil {
+ return err
+ }
+ if err = e.encodeChunkHeaders(chunkSignatures, chunkSizes); err != nil {
+ return err
+ }
+ if err = e.encodeFanout(fanout); err != nil {
+ return err
+ }
+ if err = e.encodeOidLookup(hashes); err != nil {
+ return err
+ }
+ if largeEdges, err := e.encodeCommitData(hashes, hashToIndex, idx); err == nil {
+ if err = e.encodeLargeEdges(largeEdges); err != nil {
+ return err
+ }
+ }
+ if err != nil {
+ return err
+ }
+ return e.encodeChecksum()
+}
+
+func (e *Encoder) prepare(idx Index, hashes []plumbing.Hash) (hashToIndex map[plumbing.Hash]uint32, fanout []uint32, largeEdgesCount uint32) {
+ // Sort the hashes and build our index
+ plumbing.HashesSort(hashes)
+ hashToIndex = make(map[plumbing.Hash]uint32)
+ fanout = make([]uint32, 256)
+ for i, hash := range hashes {
+ hashToIndex[hash] = uint32(i)
+ fanout[hash[0]]++
+ }
+
+ // Convert the fanout to cumulative values
+ for i := 1; i <= 0xff; i++ {
+ fanout[i] += fanout[i-1]
+ }
+
+ // Find out if we will need a large edge table
+ for i := 0; i < len(hashes); i++ {
+ v, _ := idx.GetNodeByIndex(i)
+ if len(v.ParentHashes) > 2 {
+ largeEdgesCount += uint32(len(v.ParentHashes) - 1)
+ break
+ }
+ }
+
+ return
+}
+
+func (e *Encoder) encodeFileHeader(chunkCount int) (err error) {
+ if _, err = e.Write(commitFileSignature); err == nil {
+ _, err = e.Write([]byte{1, 1, byte(chunkCount), 0})
+ }
+ return
+}
+
+func (e *Encoder) encodeChunkHeaders(chunkSignatures [][]byte, chunkSizes []uint64) (err error) {
+ // 8 bytes of file header, 12 bytes for each chunk header and 12 bytes for the terminator
+ offset := uint64(8 + len(chunkSignatures)*12 + 12)
+ for i, signature := range chunkSignatures {
+ if _, err = e.Write(signature); err == nil {
+ err = binary.WriteUint64(e, offset)
+ }
+ if err != nil {
+ return
+ }
+ offset += chunkSizes[i]
+ }
+ if _, err = e.Write(lastSignature); err == nil {
+ err = binary.WriteUint64(e, offset)
+ }
+ return
+}
+
+func (e *Encoder) encodeFanout(fanout []uint32) (err error) {
+ for i := 0; i <= 0xff; i++ {
+ if err = binary.WriteUint32(e, fanout[i]); err != nil {
+ return
+ }
+ }
+ return
+}
+
+func (e *Encoder) encodeOidLookup(hashes []plumbing.Hash) (err error) {
+ for _, hash := range hashes {
+ if _, err = e.Write(hash[:]); err != nil {
+ return err
+ }
+ }
+ return
+}
+
+func (e *Encoder) encodeCommitData(hashes []plumbing.Hash, hashToIndex map[plumbing.Hash]uint32, idx Index) (largeEdges []uint32, err error) {
+ for _, hash := range hashes {
+ origIndex, _ := idx.GetIndexByHash(hash)
+ commitData, _ := idx.GetNodeByIndex(origIndex)
+ if _, err = e.Write(commitData.TreeHash[:]); err != nil {
+ return
+ }
+
+ var parent1, parent2 uint32
+ if len(commitData.ParentHashes) == 0 {
+ parent1 = parentNone
+ parent2 = parentNone
+ } else if len(commitData.ParentHashes) == 1 {
+ parent1 = hashToIndex[commitData.ParentHashes[0]]
+ parent2 = parentNone
+ } else if len(commitData.ParentHashes) == 2 {
+ parent1 = hashToIndex[commitData.ParentHashes[0]]
+ parent2 = hashToIndex[commitData.ParentHashes[1]]
+ } else if len(commitData.ParentHashes) > 2 {
+ parent1 = hashToIndex[commitData.ParentHashes[0]]
+ parent2 = uint32(len(largeEdges)) | parentOctopusUsed
+ for _, parentHash := range commitData.ParentHashes[1:] {
+ largeEdges = append(largeEdges, hashToIndex[parentHash])
+ }
+ largeEdges[len(largeEdges)-1] |= parentLast
+ }
+
+ if err = binary.WriteUint32(e, parent1); err == nil {
+ err = binary.WriteUint32(e, parent2)
+ }
+ if err != nil {
+ return
+ }
+
+ unixTime := uint64(commitData.When.Unix())
+ unixTime |= uint64(commitData.Generation) << 34
+ if err = binary.WriteUint64(e, unixTime); err != nil {
+ return
+ }
+ }
+ return
+}
+
+func (e *Encoder) encodeLargeEdges(largeEdges []uint32) (err error) {
+ for _, parent := range largeEdges {
+ if err = binary.WriteUint32(e, parent); err != nil {
+ return
+ }
+ }
+ return
+}
+
+func (e *Encoder) encodeChecksum() error {
+ _, err := e.Write(e.hash.Sum(nil)[:20])
+ return err
+}
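
The last 8 bytes of each 36-byte commit-data record pack the generation number and the commit time into a single uint64: the generation sits above bit 34, the Unix timestamp in the low 34 bits. A minimal standalone sketch of that packing, mirroring encodeCommitData above and the decoding in file.go below; the mask on the timestamp is defensive and is an addition of this sketch, not of the encoder itself:

package main

import "fmt"

// packGenAndTime mirrors encodeCommitData: the low 34 bits carry the commit's
// Unix timestamp and the bits above them carry the generation number.
// (The mask on unixTime is defensive; the encoder assumes it fits in 34 bits.)
func packGenAndTime(generation int, unixTime int64) uint64 {
    return uint64(generation)<<34 | uint64(unixTime)&0x3FFFFFFFF
}

// unpackGenAndTime mirrors fileIndex.GetNodeByIndex.
func unpackGenAndTime(v uint64) (generation int, unixTime int64) {
    return int(v >> 34), int64(v & 0x3FFFFFFFF)
}

func main() {
    v := packGenAndTime(3, 1257894000)
    fmt.Println(unpackGenAndTime(v)) // 3 1257894000
}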
diff --git a/plumbing/format/commitgraph/file.go b/plumbing/format/commitgraph/file.go
new file mode 100644
index 0000000..dce6243
--- /dev/null
+++ b/plumbing/format/commitgraph/file.go
@@ -0,0 +1,259 @@
+package commitgraph
+
+import (
+ "bytes"
+ encbin "encoding/binary"
+ "errors"
+ "io"
+ "time"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/utils/binary"
+)
+
+var (
+ // ErrUnsupportedVersion is returned by OpenFileIndex when the commit graph
+ // file version is not supported.
+ ErrUnsupportedVersion = errors.New("Unsupported version")
+ // ErrUnsupportedHash is returned by OpenFileIndex when the commit graph
+ // hash function is not supported. Currently only SHA-1 is defined and
+ // supported.
+ ErrUnsupportedHash = errors.New("Unsupported hash algorithm")
+ // ErrMalformedCommitGraphFile is returned by OpenFileIndex when the commit
+ // graph file is corrupted.
+ ErrMalformedCommitGraphFile = errors.New("Malformed commit graph file")
+
+ commitFileSignature = []byte{'C', 'G', 'P', 'H'}
+ oidFanoutSignature = []byte{'O', 'I', 'D', 'F'}
+ oidLookupSignature = []byte{'O', 'I', 'D', 'L'}
+ commitDataSignature = []byte{'C', 'D', 'A', 'T'}
+ largeEdgeListSignature = []byte{'E', 'D', 'G', 'E'}
+ lastSignature = []byte{0, 0, 0, 0}
+
+ parentNone = uint32(0x70000000)
+ parentOctopusUsed = uint32(0x80000000)
+ parentOctopusMask = uint32(0x7fffffff)
+ parentLast = uint32(0x80000000)
+)
+
+type fileIndex struct {
+ reader io.ReaderAt
+ fanout [256]int
+ oidFanoutOffset int64
+ oidLookupOffset int64
+ commitDataOffset int64
+ largeEdgeListOffset int64
+}
+
+// OpenFileIndex opens a serialized commit graph file in the format described at
+// https://github.com/git/git/blob/master/Documentation/technical/commit-graph-format.txt
+func OpenFileIndex(reader io.ReaderAt) (Index, error) {
+ fi := &fileIndex{reader: reader}
+
+ if err := fi.verifyFileHeader(); err != nil {
+ return nil, err
+ }
+ if err := fi.readChunkHeaders(); err != nil {
+ return nil, err
+ }
+ if err := fi.readFanout(); err != nil {
+ return nil, err
+ }
+
+ return fi, nil
+}
+
+func (fi *fileIndex) verifyFileHeader() error {
+ // Verify file signature
+ var signature = make([]byte, 4)
+ if _, err := fi.reader.ReadAt(signature, 0); err != nil {
+ return err
+ }
+ if !bytes.Equal(signature, commitFileSignature) {
+ return ErrMalformedCommitGraphFile
+ }
+
+ // Read and verify the file header
+ var header = make([]byte, 4)
+ if _, err := fi.reader.ReadAt(header, 4); err != nil {
+ return err
+ }
+ if header[0] != 1 {
+ return ErrUnsupportedVersion
+ }
+ if header[1] != 1 {
+ return ErrUnsupportedHash
+ }
+
+ return nil
+}
+
+func (fi *fileIndex) readChunkHeaders() error {
+ var chunkID = make([]byte, 4)
+ for i := 0; ; i++ {
+ chunkHeader := io.NewSectionReader(fi.reader, 8+(int64(i)*12), 12)
+ if _, err := io.ReadAtLeast(chunkHeader, chunkID, 4); err != nil {
+ return err
+ }
+ chunkOffset, err := binary.ReadUint64(chunkHeader)
+ if err != nil {
+ return err
+ }
+
+ if bytes.Equal(chunkID, oidFanoutSignature) {
+ fi.oidFanoutOffset = int64(chunkOffset)
+ } else if bytes.Equal(chunkID, oidLookupSignature) {
+ fi.oidLookupOffset = int64(chunkOffset)
+ } else if bytes.Equal(chunkID, commitDataSignature) {
+ fi.commitDataOffset = int64(chunkOffset)
+ } else if bytes.Equal(chunkID, largeEdgeListSignature) {
+ fi.largeEdgeListOffset = int64(chunkOffset)
+ } else if bytes.Equal(chunkID, lastSignature) {
+ break
+ }
+ }
+
+ if fi.oidFanoutOffset <= 0 || fi.oidLookupOffset <= 0 || fi.commitDataOffset <= 0 {
+ return ErrMalformedCommitGraphFile
+ }
+
+ return nil
+}
+
+func (fi *fileIndex) readFanout() error {
+ fanoutReader := io.NewSectionReader(fi.reader, fi.oidFanoutOffset, 256*4)
+ for i := 0; i < 256; i++ {
+ fanoutValue, err := binary.ReadUint32(fanoutReader)
+ if err != nil {
+ return err
+ }
+ if fanoutValue > 0x7fffffff {
+ return ErrMalformedCommitGraphFile
+ }
+ fi.fanout[i] = int(fanoutValue)
+ }
+ return nil
+}
+
+func (fi *fileIndex) GetIndexByHash(h plumbing.Hash) (int, error) {
+ var oid plumbing.Hash
+
+ // Find the hash in the oid lookup table
+ var low int
+ if h[0] == 0 {
+ low = 0
+ } else {
+ low = fi.fanout[h[0]-1]
+ }
+ high := fi.fanout[h[0]]
+ for low < high {
+ mid := (low + high) >> 1
+ offset := fi.oidLookupOffset + int64(mid)*20
+ if _, err := fi.reader.ReadAt(oid[:], offset); err != nil {
+ return 0, err
+ }
+ cmp := bytes.Compare(h[:], oid[:])
+ if cmp < 0 {
+ high = mid
+ } else if cmp == 0 {
+ return mid, nil
+ } else {
+ low = mid + 1
+ }
+ }
+
+ return 0, plumbing.ErrObjectNotFound
+}
+
+func (fi *fileIndex) GetNodeByIndex(idx int) (*Node, error) {
+ if idx >= fi.fanout[0xff] {
+ return nil, plumbing.ErrObjectNotFound
+ }
+
+ offset := fi.commitDataOffset + int64(idx)*36
+ commitDataReader := io.NewSectionReader(fi.reader, offset, 36)
+
+ treeHash, err := binary.ReadHash(commitDataReader)
+ if err != nil {
+ return nil, err
+ }
+ parent1, err := binary.ReadUint32(commitDataReader)
+ if err != nil {
+ return nil, err
+ }
+ parent2, err := binary.ReadUint32(commitDataReader)
+ if err != nil {
+ return nil, err
+ }
+ genAndTime, err := binary.ReadUint64(commitDataReader)
+ if err != nil {
+ return nil, err
+ }
+
+ var parentIndexes []int
+ if parent2&parentOctopusUsed == parentOctopusUsed {
+ // Octopus merge
+ parentIndexes = []int{int(parent1 & parentOctopusMask)}
+ offset := fi.largeEdgeListOffset + 4*int64(parent2&parentOctopusMask)
+ buf := make([]byte, 4)
+ for {
+ _, err := fi.reader.ReadAt(buf, offset)
+ if err != nil {
+ return nil, err
+ }
+
+ parent := encbin.BigEndian.Uint32(buf)
+ offset += 4
+ parentIndexes = append(parentIndexes, int(parent&parentOctopusMask))
+ if parent&parentLast == parentLast {
+ break
+ }
+ }
+ } else if parent2 != parentNone {
+ parentIndexes = []int{int(parent1 & parentOctopusMask), int(parent2 & parentOctopusMask)}
+ } else if parent1 != parentNone {
+ parentIndexes = []int{int(parent1 & parentOctopusMask)}
+ }
+
+ parentHashes, err := fi.getHashesFromIndexes(parentIndexes)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Node{
+ TreeHash: treeHash,
+ ParentIndexes: parentIndexes,
+ ParentHashes: parentHashes,
+ Generation: int(genAndTime >> 34),
+ When: time.Unix(int64(genAndTime&0x3FFFFFFFF), 0),
+ }, nil
+}
+
+func (fi *fileIndex) getHashesFromIndexes(indexes []int) ([]plumbing.Hash, error) {
+ hashes := make([]plumbing.Hash, len(indexes))
+
+ for i, idx := range indexes {
+ if idx >= fi.fanout[0xff] {
+ return nil, ErrMalformedCommitGraphFile
+ }
+
+ offset := fi.oidLookupOffset + int64(idx)*20
+ if _, err := fi.reader.ReadAt(hashes[i][:], offset); err != nil {
+ return nil, err
+ }
+ }
+
+ return hashes, nil
+}
+
+// Hashes returns all the hashes that are available in the index
+func (fi *fileIndex) Hashes() []plumbing.Hash {
+ hashes := make([]plumbing.Hash, fi.fanout[0xff])
+ for i := 0; i < int(fi.fanout[0xff]); i++ {
+ offset := fi.oidLookupOffset + int64(i)*20
+ if n, err := fi.reader.ReadAt(hashes[i][:], offset); err != nil || n < 20 {
+ return nil
+ }
+ }
+ return hashes
+}
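
One more property of the format worth noting: the file ends with a 20-byte SHA-1 of everything that precedes it (that is what the encoder's encodeChecksum writes). A standalone sketch, not part of the patch, that checks this trailer:

package main

import (
    "bytes"
    "crypto/sha1"
    "errors"
    "fmt"
    "io/ioutil"
    "os"
)

// verifyTrailingChecksum recomputes the SHA-1 over the body of a commit-graph
// file and compares it with the 20-byte trailer written by the encoder.
func verifyTrailingChecksum(path string) error {
    data, err := ioutil.ReadFile(path)
    if err != nil {
        return err
    }
    if len(data) <= 20 {
        return errors.New("file too short to contain a checksum")
    }
    sum := sha1.Sum(data[:len(data)-20])
    if !bytes.Equal(sum[:], data[len(data)-20:]) {
        return errors.New("checksum mismatch")
    }
    return nil
}

func main() {
    // Placeholder path.
    if err := verifyTrailingChecksum(".git/objects/info/commit-graph"); err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    fmt.Println("checksum OK")
}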
diff --git a/plumbing/format/commitgraph/memory.go b/plumbing/format/commitgraph/memory.go
new file mode 100644
index 0000000..316bc6d
--- /dev/null
+++ b/plumbing/format/commitgraph/memory.go
@@ -0,0 +1,71 @@
+package commitgraph
+
+import (
+ "gopkg.in/src-d/go-git.v4/plumbing"
+)
+
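+// MemoryIndex provides an in-memory commit graph representation that can be
+// built up incrementally with Add and written out with Encoder.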
+type MemoryIndex struct {
+ commitData []*Node
+ indexMap map[plumbing.Hash]int
+}
+
+// NewMemoryIndex creates an in-memory commit graph representation
+func NewMemoryIndex() *MemoryIndex {
+ return &MemoryIndex{
+ indexMap: make(map[plumbing.Hash]int),
+ }
+}
+
+// GetIndexByHash gets the index in the commit graph from a commit hash, if available
+func (mi *MemoryIndex) GetIndexByHash(h plumbing.Hash) (int, error) {
+ i, ok := mi.indexMap[h]
+ if ok {
+ return i, nil
+ }
+
+ return 0, plumbing.ErrObjectNotFound
+}
+
+// GetNodeByIndex gets the commit node from the commit graph using an index
+// obtained from a child node, if available
+func (mi *MemoryIndex) GetNodeByIndex(i int) (*Node, error) {
+ if int(i) >= len(mi.commitData) {
+ return nil, plumbing.ErrObjectNotFound
+ }
+
+ node := mi.commitData[i]
+
+ // Map parent hashes to parent indexes
+ if node.ParentIndexes == nil {
+ parentIndexes := make([]int, len(node.ParentHashes))
+ for i, parentHash := range node.ParentHashes {
+ var err error
+ if parentIndexes[i], err = mi.GetIndexByHash(parentHash); err != nil {
+ return nil, err
+ }
+ }
+ node.ParentIndexes = parentIndexes
+ }
+
+ return node, nil
+}
+
+// Hashes returns all the hashes that are available in the index
+func (mi *MemoryIndex) Hashes() []plumbing.Hash {
+ hashes := make([]plumbing.Hash, 0, len(mi.indexMap))
+ for k := range mi.indexMap {
+ hashes = append(hashes, k)
+ }
+ return hashes
+}
+
+// Add adds new node to the memory index
+func (mi *MemoryIndex) Add(hash plumbing.Hash, node *Node) error {
+ // The parent indexes are calculated lazily in GetNodeByIndex, which allows
+ // adding nodes out of order as long as all parents are eventually resolved.
+ node.ParentIndexes = nil
+ mi.indexMap[hash] = len(mi.commitData)
+ mi.commitData = append(mi.commitData, node)
+ return nil
+}
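
To show how MemoryIndex, Node and Encoder fit together, here is a hedged sketch (not part of the patch) that builds a commit graph for an existing repository and writes it out. It uses go-git v4 calls (PlainOpen, CommitObjects) that exist, but the workflow itself is only illustrative; generation numbers are left at zero, which the format treats as "not available".

package main

import (
    "fmt"
    "os"

    git "gopkg.in/src-d/go-git.v4"
    "gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph"
    "gopkg.in/src-d/go-git.v4/plumbing/object"
)

// writeCommitGraph walks every commit of the repository at repoPath, builds an
// in-memory index and encodes it into outPath.
func writeCommitGraph(repoPath, outPath string) error {
    repo, err := git.PlainOpen(repoPath)
    if err != nil {
        return err
    }
    iter, err := repo.CommitObjects()
    if err != nil {
        return err
    }

    idx := commitgraph.NewMemoryIndex()
    err = iter.ForEach(func(c *object.Commit) error {
        // ParentIndexes are resolved lazily by MemoryIndex, so commits may be
        // added in any order as long as every parent is added eventually.
        return idx.Add(c.Hash, &commitgraph.Node{
            TreeHash:     c.TreeHash,
            ParentHashes: c.ParentHashes,
            When:         c.Committer.When,
        })
    })
    if err != nil {
        return err
    }

    f, err := os.Create(outPath)
    if err != nil {
        return err
    }
    defer f.Close()
    return commitgraph.NewEncoder(f).Encode(idx)
}

func main() {
    // Placeholder paths.
    if err := writeCommitGraph(".", ".git/objects/info/commit-graph"); err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
}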
diff --git a/plumbing/format/packfile/common.go b/plumbing/format/packfile/common.go
index 0d9ed54..f82c1ab 100644
--- a/plumbing/format/packfile/common.go
+++ b/plumbing/format/packfile/common.go
@@ -2,6 +2,7 @@ package packfile
import (
"bytes"
+ "compress/zlib"
"io"
"sync"
@@ -66,3 +67,12 @@ var bufPool = sync.Pool{
return bytes.NewBuffer(nil)
},
}
+
+var zlibInitBytes = []byte{0x78, 0x9c, 0x01, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01}
+
+var zlibReaderPool = sync.Pool{
+ New: func() interface{} {
+ r, _ := zlib.NewReader(bytes.NewReader(zlibInitBytes))
+ return r
+ },
+}
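
The pool above exists because compress/zlib cannot construct a reader without a valid stream: zlibInitBytes is a minimal empty zlib stream used only to satisfy zlib.NewReader, and every user is expected to retarget the pooled reader onto real data before reading. A sketch of that life cycle (illustrative only, written as if it lived in package packfile so it can reference the unexported pool; the scanner's copyObject further down does essentially this):

package packfile

import (
    "bytes"
    "compress/zlib"
    "io"
)

// inflate decompresses an in-memory zlib stream into w, reusing a reader from
// zlibReaderPool instead of allocating a new one per call.
func inflate(w io.Writer, compressed []byte) error {
    zr := zlibReaderPool.Get().(io.ReadCloser)
    defer zlibReaderPool.Put(zr)

    // Point the pooled reader at the real data; readers created by
    // zlib.NewReader always implement zlib.Resetter.
    if err := zr.(zlib.Resetter).Reset(bytes.NewReader(compressed), nil); err != nil {
        return err
    }

    _, err := io.Copy(w, zr)
    return err
}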
diff --git a/plumbing/format/packfile/packfile.go b/plumbing/format/packfile/packfile.go
index c09286e..f528073 100644
--- a/plumbing/format/packfile/packfile.go
+++ b/plumbing/format/packfile/packfile.go
@@ -76,20 +76,18 @@ func (p *Packfile) Get(h plumbing.Hash) (plumbing.EncodedObject, error) {
return nil, err
}
- return p.GetByOffset(offset)
+ return p.objectAtOffset(offset, h)
}
-// GetByOffset retrieves the encoded object from the packfile with the given
+// GetByOffset retrieves the encoded object from the packfile at the given
// offset.
func (p *Packfile) GetByOffset(o int64) (plumbing.EncodedObject, error) {
hash, err := p.FindHash(o)
- if err == nil {
- if obj, ok := p.deltaBaseCache.Get(hash); ok {
- return obj, nil
- }
+ if err != nil {
+ return nil, err
}
- return p.objectAtOffset(o)
+ return p.objectAtOffset(o, hash)
}
// GetSizeByOffset retrieves the size of the encoded object from the
@@ -122,6 +120,13 @@ func (p *Packfile) nextObjectHeader() (*ObjectHeader, error) {
return h, err
}
+func (p *Packfile) getDeltaObjectSize(buf *bytes.Buffer) int64 {
+ delta := buf.Bytes()
+ _, delta = decodeLEB128(delta) // skip src size
+ sz, _ := decodeLEB128(delta)
+ return int64(sz)
+}
+
func (p *Packfile) getObjectSize(h *ObjectHeader) (int64, error) {
switch h.Type {
case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
@@ -135,10 +140,7 @@ func (p *Packfile) getObjectSize(h *ObjectHeader) (int64, error) {
return 0, err
}
- delta := buf.Bytes()
- _, delta = decodeLEB128(delta) // skip src size
- sz, _ := decodeLEB128(delta)
- return int64(sz), nil
+ return p.getDeltaObjectSize(buf), nil
default:
return 0, ErrInvalidObject.AddDetails("type %q", h.Type)
}
@@ -176,10 +178,16 @@ func (p *Packfile) getObjectType(h *ObjectHeader) (typ plumbing.ObjectType, err
err = ErrInvalidObject.AddDetails("type %q", h.Type)
}
+ p.offsetToType[h.Offset] = typ
+
return
}
-func (p *Packfile) objectAtOffset(offset int64) (plumbing.EncodedObject, error) {
+func (p *Packfile) objectAtOffset(offset int64, hash plumbing.Hash) (plumbing.EncodedObject, error) {
+ if obj, ok := p.cacheGet(hash); ok {
+ return obj, nil
+ }
+
h, err := p.objectHeaderAtOffset(offset)
if err != nil {
if err == io.EOF || isInvalid(err) {
@@ -188,27 +196,54 @@ func (p *Packfile) objectAtOffset(offset int64) (plumbing.EncodedObject, error)
return nil, err
}
+ return p.getNextObject(h, hash)
+}
+
+func (p *Packfile) getNextObject(h *ObjectHeader, hash plumbing.Hash) (plumbing.EncodedObject, error) {
+ var err error
+
// If we have no filesystem, we will return a MemoryObject instead
// of an FSObject.
if p.fs == nil {
- return p.getNextObject(h)
+ return p.getNextMemoryObject(h)
}
- // If the object is not a delta and it's small enough then read it
- // completely into memory now since it is already read from disk
- // into buffer anyway.
- if h.Length <= smallObjectThreshold && h.Type != plumbing.OFSDeltaObject && h.Type != plumbing.REFDeltaObject {
- return p.getNextObject(h)
- }
+ // If the object is small enough, read it completely into memory now, since
+ // it has already been read from disk into a buffer anyway. For delta objects
+ // we want to perform the optimization too, but we have to be careful about
+ // applying small deltas to big objects.
+ var size int64
+ if h.Length <= smallObjectThreshold {
+ if h.Type != plumbing.OFSDeltaObject && h.Type != plumbing.REFDeltaObject {
+ return p.getNextMemoryObject(h)
+ }
- hash, err := p.FindHash(h.Offset)
- if err != nil {
- return nil, err
- }
+ // For delta objects we read the delta data and apply the small object
+ // optimization only if the expanded version of the object still meets
+ // the small object threshold condition.
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ if _, _, err := p.s.NextObject(buf); err != nil {
+ return nil, err
+ }
+ defer bufPool.Put(buf)
- size, err := p.getObjectSize(h)
- if err != nil {
- return nil, err
+ size = p.getDeltaObjectSize(buf)
+ if size <= smallObjectThreshold {
+ var obj = new(plumbing.MemoryObject)
+ obj.SetSize(size)
+ if h.Type == plumbing.REFDeltaObject {
+ err = p.fillREFDeltaObjectContentWithBuffer(obj, h.Reference, buf)
+ } else {
+ err = p.fillOFSDeltaObjectContentWithBuffer(obj, h.OffsetReference, buf)
+ }
+ return obj, err
+ }
+ } else {
+ size, err = p.getObjectSize(h)
+ if err != nil {
+ return nil, err
+ }
}
typ, err := p.getObjectType(h)
@@ -231,25 +266,14 @@ func (p *Packfile) objectAtOffset(offset int64) (plumbing.EncodedObject, error)
}
func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) {
- ref, err := p.FindHash(offset)
- if err == nil {
- obj, ok := p.cacheGet(ref)
- if ok {
- reader, err := obj.Reader()
- if err != nil {
- return nil, err
- }
-
- return reader, nil
- }
- }
-
h, err := p.objectHeaderAtOffset(offset)
if err != nil {
return nil, err
}
- obj, err := p.getNextObject(h)
+ // getObjectContent is called from FSObject, so we have to explicitly
+ // request a memory object here to avoid a recursive cycle.
+ obj, err := p.getNextMemoryObject(h)
if err != nil {
return nil, err
}
@@ -257,7 +281,7 @@ func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) {
return obj.Reader()
}
-func (p *Packfile) getNextObject(h *ObjectHeader) (plumbing.EncodedObject, error) {
+func (p *Packfile) getNextMemoryObject(h *ObjectHeader) (plumbing.EncodedObject, error) {
var obj = new(plumbing.MemoryObject)
obj.SetSize(h.Length)
obj.SetType(h.Type)
@@ -278,6 +302,8 @@ func (p *Packfile) getNextObject(h *ObjectHeader) (plumbing.EncodedObject, error
return nil, err
}
+ p.offsetToType[h.Offset] = obj.Type()
+
return obj, nil
}
@@ -300,6 +326,13 @@ func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plu
if err != nil {
return err
}
+ defer bufPool.Put(buf)
+
+ return p.fillREFDeltaObjectContentWithBuffer(obj, ref, buf)
+}
+
+func (p *Packfile) fillREFDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, ref plumbing.Hash, buf *bytes.Buffer) error {
+ var err error
base, ok := p.cacheGet(ref)
if !ok {
@@ -312,30 +345,31 @@ func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plu
obj.SetType(base.Type())
err = ApplyDelta(obj, base, buf.Bytes())
p.cachePut(obj)
- bufPool.Put(buf)
return err
}
func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset int64) error {
- buf := bytes.NewBuffer(nil)
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Reset()
_, _, err := p.s.NextObject(buf)
if err != nil {
return err
}
+ defer bufPool.Put(buf)
- var base plumbing.EncodedObject
- var ok bool
+ return p.fillOFSDeltaObjectContentWithBuffer(obj, offset, buf)
+}
+
+func (p *Packfile) fillOFSDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, offset int64, buf *bytes.Buffer) error {
hash, err := p.FindHash(offset)
- if err == nil {
- base, ok = p.cacheGet(hash)
+ if err != nil {
+ return err
}
- if !ok {
- base, err = p.GetByOffset(offset)
- if err != nil {
- return err
- }
+ base, err := p.objectAtOffset(offset, hash)
+ if err != nil {
+ return err
}
obj.SetType(base.Type())
@@ -442,14 +476,50 @@ func (i *objectIter) Next() (plumbing.EncodedObject, error) {
return nil, err
}
- obj, err := i.p.GetByOffset(int64(e.Offset))
+ if i.typ != plumbing.AnyObject {
+ if typ, ok := i.p.offsetToType[int64(e.Offset)]; ok {
+ if typ != i.typ {
+ continue
+ }
+ } else if obj, ok := i.p.cacheGet(e.Hash); ok {
+ if obj.Type() != i.typ {
+ i.p.offsetToType[int64(e.Offset)] = obj.Type()
+ continue
+ }
+ return obj, nil
+ } else {
+ h, err := i.p.objectHeaderAtOffset(int64(e.Offset))
+ if err != nil {
+ return nil, err
+ }
+
+ if h.Type == plumbing.REFDeltaObject || h.Type == plumbing.OFSDeltaObject {
+ typ, err := i.p.getObjectType(h)
+ if err != nil {
+ return nil, err
+ }
+ if typ != i.typ {
+ i.p.offsetToType[int64(e.Offset)] = typ
+ continue
+ }
+ // getObjectType will seek in the file so we cannot use getNextObject safely
+ return i.p.objectAtOffset(int64(e.Offset), e.Hash)
+ } else {
+ if h.Type != i.typ {
+ i.p.offsetToType[int64(e.Offset)] = h.Type
+ continue
+ }
+ return i.p.getNextObject(h, e.Hash)
+ }
+ }
+ }
+
+ obj, err := i.p.objectAtOffset(int64(e.Offset), e.Hash)
if err != nil {
return nil, err
}
- if i.typ == plumbing.AnyObject || obj.Type() == i.typ {
- return obj, nil
- }
+ return obj, nil
}
}
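
For reference, getDeltaObjectSize above relies on the layout of a git delta stream: it starts with two varints, the source (base) size followed by the target (expanded) size, each encoded as 7-bit groups with the high bit as a continuation flag. A standalone sketch of that decoding; readVarint is an illustrative reimplementation of what the package's decodeLEB128 does, not the function itself:

package main

import "fmt"

// readVarint decodes one little-endian base-128 varint: each byte contributes
// its low 7 bits, and the high bit signals that more bytes follow.
func readVarint(b []byte) (uint64, []byte) {
    var v uint64
    var shift uint
    for i, c := range b {
        v |= uint64(c&0x7f) << shift
        if c&0x80 == 0 {
            return v, b[i+1:]
        }
        shift += 7
    }
    return v, nil
}

// readDeltaSizes reads the two sizes at the head of a delta stream.
func readDeltaSizes(delta []byte) (srcSize, dstSize uint64) {
    srcSize, delta = readVarint(delta)
    dstSize, _ = readVarint(delta)
    return
}

func main() {
    // A delta whose base is 200 bytes and whose expanded result is 300 bytes.
    delta := []byte{0xC8, 0x01, 0xAC, 0x02 /* ...delta instructions... */}
    fmt.Println(readDeltaSizes(delta)) // 200 300
}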
diff --git a/plumbing/format/packfile/scanner.go b/plumbing/format/packfile/scanner.go
index 614b0d1..7b44192 100644
--- a/plumbing/format/packfile/scanner.go
+++ b/plumbing/format/packfile/scanner.go
@@ -39,8 +39,7 @@ type ObjectHeader struct {
}
type Scanner struct {
- r reader
- zr readerResetter
+ r *scannerReader
crc hash.Hash32
// pendingObject is used to detect if an object has been read, or still
@@ -56,19 +55,27 @@ type Scanner struct {
// NewScanner returns a new Scanner based on a reader. If the given reader
// implements io.ReadSeeker, the Scanner will also be Seekable
func NewScanner(r io.Reader) *Scanner {
- seeker, ok := r.(io.ReadSeeker)
- if !ok {
- seeker = &trackableReader{Reader: r}
- }
+ _, ok := r.(io.ReadSeeker)
crc := crc32.NewIEEE()
return &Scanner{
- r: newTeeReader(newByteReadSeeker(seeker), crc),
+ r: newScannerReader(r, crc),
crc: crc,
IsSeekable: ok,
}
}
+func (s *Scanner) Reset(r io.Reader) {
+ _, ok := r.(io.ReadSeeker)
+
+ s.r.Reset(r)
+ s.crc.Reset()
+ s.IsSeekable = ok
+ s.pendingObject = nil
+ s.version = 0
+ s.objects = 0
+}
+
// Header reads the whole packfile header (signature, version and object count).
// It returns the version and the object count and performs checks on the
// validity of the signature and the version fields.
@@ -182,8 +189,7 @@ func (s *Scanner) NextObjectHeader() (*ObjectHeader, error) {
// nextObjectHeader returns the ObjectHeader for the next object in the reader
// without the Offset field
func (s *Scanner) nextObjectHeader() (*ObjectHeader, error) {
- defer s.Flush()
-
+ s.r.Flush()
s.crc.Reset()
h := &ObjectHeader{}
@@ -304,35 +310,29 @@ func (s *Scanner) readLength(first byte) (int64, error) {
// NextObject writes the content of the next object into the reader, returns
// the number of bytes written, the CRC32 of the content and an error, if any
func (s *Scanner) NextObject(w io.Writer) (written int64, crc32 uint32, err error) {
- defer s.crc.Reset()
-
s.pendingObject = nil
written, err = s.copyObject(w)
- s.Flush()
+
+ s.r.Flush()
crc32 = s.crc.Sum32()
+ s.crc.Reset()
+
return
}
// copyObject reads and writes a non-deltified object
// from its zlib stream in an object entry in the packfile.
func (s *Scanner) copyObject(w io.Writer) (n int64, err error) {
- if s.zr == nil {
- var zr io.ReadCloser
- zr, err = zlib.NewReader(s.r)
- if err != nil {
- return 0, fmt.Errorf("zlib initialization error: %s", err)
- }
+ zr := zlibReaderPool.Get().(io.ReadCloser)
+ defer zlibReaderPool.Put(zr)
- s.zr = zr.(readerResetter)
- } else {
- if err = s.zr.Reset(s.r, nil); err != nil {
- return 0, fmt.Errorf("zlib reset error: %s", err)
- }
+ if err = zr.(zlib.Resetter).Reset(s.r, nil); err != nil {
+ return 0, fmt.Errorf("zlib reset error: %s", err)
}
- defer ioutil.CheckClose(s.zr, &err)
+ defer ioutil.CheckClose(zr, &err)
buf := byteSlicePool.Get().([]byte)
- n, err = io.CopyBuffer(w, s.zr, buf)
+ n, err = io.CopyBuffer(w, zr, buf)
byteSlicePool.Put(buf)
return
}
@@ -378,110 +378,89 @@ func (s *Scanner) Close() error {
return err
}
-// Flush finishes writing the buffer to crc hasher in case we are using
-// a teeReader. Otherwise it is a no-op.
+// Flush is a no-op (deprecated)
func (s *Scanner) Flush() error {
- tee, ok := s.r.(*teeReader)
- if ok {
- return tee.Flush()
- }
return nil
}
-type trackableReader struct {
- count int64
- io.Reader
+// scannerReader has the following characteristics:
+// - Provides an io.ReadSeeker implementation on top of bufio.Reader, when the
+// underlying reader supports it.
+// - Keeps track of the current read position, for when the underlying reader
+// isn't an io.ReadSeeker, but we still want to know the current offset.
+// - Writes what it reads to the hash writer, with the aid of a small buffer.
+// The buffer helps avoid a performance penalty for performing small writes
+// to the crc32 hash writer.
+type scannerReader struct {
+ reader io.Reader
+ crc io.Writer
+ rbuf *bufio.Reader
+ wbuf *bufio.Writer
+ offset int64
}
-// Read reads up to len(p) bytes into p.
-func (r *trackableReader) Read(p []byte) (n int, err error) {
- n, err = r.Reader.Read(p)
- r.count += int64(n)
-
- return
-}
-
-// Seek only supports io.SeekCurrent, any other operation fails
-func (r *trackableReader) Seek(offset int64, whence int) (int64, error) {
- if whence != io.SeekCurrent {
- return -1, ErrSeekNotSupported
+func newScannerReader(r io.Reader, h io.Writer) *scannerReader {
+ sr := &scannerReader{
+ rbuf: bufio.NewReader(nil),
+ wbuf: bufio.NewWriterSize(nil, 64),
+ crc: h,
}
+ sr.Reset(r)
- return r.count, nil
+ return sr
}
-func newByteReadSeeker(r io.ReadSeeker) *bufferedSeeker {
- return &bufferedSeeker{
- r: r,
- Reader: *bufio.NewReader(r),
- }
-}
+func (r *scannerReader) Reset(reader io.Reader) {
+ r.reader = reader
+ r.rbuf.Reset(r.reader)
+ r.wbuf.Reset(r.crc)
-type bufferedSeeker struct {
- r io.ReadSeeker
- bufio.Reader
-}
-
-func (r *bufferedSeeker) Seek(offset int64, whence int) (int64, error) {
- if whence == io.SeekCurrent && offset == 0 {
- current, err := r.r.Seek(offset, whence)
- if err != nil {
- return current, err
- }
-
- return current - int64(r.Buffered()), nil
+ r.offset = 0
+ if seeker, ok := r.reader.(io.ReadSeeker); ok {
+ r.offset, _ = seeker.Seek(0, io.SeekCurrent)
}
-
- defer r.Reader.Reset(r.r)
- return r.r.Seek(offset, whence)
}
-type readerResetter interface {
- io.ReadCloser
- zlib.Resetter
-}
+func (r *scannerReader) Read(p []byte) (n int, err error) {
+ n, err = r.rbuf.Read(p)
-type reader interface {
- io.Reader
- io.ByteReader
- io.Seeker
+ r.offset += int64(n)
+ if _, err := r.wbuf.Write(p[:n]); err != nil {
+ return n, err
+ }
+ return
}
-type teeReader struct {
- reader
- w hash.Hash32
- bufWriter *bufio.Writer
+func (r *scannerReader) ReadByte() (b byte, err error) {
+ b, err = r.rbuf.ReadByte()
+ if err == nil {
+ r.offset++
+ return b, r.wbuf.WriteByte(b)
+ }
+ return
}
-func newTeeReader(r reader, h hash.Hash32) *teeReader {
- return &teeReader{
- reader: r,
- w: h,
- bufWriter: bufio.NewWriter(h),
- }
+func (r *scannerReader) Flush() error {
+ return r.wbuf.Flush()
}
-func (r *teeReader) Read(p []byte) (n int, err error) {
- r.Flush()
+// Seek seeks to a location. If the underlying reader is not an io.ReadSeeker,
+// then only whence=io.SeekCurrent is supported; any other operation fails.
+func (r *scannerReader) Seek(offset int64, whence int) (int64, error) {
+ var err error
- n, err = r.reader.Read(p)
- if n > 0 {
- if n, err := r.w.Write(p[:n]); err != nil {
- return n, err
+ if seeker, ok := r.reader.(io.ReadSeeker); !ok {
+ if whence != io.SeekCurrent || offset != 0 {
+ return -1, ErrSeekNotSupported
+ }
+ } else {
+ if whence == io.SeekCurrent && offset == 0 {
+ return r.offset, nil
}
- }
- return
-}
-func (r *teeReader) ReadByte() (b byte, err error) {
- b, err = r.reader.ReadByte()
- if err == nil {
- return b, r.bufWriter.WriteByte(b)
+ r.offset, err = seeker.Seek(offset, whence)
+ r.rbuf.Reset(r.reader)
}
- return
-}
-
-func (r *teeReader) Flush() (err error) {
- return r.bufWriter.Flush()
+ return r.offset, err
}
diff --git a/plumbing/format/packfile/scanner_test.go b/plumbing/format/packfile/scanner_test.go
index 091b457..a401d6d 100644
--- a/plumbing/format/packfile/scanner_test.go
+++ b/plumbing/format/packfile/scanner_test.go
@@ -135,6 +135,55 @@ func (s *ScannerSuite) TestSeekObjectHeaderNonSeekable(c *C) {
c.Assert(err, Equals, ErrSeekNotSupported)
}
+func (s *ScannerSuite) TestReaderReset(c *C) {
+ r := fixtures.Basic().One().Packfile()
+ p := NewScanner(r)
+
+ version, objects, err := p.Header()
+ c.Assert(version, Equals, VersionSupported)
+ c.Assert(objects, Equals, uint32(31))
+
+ h, err := p.SeekObjectHeader(expectedHeadersOFS[0].Offset)
+ c.Assert(err, IsNil)
+ c.Assert(h, DeepEquals, &expectedHeadersOFS[0])
+
+ p.Reset(r)
+ c.Assert(p.pendingObject, IsNil)
+ c.Assert(p.version, Equals, uint32(0))
+ c.Assert(p.objects, Equals, uint32(0))
+ c.Assert(p.r.reader, Equals, r)
+ c.Assert(p.r.offset > expectedHeadersOFS[0].Offset, Equals, true)
+
+ p.Reset(bytes.NewReader(nil))
+ c.Assert(p.r.offset, Equals, int64(0))
+}
+
+func (s *ScannerSuite) TestReaderResetSeeks(c *C) {
+ r := fixtures.Basic().One().Packfile()
+
+ // seekable
+ p := NewScanner(r)
+ c.Assert(p.IsSeekable, Equals, true)
+ h, err := p.SeekObjectHeader(expectedHeadersOFS[0].Offset)
+ c.Assert(err, IsNil)
+ c.Assert(h, DeepEquals, &expectedHeadersOFS[0])
+
+ // reset with seekable
+ p.Reset(r)
+ c.Assert(p.IsSeekable, Equals, true)
+ h, err = p.SeekObjectHeader(expectedHeadersOFS[1].Offset)
+ c.Assert(err, IsNil)
+ c.Assert(h, DeepEquals, &expectedHeadersOFS[1])
+
+ // reset with non-seekable
+ f := fixtures.Basic().ByTag("ref-delta").One()
+ p.Reset(io.MultiReader(f.Packfile()))
+ c.Assert(p.IsSeekable, Equals, false)
+
+ _, err = p.SeekObjectHeader(expectedHeadersOFS[4].Offset)
+ c.Assert(err, Equals, ErrSeekNotSupported)
+}
+
var expectedHeadersOFS = []ObjectHeader{
{Type: plumbing.CommitObject, Offset: 12, Length: 254},
{Type: plumbing.OFSDeltaObject, Offset: 186, Length: 93, OffsetReference: 12},