author     Andrew Thornton <art27@cantab.net>  2023-10-08 15:49:51 +0100
committer  Andrew Thornton <art27@cantab.net>  2023-10-12 16:40:38 +0100
commit     69b88d9bda44ebfe1d56a7624b956d9e20818c0e (patch)
tree       b6a17322552d0cdade956d3c4e0d6c14e49cf60e /plumbing
parent     623c6df4280e22f88f4aabc3c0a8ae2808d33a1b (diff)
download   go-git-69b88d9bda44ebfe1d56a7624b956d9e20818c0e.tar.gz
plumbing: commitgraph, Add generation v2 support
This PR adds support for generation v2 data in the commit-graph, along with a couple of new walkers to match the --date-order and related options on git log. It also fixes a bug in the chain code and adds more tests.

Signed-off-by: Andrew Thornton <art27@cantab.net>
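For orientation, here is a minimal usage sketch of the new walkers, assuming a repository whose .git directory already contains a commit-graph. The path and starting hash are placeholders; OpenChainOrFileIndex, NewGraphCommitNodeIndex and the iterator constructors are the APIs touched or added by this change.

```go
package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5/osfs"
	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/cache"
	commitgraph "github.com/go-git/go-git/v5/plumbing/format/commitgraph/v2"
	cgobject "github.com/go-git/go-git/v5/plumbing/object/commitgraph"
	"github.com/go-git/go-git/v5/storage/filesystem"
)

func main() {
	// Placeholder path and hash; in the tests below a fixture supplies these.
	dotgit := osfs.New("/path/to/repo/.git")
	storer := filesystem.NewStorage(dotgit, cache.NewObjectLRUDefault())

	// Open either a single commit-graph file or a commit-graph chain.
	index, err := commitgraph.OpenChainOrFileIndex(dotgit)
	if err != nil {
		panic(err)
	}
	defer index.Close()

	nodeIndex := cgobject.NewGraphCommitNodeIndex(index, storer)
	head, err := nodeIndex.Get(plumbing.NewHash("ec6f456c0e8c7058a29611429965aa05c190b54b"))
	if err != nil {
		panic(err)
	}

	// Walk the history in --date-order; NewCommitNodeIterTopoOrder and
	// NewCommitNodeIterAuthorDateOrder are used the same way.
	iter := cgobject.NewCommitNodeIterDateOrder(head, nil, nil)
	_ = iter.ForEach(func(n cgobject.CommitNode) error {
		fmt.Println(n.ID())
		return nil
	})
}
```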
Diffstat (limited to 'plumbing')
-rw-r--r--  plumbing/format/commitgraph/v2/chunk.go                          7
-rw-r--r--  plumbing/format/commitgraph/v2/commitgraph.go                   17
-rw-r--r--  plumbing/format/commitgraph/v2/commitgraph_test.go              35
-rw-r--r--  plumbing/format/commitgraph/v2/encoder.go                       84
-rw-r--r--  plumbing/format/commitgraph/v2/file.go                         144
-rw-r--r--  plumbing/format/commitgraph/v2/memory.go                        22
-rw-r--r--  plumbing/object/commitgraph/commitnode.go                        4
-rw-r--r--  plumbing/object/commitgraph/commitnode_graph.go                  7
-rw-r--r--  plumbing/object/commitgraph/commitnode_object.go                 7
-rw-r--r--  plumbing/object/commitgraph/commitnode_walker_author_order.go   61
-rw-r--r--  plumbing/object/commitgraph/commitnode_walker_ctime.go           3
-rw-r--r--  plumbing/object/commitgraph/commitnode_walker_date_order.go     41
-rw-r--r--  plumbing/object/commitgraph/commitnode_walker_helper.go         164
-rw-r--r--  plumbing/object/commitgraph/commitnode_walker_test.go           187
-rw-r--r--  plumbing/object/commitgraph/commitnode_walker_topo_order.go     161
15 files changed, 889 insertions(+), 55 deletions(-)
diff --git a/plumbing/format/commitgraph/v2/chunk.go b/plumbing/format/commitgraph/v2/chunk.go
index ab24320..11f4d31 100644
--- a/plumbing/format/commitgraph/v2/chunk.go
+++ b/plumbing/format/commitgraph/v2/chunk.go
@@ -3,7 +3,7 @@ package v2
import "bytes"
const (
- chunkSigLen = 4 // Length of a chunk signature
+ szChunkSig = 4 // Length of a chunk signature
chunkSigOffset = 4 // Offset of each chunk signature in chunkSignatures
)
@@ -28,14 +28,15 @@ const (
BaseGraphsListChunk // "BASE"
ZeroChunk // "\000\000\000\000"
)
+const lenChunks = int(ZeroChunk) // ZeroChunk is not a valid chunk type, but it is used to determine the length of the chunk type list.
// Signature returns the byte signature for the chunk type.
func (ct ChunkType) Signature() []byte {
if ct >= BaseGraphsListChunk || ct < 0 { // not a valid chunk type just return ZeroChunk
- return chunkSignatures[ZeroChunk*chunkSigOffset : ZeroChunk*chunkSigOffset+chunkSigLen]
+ return chunkSignatures[ZeroChunk*chunkSigOffset : ZeroChunk*chunkSigOffset+szChunkSig]
}
- return chunkSignatures[ct*chunkSigOffset : ct*chunkSigOffset+chunkSigLen]
+ return chunkSignatures[ct*chunkSigOffset : ct*chunkSigOffset+szChunkSig]
}
// ChunkTypeFromBytes returns the chunk type for the given byte signature.
diff --git a/plumbing/format/commitgraph/v2/commitgraph.go b/plumbing/format/commitgraph/v2/commitgraph.go
index 7c67b63..9c89cd9 100644
--- a/plumbing/format/commitgraph/v2/commitgraph.go
+++ b/plumbing/format/commitgraph/v2/commitgraph.go
@@ -2,6 +2,7 @@ package v2
import (
"io"
+ "math"
"time"
"github.com/go-git/go-git/v5/plumbing"
@@ -19,10 +20,22 @@ type CommitData struct {
// Generation number is the pre-computed generation in the commit graph
// or zero if not available.
Generation uint64
+ // GenerationV2 stores the corrected commit date for the commits
+ // It combines the contents of the GDA2 and GDO2 sections of the commit-graph
+ // with the commit time portion of the CDAT section.
+ GenerationV2 uint64
// When is the timestamp of the commit.
When time.Time
}
+// GenerationV2Data returns the offset of the corrected commit date from the commit time.
+func (c *CommitData) GenerationV2Data() uint64 {
+ if c.GenerationV2 == 0 || c.GenerationV2 == math.MaxUint64 {
+ return 0
+ }
+ return c.GenerationV2 - uint64(c.When.Unix())
+}
+
// Index represents a representation of commit graph that allows indexed
// access to the nodes using commit object hash
type Index interface {
@@ -35,6 +48,10 @@ type Index interface {
GetCommitDataByIndex(i uint32) (*CommitData, error)
// Hashes returns all the hashes that are available in the index
Hashes() []plumbing.Hash
+ // HasGenerationV2 returns true if the commit graph has the corrected commit date data
+ HasGenerationV2() bool
+ // MaximumNumberOfHashes returns the maximum number of hashes within the index
+ MaximumNumberOfHashes() uint32
io.Closer
}
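As an illustration of the GenerationV2Data helper added above (the timestamps are made up): GenerationV2 holds the corrected commit date, and the helper returns the offset that the encoder later writes into the GDA2 chunk, i.e. the corrected date minus the CDAT commit time.

```go
package main

import (
	"fmt"
	"time"

	v2 "github.com/go-git/go-git/v5/plumbing/format/commitgraph/v2"
)

func main() {
	// Illustrative values only: a commit whose corrected commit date has been
	// bumped 42 seconds past its committer timestamp (e.g. because a parent
	// carries a later timestamp).
	when := time.Unix(1696771791, 0)
	cd := v2.CommitData{
		When:         when,
		GenerationV2: uint64(when.Unix()) + 42, // corrected commit date
	}

	// The offset written into the GDA2 chunk: corrected date minus commit time.
	fmt.Println(cd.GenerationV2Data()) // 42
}
```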
diff --git a/plumbing/format/commitgraph/v2/commitgraph_test.go b/plumbing/format/commitgraph/v2/commitgraph_test.go
index 69fdcd9..1278405 100644
--- a/plumbing/format/commitgraph/v2/commitgraph_test.go
+++ b/plumbing/format/commitgraph/v2/commitgraph_test.go
@@ -7,7 +7,11 @@ import (
"github.com/go-git/go-billy/v5"
"github.com/go-git/go-billy/v5/util"
"github.com/go-git/go-git/v5/plumbing"
+ "github.com/go-git/go-git/v5/plumbing/cache"
commitgraph "github.com/go-git/go-git/v5/plumbing/format/commitgraph/v2"
+ "github.com/go-git/go-git/v5/plumbing/format/packfile"
+ "github.com/go-git/go-git/v5/plumbing/object"
+ "github.com/go-git/go-git/v5/storage/filesystem"
fixtures "github.com/go-git/go-git-fixtures/v4"
. "gopkg.in/check.v1"
@@ -76,6 +80,37 @@ func testDecodeHelper(c *C, index commitgraph.Index) {
c.Assert(hashes[10].String(), Equals, "e713b52d7e13807e87a002e812041f248db3f643")
}
+func (s *CommitgraphSuite) TestDecodeMultiChain(c *C) {
+ fixtures.ByTag("commit-graph-chain-2").Test(c, func(f *fixtures.Fixture) {
+ dotgit := f.DotGit()
+ index, err := commitgraph.OpenChainOrFileIndex(dotgit)
+ c.Assert(err, IsNil)
+ defer index.Close()
+ storer := filesystem.NewStorage(f.DotGit(), cache.NewObjectLRUDefault())
+ p := f.Packfile()
+ defer p.Close()
+ packfile.UpdateObjectStorage(storer, p)
+
+ for idx, hash := range index.Hashes() {
+ idx2, err := index.GetIndexByHash(hash)
+ c.Assert(err, IsNil)
+ c.Assert(idx2, Equals, uint32(idx))
+ hash2, err := index.GetHashByIndex(idx2)
+ c.Assert(err, IsNil)
+ c.Assert(hash2.String(), Equals, hash.String())
+
+ commitData, err := index.GetCommitDataByIndex(uint32(idx))
+ c.Assert(err, IsNil)
+ commit, err := object.GetCommit(storer, hash)
+ c.Assert(err, IsNil)
+
+ for i, parent := range commit.ParentHashes {
+ c.Assert(hash.String()+":"+parent.String(), Equals, hash.String()+":"+commitData.ParentHashes[i].String())
+ }
+ }
+ })
+}
+
func (s *CommitgraphSuite) TestDecode(c *C) {
fixtures.ByTag("commit-graph").Test(c, func(f *fixtures.Fixture) {
dotgit := f.DotGit()
diff --git a/plumbing/format/commitgraph/v2/encoder.go b/plumbing/format/commitgraph/v2/encoder.go
index d1e41f8..b79bc77 100644
--- a/plumbing/format/commitgraph/v2/encoder.go
+++ b/plumbing/format/commitgraph/v2/encoder.go
@@ -3,6 +3,7 @@ package v2
import (
"crypto"
"io"
+ "math"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/hash"
@@ -28,13 +29,21 @@ func (e *Encoder) Encode(idx Index) error {
hashes := idx.Hashes()
// Sort the inout and prepare helper structures we'll need for encoding
- hashToIndex, fanout, extraEdgesCount := e.prepare(idx, hashes)
+ hashToIndex, fanout, extraEdgesCount, generationV2OverflowCount := e.prepare(idx, hashes)
chunkSignatures := [][]byte{OIDFanoutChunk.Signature(), OIDLookupChunk.Signature(), CommitDataChunk.Signature()}
- chunkSizes := []uint64{4 * 256, uint64(len(hashes)) * hash.Size, uint64(len(hashes)) * (hash.Size + commitDataSize)}
+ chunkSizes := []uint64{szUint32 * lenFanout, uint64(len(hashes)) * hash.Size, uint64(len(hashes)) * (hash.Size + szCommitData)}
if extraEdgesCount > 0 {
chunkSignatures = append(chunkSignatures, ExtraEdgeListChunk.Signature())
- chunkSizes = append(chunkSizes, uint64(extraEdgesCount)*4)
+ chunkSizes = append(chunkSizes, uint64(extraEdgesCount)*szUint32)
+ }
+ if idx.HasGenerationV2() {
+ chunkSignatures = append(chunkSignatures, GenerationDataChunk.Signature())
+ chunkSizes = append(chunkSizes, uint64(len(hashes))*szUint32)
+ if generationV2OverflowCount > 0 {
+ chunkSignatures = append(chunkSignatures, GenerationDataOverflowChunk.Signature())
+ chunkSizes = append(chunkSizes, uint64(generationV2OverflowCount)*szUint64)
+ }
}
if err := e.encodeFileHeader(len(chunkSignatures)); err != nil {
@@ -49,38 +58,52 @@ func (e *Encoder) Encode(idx Index) error {
if err := e.encodeOidLookup(hashes); err != nil {
return err
}
- if extraEdges, err := e.encodeCommitData(hashes, hashToIndex, idx); err == nil {
- if err = e.encodeExtraEdges(extraEdges); err != nil {
+
+ extraEdges, generationV2Data, err := e.encodeCommitData(hashes, hashToIndex, idx)
+ if err != nil {
+ return err
+ }
+ if err = e.encodeExtraEdges(extraEdges); err != nil {
+ return err
+ }
+ if idx.HasGenerationV2() {
+ overflows, err := e.encodeGenerationV2Data(generationV2Data)
+ if err != nil {
+ return err
+ }
+ if err = e.encodeGenerationV2Overflow(overflows); err != nil {
return err
}
- } else {
- return err
}
return e.encodeChecksum()
}
-func (e *Encoder) prepare(idx Index, hashes []plumbing.Hash) (hashToIndex map[plumbing.Hash]uint32, fanout []uint32, extraEdgesCount uint32) {
+func (e *Encoder) prepare(idx Index, hashes []plumbing.Hash) (hashToIndex map[plumbing.Hash]uint32, fanout []uint32, extraEdgesCount uint32, generationV2OverflowCount uint32) {
// Sort the hashes and build our index
plumbing.HashesSort(hashes)
hashToIndex = make(map[plumbing.Hash]uint32)
- fanout = make([]uint32, 256)
+ fanout = make([]uint32, lenFanout)
for i, hash := range hashes {
hashToIndex[hash] = uint32(i)
fanout[hash[0]]++
}
// Convert the fanout to cumulative values
- for i := 1; i <= 0xff; i++ {
+ for i := 1; i < lenFanout; i++ {
fanout[i] += fanout[i-1]
}
+ hasGenerationV2 := idx.HasGenerationV2()
+
// Find out if we will need extra edge table
for i := 0; i < len(hashes); i++ {
v, _ := idx.GetCommitDataByIndex(uint32(i))
if len(v.ParentHashes) > 2 {
extraEdgesCount += uint32(len(v.ParentHashes) - 1)
- break
+ }
+ if hasGenerationV2 && v.GenerationV2Data() > math.MaxUint32 {
+ generationV2OverflowCount++
}
}
@@ -100,7 +123,7 @@ func (e *Encoder) encodeFileHeader(chunkCount int) (err error) {
func (e *Encoder) encodeChunkHeaders(chunkSignatures [][]byte, chunkSizes []uint64) (err error) {
// 8 bytes of file header, 12 bytes for each chunk header and 12 byte for terminator
- offset := uint64(8 + len(chunkSignatures)*12 + 12)
+ offset := uint64(szSignature + szHeader + (len(chunkSignatures)+1)*(szChunkSig+szUint64))
for i, signature := range chunkSignatures {
if _, err = e.Write(signature); err == nil {
err = binary.WriteUint64(e, offset)
@@ -134,7 +157,10 @@ func (e *Encoder) encodeOidLookup(hashes []plumbing.Hash) (err error) {
return
}
-func (e *Encoder) encodeCommitData(hashes []plumbing.Hash, hashToIndex map[plumbing.Hash]uint32, idx Index) (extraEdges []uint32, err error) {
+func (e *Encoder) encodeCommitData(hashes []plumbing.Hash, hashToIndex map[plumbing.Hash]uint32, idx Index) (extraEdges []uint32, generationV2Data []uint64, err error) {
+ if idx.HasGenerationV2() {
+ generationV2Data = make([]uint64, 0, len(hashes))
+ }
for _, hash := range hashes {
origIndex, _ := idx.GetIndexByHash(hash)
commitData, _ := idx.GetCommitDataByIndex(origIndex)
@@ -173,6 +199,9 @@ func (e *Encoder) encodeCommitData(hashes []plumbing.Hash, hashToIndex map[plumb
if err = binary.WriteUint64(e, unixTime); err != nil {
return
}
+ if generationV2Data != nil {
+ generationV2Data = append(generationV2Data, commitData.GenerationV2Data())
+ }
}
return
}
@@ -186,6 +215,35 @@ func (e *Encoder) encodeExtraEdges(extraEdges []uint32) (err error) {
return
}
+func (e *Encoder) encodeGenerationV2Data(generationV2Data []uint64) (overflows []uint64, err error) {
+ head := 0
+ for _, data := range generationV2Data {
+ if data >= 0x80000000 {
+ // overflow
+ if err = binary.WriteUint32(e, uint32(head)|0x80000000); err != nil {
+ return nil, err
+ }
+ generationV2Data[head] = data
+ head++
+ continue
+ }
+ if err = binary.WriteUint32(e, uint32(data)); err != nil {
+ return nil, err
+ }
+ }
+
+ return generationV2Data[:head], nil
+}
+
+func (e *Encoder) encodeGenerationV2Overflow(overflows []uint64) (err error) {
+ for _, overflow := range overflows {
+ if err = binary.WriteUint64(e, overflow); err != nil {
+ return
+ }
+ }
+ return
+}
+
func (e *Encoder) encodeChecksum() error {
_, err := e.Write(e.hash.Sum(nil)[:hash.Size])
return err
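A hedged restatement of the overflow scheme used by encodeGenerationV2Data and encodeGenerationV2Overflow above: offsets below 2^31 are written straight into GDA2 as uint32, while larger offsets are appended to the GDO2 overflow chunk and GDA2 records the overflow index with the high bit set. splitGenerationOffsets below is an illustrative stand-alone function, not the encoder itself.

```go
package main

import "fmt"

// splitGenerationOffsets mirrors the GDA2/GDO2 split: small offsets are kept
// inline, large ones are moved to the overflow list and referenced by index
// with the most significant bit set.
func splitGenerationOffsets(offsets []uint64) (gda2 []uint32, gdo2 []uint64) {
	for _, off := range offsets {
		if off >= 0x80000000 {
			gda2 = append(gda2, uint32(len(gdo2))|0x80000000)
			gdo2 = append(gdo2, off)
			continue
		}
		gda2 = append(gda2, uint32(off))
	}
	return gda2, gdo2
}

func main() {
	gda2, gdo2 := splitGenerationOffsets([]uint64{7, 0x1_0000_0000, 42})
	fmt.Printf("GDA2: %#x\n", gda2) // GDA2: [0x7 0x80000000 0x2a]
	fmt.Printf("GDO2: %#x\n", gdo2) // GDO2: [0x100000000]
}
```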
diff --git a/plumbing/format/commitgraph/v2/file.go b/plumbing/format/commitgraph/v2/file.go
index 69e0250..c5f61e4 100644
--- a/plumbing/format/commitgraph/v2/file.go
+++ b/plumbing/format/commitgraph/v2/file.go
@@ -34,14 +34,23 @@ var (
)
const (
- commitDataSize = 16
+ szUint32 = 4
+ szUint64 = 8
+
+ szSignature = 4
+ szHeader = 4
+ szCommitData = 2*szUint32 + szUint64
+
+ lenFanout = 256
)
type fileIndex struct {
- reader ReaderAtCloser
- fanout [256]uint32
- offsets [9]int64
- parent Index
+ reader ReaderAtCloser
+ fanout [lenFanout]uint32
+ offsets [lenChunks]int64
+ parent Index
+ hasGenerationV2 bool
+ minimumNumberOfHashes uint32
}
// ReaderAtCloser is an interface that combines io.ReaderAt and io.Closer.
@@ -74,6 +83,15 @@ func OpenFileIndexWithParent(reader ReaderAtCloser, parent Index) (Index, error)
return nil, err
}
+ fi.hasGenerationV2 = fi.offsets[GenerationDataChunk] > 0
+ if fi.parent != nil {
+ fi.hasGenerationV2 = fi.hasGenerationV2 && fi.parent.HasGenerationV2()
+ }
+
+ if fi.parent != nil {
+ fi.minimumNumberOfHashes = fi.parent.MaximumNumberOfHashes()
+ }
+
return fi, nil
}
@@ -94,7 +112,7 @@ func (fi *fileIndex) Close() (err error) {
func (fi *fileIndex) verifyFileHeader() error {
// Verify file signature
- signature := make([]byte, 4)
+ signature := make([]byte, szSignature)
if _, err := fi.reader.ReadAt(signature, 0); err != nil {
return err
}
@@ -103,8 +121,8 @@ func (fi *fileIndex) verifyFileHeader() error {
}
// Read and verify the file header
- header := make([]byte, 4)
- if _, err := fi.reader.ReadAt(header, 4); err != nil {
+ header := make([]byte, szHeader)
+ if _, err := fi.reader.ReadAt(header, szHeader); err != nil {
return err
}
if header[0] != 1 {
@@ -120,10 +138,11 @@ func (fi *fileIndex) verifyFileHeader() error {
}
func (fi *fileIndex) readChunkHeaders() error {
- chunkID := make([]byte, 4)
+ // The chunk table is a list of 4-byte chunk signatures and uint64 offsets into the file
+ chunkID := make([]byte, szChunkSig)
for i := 0; ; i++ {
- chunkHeader := io.NewSectionReader(fi.reader, 8+(int64(i)*12), 12)
- if _, err := io.ReadAtLeast(chunkHeader, chunkID, 4); err != nil {
+ chunkHeader := io.NewSectionReader(fi.reader, szSignature+szHeader+(int64(i)*(szChunkSig+szUint64)), szChunkSig+szUint64)
+ if _, err := io.ReadAtLeast(chunkHeader, chunkID, szChunkSig); err != nil {
return err
}
chunkOffset, err := binary.ReadUint64(chunkHeader)
@@ -149,7 +168,9 @@ func (fi *fileIndex) readChunkHeaders() error {
}
func (fi *fileIndex) readFanout() error {
- fanoutReader := io.NewSectionReader(fi.reader, fi.offsets[OIDFanoutChunk], 256*4)
+ // The Fanout table is a 256 entry table of the number (as uint32) of OIDs with first byte at most i.
+ // Thus F[255] stores the total number of commits (N)
+ fanoutReader := io.NewSectionReader(fi.reader, fi.offsets[OIDFanoutChunk], lenFanout*szUint32)
for i := 0; i < 256; i++ {
fanoutValue, err := binary.ReadUint32(fanoutReader)
if err != nil {
@@ -185,7 +206,7 @@ func (fi *fileIndex) GetIndexByHash(h plumbing.Hash) (uint32, error) {
if cmp < 0 {
high = mid
} else if cmp == 0 {
- return mid, nil
+ return mid + fi.minimumNumberOfHashes, nil
} else {
low = mid + 1
}
@@ -196,7 +217,7 @@ func (fi *fileIndex) GetIndexByHash(h plumbing.Hash) (uint32, error) {
if err != nil {
return 0, err
}
- return idx + fi.fanout[0xff], nil
+ return idx, nil
}
return 0, plumbing.ErrObjectNotFound
@@ -204,23 +225,24 @@ func (fi *fileIndex) GetIndexByHash(h plumbing.Hash) (uint32, error) {
// GetCommitDataByIndex returns the commit data for the given index in the commit-graph.
func (fi *fileIndex) GetCommitDataByIndex(idx uint32) (*CommitData, error) {
- if idx >= fi.fanout[0xff] {
+ if idx < fi.minimumNumberOfHashes {
if fi.parent != nil {
- data, err := fi.parent.GetCommitDataByIndex(idx - fi.fanout[0xff])
+ data, err := fi.parent.GetCommitDataByIndex(idx)
if err != nil {
return nil, err
}
- for i := range data.ParentIndexes {
- data.ParentIndexes[i] += fi.fanout[0xff]
- }
return data, nil
}
return nil, plumbing.ErrObjectNotFound
}
+ idx -= fi.minimumNumberOfHashes
+ if idx >= fi.fanout[0xff] {
+ return nil, plumbing.ErrObjectNotFound
+ }
- offset := fi.offsets[CommitDataChunk] + int64(idx)*(hash.Size+commitDataSize)
- commitDataReader := io.NewSectionReader(fi.reader, offset, hash.Size+commitDataSize)
+ offset := fi.offsets[CommitDataChunk] + int64(idx)*(hash.Size+szCommitData)
+ commitDataReader := io.NewSectionReader(fi.reader, offset, hash.Size+szCommitData)
treeHash, err := binary.ReadHash(commitDataReader)
if err != nil {
@@ -241,10 +263,11 @@ func (fi *fileIndex) GetCommitDataByIndex(idx uint32) (*CommitData, error) {
var parentIndexes []uint32
if parent2&parentOctopusUsed == parentOctopusUsed {
- // Octopus merge
+ // Octopus merge - Look-up the extra parents from the extra edge list
+ // The extra edge list is a list of uint32s, each of which is an index into the Commit Data table, terminated by an index with the most significant bit on.
parentIndexes = []uint32{parent1 & parentOctopusMask}
- offset := fi.offsets[ExtraEdgeListChunk] + 4*int64(parent2&parentOctopusMask)
- buf := make([]byte, 4)
+ offset := fi.offsets[ExtraEdgeListChunk] + szUint32*int64(parent2&parentOctopusMask)
+ buf := make([]byte, szUint32)
for {
_, err := fi.reader.ReadAt(buf, offset)
if err != nil {
@@ -252,7 +275,7 @@ func (fi *fileIndex) GetCommitDataByIndex(idx uint32) (*CommitData, error) {
}
parent := encbin.BigEndian.Uint32(buf)
- offset += 4
+ offset += szUint32
parentIndexes = append(parentIndexes, parent&parentOctopusMask)
if parent&parentLast == parentLast {
break
@@ -269,23 +292,57 @@ func (fi *fileIndex) GetCommitDataByIndex(idx uint32) (*CommitData, error) {
return nil, err
}
+ generationV2 := uint64(0)
+
+ if fi.hasGenerationV2 {
+ // set the GenerationV2 result to the commit time
+ generationV2 = uint64(genAndTime & 0x3FFFFFFFF)
+
+ // Next read the generation (offset) data from the generation data chunk
+ offset := fi.offsets[GenerationDataChunk] + int64(idx)*szUint32
+ buf := make([]byte, szUint32)
+ if _, err := fi.reader.ReadAt(buf, offset); err != nil {
+ return nil, err
+ }
+ genV2Data := encbin.BigEndian.Uint32(buf)
+
+ // check if the data is an overflow that needs to be looked up in the overflow chunk
+ if genV2Data&0x80000000 > 0 {
+ // Overflow
+ offset := fi.offsets[GenerationDataOverflowChunk] + int64(genV2Data&0x7fffffff)*szUint64
+ buf := make([]byte, 8)
+ if _, err := fi.reader.ReadAt(buf, offset); err != nil {
+ return nil, err
+ }
+
+ generationV2 += encbin.BigEndian.Uint64(buf)
+ } else {
+ generationV2 += uint64(genV2Data)
+ }
+ }
+
return &CommitData{
TreeHash: treeHash,
ParentIndexes: parentIndexes,
ParentHashes: parentHashes,
Generation: genAndTime >> 34,
+ GenerationV2: generationV2,
When: time.Unix(int64(genAndTime&0x3FFFFFFFF), 0),
}, nil
}
// GetHashByIndex looks up the hash for the given index in the commit-graph.
func (fi *fileIndex) GetHashByIndex(idx uint32) (found plumbing.Hash, err error) {
- if idx >= fi.fanout[0xff] {
+ if idx < fi.minimumNumberOfHashes {
if fi.parent != nil {
- return fi.parent.GetHashByIndex(idx - fi.fanout[0xff])
+ return fi.parent.GetHashByIndex(idx)
}
return found, ErrMalformedCommitGraphFile
}
+ idx -= fi.minimumNumberOfHashes
+ if idx >= fi.fanout[0xff] {
+ return found, ErrMalformedCommitGraphFile
+ }
offset := fi.offsets[OIDLookupChunk] + int64(idx)*hash.Size
if _, err := fi.reader.ReadAt(found[:], offset); err != nil {
@@ -299,9 +356,9 @@ func (fi *fileIndex) getHashesFromIndexes(indexes []uint32) ([]plumbing.Hash, er
hashes := make([]plumbing.Hash, len(indexes))
for i, idx := range indexes {
- if idx >= fi.fanout[0xff] {
+ if idx < fi.minimumNumberOfHashes {
if fi.parent != nil {
- hash, err := fi.parent.GetHashByIndex(idx - fi.fanout[0xff])
+ hash, err := fi.parent.GetHashByIndex(idx)
if err != nil {
return nil, err
}
@@ -312,6 +369,11 @@ func (fi *fileIndex) getHashesFromIndexes(indexes []uint32) ([]plumbing.Hash, er
return nil, ErrMalformedCommitGraphFile
}
+ idx -= fi.minimumNumberOfHashes
+ if idx >= fi.fanout[0xff] {
+ return nil, ErrMalformedCommitGraphFile
+ }
+
offset := fi.offsets[OIDLookupChunk] + int64(idx)*hash.Size
if _, err := fi.reader.ReadAt(hashes[i][:], offset); err != nil {
return nil, err
@@ -323,16 +385,28 @@ func (fi *fileIndex) getHashesFromIndexes(indexes []uint32) ([]plumbing.Hash, er
// Hashes returns all the hashes that are available in the index.
func (fi *fileIndex) Hashes() []plumbing.Hash {
- hashes := make([]plumbing.Hash, fi.fanout[0xff])
+ hashes := make([]plumbing.Hash, fi.fanout[0xff]+fi.minimumNumberOfHashes)
+ for i := uint32(0); i < fi.minimumNumberOfHashes; i++ {
+ hash, err := fi.parent.GetHashByIndex(i)
+ if err != nil {
+ return nil
+ }
+ hashes[i] = hash
+ }
+
for i := uint32(0); i < fi.fanout[0xff]; i++ {
offset := fi.offsets[OIDLookupChunk] + int64(i)*hash.Size
- if n, err := fi.reader.ReadAt(hashes[i][:], offset); err != nil || n < hash.Size {
+ if n, err := fi.reader.ReadAt(hashes[i+fi.minimumNumberOfHashes][:], offset); err != nil || n < hash.Size {
return nil
}
}
- if fi.parent != nil {
- parentHashes := fi.parent.Hashes()
- hashes = append(hashes, parentHashes...)
- }
return hashes
}
+
+func (fi *fileIndex) HasGenerationV2() bool {
+ return fi.hasGenerationV2
+}
+
+func (fi *fileIndex) MaximumNumberOfHashes() uint32 {
+ return fi.minimumNumberOfHashes + fi.fanout[0xff]
+}
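The chain handling above turns per-file indexes into global ones: each file in a commit-graph chain covers the half-open range [minimumNumberOfHashes, minimumNumberOfHashes+fanout[0xff]), where the minimum is the parent's MaximumNumberOfHashes(), and anything below the minimum is delegated to the parent. A simplified sketch of that resolution, with hypothetical sizes:

```go
package main

import "fmt"

// graphFile is a hypothetical stand-in for one file in a commit-graph chain.
type graphFile struct {
	min   uint32 // parent.MaximumNumberOfHashes()
	count uint32 // fanout[0xff] for this file
}

// resolve maps a global commit index to (file, local index), delegating to
// older files in the chain when the index is below this file's minimum.
func resolve(chain []graphFile, idx uint32) (file int, local uint32, ok bool) {
	for i := len(chain) - 1; i >= 0; i-- { // newest file first, like the parent chain
		f := chain[i]
		if idx >= f.min {
			local = idx - f.min
			if local >= f.count {
				return 0, 0, false // out of range / malformed
			}
			return i, local, true
		}
	}
	return 0, 0, false
}

func main() {
	// The base file holds commits 0..9, the split on top holds 10..24.
	chain := []graphFile{{min: 0, count: 10}, {min: 10, count: 15}}
	fmt.Println(resolve(chain, 3))  // 0 3 true  -> base file, local index 3
	fmt.Println(resolve(chain, 12)) // 1 2 true  -> top file, local index 2
	fmt.Println(resolve(chain, 30)) // 0 0 false -> past the end of the chain
}
```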
diff --git a/plumbing/format/commitgraph/v2/memory.go b/plumbing/format/commitgraph/v2/memory.go
index ab7ddfa..8de0c5f 100644
--- a/plumbing/format/commitgraph/v2/memory.go
+++ b/plumbing/format/commitgraph/v2/memory.go
@@ -1,14 +1,17 @@
package v2
import (
+ "math"
+
"github.com/go-git/go-git/v5/plumbing"
)
// MemoryIndex provides a way to build the commit-graph in memory
// for later encoding to file.
type MemoryIndex struct {
- commitData []commitData
- indexMap map[plumbing.Hash]uint32
+ commitData []commitData
+ indexMap map[plumbing.Hash]uint32
+ hasGenerationV2 bool
}
type commitData struct {
@@ -19,7 +22,8 @@ type commitData struct {
// NewMemoryIndex creates in-memory commit graph representation
func NewMemoryIndex() *MemoryIndex {
return &MemoryIndex{
- indexMap: make(map[plumbing.Hash]uint32),
+ indexMap: make(map[plumbing.Hash]uint32),
+ hasGenerationV2: true,
}
}
@@ -83,9 +87,21 @@ func (mi *MemoryIndex) Add(hash plumbing.Hash, data *CommitData) {
data.ParentIndexes = nil
mi.indexMap[hash] = uint32(len(mi.commitData))
mi.commitData = append(mi.commitData, commitData{Hash: hash, CommitData: data})
+ if data.GenerationV2 == math.MaxUint64 { // if GenerationV2 is not available reset it to zero
+ data.GenerationV2 = 0
+ }
+ mi.hasGenerationV2 = mi.hasGenerationV2 && data.GenerationV2 != 0
+}
+
+func (mi *MemoryIndex) HasGenerationV2() bool {
+ return mi.hasGenerationV2
}
// Close closes the index
func (mi *MemoryIndex) Close() error {
return nil
}
+
+func (mi *MemoryIndex) MaximumNumberOfHashes() uint32 {
+ return uint32(len(mi.indexMap))
+}
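A short sketch of the MemoryIndex behaviour introduced above: HasGenerationV2 starts out true and is cleared as soon as any added commit lacks generation v2 data (the hashes and timestamps below are placeholders).

```go
package main

import (
	"fmt"
	"time"

	"github.com/go-git/go-git/v5/plumbing"
	v2 "github.com/go-git/go-git/v5/plumbing/format/commitgraph/v2"
)

func main() {
	mi := v2.NewMemoryIndex()
	fmt.Println(mi.HasGenerationV2()) // true: nothing has disproved it yet

	// A commit with a corrected commit date keeps generation v2 available.
	mi.Add(plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), &v2.CommitData{
		When:         time.Unix(1696771791, 0),
		GenerationV2: 1696771800,
	})
	fmt.Println(mi.HasGenerationV2()) // still true

	// A commit without GenerationV2 (zero) clears the flag for the whole index.
	mi.Add(plumbing.NewHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"), &v2.CommitData{
		When: time.Unix(1696771900, 0),
	})
	fmt.Println(mi.HasGenerationV2()) // false
}
```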
diff --git a/plumbing/object/commitgraph/commitnode.go b/plumbing/object/commitgraph/commitnode.go
index d92c906..47227d4 100644
--- a/plumbing/object/commitgraph/commitnode.go
+++ b/plumbing/object/commitgraph/commitnode.go
@@ -29,6 +29,10 @@ type CommitNode interface {
// Generation returns the generation of the commit for reachability analysis.
// Objects with newer generation are not reachable from objects of older generation.
Generation() uint64
+ // GenerationV2 stores the corrected commit date for the commits
+ // It combines the contents of the GDA2 and GDO2 sections of the commit-graph
+ // with the commit time portion of the CDAT section.
+ GenerationV2() uint64
// Commit returns the full commit object from the node
Commit() (*object.Commit, error)
}
diff --git a/plumbing/object/commitgraph/commitnode_graph.go b/plumbing/object/commitgraph/commitnode_graph.go
index 252b518..0f51e3b 100644
--- a/plumbing/object/commitgraph/commitnode_graph.go
+++ b/plumbing/object/commitgraph/commitnode_graph.go
@@ -120,6 +120,13 @@ func (c *graphCommitNode) Generation() uint64 {
return c.commitData.Generation
}
+func (c *graphCommitNode) GenerationV2() uint64 {
+ // If the commit-graph file was generated with an older Git version that
+ // set the generation to zero for every commit, the generation assumption
+ // is still valid. It is just less useful.
+ return c.commitData.GenerationV2
+}
+
func (c *graphCommitNode) Commit() (*object.Commit, error) {
return object.GetCommit(c.gci.s, c.hash)
}
diff --git a/plumbing/object/commitgraph/commitnode_object.go b/plumbing/object/commitgraph/commitnode_object.go
index 1bd37e3..7256bed 100644
--- a/plumbing/object/commitgraph/commitnode_object.go
+++ b/plumbing/object/commitgraph/commitnode_object.go
@@ -85,6 +85,13 @@ func (c *objectCommitNode) Generation() uint64 {
return math.MaxUint64
}
+func (c *objectCommitNode) GenerationV2() uint64 {
+ // Commit nodes representing objects outside of the commit graph can never
+ // be reached by objects from the commit-graph thus we return the highest
+ // possible value.
+ return math.MaxUint64
+}
+
func (c *objectCommitNode) Commit() (*object.Commit, error) {
return c.commit, nil
}
diff --git a/plumbing/object/commitgraph/commitnode_walker_author_order.go b/plumbing/object/commitgraph/commitnode_walker_author_order.go
new file mode 100644
index 0000000..f5b23cc
--- /dev/null
+++ b/plumbing/object/commitgraph/commitnode_walker_author_order.go
@@ -0,0 +1,61 @@
+package commitgraph
+
+import (
+ "github.com/go-git/go-git/v5/plumbing"
+
+ "github.com/emirpasic/gods/trees/binaryheap"
+)
+
+// NewCommitNodeIterAuthorDateOrder returns a CommitNodeIter that walks the commit history,
+// starting at the given commit and visiting its parents in Author Time order but with the
+// constraint that no parent is emitted before its children are emitted.
+//
+// This matches `git log --author-date-order`
+//
+// This ordering requires commit objects to be loaded into memory - thus this
+// ordering is likely to be slower than other orderings.
+func NewCommitNodeIterAuthorDateOrder(c CommitNode,
+ seenExternal map[plumbing.Hash]bool,
+ ignore []plumbing.Hash,
+) CommitNodeIter {
+ seen := make(map[plumbing.Hash]struct{})
+ for _, h := range ignore {
+ seen[h] = struct{}{}
+ }
+ for h, ext := range seenExternal {
+ if ext {
+ seen[h] = struct{}{}
+ }
+ }
+ inCounts := make(map[plumbing.Hash]int)
+
+ exploreHeap := &commitNodeHeap{binaryheap.NewWith(generationAndDateOrderComparator)}
+ exploreHeap.Push(c)
+
+ visitHeap := &commitNodeHeap{binaryheap.NewWith(func(left, right interface{}) int {
+ leftCommit, err := left.(CommitNode).Commit()
+ if err != nil {
+ return -1
+ }
+ rightCommit, err := right.(CommitNode).Commit()
+ if err != nil {
+ return -1
+ }
+
+ switch {
+ case rightCommit.Author.When.Before(leftCommit.Author.When):
+ return -1
+ case leftCommit.Author.When.Before(rightCommit.Author.When):
+ return 1
+ }
+ return 0
+ })}
+ visitHeap.Push(c)
+
+ return &commitNodeIteratorTopological{
+ exploreStack: exploreHeap,
+ visitStack: visitHeap,
+ inCounts: inCounts,
+ ignore: seen,
+ }
+}
diff --git a/plumbing/object/commitgraph/commitnode_walker_ctime.go b/plumbing/object/commitgraph/commitnode_walker_ctime.go
index c26873c..3ab9e6e 100644
--- a/plumbing/object/commitgraph/commitnode_walker_ctime.go
+++ b/plumbing/object/commitgraph/commitnode_walker_ctime.go
@@ -17,7 +17,8 @@ type commitNodeIteratorByCTime struct {
// NewCommitNodeIterCTime returns a CommitNodeIter that walks the commit history,
// starting at the given commit and visiting its parents while preserving Committer Time order.
-// this appears to be the closest order to `git log`
+// This is close to the order of `git log` but does not guarantee topological order and will
+// occasionally order commits incorrectly.
// The given callback will be called for each visited commit. Each commit will
// be visited only once. If the callback returns an error, walking will stop
// and will return the error. Other errors might be returned if the history
diff --git a/plumbing/object/commitgraph/commitnode_walker_date_order.go b/plumbing/object/commitgraph/commitnode_walker_date_order.go
new file mode 100644
index 0000000..659a4fa
--- /dev/null
+++ b/plumbing/object/commitgraph/commitnode_walker_date_order.go
@@ -0,0 +1,41 @@
+package commitgraph
+
+import (
+ "github.com/go-git/go-git/v5/plumbing"
+
+ "github.com/emirpasic/gods/trees/binaryheap"
+)
+
+// NewCommitNodeIterDateOrder returns a CommitNodeIter that walks the commit history,
+// starting at the given commit and visiting its parents in Committer Time and Generation order,
+// but with the constraint that no parent is emitted before its children are emitted.
+//
+// This matches `git log --date-order`
+func NewCommitNodeIterDateOrder(c CommitNode,
+ seenExternal map[plumbing.Hash]bool,
+ ignore []plumbing.Hash,
+) CommitNodeIter {
+ seen := make(map[plumbing.Hash]struct{})
+ for _, h := range ignore {
+ seen[h] = struct{}{}
+ }
+ for h, ext := range seenExternal {
+ if ext {
+ seen[h] = struct{}{}
+ }
+ }
+ inCounts := make(map[plumbing.Hash]int)
+
+ exploreHeap := &commitNodeHeap{binaryheap.NewWith(generationAndDateOrderComparator)}
+ exploreHeap.Push(c)
+
+ visitHeap := &commitNodeHeap{binaryheap.NewWith(generationAndDateOrderComparator)}
+ visitHeap.Push(c)
+
+ return &commitNodeIteratorTopological{
+ exploreStack: exploreHeap,
+ visitStack: visitHeap,
+ inCounts: inCounts,
+ ignore: seen,
+ }
+}
diff --git a/plumbing/object/commitgraph/commitnode_walker_helper.go b/plumbing/object/commitgraph/commitnode_walker_helper.go
new file mode 100644
index 0000000..c54f6ca
--- /dev/null
+++ b/plumbing/object/commitgraph/commitnode_walker_helper.go
@@ -0,0 +1,164 @@
+package commitgraph
+
+import (
+ "math"
+
+ "github.com/go-git/go-git/v5/plumbing"
+
+ "github.com/emirpasic/gods/trees/binaryheap"
+)
+
+// commitNodeStackable represents a common interface between heaps and stacks
+type commitNodeStackable interface {
+ Push(c CommitNode)
+ Pop() (CommitNode, bool)
+ Peek() (CommitNode, bool)
+ Size() int
+}
+
+// commitNodeLifo is a stack implementation using an underlying slice
+type commitNodeLifo struct {
+ l []CommitNode
+}
+
+// Push pushes a new CommitNode to the stack
+func (l *commitNodeLifo) Push(c CommitNode) {
+ l.l = append(l.l, c)
+}
+
+// Pop pops the most recently added CommitNode from the stack
+func (l *commitNodeLifo) Pop() (CommitNode, bool) {
+ if len(l.l) == 0 {
+ return nil, false
+ }
+ c := l.l[len(l.l)-1]
+ l.l = l.l[:len(l.l)-1]
+ return c, true
+}
+
+// Peek returns the most recently added CommitNode from the stack without removing it
+func (l *commitNodeLifo) Peek() (CommitNode, bool) {
+ if len(l.l) == 0 {
+ return nil, false
+ }
+ return l.l[len(l.l)-1], true
+}
+
+// Size returns the number of CommitNodes in the stack
+func (l *commitNodeLifo) Size() int {
+ return len(l.l)
+}
+
+// commitNodeHeap is a commitNodeStackable implementation backed by a binary heap
+type commitNodeHeap struct {
+ *binaryheap.Heap
+}
+
+// Push pushes a new CommitNode to the heap
+func (h *commitNodeHeap) Push(c CommitNode) {
+ h.Heap.Push(c)
+}
+
+// Pop removes top element on heap and returns it, or nil if heap is empty.
+// Second return parameter is true, unless the heap was empty and there was nothing to pop.
+func (h *commitNodeHeap) Pop() (CommitNode, bool) {
+ c, ok := h.Heap.Pop()
+ if !ok {
+ return nil, false
+ }
+ return c.(CommitNode), true
+}
+
+// Peek returns top element on the heap without removing it, or nil if heap is empty.
+// Second return parameter is true, unless the heap was empty and there was nothing to peek.
+func (h *commitNodeHeap) Peek() (CommitNode, bool) {
+ c, ok := h.Heap.Peek()
+ if !ok {
+ return nil, false
+ }
+ return c.(CommitNode), true
+}
+
+// Size returns number of elements within the heap.
+func (h *commitNodeHeap) Size() int {
+ return h.Heap.Size()
+}
+
+// generationAndDateOrderComparator compares two CommitNode objects based on their generation and commit time.
+// If the left CommitNode object is in a higher generation or is newer than the right one, it returns a -1.
+// If the left CommitNode object is in a lower generation or is older than the right one, it returns a 1.
+// If the two CommitNode objects have the same commit time and generation, it returns 0.
+func generationAndDateOrderComparator(left, right interface{}) int {
+ leftCommit := left.(CommitNode)
+ rightCommit := right.(CommitNode)
+
+ // if GenerationV2 is MaxUint64, then the node is not in the graph
+ if leftCommit.GenerationV2() == math.MaxUint64 {
+ if rightCommit.GenerationV2() == math.MaxUint64 {
+ switch {
+ case rightCommit.CommitTime().Before(leftCommit.CommitTime()):
+ return -1
+ case leftCommit.CommitTime().Before(rightCommit.CommitTime()):
+ return 1
+ }
+ return 0
+ }
+ // left is not in the graph, but right is, so it is newer than the right
+ return -1
+ }
+
+ if rightCommit.GenerationV2() == math.MaxUint64 {
+ // the right is not in the graph, therefore the left is before the right
+ return 1
+ }
+
+ if leftCommit.GenerationV2() == 0 || rightCommit.GenerationV2() == 0 {
+ // We need to assess generation and date
+ if leftCommit.Generation() < rightCommit.Generation() {
+ return 1
+ }
+ if leftCommit.Generation() > rightCommit.Generation() {
+ return -1
+ }
+ switch {
+ case rightCommit.CommitTime().Before(leftCommit.CommitTime()):
+ return -1
+ case leftCommit.CommitTime().Before(rightCommit.CommitTime()):
+ return 1
+ }
+ return 0
+ }
+
+ if leftCommit.GenerationV2() < rightCommit.GenerationV2() {
+ return 1
+ }
+ if leftCommit.GenerationV2() > rightCommit.GenerationV2() {
+ return -1
+ }
+
+ return 0
+}
+
+// composeIgnores composes the ignore list with the provided seenExternal list
+func composeIgnores(ignore []plumbing.Hash, seenExternal map[plumbing.Hash]bool) map[plumbing.Hash]struct{} {
+ if len(ignore) == 0 {
+ seen := make(map[plumbing.Hash]struct{})
+ for h, ext := range seenExternal {
+ if ext {
+ seen[h] = struct{}{}
+ }
+ }
+ return seen
+ }
+
+ seen := make(map[plumbing.Hash]struct{})
+ for _, h := range ignore {
+ seen[h] = struct{}{}
+ }
+ for h, ext := range seenExternal {
+ if ext {
+ seen[h] = struct{}{}
+ }
+ }
+ return seen
+}
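For readability, here are the priority rules of generationAndDateOrderComparator restated on a simplified stand-in type; commitStub and compare are hypothetical names for illustration only, the real comparator operates on CommitNode values.

```go
package main

import (
	"fmt"
	"math"
	"time"
)

// commitStub is a hypothetical stand-in for CommitNode carrying only the
// fields the comparator inspects.
type commitStub struct {
	genV2 uint64    // corrected commit date; math.MaxUint64 = not in the graph
	gen   uint64    // generation v1
	when  time.Time // committer time
}

// compare restates the decision order: nodes outside the graph sort as newest;
// if either side lacks v2 data (zero), fall back to generation v1 and then
// committer time; otherwise compare corrected commit dates. -1 means the left
// node pops first (is newer).
func compare(l, r commitStub) int {
	byTime := func() int {
		if r.when.Before(l.when) {
			return -1
		}
		if l.when.Before(r.when) {
			return 1
		}
		return 0
	}
	switch {
	case l.genV2 == math.MaxUint64 && r.genV2 == math.MaxUint64:
		return byTime()
	case l.genV2 == math.MaxUint64:
		return -1
	case r.genV2 == math.MaxUint64:
		return 1
	case l.genV2 == 0 || r.genV2 == 0:
		if l.gen != r.gen {
			if l.gen > r.gen {
				return -1
			}
			return 1
		}
		return byTime()
	case l.genV2 > r.genV2:
		return -1
	case l.genV2 < r.genV2:
		return 1
	}
	return 0
}

func main() {
	outside := commitStub{genV2: math.MaxUint64}
	inGraph := commitStub{genV2: 1696771791, gen: 5}
	fmt.Println(compare(outside, inGraph)) // -1: the node outside the graph is treated as newest
}
```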
diff --git a/plumbing/object/commitgraph/commitnode_walker_test.go b/plumbing/object/commitgraph/commitnode_walker_test.go
new file mode 100644
index 0000000..1e09c0b
--- /dev/null
+++ b/plumbing/object/commitgraph/commitnode_walker_test.go
@@ -0,0 +1,187 @@
+package commitgraph
+
+import (
+ "strings"
+
+ "github.com/go-git/go-git/v5/plumbing"
+ commitgraph "github.com/go-git/go-git/v5/plumbing/format/commitgraph/v2"
+
+ fixtures "github.com/go-git/go-git-fixtures/v4"
+ . "gopkg.in/check.v1"
+)
+
+func (s *CommitNodeSuite) TestCommitNodeIter(c *C) {
+ f := fixtures.ByTag("commit-graph-chain-2").One()
+
+ storer := unpackRepository(f)
+
+ index, err := commitgraph.OpenChainOrFileIndex(storer.Filesystem())
+ c.Assert(err, IsNil)
+
+ nodeIndex := NewGraphCommitNodeIndex(index, storer)
+
+ head, err := nodeIndex.Get(plumbing.NewHash("ec6f456c0e8c7058a29611429965aa05c190b54b"))
+ c.Assert(err, IsNil)
+
+ testTopoOrder(c, head)
+ testDateOrder(c, head)
+ testAuthorDateOrder(c, head)
+}
+
+func testTopoOrder(c *C, head CommitNode) {
+ iter := NewCommitNodeIterTopoOrder(
+ head,
+ nil,
+ nil,
+ )
+
+ var commits []string
+ iter.ForEach(func(c CommitNode) error {
+ commits = append(commits, c.ID().String())
+ return nil
+ })
+ c.Assert(commits, DeepEquals, strings.Split(`ec6f456c0e8c7058a29611429965aa05c190b54b
+d82f291cde9987322c8a0c81a325e1ba6159684c
+3048d280d2d5b258d9e582a226ff4bbed34fd5c9
+27aa8cdd2431068606741a589383c02c149ea625
+fa058d42fa3bc53f39108a56dad67157169b2191
+6c629843a1750a27c9af01ed2985f362f619c47a
+d10a0e7c1f340a6cfc14540a5f8c508ce7e2eabf
+d0a18ccd8eea3bdabc76d6dc5420af1ea30aae9f
+cf2874632223220e0445abf0a7806dc772c0b37a
+758ac33217f092bfcded4ad4774954ac054c9609
+214e1dca024fb6da5ed65564d2de734df5dc2127
+70923099e61fa33f0bc5256d2f938fa44c4df10e
+bcaa1ac5644b16f1febb72f31e204720b7bb8934
+e1d8866ffa78fa16d2f39b0ba5344a7269ee5371
+2275fa7d0c75d20103f90b0e1616937d5a9fc5e6
+bdd9a92789d4a86b20a8d3df462df373f41acf23
+b359f11ea09e642695edcd114b463da4395b10c1
+6f43e8933ba3c04072d5d104acc6118aac3e52ee
+ccafe8bd5f9dbfb8b98b0da03ced29608dcfdeec
+939814f341fdd5d35e81a3845a33c4fedb19d2d2
+5f5ad88bf2babe506f927d64d2b7a1e1493dc2ae
+a2014124ca3b3f9ff28fbab0a83ce3c71bf4622e
+77906b653c3eb8a1cd5bd7254e161c00c6086d83
+465cba710284204f9851854587c2887c247222db
+b9471b13256703d3f5eb88b280b4a16ce325ec1b
+62925030859646daeeaf5a4d386a0c41e00dda8a
+5f56aea0ca8b74215a5b982bca32236e1e28c76b
+23148841baa5dbce48f6adcb7ddf83dcd97debb3
+c336d16298a017486c4164c40f8acb28afe64e84
+31eae7b619d166c366bf5df4991f04ba8cebea0a
+d2a38b4a5965d529566566640519d03d2bd10f6c
+b977a025ca21e3b5ca123d8093bd7917694f6da7
+35b585759cbf29f8ec428ef89da20705d59f99ec
+c2bbf9fe8009b22d0f390f3c8c3f13937067590f
+fc9f0643b21cfe571046e27e0c4565f3a1ee96c8
+c088fd6a7e1a38e9d5a9815265cb575bb08d08ff
+5fddbeb678bd2c36c5e5c891ab8f2b143ced5baf
+5d7303c49ac984a9fec60523f2d5297682e16646`, "\n"))
+}
+
+func testDateOrder(c *C, head CommitNode) {
+ iter := NewCommitNodeIterDateOrder(
+ head,
+ nil,
+ nil,
+ )
+
+ var commits []string
+ iter.ForEach(func(c CommitNode) error {
+ commits = append(commits, c.ID().String())
+ return nil
+ })
+
+ c.Assert(commits, DeepEquals, strings.Split(`ec6f456c0e8c7058a29611429965aa05c190b54b
+3048d280d2d5b258d9e582a226ff4bbed34fd5c9
+d82f291cde9987322c8a0c81a325e1ba6159684c
+27aa8cdd2431068606741a589383c02c149ea625
+fa058d42fa3bc53f39108a56dad67157169b2191
+d0a18ccd8eea3bdabc76d6dc5420af1ea30aae9f
+6c629843a1750a27c9af01ed2985f362f619c47a
+cf2874632223220e0445abf0a7806dc772c0b37a
+d10a0e7c1f340a6cfc14540a5f8c508ce7e2eabf
+758ac33217f092bfcded4ad4774954ac054c9609
+214e1dca024fb6da5ed65564d2de734df5dc2127
+70923099e61fa33f0bc5256d2f938fa44c4df10e
+bcaa1ac5644b16f1febb72f31e204720b7bb8934
+e1d8866ffa78fa16d2f39b0ba5344a7269ee5371
+2275fa7d0c75d20103f90b0e1616937d5a9fc5e6
+bdd9a92789d4a86b20a8d3df462df373f41acf23
+b359f11ea09e642695edcd114b463da4395b10c1
+6f43e8933ba3c04072d5d104acc6118aac3e52ee
+ccafe8bd5f9dbfb8b98b0da03ced29608dcfdeec
+939814f341fdd5d35e81a3845a33c4fedb19d2d2
+5f5ad88bf2babe506f927d64d2b7a1e1493dc2ae
+a2014124ca3b3f9ff28fbab0a83ce3c71bf4622e
+77906b653c3eb8a1cd5bd7254e161c00c6086d83
+465cba710284204f9851854587c2887c247222db
+b9471b13256703d3f5eb88b280b4a16ce325ec1b
+62925030859646daeeaf5a4d386a0c41e00dda8a
+5f56aea0ca8b74215a5b982bca32236e1e28c76b
+23148841baa5dbce48f6adcb7ddf83dcd97debb3
+c336d16298a017486c4164c40f8acb28afe64e84
+31eae7b619d166c366bf5df4991f04ba8cebea0a
+b977a025ca21e3b5ca123d8093bd7917694f6da7
+d2a38b4a5965d529566566640519d03d2bd10f6c
+35b585759cbf29f8ec428ef89da20705d59f99ec
+c2bbf9fe8009b22d0f390f3c8c3f13937067590f
+fc9f0643b21cfe571046e27e0c4565f3a1ee96c8
+c088fd6a7e1a38e9d5a9815265cb575bb08d08ff
+5fddbeb678bd2c36c5e5c891ab8f2b143ced5baf
+5d7303c49ac984a9fec60523f2d5297682e16646`, "\n"))
+}
+
+func testAuthorDateOrder(c *C, head CommitNode) {
+ iter := NewCommitNodeIterAuthorDateOrder(
+ head,
+ nil,
+ nil,
+ )
+
+ var commits []string
+ iter.ForEach(func(c CommitNode) error {
+ commits = append(commits, c.ID().String())
+ return nil
+ })
+
+ c.Assert(commits, DeepEquals, strings.Split(`ec6f456c0e8c7058a29611429965aa05c190b54b
+3048d280d2d5b258d9e582a226ff4bbed34fd5c9
+d82f291cde9987322c8a0c81a325e1ba6159684c
+27aa8cdd2431068606741a589383c02c149ea625
+fa058d42fa3bc53f39108a56dad67157169b2191
+d0a18ccd8eea3bdabc76d6dc5420af1ea30aae9f
+6c629843a1750a27c9af01ed2985f362f619c47a
+cf2874632223220e0445abf0a7806dc772c0b37a
+d10a0e7c1f340a6cfc14540a5f8c508ce7e2eabf
+758ac33217f092bfcded4ad4774954ac054c9609
+214e1dca024fb6da5ed65564d2de734df5dc2127
+70923099e61fa33f0bc5256d2f938fa44c4df10e
+bcaa1ac5644b16f1febb72f31e204720b7bb8934
+e1d8866ffa78fa16d2f39b0ba5344a7269ee5371
+2275fa7d0c75d20103f90b0e1616937d5a9fc5e6
+bdd9a92789d4a86b20a8d3df462df373f41acf23
+b359f11ea09e642695edcd114b463da4395b10c1
+6f43e8933ba3c04072d5d104acc6118aac3e52ee
+ccafe8bd5f9dbfb8b98b0da03ced29608dcfdeec
+939814f341fdd5d35e81a3845a33c4fedb19d2d2
+5f5ad88bf2babe506f927d64d2b7a1e1493dc2ae
+a2014124ca3b3f9ff28fbab0a83ce3c71bf4622e
+77906b653c3eb8a1cd5bd7254e161c00c6086d83
+465cba710284204f9851854587c2887c247222db
+b9471b13256703d3f5eb88b280b4a16ce325ec1b
+5f56aea0ca8b74215a5b982bca32236e1e28c76b
+62925030859646daeeaf5a4d386a0c41e00dda8a
+23148841baa5dbce48f6adcb7ddf83dcd97debb3
+c336d16298a017486c4164c40f8acb28afe64e84
+31eae7b619d166c366bf5df4991f04ba8cebea0a
+b977a025ca21e3b5ca123d8093bd7917694f6da7
+d2a38b4a5965d529566566640519d03d2bd10f6c
+35b585759cbf29f8ec428ef89da20705d59f99ec
+c2bbf9fe8009b22d0f390f3c8c3f13937067590f
+fc9f0643b21cfe571046e27e0c4565f3a1ee96c8
+c088fd6a7e1a38e9d5a9815265cb575bb08d08ff
+5fddbeb678bd2c36c5e5c891ab8f2b143ced5baf
+5d7303c49ac984a9fec60523f2d5297682e16646`, "\n"))
+}
diff --git a/plumbing/object/commitgraph/commitnode_walker_topo_order.go b/plumbing/object/commitgraph/commitnode_walker_topo_order.go
new file mode 100644
index 0000000..29f4bb7
--- /dev/null
+++ b/plumbing/object/commitgraph/commitnode_walker_topo_order.go
@@ -0,0 +1,161 @@
+package commitgraph
+
+import (
+ "io"
+
+ "github.com/go-git/go-git/v5/plumbing"
+ "github.com/go-git/go-git/v5/plumbing/storer"
+
+ "github.com/emirpasic/gods/trees/binaryheap"
+)
+
+type commitNodeIteratorTopological struct {
+ exploreStack commitNodeStackable
+ visitStack commitNodeStackable
+ inCounts map[plumbing.Hash]int
+
+ ignore map[plumbing.Hash]struct{}
+}
+
+// NewCommitNodeIterTopoOrder returns a CommitNodeIter that walks the commit history,
+// starting at the given commit and visiting its parents in a topological order but
+// with the constraint that no parent is emitted before its children are emitted.
+//
+// This matches `git log --topo-order`
+func NewCommitNodeIterTopoOrder(c CommitNode,
+ seenExternal map[plumbing.Hash]bool,
+ ignore []plumbing.Hash,
+) CommitNodeIter {
+ seen := composeIgnores(ignore, seenExternal)
+ inCounts := make(map[plumbing.Hash]int)
+
+ heap := &commitNodeHeap{binaryheap.NewWith(generationAndDateOrderComparator)}
+ heap.Push(c)
+
+ lifo := &commitNodeLifo{make([]CommitNode, 0, 8)}
+ lifo.Push(c)
+
+ return &commitNodeIteratorTopological{
+ exploreStack: heap,
+ visitStack: lifo,
+ inCounts: inCounts,
+ ignore: seen,
+ }
+}
+
+func (iter *commitNodeIteratorTopological) Next() (CommitNode, error) {
+ var next CommitNode
+ for {
+ var ok bool
+ next, ok = iter.visitStack.Pop()
+ if !ok {
+ return nil, io.EOF
+ }
+
+ if iter.inCounts[next.ID()] == 0 {
+ break
+ }
+ }
+
+ minimumLevel, generationV2 := next.GenerationV2(), true
+ if minimumLevel == 0 {
+ minimumLevel, generationV2 = next.Generation(), false
+ }
+
+ parents := make([]CommitNode, 0, len(next.ParentHashes()))
+ for i := range next.ParentHashes() {
+ pc, err := next.ParentNode(i)
+ if err != nil {
+ return nil, err
+ }
+
+ parents = append(parents, pc)
+
+ if generationV2 {
+ if pc.GenerationV2() < minimumLevel {
+ minimumLevel = pc.GenerationV2()
+ }
+ continue
+ }
+
+ if pc.Generation() < minimumLevel {
+ minimumLevel = pc.Generation()
+ }
+ }
+
+ // EXPLORE
+ for {
+ toExplore, ok := iter.exploreStack.Peek()
+ if !ok {
+ break
+ }
+
+ if toExplore.ID() != next.ID() && iter.exploreStack.Size() == 1 {
+ break
+ }
+ if generationV2 {
+ if toExplore.GenerationV2() < minimumLevel {
+ break
+ }
+ } else {
+ if toExplore.Generation() < minimumLevel {
+ break
+ }
+ }
+
+ iter.exploreStack.Pop()
+ for i, h := range toExplore.ParentHashes() {
+ if _, has := iter.ignore[h]; has {
+ continue
+ }
+ iter.inCounts[h]++
+
+ if iter.inCounts[h] == 1 {
+ pc, err := toExplore.ParentNode(i)
+ if err != nil {
+ return nil, err
+ }
+ iter.exploreStack.Push(pc)
+ }
+ }
+ }
+
+ // VISIT
+ for i, h := range next.ParentHashes() {
+ if _, has := iter.ignore[h]; has {
+ continue
+ }
+ iter.inCounts[h]--
+
+ if iter.inCounts[h] == 0 {
+ iter.visitStack.Push(parents[i])
+ }
+ }
+ delete(iter.inCounts, next.ID())
+
+ return next, nil
+}
+
+func (iter *commitNodeIteratorTopological) ForEach(cb func(CommitNode) error) error {
+ for {
+ obj, err := iter.Next()
+ if err != nil {
+ if err == io.EOF {
+ return nil
+ }
+
+ return err
+ }
+
+ if err := cb(obj); err != nil {
+ if err == storer.ErrStop {
+ return nil
+ }
+
+ return err
+ }
+ }
+}
+
+func (iter *commitNodeIteratorTopological) Close() {
+}