Diffstat (limited to 'plumbing')
-rw-r--r-- plumbing/filemode/filemode.go | 6
-rw-r--r-- plumbing/filemode/filemode_test.go | 2
-rw-r--r-- plumbing/format/commitgraph/encoder.go | 14
-rw-r--r-- plumbing/format/commitgraph/file.go | 2
-rw-r--r-- plumbing/format/commitgraph/memory.go | 2
-rw-r--r-- plumbing/format/diff/unified_encoder.go | 25
-rw-r--r-- plumbing/format/diff/unified_encoder_test.go | 90
-rw-r--r-- plumbing/format/gitattributes/pattern.go | 2
-rw-r--r-- plumbing/format/idxfile/decoder.go | 6
-rw-r--r-- plumbing/format/idxfile/writer.go | 2
-rw-r--r-- plumbing/format/index/decoder_test.go | 2
-rw-r--r-- plumbing/format/index/doc.go | 4
-rw-r--r-- plumbing/format/index/encoder_test.go | 6
-rw-r--r-- plumbing/format/index/index.go | 2
-rw-r--r-- plumbing/format/packfile/diff_delta.go | 13
-rw-r--r-- plumbing/format/packfile/packfile.go | 8
-rw-r--r-- plumbing/format/packfile/parser.go | 99
-rw-r--r-- plumbing/format/packfile/patch_delta.go | 53
-rw-r--r-- plumbing/format/packfile/scanner_test.go | 1
-rw-r--r-- plumbing/hash.go | 2
-rw-r--r-- plumbing/object/commit_stats_test.go | 8
-rw-r--r-- plumbing/object/commit_walker_bfs_filtered.go | 176
-rw-r--r-- plumbing/object/commit_walker_bfs_filtered_test.go | 256
-rw-r--r-- plumbing/object/commit_walker_limit.go | 65
-rw-r--r-- plumbing/object/commit_walker_path.go (renamed from plumbing/object/commit_walker_file.go) | 40
-rw-r--r-- plumbing/object/merge_base.go | 210
-rw-r--r-- plumbing/object/merge_base_test.go | 323
-rw-r--r-- plumbing/object/object.go | 18
-rw-r--r-- plumbing/object/patch.go | 2
-rw-r--r-- plumbing/object/patch_test.go | 1
-rw-r--r-- plumbing/object/tree.go | 4
-rw-r--r-- plumbing/protocol/packp/advrefs.go | 2
-rw-r--r-- plumbing/protocol/packp/advrefs_decode.go | 4
-rw-r--r-- plumbing/protocol/packp/capability/list.go | 6
-rw-r--r-- plumbing/protocol/packp/capability/list_test.go | 2
-rw-r--r-- plumbing/protocol/packp/ulreq.go | 4
-rw-r--r-- plumbing/protocol/packp/ulreq_encode.go | 4
-rw-r--r-- plumbing/protocol/packp/updreq_decode.go | 12
-rw-r--r-- plumbing/protocol/packp/uppackreq.go | 4
-rw-r--r-- plumbing/storer/object.go | 4
-rw-r--r-- plumbing/transport/http/common.go | 10
-rw-r--r-- plumbing/transport/http/common_test.go | 2
-rw-r--r-- plumbing/transport/internal/common/common.go | 2
-rw-r--r-- plumbing/transport/internal/common/common_test.go | 2
-rw-r--r-- plumbing/transport/server/server.go | 5
-rw-r--r-- plumbing/transport/ssh/auth_method.go | 2
46 files changed, 1313 insertions(+), 196 deletions(-)
diff --git a/plumbing/filemode/filemode.go b/plumbing/filemode/filemode.go
index 0994bc4..594984f 100644
--- a/plumbing/filemode/filemode.go
+++ b/plumbing/filemode/filemode.go
@@ -32,10 +32,10 @@ const (
Regular FileMode = 0100644
// Deprecated represent non-executable files with the group writable
// bit set. This mode was supported by the first versions of git,
- // but it has been deprecatred nowadays. This library uses them
+ // but it has been deprecated nowadays. This library uses them
// internally, so you can read old packfiles, but will treat them as
// Regulars when interfacing with the outside world. This is the
- // standard git behaviuor.
+ // standard git behaviour.
Deprecated FileMode = 0100664
// Executable represents executable files.
Executable FileMode = 0100755
@@ -152,7 +152,7 @@ func (m FileMode) IsRegular() bool {
}
// IsFile returns if the FileMode represents that of a file, this is,
-// Regular, Deprecated, Excutable or Link.
+// Regular, Deprecated, Executable or Link.
func (m FileMode) IsFile() bool {
return m == Regular ||
m == Deprecated ||
diff --git a/plumbing/filemode/filemode_test.go b/plumbing/filemode/filemode_test.go
index 299c96a..8d713f6 100644
--- a/plumbing/filemode/filemode_test.go
+++ b/plumbing/filemode/filemode_test.go
@@ -126,7 +126,7 @@ func (s *ModeSuite) TestNewFromOsFileModeExclusive(c *C) {
}
func (s *ModeSuite) TestNewFromOsFileModeTemporary(c *C) {
- // temporaty files are ignored
+ // temporary files are ignored
fixture{
input: os.FileMode(0644) | os.ModeTemporary, // Trw-r--r--
expected: Empty, err: "no equivalent.*",
diff --git a/plumbing/format/commitgraph/encoder.go b/plumbing/format/commitgraph/encoder.go
index a06871c..615e833 100644
--- a/plumbing/format/commitgraph/encoder.go
+++ b/plumbing/format/commitgraph/encoder.go
@@ -24,8 +24,6 @@ func NewEncoder(w io.Writer) *Encoder {
// Encode writes an index into the commit-graph file
func (e *Encoder) Encode(idx Index) error {
- var err error
-
// Get all the hashes in the input index
hashes := idx.Hashes()
@@ -39,26 +37,26 @@ func (e *Encoder) Encode(idx Index) error {
chunkSizes = append(chunkSizes, uint64(extraEdgesCount)*4)
}
- if err = e.encodeFileHeader(len(chunkSignatures)); err != nil {
+ if err := e.encodeFileHeader(len(chunkSignatures)); err != nil {
return err
}
- if err = e.encodeChunkHeaders(chunkSignatures, chunkSizes); err != nil {
+ if err := e.encodeChunkHeaders(chunkSignatures, chunkSizes); err != nil {
return err
}
- if err = e.encodeFanout(fanout); err != nil {
+ if err := e.encodeFanout(fanout); err != nil {
return err
}
- if err = e.encodeOidLookup(hashes); err != nil {
+ if err := e.encodeOidLookup(hashes); err != nil {
return err
}
if extraEdges, err := e.encodeCommitData(hashes, hashToIndex, idx); err == nil {
if err = e.encodeExtraEdges(extraEdges); err != nil {
return err
}
- }
- if err != nil {
+ } else {
return err
}
+
return e.encodeChecksum()
}
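The restructuring above also fixes a subtle bug: in the old code, the `err` declared with `:=` inside the `if extraEdges, err := ...` statement shadowed the outer `err`, so the trailing `if err != nil` check always saw a nil value and errors from `encodeCommitData` were silently dropped. A minimal sketch of the pitfall, with hypothetical names:

```go
package main

import "errors"

func step() (int, error) { return 0, errors.New("boom") }

// encodeOld mirrors the shape of the old Encode: the err declared with
// := inside the if statement shadows the outer err, so the trailing
// check never sees step's failure.
func encodeOld() error {
	var err error
	if v, err := step(); err == nil {
		_ = v // happy path only
	}
	if err != nil { // always nil: this is the outer err, not step's
		return err
	}
	return nil // step's error has been lost
}

func main() {
	println(encodeOld() == nil) // prints true: the failure went unnoticed
}
```

Replacing the dangling check with an `else { return err }` on the same `if` statement, as the hunk does, keeps the error in scope where it is handled.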
diff --git a/plumbing/format/commitgraph/file.go b/plumbing/format/commitgraph/file.go
index 175d279..1f82abd 100644
--- a/plumbing/format/commitgraph/file.go
+++ b/plumbing/format/commitgraph/file.go
@@ -249,7 +249,7 @@ func (fi *fileIndex) getHashesFromIndexes(indexes []int) ([]plumbing.Hash, error
// Hashes returns all the hashes that are available in the index
func (fi *fileIndex) Hashes() []plumbing.Hash {
hashes := make([]plumbing.Hash, fi.fanout[0xff])
- for i := 0; i < int(fi.fanout[0xff]); i++ {
+ for i := 0; i < fi.fanout[0xff]; i++ {
offset := fi.oidLookupOffset + int64(i)*20
if n, err := fi.reader.ReadAt(hashes[i][:], offset); err != nil || n < 20 {
return nil
diff --git a/plumbing/format/commitgraph/memory.go b/plumbing/format/commitgraph/memory.go
index a4a96e9..f5afd4c 100644
--- a/plumbing/format/commitgraph/memory.go
+++ b/plumbing/format/commitgraph/memory.go
@@ -31,7 +31,7 @@ func (mi *MemoryIndex) GetIndexByHash(h plumbing.Hash) (int, error) {
// GetCommitDataByIndex gets the commit node from the commit graph using index
// obtained from child node, if available
func (mi *MemoryIndex) GetCommitDataByIndex(i int) (*CommitData, error) {
- if int(i) >= len(mi.commitData) {
+ if i >= len(mi.commitData) {
return nil, plumbing.ErrObjectNotFound
}
diff --git a/plumbing/format/diff/unified_encoder.go b/plumbing/format/diff/unified_encoder.go
index 8bd6d8a..ce3bc7c 100644
--- a/plumbing/format/diff/unified_encoder.go
+++ b/plumbing/format/diff/unified_encoder.go
@@ -4,6 +4,7 @@ import (
"bytes"
"fmt"
"io"
+ "regexp"
"strings"
"gopkg.in/src-d/go-git.v4/plumbing"
@@ -25,9 +26,10 @@ const (
tPath = "+++ %s\n"
binary = "Binary files %s and %s differ\n"
- addLine = "+%s\n"
- deleteLine = "-%s\n"
- equalLine = " %s\n"
+ addLine = "+%s%s"
+ deleteLine = "-%s%s"
+ equalLine = " %s%s"
+ noNewLine = "\n\\ No newline at end of file\n"
oldMode = "old mode %o\n"
newMode = "new mode %o\n"
@@ -94,7 +96,7 @@ func (e *UnifiedEncoder) printMessage(message string) {
isEmpty := message == ""
hasSuffix := strings.HasSuffix(message, "\n")
if !isEmpty && !hasSuffix {
- message = message + "\n"
+ message += "\n"
}
e.buf.WriteString(message)
@@ -216,7 +218,7 @@ func (c *hunksGenerator) processHunk(i int, op Operation) {
linesBefore = c.ctxLines
}
- c.current = &hunk{ctxPrefix: ctxPrefix}
+ c.current = &hunk{ctxPrefix: strings.TrimSuffix(ctxPrefix, "\n")}
c.current.AddOp(Equal, c.beforeContext...)
switch op {
@@ -279,12 +281,13 @@ func (c *hunksGenerator) processEqualsLines(ls []string, i int) {
}
}
+var splitLinesRE = regexp.MustCompile(`[^\n]*(\n|$)`)
+
func splitLines(s string) []string {
- out := strings.Split(s, "\n")
+ out := splitLinesRE.FindAllString(s, -1)
if out[len(out)-1] == "" {
out = out[:len(out)-1]
}
-
return out
}
@@ -346,7 +349,7 @@ type op struct {
}
func (o *op) String() string {
- var prefix string
+ var prefix, suffix string
switch o.t {
case Add:
prefix = addLine
@@ -355,6 +358,10 @@ func (o *op) String() string {
case Equal:
prefix = equalLine
}
+ n := len(o.text)
+ if n > 0 && o.text[n-1] != '\n' {
+ suffix = noNewLine
+ }
- return fmt.Sprintf(prefix, o.text)
+ return fmt.Sprintf(prefix, o.text, suffix)
}
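The new splitLines keeps each line's trailing newline, which is what lets op.String detect an unterminated final line and emit the `\ No newline at end of file` marker. A small standalone sketch of the behavior (not go-git's actual file):

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as the patch: every match is one line including its
// trailing "\n", or the final unterminated line.
var splitLinesRE = regexp.MustCompile(`[^\n]*(\n|$)`)

func splitLines(s string) []string {
	out := splitLinesRE.FindAllString(s, -1)
	if out[len(out)-1] == "" {
		out = out[:len(out)-1] // drop the empty match after a final "\n"
	}
	return out
}

func main() {
	fmt.Printf("%q\n", splitLines("a\nb\n")) // ["a\n" "b\n"]
	fmt.Printf("%q\n", splitLines("a\nb"))   // ["a\n" "b"]: no final newline
}
```

The old `strings.Split(s, "\n")` discarded the separators, so the encoder could not tell a terminated line from an unterminated one.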
diff --git a/plumbing/format/diff/unified_encoder_test.go b/plumbing/format/diff/unified_encoder_test.go
index 7736af1..091a96a 100644
--- a/plumbing/format/diff/unified_encoder_test.go
+++ b/plumbing/format/diff/unified_encoder_test.go
@@ -83,7 +83,7 @@ var oneChunkPatch Patch = testPatch{
content: "A\n",
op: Delete,
}, {
- content: "B\nC\nD\nE\nF\nG",
+ content: "B\nC\nD\nE\nF\nG\n",
op: Equal,
}, {
content: "H\n",
@@ -125,7 +125,7 @@ var oneChunkPatchInverted Patch = testPatch{
content: "A\n",
op: Add,
}, {
- content: "B\nC\nD\nE\nF\nG",
+ content: "B\nC\nD\nE\nF\nG\n",
op: Equal,
}, {
content: "H\n",
@@ -164,13 +164,13 @@ var fixtures []*fixture = []*fixture{{
seed: "hello\nbug\n",
},
chunks: []testChunk{{
- content: "hello",
+ content: "hello\n",
op: Equal,
}, {
- content: "world",
+ content: "world\n",
op: Delete,
}, {
- content: "bug",
+ content: "bug\n",
op: Add,
}},
}},
@@ -239,18 +239,18 @@ rename to test1.txt
from: &testFile{
mode: filemode.Regular,
path: "test.txt",
- seed: "test",
+ seed: "test\n",
},
to: &testFile{
mode: filemode.Regular,
path: "test1.txt",
- seed: "test1",
+ seed: "test1\n",
},
chunks: []testChunk{{
- content: "test",
+ content: "test\n",
op: Delete,
}, {
- content: "test1",
+ content: "test1\n",
op: Add,
}},
}},
@@ -260,7 +260,7 @@ rename to test1.txt
diff: `diff --git a/test.txt b/test1.txt
rename from test.txt
rename to test1.txt
-index 30d74d258442c7c65512eafab474568dd706c430..f079749c42ffdcc5f52ed2d3a6f15b09307e975e 100644
+index 9daeafb9864cf43055ae93beb0afd6c7d144bfa4..a5bce3fd2565d8f458555a0c6f42d0504a848bd5 100644
--- a/test.txt
+++ b/test1.txt
@@ -1 +1 @@
@@ -299,19 +299,19 @@ rename to test1.txt
from: &testFile{
mode: filemode.Regular,
path: "test.txt",
- seed: "test",
+ seed: "test\n",
},
to: &testFile{
mode: filemode.Regular,
path: "test.txt",
- seed: "test2",
+ seed: "test2\n",
},
chunks: []testChunk{{
- content: "test",
+ content: "test\n",
op: Delete,
}, {
- content: "test2",
+ content: "test2\n",
op: Add,
}},
}},
@@ -320,7 +320,7 @@ rename to test1.txt
desc: "one line change",
context: 1,
diff: `diff --git a/test.txt b/test.txt
-index 30d74d258442c7c65512eafab474568dd706c430..d606037cb232bfda7788a8322492312d55b2ae9d 100644
+index 9daeafb9864cf43055ae93beb0afd6c7d144bfa4..180cf8328022becee9aaa2577a8f84ea2b9f3827 100644
--- a/test.txt
+++ b/test.txt
@@ -1 +1 @@
@@ -334,19 +334,19 @@ index 30d74d258442c7c65512eafab474568dd706c430..d606037cb232bfda7788a8322492312d
from: &testFile{
mode: filemode.Regular,
path: "test.txt",
- seed: "test",
+ seed: "test\n",
},
to: &testFile{
mode: filemode.Regular,
path: "test.txt",
- seed: "test2",
+ seed: "test2\n",
},
chunks: []testChunk{{
- content: "test",
+ content: "test\n",
op: Delete,
}, {
- content: "test2",
+ content: "test2\n",
op: Add,
}},
}},
@@ -356,7 +356,7 @@ index 30d74d258442c7c65512eafab474568dd706c430..d606037cb232bfda7788a8322492312d
context: 1,
diff: `this is the message
diff --git a/test.txt b/test.txt
-index 30d74d258442c7c65512eafab474568dd706c430..d606037cb232bfda7788a8322492312d55b2ae9d 100644
+index 9daeafb9864cf43055ae93beb0afd6c7d144bfa4..180cf8328022becee9aaa2577a8f84ea2b9f3827 100644
--- a/test.txt
+++ b/test.txt
@@ -1 +1 @@
@@ -397,7 +397,9 @@ index 30d74d258442c7c65512eafab474568dd706c430..d606037cb232bfda7788a8322492312d
+++ b/test.txt
@@ -1 +1 @@
-test
+\ No newline at end of file
+test2
+\ No newline at end of file
`,
}, {
patch: testPatch{
@@ -407,7 +409,7 @@ index 30d74d258442c7c65512eafab474568dd706c430..d606037cb232bfda7788a8322492312d
to: &testFile{
mode: filemode.Regular,
path: "new.txt",
- seed: "test\ntest2\test3",
+ seed: "test\ntest2\ntest3",
},
chunks: []testChunk{{
@@ -421,13 +423,14 @@ index 30d74d258442c7c65512eafab474568dd706c430..d606037cb232bfda7788a8322492312d
context: 1,
diff: `diff --git a/new.txt b/new.txt
new file mode 100644
-index 0000000000000000000000000000000000000000..65c8dd02a42273038658a22b1cb29c8d9457ca12
+index 0000000000000000000000000000000000000000..3ceaab5442b64a0c2b33dd25fae67ccdb4fd1ea8
--- /dev/null
+++ b/new.txt
@@ -0,0 +1,3 @@
+test
+test2
+test3
+\ No newline at end of file
`,
}, {
patch: testPatch{
@@ -456,6 +459,7 @@ index 30d74d258442c7c65512eafab474568dd706c430..00000000000000000000000000000000
+++ /dev/null
@@ -1 +0,0 @@
-test
+\ No newline at end of file
`,
}, {
patch: oneChunkPatch,
@@ -548,6 +552,7 @@ index ab5eed5d4a2c33aeef67e0188ee79bed666bde6f..0adddcde4fd38042c354518351820eb0
X
Y
Z
+\ No newline at end of file
`,
}, {
patch: oneChunkPatch,
@@ -813,6 +818,47 @@ index 0adddcde4fd38042c354518351820eb06c417c82..553ae669c7a9303cf848fcc749a25692
+++ b/onechunk.txt
@@ -23 +22,0 @@ Y
-Z
+\ No newline at end of file
+`,
+}, {
+ patch: testPatch{
+ message: "",
+ filePatches: []testFilePatch{{
+ from: &testFile{
+ mode: filemode.Regular,
+ path: "onechunk.txt",
+ seed: "B\nC\nD\nE\nF\nG\nI\nJ\nK\nL\nM\nN\nO\nP\nQ\nR\nS\nT\nV\nW\nX\nY\nZ",
+ },
+ to: &testFile{
+ mode: filemode.Regular,
+ path: "onechunk.txt",
+ seed: "B\nC\nD\nE\nF\nG\nI\nJ\nK\nL\nM\nN\nO\nP\nQ\nR\nS\nT\nV\nW\nX\nY",
+ },
+
+ chunks: []testChunk{{
+ content: "B\nC\nD\nE\nF\nG\nI\nJ\nK\nL\nM\nN\nO\nP\nQ\nR\nS\nT\nV\nW\nX\n",
+ op: Equal,
+ }, {
+ content: "Y\nZ",
+ op: Delete,
+ }, {
+ content: "Y",
+ op: Add,
+ }},
+ }},
+ },
+ desc: "remove last letter and no newline at end of file",
+ context: 0,
+ diff: `diff --git a/onechunk.txt b/onechunk.txt
+index 0adddcde4fd38042c354518351820eb06c417c82..d39ae38aad7ba9447b5e7998b2e4714f26c9218d 100644
+--- a/onechunk.txt
++++ b/onechunk.txt
+@@ -22,2 +21 @@ X
+-Y
+-Z
+\ No newline at end of file
++Y
+\ No newline at end of file
`,
}}
diff --git a/plumbing/format/gitattributes/pattern.go b/plumbing/format/gitattributes/pattern.go
index c5ca0c7..d961aba 100644
--- a/plumbing/format/gitattributes/pattern.go
+++ b/plumbing/format/gitattributes/pattern.go
@@ -66,7 +66,7 @@ func (p *pattern) Match(path []string) bool {
doublestar = true
}
- switch true {
+ switch {
case strings.Contains(pattern[0], "**"):
return false
diff --git a/plumbing/format/idxfile/decoder.go b/plumbing/format/idxfile/decoder.go
index 5b92782..d1a8a2c 100644
--- a/plumbing/format/idxfile/decoder.go
+++ b/plumbing/format/idxfile/decoder.go
@@ -12,7 +12,7 @@ import (
var (
// ErrUnsupportedVersion is returned by Decode when the idx file version
// is not supported.
- ErrUnsupportedVersion = errors.New("Unsuported version")
+ ErrUnsupportedVersion = errors.New("Unsupported version")
// ErrMalformedIdxFile is returned by Decode when the idx file is corrupted.
ErrMalformedIdxFile = errors.New("Malformed IDX file")
)
@@ -110,10 +110,6 @@ func readObjectNames(idx *MemoryIndex, r io.Reader) error {
continue
}
- if buckets < 0 {
- return ErrMalformedIdxFile
- }
-
idx.FanoutMapping[k] = len(idx.Names)
nameLen := int(buckets * objectIDLength)
diff --git a/plumbing/format/idxfile/writer.go b/plumbing/format/idxfile/writer.go
index aa919e7..fcc78c5 100644
--- a/plumbing/format/idxfile/writer.go
+++ b/plumbing/format/idxfile/writer.go
@@ -147,7 +147,7 @@ func (w *Writer) createIndex() (*MemoryIndex, error) {
idx.Offset32[bucket] = append(idx.Offset32[bucket], buf.Bytes()...)
buf.Truncate(0)
- binary.WriteUint32(buf, uint32(o.CRC32))
+ binary.WriteUint32(buf, o.CRC32)
idx.CRC32[bucket] = append(idx.CRC32[bucket], buf.Bytes()...)
}
diff --git a/plumbing/format/index/decoder_test.go b/plumbing/format/index/decoder_test.go
index 7468ad0..92d312d 100644
--- a/plumbing/format/index/decoder_test.go
+++ b/plumbing/format/index/decoder_test.go
@@ -115,7 +115,7 @@ func (s *IndexSuite) TestDecodeMergeConflict(c *C) {
{TheirMode, "14f8e368114f561c38e134f6e68ea6fea12d77ed"},
}
- // stagged files
+ // staged files
for i, e := range idx.Entries[4:7] {
c.Assert(e.Stage, Equals, expected[i].Stage)
c.Assert(e.CreatedAt.IsZero(), Equals, true)
diff --git a/plumbing/format/index/doc.go b/plumbing/format/index/doc.go
index f2b3d76..39ae6ad 100644
--- a/plumbing/format/index/doc.go
+++ b/plumbing/format/index/doc.go
@@ -320,7 +320,7 @@
// == End of Index Entry
//
// The End of Index Entry (EOIE) is used to locate the end of the variable
-// length index entries and the begining of the extensions. Code can take
+// length index entries and the beginning of the extensions. Code can take
// advantage of this to quickly locate the index extensions without having
// to parse through all of the index entries.
//
@@ -353,7 +353,7 @@
//
// - A number of index offset entries each consisting of:
//
-// - 32-bit offset from the begining of the file to the first cache entry
+// - 32-bit offset from the beginning of the file to the first cache entry
// in this block of entries.
//
// - 32-bit count of cache entries in this block
package index
diff --git a/plumbing/format/index/encoder_test.go b/plumbing/format/index/encoder_test.go
index 78cbbba..ea121fc 100644
--- a/plumbing/format/index/encoder_test.go
+++ b/plumbing/format/index/encoder_test.go
@@ -55,7 +55,7 @@ func (s *IndexSuite) TestEncode(c *C) {
}
-func (s *IndexSuite) TestEncodeUnsuportedVersion(c *C) {
+func (s *IndexSuite) TestEncodeUnsupportedVersion(c *C) {
idx := &Index{Version: 3}
buf := bytes.NewBuffer(nil)
@@ -64,7 +64,7 @@ func (s *IndexSuite) TestEncodeUnsuportedVersion(c *C) {
c.Assert(err, Equals, ErrUnsupportedVersion)
}
-func (s *IndexSuite) TestEncodeWithIntentToAddUnsuportedVersion(c *C) {
+func (s *IndexSuite) TestEncodeWithIntentToAddUnsupportedVersion(c *C) {
idx := &Index{
Version: 2,
Entries: []*Entry{{IntentToAdd: true}},
@@ -76,7 +76,7 @@ func (s *IndexSuite) TestEncodeWithIntentToAddUnsuportedVersion(c *C) {
c.Assert(err, Equals, ErrUnsupportedVersion)
}
-func (s *IndexSuite) TestEncodeWithSkipWorktreeUnsuportedVersion(c *C) {
+func (s *IndexSuite) TestEncodeWithSkipWorktreeUnsupportedVersion(c *C) {
idx := &Index{
Version: 2,
Entries: []*Entry{{SkipWorktree: true}},
diff --git a/plumbing/format/index/index.go b/plumbing/format/index/index.go
index 6c4b7ca..6653c91 100644
--- a/plumbing/format/index/index.go
+++ b/plumbing/format/index/index.go
@@ -198,7 +198,7 @@ type ResolveUndoEntry struct {
}
// EndOfIndexEntry is the End of Index Entry (EOIE) is used to locate the end of
-// the variable length index entries and the begining of the extensions. Code
+// the variable length index entries and the beginning of the extensions. Code
// can take advantage of this to quickly locate the index extensions without
// having to parse through all of the index entries.
//
diff --git a/plumbing/format/packfile/diff_delta.go b/plumbing/format/packfile/diff_delta.go
index d35e78a..43f87a0 100644
--- a/plumbing/format/packfile/diff_delta.go
+++ b/plumbing/format/packfile/diff_delta.go
@@ -40,8 +40,8 @@ func getDelta(index *deltaIndex, base, target plumbing.EncodedObject) (plumbing.
defer tr.Close()
bb := bufPool.Get().(*bytes.Buffer)
- bb.Reset()
defer bufPool.Put(bb)
+ bb.Reset()
_, err = bb.ReadFrom(br)
if err != nil {
@@ -49,8 +49,8 @@ func getDelta(index *deltaIndex, base, target plumbing.EncodedObject) (plumbing.
}
tb := bufPool.Get().(*bytes.Buffer)
- tb.Reset()
defer bufPool.Put(tb)
+ tb.Reset()
_, err = tb.ReadFrom(tr)
if err != nil {
@@ -77,6 +77,7 @@ func DiffDelta(src, tgt []byte) []byte {
func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte {
buf := bufPool.Get().(*bytes.Buffer)
+ defer bufPool.Put(buf)
buf.Reset()
buf.Write(deltaEncodeSize(len(src)))
buf.Write(deltaEncodeSize(len(tgt)))
@@ -86,6 +87,7 @@ func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte {
}
ibuf := bufPool.Get().(*bytes.Buffer)
+ defer bufPool.Put(ibuf)
ibuf.Reset()
for i := 0; i < len(tgt); i++ {
offset, l := index.findMatch(src, tgt, i)
@@ -127,12 +129,9 @@ func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte {
}
encodeInsertOperation(ibuf, buf)
- bytes := buf.Bytes()
-
- bufPool.Put(buf)
- bufPool.Put(ibuf)
- return bytes
+ // buf.Bytes() is only valid until the next modifying operation on the buffer. Copy it.
+ return append([]byte{}, buf.Bytes()...)
}
func encodeInsertOperation(ibuf, buf *bytes.Buffer) {
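These hunks all converge on one discipline for pooled buffers: defer the Put immediately after Get so the buffer returns to the pool on every exit path, and copy the bytes out before returning, because `buf.Bytes()` aliases memory that the next Get may reuse. A standalone sketch of the pattern (the pool and helper here are illustrative, not go-git's declarations):

```go
package main

import (
	"bytes"
	"fmt"
	"sync"
)

var bufPool = sync.Pool{
	New: func() interface{} { return new(bytes.Buffer) },
}

// withPooledBuffer returns the buffer to the pool on every path and
// copies the result, since the pooled memory may be overwritten later.
func withPooledBuffer(fill func(*bytes.Buffer)) []byte {
	buf := bufPool.Get().(*bytes.Buffer)
	defer bufPool.Put(buf)
	buf.Reset()
	fill(buf)
	return append([]byte{}, buf.Bytes()...) // copy out of pooled memory
}

func main() {
	out := withPooledBuffer(func(b *bytes.Buffer) { b.WriteString("delta") })
	fmt.Printf("%s\n", out)
}
```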
diff --git a/plumbing/format/packfile/packfile.go b/plumbing/format/packfile/packfile.go
index f528073..21a15de 100644
--- a/plumbing/format/packfile/packfile.go
+++ b/plumbing/format/packfile/packfile.go
@@ -133,8 +133,8 @@ func (p *Packfile) getObjectSize(h *ObjectHeader) (int64, error) {
return h.Length, nil
case plumbing.REFDeltaObject, plumbing.OFSDeltaObject:
buf := bufPool.Get().(*bytes.Buffer)
- buf.Reset()
defer bufPool.Put(buf)
+ buf.Reset()
if _, _, err := p.s.NextObject(buf); err != nil {
return 0, err
@@ -222,11 +222,11 @@ func (p *Packfile) getNextObject(h *ObjectHeader, hash plumbing.Hash) (plumbing.
// optimization only if the expanded version of the object still meets
// the small object threshold condition.
buf := bufPool.Get().(*bytes.Buffer)
+ defer bufPool.Put(buf)
buf.Reset()
if _, _, err := p.s.NextObject(buf); err != nil {
return nil, err
}
- defer bufPool.Put(buf)
size = p.getDeltaObjectSize(buf)
if size <= smallObjectThreshold {
@@ -321,12 +321,12 @@ func (p *Packfile) fillRegularObjectContent(obj plumbing.EncodedObject) error {
func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plumbing.Hash) error {
buf := bufPool.Get().(*bytes.Buffer)
+ defer bufPool.Put(buf)
buf.Reset()
_, _, err := p.s.NextObject(buf)
if err != nil {
return err
}
- defer bufPool.Put(buf)
return p.fillREFDeltaObjectContentWithBuffer(obj, ref, buf)
}
@@ -351,12 +351,12 @@ func (p *Packfile) fillREFDeltaObjectContentWithBuffer(obj plumbing.EncodedObjec
func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset int64) error {
buf := bufPool.Get().(*bytes.Buffer)
+ defer bufPool.Put(buf)
buf.Reset()
_, _, err := p.s.NextObject(buf)
if err != nil {
return err
}
- defer bufPool.Put(buf)
return p.fillOFSDeltaObjectContentWithBuffer(obj, offset, buf)
}
diff --git a/plumbing/format/packfile/parser.go b/plumbing/format/packfile/parser.go
index 71cbba9..d8c0f75 100644
--- a/plumbing/format/packfile/parser.go
+++ b/plumbing/format/packfile/parser.go
@@ -4,6 +4,7 @@ import (
"bytes"
"errors"
"io"
+ "io/ioutil"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/cache"
@@ -263,11 +264,14 @@ func (p *Parser) indexObjects() error {
}
func (p *Parser) resolveDeltas() error {
+ buf := &bytes.Buffer{}
for _, obj := range p.oi {
- content, err := p.get(obj)
+ buf.Reset()
+ err := p.get(obj, buf)
if err != nil {
return err
}
+ content := buf.Bytes()
if err := p.onInflatedObjectHeader(obj.Type, obj.Length, obj.Offset); err != nil {
return err
@@ -279,7 +283,7 @@ func (p *Parser) resolveDeltas() error {
if !obj.IsDelta() && len(obj.Children) > 0 {
for _, child := range obj.Children {
- if _, err := p.resolveObject(child, content); err != nil {
+ if err := p.resolveObject(ioutil.Discard, child, content); err != nil {
return err
}
}
@@ -294,82 +298,87 @@ func (p *Parser) resolveDeltas() error {
return nil
}
-func (p *Parser) get(o *objectInfo) (b []byte, err error) {
- var ok bool
+func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) error {
if !o.ExternalRef { // skip cache check for placeholder parents
- b, ok = p.cache.Get(o.Offset)
+ b, ok := p.cache.Get(o.Offset)
+ if ok {
+ _, err := buf.Write(b)
+ return err
+ }
}
// If it's not on the cache and is not a delta we can try to find it in the
// storage, if there's one. External refs must enter here.
- if !ok && p.storage != nil && !o.Type.IsDelta() {
+ if p.storage != nil && !o.Type.IsDelta() {
e, err := p.storage.EncodedObject(plumbing.AnyObject, o.SHA1)
if err != nil {
- return nil, err
+ return err
}
o.Type = e.Type()
r, err := e.Reader()
if err != nil {
- return nil, err
- }
-
- b = make([]byte, e.Size())
- if _, err = r.Read(b); err != nil {
- return nil, err
+ return err
}
- }
- if b != nil {
- return b, nil
+ _, err = buf.ReadFrom(io.LimitReader(r, e.Size()))
+ return err
}
if o.ExternalRef {
// we were not able to resolve a ref in a thin pack
- return nil, ErrReferenceDeltaNotFound
+ return ErrReferenceDeltaNotFound
}
- var data []byte
if o.DiskType.IsDelta() {
- base, err := p.get(o.Parent)
+ b := bufPool.Get().(*bytes.Buffer)
+ defer bufPool.Put(b)
+ b.Reset()
+ err := p.get(o.Parent, b)
if err != nil {
- return nil, err
+ return err
}
+ base := b.Bytes()
- data, err = p.resolveObject(o, base)
+ err = p.resolveObject(buf, o, base)
if err != nil {
- return nil, err
+ return err
}
} else {
- data, err = p.readData(o)
+ err := p.readData(buf, o)
if err != nil {
- return nil, err
+ return err
}
}
if len(o.Children) > 0 {
+ data := make([]byte, buf.Len())
+ copy(data, buf.Bytes())
p.cache.Put(o.Offset, data)
}
-
- return data, nil
+ return nil
}
func (p *Parser) resolveObject(
+ w io.Writer,
o *objectInfo,
base []byte,
-) ([]byte, error) {
+) error {
if !o.DiskType.IsDelta() {
- return nil, nil
+ return nil
}
-
- data, err := p.readData(o)
+ buf := bufPool.Get().(*bytes.Buffer)
+ defer bufPool.Put(buf)
+ buf.Reset()
+ err := p.readData(buf, o)
if err != nil {
- return nil, err
+ return err
}
+ data := buf.Bytes()
data, err = applyPatchBase(o, data, base)
if err != nil {
- return nil, err
+ return err
}
if p.storage != nil {
@@ -377,37 +386,35 @@ func (p *Parser) resolveObject(
obj.SetSize(o.Size())
obj.SetType(o.Type)
if _, err := obj.Write(data); err != nil {
- return nil, err
+ return err
}
if _, err := p.storage.SetEncodedObject(obj); err != nil {
- return nil, err
+ return err
}
}
-
- return data, nil
+ _, err = w.Write(data)
+ return err
}
-func (p *Parser) readData(o *objectInfo) ([]byte, error) {
+func (p *Parser) readData(w io.Writer, o *objectInfo) error {
if !p.scanner.IsSeekable && o.DiskType.IsDelta() {
data, ok := p.deltas[o.Offset]
if !ok {
- return nil, ErrDeltaNotCached
+ return ErrDeltaNotCached
}
-
- return data, nil
+ _, err := w.Write(data)
+ return err
}
if _, err := p.scanner.SeekObjectHeader(o.Offset); err != nil {
- return nil, err
+ return err
}
- buf := new(bytes.Buffer)
- if _, _, err := p.scanner.NextObject(buf); err != nil {
- return nil, err
+ if _, _, err := p.scanner.NextObject(w); err != nil {
+ return err
}
-
- return buf.Bytes(), nil
+ return nil
}
func applyPatchBase(ota *objectInfo, data, base []byte) ([]byte, error) {
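The parser refactor replaces []byte-returning helpers with writer-based ones, so the caller decides where the bytes go: resolveDeltas reuses a single buffer across objects, and children resolved only for indexing write to ioutil.Discard. A minimal sketch of the shape (readInto is hypothetical, not the real parser API):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
)

// readInto stands in for the reworked readData/resolveObject: the
// caller owns the destination instead of receiving a fresh slice.
func readInto(w io.Writer, payload []byte) error {
	_, err := w.Write(payload)
	return err
}

func main() {
	var buf bytes.Buffer // reused across calls, like resolveDeltas' buf
	if err := readInto(&buf, []byte("object data")); err != nil {
		panic(err)
	}
	fmt.Println(buf.String())

	// Callers that only need the side effects can discard the bytes.
	_ = readInto(ioutil.Discard, []byte("ignored"))
}
```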
diff --git a/plumbing/format/packfile/patch_delta.go b/plumbing/format/packfile/patch_delta.go
index a972f1c..e1a5141 100644
--- a/plumbing/format/packfile/patch_delta.go
+++ b/plumbing/format/packfile/patch_delta.go
@@ -1,8 +1,9 @@
package packfile
import (
+ "bytes"
"errors"
- "io/ioutil"
+ "io"
"gopkg.in/src-d/go-git.v4/plumbing"
)
@@ -26,19 +27,29 @@ func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) error {
return err
}
- src, err := ioutil.ReadAll(r)
+ buf := bufPool.Get().(*bytes.Buffer)
+ defer bufPool.Put(buf)
+ buf.Reset()
+ _, err = buf.ReadFrom(r)
if err != nil {
return err
}
+ src := buf.Bytes()
- dst, err := PatchDelta(src, delta)
+ dst := bufPool.Get().(*bytes.Buffer)
+ defer bufPool.Put(dst)
+ dst.Reset()
+ err = patchDelta(dst, src, delta)
if err != nil {
return err
}
- target.SetSize(int64(len(dst)))
- _, err = w.Write(dst)
+ target.SetSize(int64(dst.Len()))
+
+ b := byteSlicePool.Get().([]byte)
+ _, err = io.CopyBuffer(w, dst, b)
+ byteSlicePool.Put(b)
return err
}
@@ -51,23 +62,31 @@ var (
// An error will be returned if delta is corrupted (ErrDeltaLen) or an action command
// is not copy from source or copy from delta (ErrDeltaCmd).
func PatchDelta(src, delta []byte) ([]byte, error) {
+ b := &bytes.Buffer{}
+ if err := patchDelta(b, src, delta); err != nil {
+ return nil, err
+ }
+ return b.Bytes(), nil
+}
+
+func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
if len(delta) < deltaSizeMin {
- return nil, ErrInvalidDelta
+ return ErrInvalidDelta
}
srcSz, delta := decodeLEB128(delta)
if srcSz != uint(len(src)) {
- return nil, ErrInvalidDelta
+ return ErrInvalidDelta
}
targetSz, delta := decodeLEB128(delta)
remainingTargetSz := targetSz
var cmd byte
- dest := make([]byte, 0, targetSz)
+ dst.Grow(int(targetSz))
for {
if len(delta) == 0 {
- return nil, ErrInvalidDelta
+ return ErrInvalidDelta
}
cmd = delta[0]
@@ -77,35 +96,35 @@ func PatchDelta(src, delta []byte) ([]byte, error) {
var err error
offset, delta, err = decodeOffset(cmd, delta)
if err != nil {
- return nil, err
+ return err
}
sz, delta, err = decodeSize(cmd, delta)
if err != nil {
- return nil, err
+ return err
}
if invalidSize(sz, targetSz) ||
invalidOffsetSize(offset, sz, srcSz) {
break
}
- dest = append(dest, src[offset:offset+sz]...)
+ dst.Write(src[offset:offset+sz])
remainingTargetSz -= sz
} else if isCopyFromDelta(cmd) {
sz := uint(cmd) // cmd is the size itself
if invalidSize(sz, targetSz) {
- return nil, ErrInvalidDelta
+ return ErrInvalidDelta
}
if uint(len(delta)) < sz {
- return nil, ErrInvalidDelta
+ return ErrInvalidDelta
}
- dest = append(dest, delta[0:sz]...)
+ dst.Write(delta[0:sz])
remainingTargetSz -= sz
delta = delta[sz:]
} else {
- return nil, ErrDeltaCmd
+ return ErrDeltaCmd
}
if remainingTargetSz <= 0 {
@@ -113,7 +132,7 @@ func PatchDelta(src, delta []byte) ([]byte, error) {
}
}
- return dest, nil
+ return nil
}
// Decodes a number encoded as an unsigned LEB128 at the start of some
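The exported PatchDelta keeps its []byte signature by becoming a thin wrapper over the new buffer-based patchDelta, so internal callers such as ApplyDelta can reuse pooled buffers while external callers are unaffected. A sketch of the wrapper pattern with hypothetical names:

```go
package main

import (
	"bytes"
	"fmt"
)

// patchInto stands in for the unexported, buffer-based core; it
// pre-sizes the destination the way patchDelta calls dst.Grow(targetSz).
func patchInto(dst *bytes.Buffer, src, delta []byte) error {
	dst.Grow(len(src) + len(delta))
	dst.Write(src)
	dst.Write(delta)
	return nil
}

// Patch preserves the old slice-returning API on top of the core.
func Patch(src, delta []byte) ([]byte, error) {
	var b bytes.Buffer
	if err := patchInto(&b, src, delta); err != nil {
		return nil, err
	}
	return b.Bytes(), nil
}

func main() {
	out, _ := Patch([]byte("base+"), []byte("delta"))
	fmt.Println(string(out)) // base+delta
}
```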
diff --git a/plumbing/format/packfile/scanner_test.go b/plumbing/format/packfile/scanner_test.go
index a401d6d..3078477 100644
--- a/plumbing/format/packfile/scanner_test.go
+++ b/plumbing/format/packfile/scanner_test.go
@@ -140,6 +140,7 @@ func (s *ScannerSuite) TestReaderReset(c *C) {
p := NewScanner(r)
version, objects, err := p.Header()
+ c.Assert(err, IsNil)
c.Assert(version, Equals, VersionSupported)
c.Assert(objects, Equals, uint32(31))
diff --git a/plumbing/hash.go b/plumbing/hash.go
index 8e60877..637a425 100644
--- a/plumbing/hash.go
+++ b/plumbing/hash.go
@@ -9,7 +9,7 @@ import (
"strconv"
)
-// Hash SHA1 hased content
+// Hash is SHA1 hashed content
type Hash [20]byte
// ZeroHash is Hash with value zero
diff --git a/plumbing/object/commit_stats_test.go b/plumbing/object/commit_stats_test.go
index 2fb3f08..dc9e4ad 100644
--- a/plumbing/object/commit_stats_test.go
+++ b/plumbing/object/commit_stats_test.go
@@ -22,7 +22,7 @@ type CommitStatsSuite struct {
var _ = Suite(&CommitStatsSuite{})
func (s *CommitStatsSuite) TestStats(c *C) {
- r, hash := s.writeHisotry(c, []byte("foo\n"), []byte("foo\nbar\n"))
+ r, hash := s.writeHistory(c, []byte("foo\n"), []byte("foo\nbar\n"))
aCommit, err := r.CommitObject(hash)
c.Assert(err, IsNil)
@@ -37,7 +37,7 @@ func (s *CommitStatsSuite) TestStats(c *C) {
}
func (s *CommitStatsSuite) TestStats_RootCommit(c *C) {
- r, hash := s.writeHisotry(c, []byte("foo\n"))
+ r, hash := s.writeHistory(c, []byte("foo\n"))
aCommit, err := r.CommitObject(hash)
c.Assert(err, IsNil)
@@ -53,7 +53,7 @@ func (s *CommitStatsSuite) TestStats_RootCommit(c *C) {
}
func (s *CommitStatsSuite) TestStats_WithoutNewLine(c *C) {
- r, hash := s.writeHisotry(c, []byte("foo\nbar"), []byte("foo\nbar\n"))
+ r, hash := s.writeHistory(c, []byte("foo\nbar"), []byte("foo\nbar\n"))
aCommit, err := r.CommitObject(hash)
c.Assert(err, IsNil)
@@ -67,7 +67,7 @@ func (s *CommitStatsSuite) TestStats_WithoutNewLine(c *C) {
c.Assert(fileStats[0].String(), Equals, " foo | 2 +-\n")
}
-func (s *CommitStatsSuite) writeHisotry(c *C, files ...[]byte) (*git.Repository, plumbing.Hash) {
+func (s *CommitStatsSuite) writeHistory(c *C, files ...[]byte) (*git.Repository, plumbing.Hash) {
cm := &git.CommitOptions{
Author: &object.Signature{Name: "Foo", Email: "foo@example.local", When: time.Now()},
}
diff --git a/plumbing/object/commit_walker_bfs_filtered.go b/plumbing/object/commit_walker_bfs_filtered.go
new file mode 100644
index 0000000..7b17f15
--- /dev/null
+++ b/plumbing/object/commit_walker_bfs_filtered.go
@@ -0,0 +1,176 @@
+package object
+
+import (
+ "io"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/storer"
+)
+
+// NewFilterCommitIter returns a CommitIter that walks the commit history,
+// starting at the passed commit and visiting its parents in breadth-first order.
+// The commits returned by the CommitIter will satisfy the passed isValid CommitFilter.
+// The history won't be traversed beyond a commit for which isLimit is true.
+// Each commit will be visited only once.
+// If the commit history cannot be traversed, or the Close() method is called,
+// the CommitIter won't return more commits.
+// If no isValid is passed, all ancestors of the from commit will be valid.
+// If no isLimit is passed, all ancestors of all commits will be visited.
+func NewFilterCommitIter(
+ from *Commit,
+ isValid *CommitFilter,
+ isLimit *CommitFilter,
+) CommitIter {
+ var validFilter CommitFilter
+ if isValid == nil {
+ validFilter = func(_ *Commit) bool {
+ return true
+ }
+ } else {
+ validFilter = *isValid
+ }
+
+ var limitFilter CommitFilter
+ if isLimit == nil {
+ limitFilter = func(_ *Commit) bool {
+ return false
+ }
+ } else {
+ limitFilter = *isLimit
+ }
+
+ return &filterCommitIter{
+ isValid: validFilter,
+ isLimit: limitFilter,
+ visited: map[plumbing.Hash]struct{}{},
+ queue: []*Commit{from},
+ }
+}
+
+// CommitFilter returns a boolean for the passed Commit
+type CommitFilter func(*Commit) bool
+
+// filterCommitIter implements CommitIter
+type filterCommitIter struct {
+ isValid CommitFilter
+ isLimit CommitFilter
+ visited map[plumbing.Hash]struct{}
+ queue []*Commit
+ lastErr error
+}
+
+// Next returns the next commit of the CommitIter.
+// It will return io.EOF if there are no more commits to visit,
+// or an error if the history could not be traversed.
+func (w *filterCommitIter) Next() (*Commit, error) {
+ var commit *Commit
+ var err error
+ for {
+ commit, err = w.popNewFromQueue()
+ if err != nil {
+ return nil, w.close(err)
+ }
+
+ w.visited[commit.Hash] = struct{}{}
+
+ if !w.isLimit(commit) {
+ err = w.addToQueue(commit.s, commit.ParentHashes...)
+ if err != nil {
+ return nil, w.close(err)
+ }
+ }
+
+ if w.isValid(commit) {
+ return commit, nil
+ }
+ }
+}
+
+// ForEach runs the passed callback over each Commit returned by the CommitIter
+// until the callback returns an error or there are no more commits to traverse.
+func (w *filterCommitIter) ForEach(cb func(*Commit) error) error {
+ for {
+ commit, err := w.Next()
+ if err == io.EOF {
+ break
+ }
+
+ if err != nil {
+ return err
+ }
+
+ if err := cb(commit); err == storer.ErrStop {
+ break
+ } else if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Error returns the error that caused the CommitIter to stop returning commits
+func (w *filterCommitIter) Error() error {
+ return w.lastErr
+}
+
+// Close closes the CommitIter
+func (w *filterCommitIter) Close() {
+ w.visited = map[plumbing.Hash]struct{}{}
+ w.queue = []*Commit{}
+ w.isLimit = nil
+ w.isValid = nil
+}
+
+// close closes the CommitIter with an error
+func (w *filterCommitIter) close(err error) error {
+ w.Close()
+ w.lastErr = err
+ return err
+}
+
+// popNewFromQueue returns the first new commit from the internal fifo queue,
+// or an io.EOF error if the queue is empty
+func (w *filterCommitIter) popNewFromQueue() (*Commit, error) {
+ var first *Commit
+ for {
+ if len(w.queue) == 0 {
+ if w.lastErr != nil {
+ return nil, w.lastErr
+ }
+
+ return nil, io.EOF
+ }
+
+ first = w.queue[0]
+ w.queue = w.queue[1:]
+ if _, ok := w.visited[first.Hash]; ok {
+ continue
+ }
+
+ return first, nil
+ }
+}
+
+// addToQueue adds the passed commits to the internal fifo queue if they weren't seen
+// or returns an error if the passed hashes could not be used to get valid commits
+func (w *filterCommitIter) addToQueue(
+ store storer.EncodedObjectStorer,
+ hashes ...plumbing.Hash,
+) error {
+ for _, hash := range hashes {
+ if _, ok := w.visited[hash]; ok {
+ continue
+ }
+
+ commit, err := GetCommit(store, hash)
+ if err != nil {
+ return err
+ }
+
+ w.queue = append(w.queue, commit)
+ }
+
+ return nil
+}
+
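A usage sketch for the new iterator: walk HEAD's history breadth-first and keep only merge commits (the repository path and the filter are illustrative, not part of the patch):

```go
package main

import (
	"fmt"

	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
)

func main() {
	repo, err := git.PlainOpen(".")
	if err != nil {
		panic(err)
	}
	ref, err := repo.Head()
	if err != nil {
		panic(err)
	}
	head, err := repo.CommitObject(ref.Hash())
	if err != nil {
		panic(err)
	}

	// isValid: only merge commits; no isLimit, so the whole reachable
	// history is walked breadth-first.
	var isMerge object.CommitFilter = func(c *object.Commit) bool {
		return c.NumParents() > 1
	}
	iter := object.NewFilterCommitIter(head, &isMerge, nil)
	_ = iter.ForEach(func(c *object.Commit) error {
		fmt.Println(c.Hash)
		return nil
	})
}
```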
diff --git a/plumbing/object/commit_walker_bfs_filtered_test.go b/plumbing/object/commit_walker_bfs_filtered_test.go
new file mode 100644
index 0000000..6984b60
--- /dev/null
+++ b/plumbing/object/commit_walker_bfs_filtered_test.go
@@ -0,0 +1,256 @@
+package object
+
+import (
+ "fmt"
+ "strings"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/storer"
+
+ . "gopkg.in/check.v1"
+)
+
+var _ = Suite(&filterCommitIterSuite{})
+
+type filterCommitIterSuite struct {
+ BaseObjectsSuite
+}
+
+func commitsFromIter(iter CommitIter) ([]*Commit, error) {
+ var commits []*Commit
+ err := iter.ForEach(func(c *Commit) error {
+ commits = append(commits, c)
+ return nil
+ })
+
+ return commits, err
+}
+
+func assertHashes(c *C, commits []*Commit, hashes []string) {
+ if len(commits) != len(hashes) {
+ var expected []string
+ expected = append(expected, hashes...)
+ fmt.Println("expected:", strings.Join(expected, ", "))
+ var got []string
+ for _, c := range commits {
+ got = append(got, c.Hash.String())
+ }
+ fmt.Println(" got:", strings.Join(got, ", "))
+ }
+
+ c.Assert(commits, HasLen, len(hashes))
+ for i, commit := range commits {
+ c.Assert(hashes[i], Equals, commit.Hash.String())
+ }
+}
+
+func validIfCommit(ignored plumbing.Hash) CommitFilter {
+ return func(c *Commit) bool {
+ return c.Hash == ignored
+ }
+}
+
+func not(filter CommitFilter) CommitFilter {
+ return func(c *Commit) bool {
+ return !filter(c)
+ }
+}
+
+/*
+// TestCase history
+
+* 6ecf0ef2c2dffb796033e5a02219af86ec6584e5 <- HEAD
+|
+| * e8d3ffab552895c19b9fcf7aa264d277cde33881
+|/
+* 918c48b83bd081e863dbe1b80f8998f058cd8294
+|
+* af2d6a6954d532f8ffb47615169c8fdf9d383a1a
+|
+* 1669dce138d9b841a518c64b10914d88f5e488ea
+|\
+| * a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69 // isLimit
+| |\
+| | * b8e471f58bcbca63b07bda20e428190409c2db47 // ignored if isLimit is passed
+| |/
+* | 35e85108805c84807bc66a02d91535e1e24b38b9 // isValid; ignored if passed as !isValid
+|/
+* b029517f6300c2da0f4b651b8642506cd6aaf45d
+*/
+
+// TestFilterCommitIter asserts that FilterCommitIter returns all commits from
+// the history except e8d3ffab552895c19b9fcf7aa264d277cde33881, which is not
+// reachable from HEAD
+func (s *filterCommitIterSuite) TestFilterCommitIter(c *C) {
+ from := s.commit(c, s.Fixture.Head)
+
+ commits, err := commitsFromIter(NewFilterCommitIter(from, nil, nil))
+ c.Assert(err, IsNil)
+
+ expected := []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
+ "918c48b83bd081e863dbe1b80f8998f058cd8294",
+ "af2d6a6954d532f8ffb47615169c8fdf9d383a1a",
+ "1669dce138d9b841a518c64b10914d88f5e488ea",
+ "35e85108805c84807bc66a02d91535e1e24b38b9",
+ "a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69",
+ "b029517f6300c2da0f4b651b8642506cd6aaf45d",
+ "b8e471f58bcbca63b07bda20e428190409c2db47",
+ }
+
+ assertHashes(c, commits, expected)
+}
+
+// TestFilterCommitIterWithValid asserts that FilterCommitIter returns only
+// commits that match the passed isValid filter; in this testcase, all commits
+// but one are filtered out from the history
+func (s *filterCommitIterSuite) TestFilterCommitIterWithValid(c *C) {
+ from := s.commit(c, s.Fixture.Head)
+
+ validIf := validIfCommit(plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9"))
+ commits, err := commitsFromIter(NewFilterCommitIter(from, &validIf, nil))
+ c.Assert(err, IsNil)
+
+ expected := []string{
+ "35e85108805c84807bc66a02d91535e1e24b38b9",
+ }
+
+ assertHashes(c, commits, expected)
+}
+
+// TestFilterCommitIterWithInvalid asserts that FilterCommitIter returns only
+// commits that match the passed isValid filter; in this testcase, only one
+// commit is filtered out from the history
+func (s *filterCommitIterSuite) TestFilterCommitIterWithInvalid(c *C) {
+ from := s.commit(c, s.Fixture.Head)
+
+ validIf := validIfCommit(plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9"))
+ validIfNot := not(validIf)
+ commits, err := commitsFromIter(NewFilterCommitIter(from, &validIfNot, nil))
+ c.Assert(err, IsNil)
+
+ expected := []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
+ "918c48b83bd081e863dbe1b80f8998f058cd8294",
+ "af2d6a6954d532f8ffb47615169c8fdf9d383a1a",
+ "1669dce138d9b841a518c64b10914d88f5e488ea",
+ "a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69",
+ "b029517f6300c2da0f4b651b8642506cd6aaf45d",
+ "b8e471f58bcbca63b07bda20e428190409c2db47",
+ }
+
+ assertHashes(c, commits, expected)
+}
+
+// TestFilterCommitIterWithNoValidCommits asserts that FilterCommitIter returns
+// no commits if the passed isValid filter does not allow any commit
+func (s *filterCommitIterSuite) TestFilterCommitIterWithNoValidCommits(c *C) {
+ from := s.commit(c, s.Fixture.Head)
+
+ validIf := validIfCommit(plumbing.NewHash("THIS_COMMIT_DOES_NOT_EXIST"))
+ commits, err := commitsFromIter(NewFilterCommitIter(from, &validIf, nil))
+ c.Assert(err, IsNil)
+ c.Assert(commits, HasLen, 0)
+}
+
+// TestFilterCommitIterWithStopAt asserts that FilterCommitIter returns only
+// commits that are not beyond the isLimit filter
+func (s *filterCommitIterSuite) TestFilterCommitIterWithStopAt(c *C) {
+ from := s.commit(c, s.Fixture.Head)
+
+ stopAtRule := validIfCommit(plumbing.NewHash("a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69"))
+ commits, err := commitsFromIter(NewFilterCommitIter(from, nil, &stopAtRule))
+ c.Assert(err, IsNil)
+
+ expected := []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
+ "918c48b83bd081e863dbe1b80f8998f058cd8294",
+ "af2d6a6954d532f8ffb47615169c8fdf9d383a1a",
+ "1669dce138d9b841a518c64b10914d88f5e488ea",
+ "35e85108805c84807bc66a02d91535e1e24b38b9",
+ "a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69",
+ "b029517f6300c2da0f4b651b8642506cd6aaf45d",
+ }
+
+ assertHashes(c, commits, expected)
+}
+
+// TestFilterCommitIterWithInvalidAndStopAt asserts that FilterCommitIter works
+// properly with both isValid and isLimit filters
+func (s *filterCommitIterSuite) TestFilterCommitIterWithInvalidAndStopAt(c *C) {
+ from := s.commit(c, s.Fixture.Head)
+
+ stopAtRule := validIfCommit(plumbing.NewHash("a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69"))
+ validIf := validIfCommit(plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9"))
+ validIfNot := not(validIf)
+ commits, err := commitsFromIter(NewFilterCommitIter(from, &validIfNot, &stopAtRule))
+ c.Assert(err, IsNil)
+
+ expected := []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
+ "918c48b83bd081e863dbe1b80f8998f058cd8294",
+ "af2d6a6954d532f8ffb47615169c8fdf9d383a1a",
+ "1669dce138d9b841a518c64b10914d88f5e488ea",
+ "a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69",
+ "b029517f6300c2da0f4b651b8642506cd6aaf45d",
+ }
+
+ assertHashes(c, commits, expected)
+}
+
+// TestIteratorForEachCallbackReturn asserts that the ForEach callback returning
+// storer.ErrStop does not cause ForEach itself to return an error
+//
+// - 6ecf0ef2c2dffb796033e5a02219af86ec6584e5
+// - 918c48b83bd081e863dbe1b80f8998f058cd8294 //<- stop
+// - af2d6a6954d532f8ffb47615169c8fdf9d383a1a
+// - 1669dce138d9b841a518c64b10914d88f5e488ea //<- err
+// - 35e85108805c84807bc66a02d91535e1e24b38b9
+// - a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69
+// - b029517f6300c2da0f4b651b8642506cd6aaf45d
+// - b8e471f58bcbca63b07bda20e428190409c2db47
+func (s *filterCommitIterSuite) TestIteratorForEachCallbackReturn(c *C) {
+
+ var visited []*Commit
+ errUnexpected := fmt.Errorf("Could not continue")
+ cb := func(c *Commit) error {
+ switch c.Hash {
+ case plumbing.NewHash("918c48b83bd081e863dbe1b80f8998f058cd8294"):
+ return storer.ErrStop
+ case plumbing.NewHash("1669dce138d9b841a518c64b10914d88f5e488ea"):
+ return errUnexpected
+ }
+
+ visited = append(visited, c)
+ return nil
+ }
+
+ from := s.commit(c, s.Fixture.Head)
+
+ iter := NewFilterCommitIter(from, nil, nil)
+ err := iter.ForEach(cb)
+ c.Assert(err, IsNil)
+ expected := []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
+ }
+ assertHashes(c, visited, expected)
+
+ err = iter.ForEach(cb)
+ c.Assert(err, Equals, errUnexpected)
+ expected = []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
+ "af2d6a6954d532f8ffb47615169c8fdf9d383a1a",
+ }
+ assertHashes(c, visited, expected)
+
+ err = iter.ForEach(cb)
+ c.Assert(err, IsNil)
+ expected = []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
+ "af2d6a6954d532f8ffb47615169c8fdf9d383a1a",
+ "35e85108805c84807bc66a02d91535e1e24b38b9",
+ "a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69",
+ "b029517f6300c2da0f4b651b8642506cd6aaf45d",
+ "b8e471f58bcbca63b07bda20e428190409c2db47",
+ }
+ assertHashes(c, visited, expected)
+}
diff --git a/plumbing/object/commit_walker_limit.go b/plumbing/object/commit_walker_limit.go
new file mode 100644
index 0000000..ee56e50
--- /dev/null
+++ b/plumbing/object/commit_walker_limit.go
@@ -0,0 +1,65 @@
+package object
+
+import (
+ "io"
+ "time"
+
+ "gopkg.in/src-d/go-git.v4/plumbing/storer"
+)
+
+type commitLimitIter struct {
+ sourceIter CommitIter
+ limitOptions LogLimitOptions
+}
+
+type LogLimitOptions struct {
+ Since *time.Time
+ Until *time.Time
+}
+
+func NewCommitLimitIterFromIter(commitIter CommitIter, limitOptions LogLimitOptions) CommitIter {
+ iterator := new(commitLimitIter)
+ iterator.sourceIter = commitIter
+ iterator.limitOptions = limitOptions
+ return iterator
+}
+
+func (c *commitLimitIter) Next() (*Commit, error) {
+ for {
+ commit, err := c.sourceIter.Next()
+ if err != nil {
+ return nil, err
+ }
+
+ if c.limitOptions.Since != nil && commit.Committer.When.Before(*c.limitOptions.Since) {
+ continue
+ }
+ if c.limitOptions.Until != nil && commit.Committer.When.After(*c.limitOptions.Until) {
+ continue
+ }
+ return commit, nil
+ }
+}
+
+func (c *commitLimitIter) ForEach(cb func(*Commit) error) error {
+ for {
+ commit, nextErr := c.Next()
+ if nextErr == io.EOF {
+ break
+ }
+ if nextErr != nil {
+ return nextErr
+ }
+ err := cb(commit)
+ if err == storer.ErrStop {
+ return nil
+ } else if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (c *commitLimitIter) Close() {
+ c.sourceIter.Close()
+}
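A usage sketch for the limit iterator: wrap the iterator returned by repo.Log to keep only commits whose committer date falls inside a window (repository path and window are illustrative). Note that commits outside the window are skipped rather than treated as a stopping point, so the source iterator is still fully drained:

```go
package main

import (
	"fmt"
	"time"

	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
)

func main() {
	repo, err := git.PlainOpen(".")
	if err != nil {
		panic(err)
	}
	ref, err := repo.Head()
	if err != nil {
		panic(err)
	}
	source, err := repo.Log(&git.LogOptions{From: ref.Hash()})
	if err != nil {
		panic(err)
	}

	// Keep only commits from the last 30 days; Until works symmetrically.
	since := time.Now().AddDate(0, 0, -30)
	iter := object.NewCommitLimitIterFromIter(source, object.LogLimitOptions{Since: &since})
	_ = iter.ForEach(func(c *object.Commit) error {
		fmt.Println(c.Hash, c.Committer.When)
		return nil
	})
}
```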
diff --git a/plumbing/object/commit_walker_file.go b/plumbing/object/commit_walker_path.go
index 6f16e61..6a49fd1 100644
--- a/plumbing/object/commit_walker_file.go
+++ b/plumbing/object/commit_walker_path.go
@@ -8,27 +8,39 @@ import (
"gopkg.in/src-d/go-git.v4/plumbing/storer"
)
-type commitFileIter struct {
- fileName string
+type commitPathIter struct {
+ pathFilter func(string) bool
sourceIter CommitIter
currentCommit *Commit
checkParent bool
}
-// NewCommitFileIterFromIter returns a commit iterator which performs diffTree between
+// NewCommitPathIterFromIter returns a commit iterator which performs diffTree between
// successive trees returned from the commit iterator from the argument. The purpose of this is
// to find the commits that explain how the files that match the path came to be.
// If checkParent is true then the function double checks if potential parent (next commit in a path)
// is one of the parents in the tree (it's used by `git log --all`).
-func NewCommitFileIterFromIter(fileName string, commitIter CommitIter, checkParent bool) CommitIter {
- iterator := new(commitFileIter)
+// pathFilter is a function that takes a file path as argument and returns true if the file is to be included
+func NewCommitPathIterFromIter(pathFilter func(string) bool, commitIter CommitIter, checkParent bool) CommitIter {
+ iterator := new(commitPathIter)
iterator.sourceIter = commitIter
- iterator.fileName = fileName
+ iterator.pathFilter = pathFilter
iterator.checkParent = checkParent
return iterator
}
-func (c *commitFileIter) Next() (*Commit, error) {
+// NewCommitFileIterFromIter is kept for compatibility; it can be replaced with NewCommitPathIterFromIter
+func NewCommitFileIterFromIter(fileName string, commitIter CommitIter, checkParent bool) CommitIter {
+ return NewCommitPathIterFromIter(
+ func(path string) bool {
+ return path == fileName
+ },
+ commitIter,
+ checkParent,
+ )
+}
+
+func (c *commitPathIter) Next() (*Commit, error) {
if c.currentCommit == nil {
var err error
c.currentCommit, err = c.sourceIter.Next()
@@ -45,7 +57,7 @@ func (c *commitFileIter) Next() (*Commit, error) {
return commit, commitErr
}
-func (c *commitFileIter) getNextFileCommit() (*Commit, error) {
+func (c *commitPathIter) getNextFileCommit() (*Commit, error) {
for {
// Parent-commit can be nil if the current-commit is the initial commit
parentCommit, parentCommitErr := c.sourceIter.Next()
@@ -96,9 +108,9 @@ func (c *commitFileIter) getNextFileCommit() (*Commit, error) {
}
}
-func (c *commitFileIter) hasFileChange(changes Changes, parent *Commit) bool {
+func (c *commitPathIter) hasFileChange(changes Changes, parent *Commit) bool {
for _, change := range changes {
- if change.name() != c.fileName {
+ if !c.pathFilter(change.name()) {
continue
}
@@ -125,9 +137,12 @@ func isParentHash(hash plumbing.Hash, commit *Commit) bool {
return false
}
-func (c *commitFileIter) ForEach(cb func(*Commit) error) error {
+func (c *commitPathIter) ForEach(cb func(*Commit) error) error {
for {
commit, nextErr := c.Next()
+ if nextErr == io.EOF {
+ break
+ }
if nextErr != nil {
return nextErr
}
@@ -138,8 +153,9 @@ func (c *commitFileIter) ForEach(cb func(*Commit) error) error {
return err
}
}
+ return nil
}
-func (c *commitFileIter) Close() {
+func (c *commitPathIter) Close() {
c.sourceIter.Close()
}
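The generalization from a fixed fileName to a pathFilter callback lets callers match whole directories or extensions rather than a single path. A usage sketch (the docs/ prefix is illustrative):

```go
package main

import (
	"fmt"
	"strings"

	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
)

func main() {
	repo, err := git.PlainOpen(".")
	if err != nil {
		panic(err)
	}
	ref, err := repo.Head()
	if err != nil {
		panic(err)
	}
	source, err := repo.Log(&git.LogOptions{From: ref.Hash()})
	if err != nil {
		panic(err)
	}

	// Commits that touched anything under docs/; the legacy
	// NewCommitFileIterFromIter is now just this with an exact-match filter.
	iter := object.NewCommitPathIterFromIter(
		func(path string) bool { return strings.HasPrefix(path, "docs/") },
		source,
		false,
	)
	_ = iter.ForEach(func(c *object.Commit) error {
		fmt.Println(c.Hash)
		return nil
	})
}
```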
diff --git a/plumbing/object/merge_base.go b/plumbing/object/merge_base.go
new file mode 100644
index 0000000..6f2568d
--- /dev/null
+++ b/plumbing/object/merge_base.go
@@ -0,0 +1,210 @@
+package object
+
+import (
+ "fmt"
+ "sort"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/storer"
+)
+
+// errIsReachable is thrown when first commit is an ancestor of the second
+var errIsReachable = fmt.Errorf("first is reachable from second")
+
+// MergeBase mimics the behavior of `git merge-base actual other`, returning the
+// best common ancestors of the actual commit and the passed one.
+// A best common ancestor cannot be reached from any other common ancestor.
+func (c *Commit) MergeBase(other *Commit) ([]*Commit, error) {
+ // use sortedByCommitDateDesc strategy
+ sorted := sortByCommitDateDesc(c, other)
+ newer := sorted[0]
+ older := sorted[1]
+
+ newerHistory, err := ancestorsIndex(older, newer)
+ if err == errIsReachable {
+ return []*Commit{older}, nil
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ var res []*Commit
+ inNewerHistory := isInIndexCommitFilter(newerHistory)
+ resIter := NewFilterCommitIter(older, &inNewerHistory, &inNewerHistory)
+ _ = resIter.ForEach(func(commit *Commit) error {
+ res = append(res, commit)
+ return nil
+ })
+
+ return Independents(res)
+}
+
+// IsAncestor returns true if the actual commit is an ancestor of the passed one.
+// It returns an error if the history is not traversable.
+// It mimics the behavior of `git merge-base --is-ancestor actual other`
+func (c *Commit) IsAncestor(other *Commit) (bool, error) {
+ found := false
+ iter := NewCommitPreorderIter(other, nil, nil)
+ err := iter.ForEach(func(comm *Commit) error {
+ if comm.Hash != c.Hash {
+ return nil
+ }
+
+ found = true
+ return storer.ErrStop
+ })
+
+ return found, err
+}
+
+// ancestorsIndex returns a map with the ancestors of the starting commit if the
+// excluded one is not one of them. It returns errIsReachable if the excluded commit
+// is an ancestor of the starting one, or another error if the history is not traversable.
+func ancestorsIndex(excluded, starting *Commit) (map[plumbing.Hash]struct{}, error) {
+ if excluded.Hash.String() == starting.Hash.String() {
+ return nil, errIsReachable
+ }
+
+ startingHistory := map[plumbing.Hash]struct{}{}
+ startingIter := NewCommitIterBSF(starting, nil, nil)
+ err := startingIter.ForEach(func(commit *Commit) error {
+ if commit.Hash == excluded.Hash {
+ return errIsReachable
+ }
+
+ startingHistory[commit.Hash] = struct{}{}
+ return nil
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ return startingHistory, nil
+}
+
+// Independents returns a subset of the passed commits that are not reachable from the others.
+// It mimics the behavior of `git merge-base --independent commit...`.
+func Independents(commits []*Commit) ([]*Commit, error) {
+ // use sortedByCommitDateDesc strategy
+ candidates := sortByCommitDateDesc(commits...)
+ candidates = removeDuplicated(candidates)
+
+ seen := map[plumbing.Hash]struct{}{}
+ var isLimit CommitFilter = func(commit *Commit) bool {
+ _, ok := seen[commit.Hash]
+ return ok
+ }
+
+ if len(candidates) < 2 {
+ return candidates, nil
+ }
+
+ pos := 0
+ for {
+ from := candidates[pos]
+ others := remove(candidates, from)
+ fromHistoryIter := NewFilterCommitIter(from, nil, &isLimit)
+ err := fromHistoryIter.ForEach(func(fromAncestor *Commit) error {
+ for _, other := range others {
+ if fromAncestor.Hash == other.Hash {
+ candidates = remove(candidates, other)
+ others = remove(others, other)
+ }
+ }
+
+ if len(candidates) == 1 {
+ return storer.ErrStop
+ }
+
+ seen[fromAncestor.Hash] = struct{}{}
+ return nil
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ nextPos := indexOf(candidates, from) + 1
+ if nextPos >= len(candidates) {
+ break
+ }
+
+ pos = nextPos
+ }
+
+ return candidates, nil
+}
+
+// sortByCommitDateDesc returns the passed commits, sorted by `committer.When desc`
+//
+// This strategy tries to reduce the time needed to reach the other commits when
+// walking the history from one of them, assuming that ancestors are usually
+// committed before their descendants. That way `Independents(A^, A)` is
+// processed as `Independents(A, A^)`: starting from `A`, `A^` is reached much
+// sooner than by walking from `A^` to the initial commit and then from `A` to `A^`.
+func sortByCommitDateDesc(commits ...*Commit) []*Commit {
+ sorted := make([]*Commit, len(commits))
+ copy(sorted, commits)
+ sort.Slice(sorted, func(i, j int) bool {
+ return sorted[i].Committer.When.After(sorted[j].Committer.When)
+ })
+
+ return sorted
+}
+
+// indexOf returns the first position where target was found in the passed commits
+func indexOf(commits []*Commit, target *Commit) int {
+ for i, commit := range commits {
+ if target.Hash == commit.Hash {
+ return i
+ }
+ }
+
+ return -1
+}
+
+// remove returns the passed commits excluding the commit toDelete
+func remove(commits []*Commit, toDelete *Commit) []*Commit {
+ res := make([]*Commit, len(commits))
+ j := 0
+ for _, commit := range commits {
+ if commit.Hash == toDelete.Hash {
+ continue
+ }
+
+ res[j] = commit
+ j++
+ }
+
+ return res[:j]
+}
+
+// removeDuplicated removes duplicated commits from the passed slice of commits
+func removeDuplicated(commits []*Commit) []*Commit {
+ seen := make(map[plumbing.Hash]struct{}, len(commits))
+ res := make([]*Commit, len(commits))
+ j := 0
+ for _, commit := range commits {
+ if _, ok := seen[commit.Hash]; ok {
+ continue
+ }
+
+ seen[commit.Hash] = struct{}{}
+ res[j] = commit
+ j++
+ }
+
+ return res[:j]
+}
+
+// isInIndexCommitFilter returns a CommitFilter that returns true
+// if the commit is in the passed index.
+func isInIndexCommitFilter(index map[plumbing.Hash]struct{}) CommitFilter {
+ return func(c *Commit) bool {
+ _, ok := index[c.Hash]
+ return ok
+ }
+}
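A usage sketch for the new API: the merge base of a commit and its first parent is trivially the parent, and IsAncestor confirms reachability (repository path is illustrative):

```go
package main

import (
	"fmt"

	git "gopkg.in/src-d/go-git.v4"
)

func main() {
	repo, err := git.PlainOpen(".")
	if err != nil {
		panic(err)
	}
	ref, err := repo.Head()
	if err != nil {
		panic(err)
	}
	head, err := repo.CommitObject(ref.Hash())
	if err != nil {
		panic(err)
	}
	parent, err := head.Parent(0)
	if err != nil {
		panic(err) // e.g. HEAD is a root commit with no parent
	}

	// The merge base of a commit and its ancestor is the ancestor itself.
	bases, err := head.MergeBase(parent)
	if err != nil {
		panic(err)
	}
	for _, b := range bases {
		fmt.Println("merge base:", b.Hash)
	}

	ok, err := parent.IsAncestor(head)
	if err != nil {
		panic(err)
	}
	fmt.Println("parent is ancestor of head:", ok) // true
}
```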
diff --git a/plumbing/object/merge_base_test.go b/plumbing/object/merge_base_test.go
new file mode 100644
index 0000000..72c9cd9
--- /dev/null
+++ b/plumbing/object/merge_base_test.go
@@ -0,0 +1,323 @@
+package object
+
+import (
+ "fmt"
+ "sort"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/cache"
+ "gopkg.in/src-d/go-git.v4/storage/filesystem"
+
+ . "gopkg.in/check.v1"
+ fixtures "gopkg.in/src-d/go-git-fixtures.v3"
+)
+
+func alphabeticSortCommits(commits []*Commit) {
+ sort.Slice(commits, func(i, j int) bool {
+ return commits[i].Hash.String() > commits[j].Hash.String()
+ })
+}
+
+/*
+
+The following tests consider this history having two root commits: V and W
+
+V---o---M----AB----A---CD1--P---C--------S-------------------Q < master
+ \ \ / / /
+ \ X GQ1---G < feature /
+ \ / \ / / /
+W---o---N----o----B---CD2---o---D----o----GQ2------------o < dev
+
+MergeBase
+----------------------------
+passed merge-base
+ M, N Commits with unrelated history, have no merge-base
+ A, B AB Regular merge-base between two commits
+ A, A A The merge-base of two equal commits is the same commit
+ Q, N N The merge-base of a commit and its ancestor is the ancestor
+ C, D CD1, CD2 Cross merges causes more than one merge-base
+ G, Q GQ1, GQ2 Feature branches including merges, causes more than one merge-base
+
+Independents
+----------------------------
+candidates result
+ A A Only one commit returns it
+ A, A, A A Repeated commits are ignored
+ A, A, M, M, N A, N M is reachable from A, so it is not independent
+ S, G, P S, G P is reachable from S, so it is not independent
+ CD1, CD2, M, N CD1, CD2 M and N are reachable from CD2, so they're not
+ C, G, dev, M, N C, G, dev M and N are reachable from G, so they're not
+ C, D, M, N C, D M and N are reachable from C, so they're not
+ A, A^, A, N, N^ A, N A^ and N^ are reachable from A and N
+ A^^^, A^, A^^, A, N A, N A^^^, A^^ and A^ are reachable from A, so they're not
+
+IsAncestor
+----------------------------
+passed result
+ A^^, A    true      True if the first commit is an ancestor of the second one
+ M, G      true      True because M is reachable from G even crossing merge commits
+ A, A      true      True if first and second are the same
+ M, N      false     Commits with unrelated history return false
+*/
+
+var _ = Suite(&mergeBaseSuite{})
+
+type mergeBaseSuite struct {
+ BaseObjectsSuite
+}
+
+func (s *mergeBaseSuite) SetUpSuite(c *C) {
+ s.Suite.SetUpSuite(c)
+ s.Fixture = fixtures.ByTag("merge-base").One()
+ s.Storer = filesystem.NewStorage(s.Fixture.DotGit(), cache.NewObjectLRUDefault())
+}
+
+var revisionIndex = map[string]plumbing.Hash{
+ "master": plumbing.NewHash("dce0e0c20d701c3d260146e443d6b3b079505191"),
+ "feature": plumbing.NewHash("d1b0093698e398d596ef94d646c4db37e8d1e970"),
+ "dev": plumbing.NewHash("25ca6c810c08482d61113fbcaaada38bb59093a8"),
+ "M": plumbing.NewHash("bb355b64e18386dbc3af63dfd09c015c44cbd9b6"),
+ "N": plumbing.NewHash("d64b894762ab5f09e2b155221b90c18bd0637236"),
+ "A": plumbing.NewHash("29740cfaf0c2ee4bb532dba9e80040ca738f367c"),
+ "B": plumbing.NewHash("2c84807970299ba98951c65fe81ebbaac01030f0"),
+ "AB": plumbing.NewHash("31a7e081a28f149ee98ffd13ba1a6d841a5f46fd"),
+ "P": plumbing.NewHash("ff84393134864cf9d3a9853a81bde81778bd5805"),
+ "C": plumbing.NewHash("8b72fabdc4222c3ff965bc310ded788c601c50ed"),
+ "D": plumbing.NewHash("14777cf3e209334592fbfd0b878f6868394db836"),
+ "CD1": plumbing.NewHash("4709e13a3cbb300c2b8a917effda776e1b8955c7"),
+ "CD2": plumbing.NewHash("38468e274e91e50ffb637b88a1954ab6193fe974"),
+ "S": plumbing.NewHash("628f1a42b70380ed05734bf01b468b46206ef1ea"),
+ "G": plumbing.NewHash("d1b0093698e398d596ef94d646c4db37e8d1e970"),
+ "Q": plumbing.NewHash("dce0e0c20d701c3d260146e443d6b3b079505191"),
+ "GQ1": plumbing.NewHash("ccaaa99c21dad7e9f392c36ae8cb72dc63bed458"),
+ "GQ2": plumbing.NewHash("806824d4778e94fe7c3244e92a9cd07090c9ab54"),
+ "A^": plumbing.NewHash("31a7e081a28f149ee98ffd13ba1a6d841a5f46fd"),
+ "A^^": plumbing.NewHash("bb355b64e18386dbc3af63dfd09c015c44cbd9b6"),
+ "A^^^": plumbing.NewHash("8d08dd1388b82dd354cb43918d83da86c76b0978"),
+ "N^": plumbing.NewHash("b6e1fc8dad4f1068fb42774ec5fc65c065b2c312"),
+}
+
+func (s *mergeBaseSuite) commitsFromRevs(c *C, revs []string) ([]*Commit, error) {
+ var commits []*Commit
+ for _, rev := range revs {
+ hash, ok := revisionIndex[rev]
+ if !ok {
+			return nil, fmt.Errorf("revision not found '%s'", rev)
+ }
+
+ commits = append(commits, s.commit(c, hash))
+ }
+
+ return commits, nil
+}
+
+// AssertMergeBase validates that the merge-base of the passed revs
+// matches the expected result
+func (s *mergeBaseSuite) AssertMergeBase(c *C, revs, expectedRevs []string) {
+ c.Assert(revs, HasLen, 2)
+
+ commits, err := s.commitsFromRevs(c, revs)
+ c.Assert(err, IsNil)
+
+ results, err := commits[0].MergeBase(commits[1])
+ c.Assert(err, IsNil)
+
+ expected, err := s.commitsFromRevs(c, expectedRevs)
+ c.Assert(err, IsNil)
+
+ c.Assert(results, HasLen, len(expected))
+
+ alphabeticSortCommits(results)
+ alphabeticSortCommits(expected)
+ for i, commit := range results {
+ c.Assert(commit.Hash.String(), Equals, expected[i].Hash.String())
+ }
+}
+
+// AssertIndependents validates the independent commits of the passed list
+func (s *mergeBaseSuite) AssertIndependents(c *C, revs, expectedRevs []string) {
+ commits, err := s.commitsFromRevs(c, revs)
+ c.Assert(err, IsNil)
+
+ results, err := Independents(commits)
+ c.Assert(err, IsNil)
+
+ expected, err := s.commitsFromRevs(c, expectedRevs)
+ c.Assert(err, IsNil)
+
+ c.Assert(results, HasLen, len(expected))
+
+ alphabeticSortCommits(results)
+ alphabeticSortCommits(expected)
+ for i, commit := range results {
+ c.Assert(commit.Hash.String(), Equals, expected[i].Hash.String())
+ }
+}
+
+// AssertAncestor validates whether the first rev is an ancestor of the second one
+func (s *mergeBaseSuite) AssertAncestor(c *C, revs []string, shouldBeAncestor bool) {
+ c.Assert(revs, HasLen, 2)
+
+ commits, err := s.commitsFromRevs(c, revs)
+ c.Assert(err, IsNil)
+
+ isAncestor, err := commits[0].IsAncestor(commits[1])
+ c.Assert(err, IsNil)
+ c.Assert(isAncestor, Equals, shouldBeAncestor)
+}
+
+// TestNoAncestorsWhenNoCommonHistory validates that merge-base returns no commits
+// when there is no common history (M, N -> none)
+func (s *mergeBaseSuite) TestNoAncestorsWhenNoCommonHistory(c *C) {
+ revs := []string{"M", "N"}
+ nothing := []string{}
+ s.AssertMergeBase(c, revs, nothing)
+}
+
+// TestCommonAncestorInMergedOrphans validates that merge-base returns a common
+// ancestor in orphan branches when they were merged (A, B -> AB)
+func (s *mergeBaseSuite) TestCommonAncestorInMergedOrphans(c *C) {
+ revs := []string{"A", "B"}
+ expectedRevs := []string{"AB"}
+ s.AssertMergeBase(c, revs, expectedRevs)
+}
+
+// TestMergeBaseWithSelf validates that merge-base between equal commits returns
+// the same commit (A, A -> A)
+func (s *mergeBaseSuite) TestMergeBaseWithSelf(c *C) {
+ revs := []string{"A", "A"}
+ expectedRevs := []string{"A"}
+ s.AssertMergeBase(c, revs, expectedRevs)
+}
+
+// TestMergeBaseWithAncestor validates that merge-base between a commit and its
+// ancestor returns the ancestor (Q, N -> N)
+func (s *mergeBaseSuite) TestMergeBaseWithAncestor(c *C) {
+ revs := []string{"Q", "N"}
+ expectedRevs := []string{"N"}
+ s.AssertMergeBase(c, revs, expectedRevs)
+}
+
+// TestDoubleCommonAncestorInCrossMerge validates that merge-base returns two
+// common ancestors when there are cross merges (C, D -> CD1, CD2)
+func (s *mergeBaseSuite) TestDoubleCommonAncestorInCrossMerge(c *C) {
+ revs := []string{"C", "D"}
+ expectedRevs := []string{"CD1", "CD2"}
+ s.AssertMergeBase(c, revs, expectedRevs)
+}
+
+// TestDoubleCommonInSubFeatureBranches validates that merge-base returns two
+// common ancestors when two branches were partially merged (G, Q -> GQ1, GQ2)
+func (s *mergeBaseSuite) TestDoubleCommonInSubFeatureBranches(c *C) {
+ revs := []string{"G", "Q"}
+ expectedRevs := []string{"GQ1", "GQ2"}
+ s.AssertMergeBase(c, revs, expectedRevs)
+}
+
+// TestIndependentOnlyOne validates that Independents for one commit returns
+// that same commit (A -> A)
+func (s *mergeBaseSuite) TestIndependentOnlyOne(c *C) {
+ revs := []string{"A"}
+ expectedRevs := []string{"A"}
+ s.AssertIndependents(c, revs, expectedRevs)
+}
+
+// TestIndependentOnlyRepeated validates that Independents for one repeated commit
+// returns that same commit (A, A, A -> A)
+func (s *mergeBaseSuite) TestIndependentOnlyRepeated(c *C) {
+ revs := []string{"A", "A", "A"}
+ expectedRevs := []string{"A"}
+ s.AssertIndependents(c, revs, expectedRevs)
+}
+
+// TestIndependentWithRepeatedAncestors validates that Independents works well
+// when there are repeated ancestors (A, A, M, M, N -> A, N)
+func (s *mergeBaseSuite) TestIndependentWithRepeatedAncestors(c *C) {
+ revs := []string{"A", "A", "M", "M", "N"}
+ expectedRevs := []string{"A", "N"}
+ s.AssertIndependents(c, revs, expectedRevs)
+}
+
+// TestIndependentBeyondShortcut validates that Independents does not stop walking
+// all paths as soon as one of them is known (S, G, P -> S, G)
+func (s *mergeBaseSuite) TestIndependentBeyondShortcut(c *C) {
+ revs := []string{"S", "G", "P"}
+ expectedRevs := []string{"S", "G"}
+ s.AssertIndependents(c, revs, expectedRevs)
+}
+
+// TestIndependentBeyondShortcutBis validates that Independents does not stop walking
+// all paths as soon as one of them is known (CD1, CD2, M, N -> CD1, CD2)
+func (s *mergeBaseSuite) TestIndependentBeyondShortcutBis(c *C) {
+ revs := []string{"CD1", "CD2", "M", "N"}
+ expectedRevs := []string{"CD1", "CD2"}
+ s.AssertIndependents(c, revs, expectedRevs)
+}
+
+// TestIndependentWithPairOfAncestors validates that Independents excludes all
+// the ancestors (C, D, M, N -> C, D)
+func (s *mergeBaseSuite) TestIndependentWithPairOfAncestors(c *C) {
+ revs := []string{"C", "D", "M", "N"}
+ expectedRevs := []string{"C", "D"}
+ s.AssertIndependents(c, revs, expectedRevs)
+}
+
+// TestIndependentAcrossCrossMerges validates that Independents works well
+// across cross merges (C, G, dev, M, N -> C, G, dev)
+func (s *mergeBaseSuite) TestIndependentAcrossCrossMerges(c *C) {
+ revs := []string{"C", "G", "dev", "M", "N"}
+ expectedRevs := []string{"C", "G", "dev"}
+ s.AssertIndependents(c, revs, expectedRevs)
+}
+
+// TestIndependentChangingOrderRepetition validates that Independents works well
+// when the order and repetition are tricky (A, A^, A, N, N^ -> A, N)
+func (s *mergeBaseSuite) TestIndependentChangingOrderRepetition(c *C) {
+ revs := []string{"A", "A^", "A", "N", "N^"}
+ expectedRevs := []string{"A", "N"}
+ s.AssertIndependents(c, revs, expectedRevs)
+}
+
+// TestIndependentChangingOrder validates that Independents works well
+// when the order is tricky (A^^^, A^, A^^, A, N -> A, N)
+func (s *mergeBaseSuite) TestIndependentChangingOrder(c *C) {
+ revs := []string{"A^^^", "A^", "A^^", "A", "N"}
+ expectedRevs := []string{"A", "N"}
+ s.AssertIndependents(c, revs, expectedRevs)
+}
+
+// TestAncestor validates that IsAncestor returns true when the first commit can be
+// reached from the second one, walking back through its parents ( A^^, A -> true )
+func (s *mergeBaseSuite) TestAncestor(c *C) {
+ revs := []string{"A^^", "A"}
+ s.AssertAncestor(c, revs, true)
+
+ revs = []string{"A", "A^^"}
+ s.AssertAncestor(c, revs, false)
+}
+
+// TestAncestorBeyondMerges validates that IsAncestor returns true also when the first
+// commit can be reached from the second one, even crossing merge commits in between ( M, G -> true )
+func (s *mergeBaseSuite) TestAncestorBeyondMerges(c *C) {
+ revs := []string{"M", "G"}
+ s.AssertAncestor(c, revs, true)
+
+ revs = []string{"G", "M"}
+ s.AssertAncestor(c, revs, false)
+}
+
+// TestAncestorSame validates that IsAncestor returns true when both commits are the same ( A, A -> true )
+func (s *mergeBaseSuite) TestAncestorSame(c *C) {
+ revs := []string{"A", "A"}
+ s.AssertAncestor(c, revs, true)
+}
+
+// TestAncestorUnrelated validates that IsAncestor returns false when the passed commits
+// do not share any history, no matter the order used ( M, N -> false )
+func (s *mergeBaseSuite) TestAncestorUnrelated(c *C) {
+ revs := []string{"M", "N"}
+ s.AssertAncestor(c, revs, false)
+
+ revs = []string{"N", "M"}
+ s.AssertAncestor(c, revs, false)
+}
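
The tests above exercise the new API through the merge-base fixture. A minimal sketch of how a caller would use it (the repository path below is a placeholder; the hashes are the fixture's A and B from the table above):

package main

import (
	"fmt"
	"log"

	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
)

func main() {
	repo, err := git.PlainOpen("/path/to/repo") // placeholder path
	if err != nil {
		log.Fatal(err)
	}

	a, err := repo.CommitObject(plumbing.NewHash("29740cfaf0c2ee4bb532dba9e80040ca738f367c"))
	if err != nil {
		log.Fatal(err)
	}
	b, err := repo.CommitObject(plumbing.NewHash("2c84807970299ba98951c65fe81ebbaac01030f0"))
	if err != nil {
		log.Fatal(err)
	}

	// MergeBase may return more than one commit, e.g. on cross merges.
	bases, err := a.MergeBase(b)
	if err != nil {
		log.Fatal(err)
	}
	for _, base := range bases {
		fmt.Println("merge-base:", base.Hash)
	}

	// IsAncestor mimics `git merge-base --is-ancestor`.
	ok, err := a.IsAncestor(b)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("a is ancestor of b:", ok)

	// Independents discards any commit reachable from another in the list.
	heads, err := object.Independents([]*object.Commit{a, b})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("independent commits:", len(heads))
}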
diff --git a/plumbing/object/object.go b/plumbing/object/object.go
index e960e50..c48a18d 100644
--- a/plumbing/object/object.go
+++ b/plumbing/object/object.go
@@ -138,17 +138,19 @@ func (s *Signature) decodeTimeAndTimeZone(b []byte) {
return
}
- // Include a dummy year in this time.Parse() call to avoid a bug in Go:
- // https://github.com/golang/go/issues/19750
- //
- // Parsing the timezone with no other details causes the tl.Location() call
- // below to return time.Local instead of the parsed zone in some cases
- tl, err := time.Parse("2006 -0700", "1970 "+string(b[tzStart:tzStart+timeZoneLength]))
- if err != nil {
+ timezone := string(b[tzStart : tzStart+timeZoneLength])
+ tzhours, err1 := strconv.ParseInt(timezone[0:3], 10, 64)
+ tzmins, err2 := strconv.ParseInt(timezone[3:], 10, 64)
+ if err1 != nil || err2 != nil {
return
}
+ if tzhours < 0 {
+ tzmins *= -1
+ }
+
+ tz := time.FixedZone("", int(tzhours*60*60+tzmins*60))
- s.When = s.When.In(tl.Location())
+ s.When = s.When.In(tz)
}
func (s *Signature) encodeTimeAndTimeZone(w io.Writer) error {
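
The replacement parses the ±HHMM offset by hand instead of relying on time.Parse. A standalone sketch of the same arithmetic (the parseTZ helper is hypothetical, not part of the patch), showing why the hour sign must be propagated to the minutes:

package main

import (
	"fmt"
	"strconv"
	"time"
)

// parseTZ converts an offset such as "-0730" into a fixed time.Location.
// "-0730" must become -(7h30m), so a negative hour also negates the minutes.
func parseTZ(tz string) (*time.Location, error) {
	if len(tz) != 5 {
		return nil, fmt.Errorf("invalid timezone %q", tz)
	}
	hours, err := strconv.ParseInt(tz[0:3], 10, 64) // "-07" -> -7
	if err != nil {
		return nil, err
	}
	mins, err := strconv.ParseInt(tz[3:], 10, 64) // "30" -> 30
	if err != nil {
		return nil, err
	}
	if hours < 0 {
		mins *= -1
	}
	return time.FixedZone("", int(hours*60*60+mins*60)), nil
}

func main() {
	zone, _ := parseTZ("-0730")
	_, offset := time.Now().In(zone).Zone()
	fmt.Println(offset) // -27000 seconds, i.e. -(7*3600 + 30*60)
}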
diff --git a/plumbing/object/patch.go b/plumbing/object/patch.go
index 1efd0b1..32454ac 100644
--- a/plumbing/object/patch.go
+++ b/plumbing/object/patch.go
@@ -278,7 +278,7 @@ func printStat(fileStats []FileStat) string {
var scaleFactor float64
if longestTotalChange > heightOfHistogram {
// Scale down to heightOfHistogram.
- scaleFactor = float64(longestTotalChange / heightOfHistogram)
+ scaleFactor = longestTotalChange / heightOfHistogram
} else {
scaleFactor = 1.0
}
diff --git a/plumbing/object/patch_test.go b/plumbing/object/patch_test.go
index 47057fb..37944c3 100644
--- a/plumbing/object/patch_test.go
+++ b/plumbing/object/patch_test.go
@@ -19,6 +19,7 @@ func (s *PatchSuite) TestStatsWithSubmodules(c *C) {
fixtures.ByURL("https://github.com/git-fixtures/submodule.git").One().DotGit(), cache.NewObjectLRUDefault())
commit, err := GetCommit(storer, plumbing.NewHash("b685400c1f9316f350965a5993d350bc746b0bf4"))
+ c.Assert(err, IsNil)
tree, err := commit.Tree()
c.Assert(err, IsNil)
diff --git a/plumbing/object/tree.go b/plumbing/object/tree.go
index d30cf6e..d0b4fff 100644
--- a/plumbing/object/tree.go
+++ b/plumbing/object/tree.go
@@ -288,7 +288,7 @@ func (t *Tree) Encode(o plumbing.EncodedObject) (err error) {
return err
}
- if _, err = w.Write([]byte(entry.Hash[:])); err != nil {
+ if _, err = w.Write(entry.Hash[:]); err != nil {
return err
}
}
@@ -517,4 +517,4 @@ func simpleJoin(parent, child string) string {
return parent + "/" + child
}
return child
-} \ No newline at end of file
+}
diff --git a/plumbing/protocol/packp/advrefs.go b/plumbing/protocol/packp/advrefs.go
index 684e76a..487ee19 100644
--- a/plumbing/protocol/packp/advrefs.go
+++ b/plumbing/protocol/packp/advrefs.go
@@ -107,7 +107,7 @@ func (a *AdvRefs) resolveHead(s storer.ReferenceStorer) error {
return nil
}
- ref, err := s.Reference(plumbing.ReferenceName(plumbing.Master))
+ ref, err := s.Reference(plumbing.Master)
// check first if HEAD is pointing to master
if err == nil {
diff --git a/plumbing/protocol/packp/advrefs_decode.go b/plumbing/protocol/packp/advrefs_decode.go
index 1b4c62c..80f5b4e 100644
--- a/plumbing/protocol/packp/advrefs_decode.go
+++ b/plumbing/protocol/packp/advrefs_decode.go
@@ -53,7 +53,7 @@ func (d *advRefsDecoder) Decode(v *AdvRefs) error {
type decoderStateFn func(*advRefsDecoder) decoderStateFn
-// fills out the parser stiky error
+// fills out the parser sticky error
func (d *advRefsDecoder) error(format string, a ...interface{}) {
msg := fmt.Sprintf(
"pkt-line %d: %s", d.nLine,
@@ -281,7 +281,7 @@ func decodeShallow(p *advRefsDecoder) decoderStateFn {
}
if len(p.line) == 0 {
- return nil // succesfull parse of the advertised-refs message
+ return nil // successful parse of the advertised-refs message
}
return decodeShallow
diff --git a/plumbing/protocol/packp/capability/list.go b/plumbing/protocol/packp/capability/list.go
index 26a79b6..9609211 100644
--- a/plumbing/protocol/packp/capability/list.go
+++ b/plumbing/protocol/packp/capability/list.go
@@ -14,8 +14,8 @@ var (
// ErrArguments is returned if arguments are given with a capabilities that
// not supports arguments
ErrArguments = errors.New("arguments not allowed")
- // ErrEmtpyArgument is returned when an empty value is given
- ErrEmtpyArgument = errors.New("empty argument")
+ // ErrEmptyArgument is returned when an empty value is given
+ ErrEmptyArgument = errors.New("empty argument")
// ErrMultipleArguments multiple argument given to a capabilities that not
// support it
ErrMultipleArguments = errors.New("multiple arguments not allowed")
@@ -119,7 +119,7 @@ func (l *List) Add(c Capability, values ...string) error {
func (l *List) validateNoEmptyArgs(values []string) error {
for _, v := range values {
if v == "" {
- return ErrEmtpyArgument
+ return ErrEmptyArgument
}
}
return nil
diff --git a/plumbing/protocol/packp/capability/list_test.go b/plumbing/protocol/packp/capability/list_test.go
index 82dd63f..61b0b13 100644
--- a/plumbing/protocol/packp/capability/list_test.go
+++ b/plumbing/protocol/packp/capability/list_test.go
@@ -176,7 +176,7 @@ func (s *SuiteCapabilities) TestAddErrArgumentsNotAllowed(c *check.C) {
func (s *SuiteCapabilities) TestAddErrArguments(c *check.C) {
cap := NewList()
err := cap.Add(SymRef, "")
- c.Assert(err, check.Equals, ErrEmtpyArgument)
+ c.Assert(err, check.Equals, ErrEmptyArgument)
}
func (s *SuiteCapabilities) TestAddErrMultipleArguments(c *check.C) {
diff --git a/plumbing/protocol/packp/ulreq.go b/plumbing/protocol/packp/ulreq.go
index 74109d8..72895e3 100644
--- a/plumbing/protocol/packp/ulreq.go
+++ b/plumbing/protocol/packp/ulreq.go
@@ -68,8 +68,8 @@ func NewUploadRequest() *UploadRequest {
}
// NewUploadRequestFromCapabilities returns a pointer to a new UploadRequest
-// value, the request capabilities are filled with the most optiomal ones, based
-// on the adv value (advertaised capabilities), the UploadRequest generated it
+// value, the request capabilities are filled with the most optimal ones, based
+// on the adv value (advertised capabilities). The UploadRequest generated
// has no wants or shallows and an infinite depth.
func NewUploadRequestFromCapabilities(adv *capability.List) *UploadRequest {
r := NewUploadRequest()
diff --git a/plumbing/protocol/packp/ulreq_encode.go b/plumbing/protocol/packp/ulreq_encode.go
index 89a5986..dcfeb83 100644
--- a/plumbing/protocol/packp/ulreq_encode.go
+++ b/plumbing/protocol/packp/ulreq_encode.go
@@ -64,10 +64,10 @@ func (e *ulReqEncoder) encodeFirstWant() stateFn {
return nil
}
- return e.encodeAditionalWants
+ return e.encodeAdditionalWants
}
-func (e *ulReqEncoder) encodeAditionalWants() stateFn {
+func (e *ulReqEncoder) encodeAdditionalWants() stateFn {
last := e.data.Wants[0]
for _, w := range e.data.Wants[1:] {
if bytes.Equal(last[:], w[:]) {
diff --git a/plumbing/protocol/packp/updreq_decode.go b/plumbing/protocol/packp/updreq_decode.go
index c15d49c..59f095f 100644
--- a/plumbing/protocol/packp/updreq_decode.go
+++ b/plumbing/protocol/packp/updreq_decode.go
@@ -13,9 +13,9 @@ import (
)
var (
- shallowLineLength = len(shallow) + hashSize
- minCommandLength = hashSize*2 + 2 + 1
- minCommandAndCapsLenth = minCommandLength + 1
+ shallowLineLength = len(shallow) + hashSize
+ minCommandLength = hashSize*2 + 2 + 1
+ minCommandAndCapsLength = minCommandLength + 1
)
var (
@@ -46,7 +46,7 @@ func errInvalidShallowLineLength(got int) error {
func errInvalidCommandCapabilitiesLineLength(got int) error {
return errMalformedRequest(fmt.Sprintf(
"invalid command and capabilities line length: expected at least %d, got %d",
- minCommandAndCapsLenth, got))
+ minCommandAndCapsLength, got))
}
func errInvalidCommandLineLength(got int) error {
@@ -174,7 +174,7 @@ func (d *updReqDecoder) decodeCommandAndCapabilities() error {
return errMissingCapabilitiesDelimiter
}
- if len(b) < minCommandAndCapsLenth {
+ if len(b) < minCommandAndCapsLength {
return errInvalidCommandCapabilitiesLineLength(len(b))
}
@@ -225,7 +225,7 @@ func parseCommand(b []byte) (*Command, error) {
return nil, errInvalidNewObjId(err)
}
- return &Command{Old: oh, New: nh, Name: plumbing.ReferenceName(n)}, nil
+ return &Command{Old: oh, New: nh, Name: n}, nil
}
func parseHash(s string) (plumbing.Hash, error) {
diff --git a/plumbing/protocol/packp/uppackreq.go b/plumbing/protocol/packp/uppackreq.go
index 1144139..831ef8f 100644
--- a/plumbing/protocol/packp/uppackreq.go
+++ b/plumbing/protocol/packp/uppackreq.go
@@ -27,8 +27,8 @@ func NewUploadPackRequest() *UploadPackRequest {
}
// NewUploadPackRequestFromCapabilities creates a new UploadPackRequest and
-// returns a pointer. The request capabilities are filled with the most optiomal
-// ones, based on the adv value (advertaised capabilities), the UploadPackRequest
+// returns a pointer. The request capabilities are filled with the most optimal
+// ones, based on the adv value (advertised capabilities); by default
// it has no wants, haves or shallows and an infinite depth
func NewUploadPackRequestFromCapabilities(adv *capability.List) *UploadPackRequest {
ur := NewUploadRequestFromCapabilities(adv)
diff --git a/plumbing/storer/object.go b/plumbing/storer/object.go
index 98d1ec3..c84960a 100644
--- a/plumbing/storer/object.go
+++ b/plumbing/storer/object.go
@@ -141,7 +141,7 @@ func NewEncodedObjectLookupIter(
// Next returns the next object from the iterator. If the iterator has reached
// the end it will return io.EOF as an error. If the object can't be found in
// the object storage, it will return plumbing.ErrObjectNotFound as an error.
-// If the object is retreieved successfully error will be nil.
+// If the object is retrieved successfully error will be nil.
func (iter *EncodedObjectLookupIter) Next() (plumbing.EncodedObject, error) {
if iter.pos >= len(iter.series) {
return nil, io.EOF
@@ -187,7 +187,7 @@ func NewEncodedObjectSliceIter(series []plumbing.EncodedObject) *EncodedObjectSl
}
// Next returns the next object from the iterator. If the iterator has reached
-// the end it will return io.EOF as an error. If the object is retreieved
+// the end it will return io.EOF as an error. If the object is retrieved
// successfully error will be nil.
func (iter *EncodedObjectSliceIter) Next() (plumbing.EncodedObject, error) {
if len(iter.series) == 0 {
diff --git a/plumbing/transport/http/common.go b/plumbing/transport/http/common.go
index 5d3535e..16ff930 100644
--- a/plumbing/transport/http/common.go
+++ b/plumbing/transport/http/common.go
@@ -84,7 +84,7 @@ var DefaultClient = NewClient(nil)
// Unless a properly initialized client is given, it will fall back into
// `http.DefaultClient`.
//
-// Note that for HTTP client cannot distinguist between private repositories and
+// Note that the HTTP client cannot distinguish between private repositories and
// unexistent repositories on GitHub. So it returns `ErrAuthorizationRequired`
// for both.
func NewClient(c *http.Client) transport.Transport {
@@ -139,7 +139,7 @@ func (s *session) ApplyAuthToRequest(req *http.Request) {
return
}
- s.auth.setAuth(req)
+ s.auth.SetAuth(req)
}
func (s *session) ModifyEndpointIfRedirect(res *http.Response) {
@@ -175,7 +175,7 @@ func (*session) Close() error {
// AuthMethod is concrete implementation of common.AuthMethod for HTTP services
type AuthMethod interface {
transport.AuthMethod
- setAuth(r *http.Request)
+ SetAuth(r *http.Request)
}
func basicAuthFromEndpoint(ep *transport.Endpoint) *BasicAuth {
@@ -192,7 +192,7 @@ type BasicAuth struct {
Username, Password string
}
-func (a *BasicAuth) setAuth(r *http.Request) {
+func (a *BasicAuth) SetAuth(r *http.Request) {
if a == nil {
return
}
@@ -226,7 +226,7 @@ type TokenAuth struct {
Token string
}
-func (a *TokenAuth) setAuth(r *http.Request) {
+func (a *TokenAuth) SetAuth(r *http.Request) {
if a == nil {
return
}
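
Exporting SetAuth turns AuthMethod into an interface that can be implemented outside this package. A sketch of a custom header-based method (the headerAuth type is hypothetical, not part of the patch):

package main

import (
	"fmt"
	"net/http"

	githttp "gopkg.in/src-d/go-git.v4/plumbing/transport/http"
)

// headerAuth sets an arbitrary header on every request. Name and String
// satisfy transport.AuthMethod; SetAuth satisfies the HTTP-specific interface.
type headerAuth struct {
	key, value string
}

func (a *headerAuth) SetAuth(r *http.Request) {
	if a == nil {
		return
	}
	r.Header.Set(a.key, a.value)
}

func (a *headerAuth) Name() string   { return "http-header-auth" }
func (a *headerAuth) String() string { return a.Name() }

var _ githttp.AuthMethod = (*headerAuth)(nil)

func main() {
	req, _ := http.NewRequest("GET", "https://example.com/repo.git", nil)
	auth := &headerAuth{key: "X-Auth", value: "secret-token"}
	auth.SetAuth(req)
	fmt.Println(req.Header.Get("X-Auth"))
}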
diff --git a/plumbing/transport/http/common_test.go b/plumbing/transport/http/common_test.go
index 8b300e8..d9e0636 100644
--- a/plumbing/transport/http/common_test.go
+++ b/plumbing/transport/http/common_test.go
@@ -64,7 +64,7 @@ func (s *ClientSuite) TestNewTokenAuth(c *C) {
// Check header is set correctly
req, err := http.NewRequest("GET", "https://github.com/git-fixtures/basic", nil)
c.Assert(err, Equals, nil)
- a.setAuth(req)
+ a.SetAuth(req)
c.Assert(req.Header.Get("Authorization"), Equals, "Bearer OAUTH-TOKEN-TEXT")
}
diff --git a/plumbing/transport/internal/common/common.go b/plumbing/transport/internal/common/common.go
index 00497f3..cb1b6da 100644
--- a/plumbing/transport/internal/common/common.go
+++ b/plumbing/transport/internal/common/common.go
@@ -66,7 +66,7 @@ type Command interface {
Close() error
}
-// CommandKiller expands the Command interface, enableing it for being killed.
+// CommandKiller expands the Command interface, enabling it to be killed.
type CommandKiller interface {
// Kill and close the session whatever the state it is. It will block until
// the command is terminated.
diff --git a/plumbing/transport/internal/common/common_test.go b/plumbing/transport/internal/common/common_test.go
index b2f035d..c60ef3b 100644
--- a/plumbing/transport/internal/common/common_test.go
+++ b/plumbing/transport/internal/common/common_test.go
@@ -13,7 +13,7 @@ type CommonSuite struct{}
var _ = Suite(&CommonSuite{})
-func (s *CommonSuite) TestIsRepoNotFoundErrorForUnknowSource(c *C) {
+func (s *CommonSuite) TestIsRepoNotFoundErrorForUnknownSource(c *C) {
msg := "unknown system is complaining of something very sad :("
isRepoNotFound := isRepoNotFoundError(msg)
diff --git a/plumbing/transport/server/server.go b/plumbing/transport/server/server.go
index 20bd12e..8e0dcc1 100644
--- a/plumbing/transport/server/server.go
+++ b/plumbing/transport/server/server.go
@@ -286,11 +286,6 @@ func (s *rpSession) updateReferences(req *packp.ReferenceUpdateRequest) {
continue
}
- if err != nil {
- s.setStatus(cmd.Name, err)
- continue
- }
-
ref := plumbing.NewHashReference(cmd.Name, cmd.New)
err := s.storer.SetReference(ref)
s.setStatus(cmd.Name, err)
diff --git a/plumbing/transport/ssh/auth_method.go b/plumbing/transport/ssh/auth_method.go
index dbb47c5..1e5c383 100644
--- a/plumbing/transport/ssh/auth_method.go
+++ b/plumbing/transport/ssh/auth_method.go
@@ -61,7 +61,7 @@ func (a *KeyboardInteractive) ClientConfig() (*ssh.ClientConfig, error) {
return a.SetHostKeyCallback(&ssh.ClientConfig{
User: a.User,
Auth: []ssh.AuthMethod{
- ssh.KeyboardInteractiveChallenge(a.Challenge),
+ a.Challenge,
},
})
}