Diffstat (limited to 'plumbing')
27 files changed, 801 insertions, 182 deletions
diff --git a/plumbing/format/index/decoder.go b/plumbing/format/index/decoder.go
index df25530..ac57d08 100644
--- a/plumbing/format/index/decoder.go
+++ b/plumbing/format/index/decoder.go
@@ -261,6 +261,17 @@ func (d *Decoder) readExtension(idx *Index, header []byte) error {
 		if err := d.Decode(idx.ResolveUndo); err != nil {
 			return err
 		}
+	case bytes.Equal(header, endOfIndexEntryExtSignature):
+		r, err := d.getExtensionReader()
+		if err != nil {
+			return err
+		}
+
+		idx.EndOfIndexEntry = &EndOfIndexEntry{}
+		d := &endOfIndexEntryDecoder{r}
+		if err := d.Decode(idx.EndOfIndexEntry); err != nil {
+			return err
+		}
 	default:
 		return errUnknownExtension
 	}
@@ -449,3 +460,17 @@ func (d *resolveUndoDecoder) readStage(e *ResolveUndoEntry, s Stage) error {
 
 	return nil
 }
+
+type endOfIndexEntryDecoder struct {
+	r io.Reader
+}
+
+func (d *endOfIndexEntryDecoder) Decode(e *EndOfIndexEntry) error {
+	var err error
+	e.Offset, err = binary.ReadUint32(d.r)
+	if err != nil {
+		return err
+	}
+
+	return binary.Read(d.r, &e.Hash)
+}
diff --git a/plumbing/format/index/decoder_test.go b/plumbing/format/index/decoder_test.go
index b612ebb..7468ad0 100644
--- a/plumbing/format/index/decoder_test.go
+++ b/plumbing/format/index/decoder_test.go
@@ -202,3 +202,19 @@ func (s *IndexSuite) TestDecodeV4(c *C) {
 	c.Assert(idx.Entries[6].IntentToAdd, Equals, true)
 	c.Assert(idx.Entries[6].SkipWorktree, Equals, false)
 }
+
+func (s *IndexSuite) TestDecodeEndOfIndexEntry(c *C) {
+	f, err := fixtures.Basic().ByTag("end-of-index-entry").One().DotGit().Open("index")
+	c.Assert(err, IsNil)
+	defer func() { c.Assert(f.Close(), IsNil) }()
+
+	idx := &Index{}
+	d := NewDecoder(f)
+	err = d.Decode(idx)
+	c.Assert(err, IsNil)
+
+	c.Assert(idx.Version, Equals, uint32(2))
+	c.Assert(idx.EndOfIndexEntry, NotNil)
+	c.Assert(idx.EndOfIndexEntry.Offset, Equals, uint32(716))
+	c.Assert(idx.EndOfIndexEntry.Hash.String(), Equals, "922e89d9ffd7cefce93a211615b2053c0f42bd78")
+}
diff --git a/plumbing/format/index/doc.go b/plumbing/format/index/doc.go
index d1e7b33..f2b3d76 100644
--- a/plumbing/format/index/doc.go
+++ b/plumbing/format/index/doc.go
@@ -297,5 +297,64 @@
 // in the previous ewah bitmap.
 //
 // - One NUL.
-// Source https://www.kernel.org/pub/software/scm/git/docs/technical/index-format.txt
+//
+// == File System Monitor cache
+//
+// The file system monitor cache tracks files for which the core.fsmonitor
+// hook has told us about changes. The signature for this extension is
+// { 'F', 'S', 'M', 'N' }.
+//
+// The extension starts with
+//
+// - 32-bit version number: the current supported version is 1.
+//
+// - 64-bit time: the extension data reflects all changes through the given
+//   time which is stored as the nanoseconds elapsed since midnight,
+//   January 1, 1970.
+//
+// - 32-bit bitmap size: the size of the CE_FSMONITOR_VALID bitmap.
+//
+// - An ewah bitmap, the n-th bit indicates whether the n-th index entry
+//   is not CE_FSMONITOR_VALID.
+//
+// == End of Index Entry
+//
+// The End of Index Entry (EOIE) is used to locate the end of the variable
+// length index entries and the beginning of the extensions. Code can take
+// advantage of this to quickly locate the index extensions without having
+// to parse through all of the index entries.
+//
+// Because it must be able to be loaded before the variable length cache
+// entries and other index extensions, this extension must be written last.
+// The signature for this extension is { 'E', 'O', 'I', 'E' }.
+//
+// The extension consists of:
+//
+// - 32-bit offset to the end of the index entries
+//
+// - 160-bit SHA-1 over the extension types and their sizes (but not
+//   their contents). E.g. if we have "TREE" extension that is N-bytes
+//   long, "REUC" extension that is M-bytes long, followed by "EOIE",
+//   then the hash would be:
+//
+//   SHA-1("TREE" + <binary representation of N> +
+//   "REUC" + <binary representation of M>)
+//
+// == Index Entry Offset Table
+//
+// The Index Entry Offset Table (IEOT) is used to help address the CPU
+// cost of loading the index by enabling multi-threading the process of
+// converting cache entries from the on-disk format to the in-memory format.
+// The signature for this extension is { 'I', 'E', 'O', 'T' }.
+//
+// The extension consists of:
+//
+// - 32-bit version (currently 1)
+//
+// - A number of index offset entries each consisting of:
+//
+// - 32-bit offset from the beginning of the file to the first cache entry
+//   in this block of entries.
+//
+// - 32-bit count of cache entries in this block
 package index
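The EOIE trailer hash covers only each extension's 4-byte signature and its 32-bit size, in file order, never the extension payloads. A minimal sketch of that computation, assuming big-endian (network byte order) sizes as elsewhere in the index format; the extension list and sizes below are made up for illustration:

package main

import (
	"crypto/sha1"
	"encoding/binary"
	"fmt"
)

// extHeader is the per-extension input to the EOIE hash: the 4-byte
// signature and the 32-bit payload size, but never the payload itself.
type extHeader struct {
	signature []byte
	size      uint32
}

func eoieHash(exts []extHeader) []byte {
	h := sha1.New()
	for _, e := range exts {
		h.Write(e.signature)
		// Sizes are hashed in network byte order (big-endian).
		binary.Write(h, binary.BigEndian, e.size)
	}
	return h.Sum(nil)
}

func main() {
	// Hypothetical: a 42-byte TREE extension followed by a 17-byte
	// REUC extension, then EOIE.
	sum := eoieHash([]extHeader{
		{[]byte("TREE"), 42},
		{[]byte("REUC"), 17},
	})
	fmt.Printf("%x\n", sum)
}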
diff --git a/plumbing/format/index/index.go b/plumbing/format/index/index.go
index fc7b8cd..6c4b7ca 100644
--- a/plumbing/format/index/index.go
+++ b/plumbing/format/index/index.go
@@ -18,9 +18,10 @@ var (
 	// ErrEntryNotFound is returned by Index.Entry, if an entry is not found.
 	ErrEntryNotFound = errors.New("entry not found")
 
-	indexSignature          = []byte{'D', 'I', 'R', 'C'}
-	treeExtSignature        = []byte{'T', 'R', 'E', 'E'}
-	resolveUndoExtSignature = []byte{'R', 'E', 'U', 'C'}
+	indexSignature              = []byte{'D', 'I', 'R', 'C'}
+	treeExtSignature            = []byte{'T', 'R', 'E', 'E'}
+	resolveUndoExtSignature     = []byte{'R', 'E', 'U', 'C'}
+	endOfIndexEntryExtSignature = []byte{'E', 'O', 'I', 'E'}
 )
 
 // Stage during merge
@@ -50,6 +51,8 @@ type Index struct {
 	Cache *Tree
 	// ResolveUndo represents the 'Resolve undo' extension
 	ResolveUndo *ResolveUndo
+	// EndOfIndexEntry represents the 'End of Index Entry' extension
+	EndOfIndexEntry *EndOfIndexEntry
 }
 
 // Add creates a new Entry and returns it. The caller should first check that
@@ -193,3 +196,18 @@ type ResolveUndoEntry struct {
 	Path   string
 	Stages map[Stage]plumbing.Hash
 }
+
+// EndOfIndexEntry is the End of Index Entry (EOIE) extension, used to locate
+// the end of the variable length index entries and the beginning of the
+// extensions. Code can take advantage of this to quickly locate the index
+// extensions without having to parse through all of the index entries.
+//
+// Because it must be able to be loaded before the variable length cache
+// entries and other index extensions, this extension must be written last.
+type EndOfIndexEntry struct {
+	// Offset to the end of the index entries
+	Offset uint32
+	// Hash is a SHA-1 over the extension types and their sizes (but not
+	// their contents).
+	Hash plumbing.Hash
+}
diff --git a/plumbing/format/packfile/common.go b/plumbing/format/packfile/common.go
index 2b4aceb..0d9ed54 100644
--- a/plumbing/format/packfile/common.go
+++ b/plumbing/format/packfile/common.go
@@ -51,7 +51,13 @@ func WritePackfileToObjectStorage(
 	}
 
 	defer ioutil.CheckClose(w, &err)
-	_, err = io.Copy(w, packfile)
+
+	var n int64
+	n, err = io.Copy(w, packfile)
+	if err == nil && n == 0 {
+		return ErrEmptyPackfile
+	}
+
 	return err
 }
diff --git a/plumbing/format/packfile/common_test.go b/plumbing/format/packfile/common_test.go
index 387c0d1..eafc617 100644
--- a/plumbing/format/packfile/common_test.go
+++ b/plumbing/format/packfile/common_test.go
@@ -1,15 +1,29 @@
 package packfile
 
 import (
+	"bytes"
 	"testing"
 
 	"gopkg.in/src-d/go-git.v4/plumbing"
+	"gopkg.in/src-d/go-git.v4/storage/memory"
 
 	. "gopkg.in/check.v1"
 )
 
 func Test(t *testing.T) { TestingT(t) }
 
+type CommonSuite struct{}
+
+var _ = Suite(&CommonSuite{})
+
+func (s *CommonSuite) TestEmptyUpdateObjectStorage(c *C) {
+	var buf bytes.Buffer
+	sto := memory.NewStorage()
+
+	err := UpdateObjectStorage(sto, &buf)
+	c.Assert(err, Equals, ErrEmptyPackfile)
+}
+
 func newObject(t plumbing.ObjectType, cont []byte) plumbing.EncodedObject {
 	o := plumbing.MemoryObject{}
 	o.SetType(t)
diff --git a/plumbing/format/packfile/fsobject.go b/plumbing/format/packfile/fsobject.go
index 330cb73..a268bce 100644
--- a/plumbing/format/packfile/fsobject.go
+++ b/plumbing/format/packfile/fsobject.go
@@ -48,7 +48,7 @@ func NewFSObject(
 // Reader implements the plumbing.EncodedObject interface.
 func (o *FSObject) Reader() (io.ReadCloser, error) {
 	obj, ok := o.cache.Get(o.hash)
-	if ok {
+	if ok && obj != o {
 		reader, err := obj.Reader()
 		if err != nil {
 			return nil, err
diff --git a/plumbing/format/packfile/packfile.go b/plumbing/format/packfile/packfile.go
index 0d13066..69b6e85 100644
--- a/plumbing/format/packfile/packfile.go
+++ b/plumbing/format/packfile/packfile.go
@@ -21,6 +21,16 @@ var (
 	ErrZLib = NewError("zlib reading error")
 )
 
+// When reading small objects from packfile it is beneficial to do so at
+// once to exploit the buffered I/O. In many cases the objects are so small
+// that they were already loaded to memory when the object header was
+// loaded from the packfile. Wrapping in FSObject would cause this buffered
+// data to be thrown away and then re-read later, with the additional
+// seeking causing reloads from disk. Objects smaller than this threshold
+// are now always read into memory and stored in cache instead of being
+// wrapped in FSObject.
+const smallObjectThreshold = 16 * 1024
+
 // Packfile allows retrieving information from inside a packfile.
 type Packfile struct {
 	idxfile.Index
@@ -79,15 +89,7 @@ func (p *Packfile) GetByOffset(o int64) (plumbing.EncodedObject, error) {
 		}
 	}
 
-	if _, err := p.s.SeekFromStart(o); err != nil {
-		if err == io.EOF || isInvalid(err) {
-			return nil, plumbing.ErrObjectNotFound
-		}
-
-		return nil, err
-	}
-
-	return p.nextObject()
+	return p.objectAtOffset(o)
 }
 
 // GetSizeByOffset retrieves the size of the encoded object from the
@@ -105,69 +107,19 @@ func (p *Packfile) GetSizeByOffset(o int64) (size int64, err error) {
 	if err != nil {
 		return 0, err
 	}
-	return h.Length, nil
+	return p.getObjectSize(h)
 }
 
-func (p *Packfile) nextObjectHeader() (*ObjectHeader, error) {
-	h, err := p.s.NextObjectHeader()
+func (p *Packfile) objectHeaderAtOffset(offset int64) (*ObjectHeader, error) {
+	h, err := p.s.SeekObjectHeader(offset)
 	p.s.pendingObject = nil
 	return h, err
 }
 
-func (p *Packfile) getObjectData(
-	h *ObjectHeader,
-) (typ plumbing.ObjectType, size int64, err error) {
-	switch h.Type {
-	case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
-		typ = h.Type
-		size = h.Length
-	case plumbing.REFDeltaObject, plumbing.OFSDeltaObject:
-		buf := bufPool.Get().(*bytes.Buffer)
-		buf.Reset()
-		defer bufPool.Put(buf)
-
-		_, _, err = p.s.NextObject(buf)
-		if err != nil {
-			return
-		}
-
-		delta := buf.Bytes()
-		_, delta = decodeLEB128(delta) // skip src size
-		sz, _ := decodeLEB128(delta)
-		size = int64(sz)
-
-		var offset int64
-		if h.Type == plumbing.REFDeltaObject {
-			offset, err = p.FindOffset(h.Reference)
-			if err != nil {
-				return
-			}
-		} else {
-			offset = h.OffsetReference
-		}
-
-		if baseType, ok := p.offsetToType[offset]; ok {
-			typ = baseType
-		} else {
-			if _, err = p.s.SeekFromStart(offset); err != nil {
-				return
-			}
-
-			h, err = p.nextObjectHeader()
-			if err != nil {
-				return
-			}
-
-			typ, _, err = p.getObjectData(h)
-			if err != nil {
-				return
-			}
-		}
-	default:
-		err = ErrInvalidObject.AddDetails("type %q", h.Type)
-	}
-
-	return
+func (p *Packfile) nextObjectHeader() (*ObjectHeader, error) {
+	h, err := p.s.NextObjectHeader()
+	p.s.pendingObject = nil
+	return h, err
 }
 
 func (p *Packfile) getObjectSize(h *ObjectHeader) (int64, error) {
@@ -210,11 +162,7 @@ func (p *Packfile) getObjectType(h *ObjectHeader) (typ plumbing.ObjectType, err
 		if baseType, ok := p.offsetToType[offset]; ok {
 			typ = baseType
 		} else {
-			if _, err = p.s.SeekFromStart(offset); err != nil {
-				return
-			}
-
-			h, err = p.nextObjectHeader()
+			h, err = p.objectHeaderAtOffset(offset)
 			if err != nil {
 				return
 			}
@@ -231,8 +179,8 @@ func (p *Packfile) getObjectType(h *ObjectHeader) (typ plumbing.ObjectType, err
 	return
 }
 
-func (p *Packfile) nextObject() (plumbing.EncodedObject, error) {
-	h, err := p.nextObjectHeader()
+func (p *Packfile) objectAtOffset(offset int64) (plumbing.EncodedObject, error) {
+	h, err := p.objectHeaderAtOffset(offset)
 	if err != nil {
 		if err == io.EOF || isInvalid(err) {
 			return nil, plumbing.ErrObjectNotFound
@@ -246,6 +194,13 @@ func (p *Packfile) objectAtOffset(offset int64) (plumbing.EncodedObject, error)
 		return p.getNextObject(h)
 	}
 
+	// If the object is not a delta and it's small enough then read it
+	// completely into memory now since it is already read from disk
+	// into buffer anyway.
+	if h.Length <= smallObjectThreshold && h.Type != plumbing.OFSDeltaObject && h.Type != plumbing.REFDeltaObject {
+		return p.getNextObject(h)
+	}
+
 	hash, err := p.FindHash(h.Offset)
 	if err != nil {
 		return nil, err
@@ -289,11 +244,7 @@ func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) {
 		}
 	}
 
-	if _, err := p.s.SeekFromStart(offset); err != nil {
-		return nil, err
-	}
-
-	h, err := p.nextObjectHeader()
+	h, err := p.objectHeaderAtOffset(offset)
 	if err != nil {
 		return nil, err
 	}
@@ -385,8 +336,6 @@ func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset
 		if err != nil {
 			return err
 		}
-
-		p.cachePut(base)
 	}
 
 	obj.SetType(base.Type())
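The fast path added to objectAtOffset boils down to a single predicate on the object header. A hedged restatement as a standalone helper (the helper name is ours, not part of the package):

package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
)

// isSmallNonDelta mirrors the check above: plain (non-delta) objects of at
// most 16 KiB are decoded straight into memory and cached, instead of being
// wrapped in an FSObject that would discard the already-buffered data.
func isSmallNonDelta(h *packfile.ObjectHeader) bool {
	const smallObjectThreshold = 16 * 1024
	return h.Length <= smallObjectThreshold &&
		h.Type != plumbing.OFSDeltaObject &&
		h.Type != plumbing.REFDeltaObject
}

func main() {
	h := &packfile.ObjectHeader{Type: plumbing.BlobObject, Length: 1024}
	fmt.Println(isSmallNonDelta(h)) // true: read into memory, not FSObject
}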
diff --git a/plumbing/format/packfile/packfile_test.go b/plumbing/format/packfile/packfile_test.go
index 05dc8a7..455fe65 100644
--- a/plumbing/format/packfile/packfile_test.go
+++ b/plumbing/format/packfile/packfile_test.go
@@ -277,3 +277,29 @@ func getIndexFromIdxFile(r io.Reader) idxfile.Index {
 
 	return idxf
 }
+
+func (s *PackfileSuite) TestSize(c *C) {
+	f := fixtures.Basic().ByTag("ref-delta").One()
+
+	index := getIndexFromIdxFile(f.Idx())
+	fs := osfs.New("")
+	pf, err := fs.Open(f.Packfile().Name())
+	c.Assert(err, IsNil)
+
+	packfile := packfile.NewPackfile(index, fs, pf)
+	defer packfile.Close()
+
+	// Get the size of binary.jpg, which is not delta-encoded.
+	offset, err := packfile.FindOffset(plumbing.NewHash("d5c0f4ab811897cadf03aec358ae60d21f91c50d"))
+	c.Assert(err, IsNil)
+	size, err := packfile.GetSizeByOffset(offset)
+	c.Assert(err, IsNil)
+	c.Assert(size, Equals, int64(76110))
+
+	// Get the size of the root commit, which is delta-encoded.
+	offset, err = packfile.FindOffset(f.Head)
+	c.Assert(err, IsNil)
+	size, err = packfile.GetSizeByOffset(offset)
+	c.Assert(err, IsNil)
+	c.Assert(size, Equals, int64(245))
+}
diff --git a/plumbing/format/packfile/parser.go b/plumbing/format/packfile/parser.go
index 28582b5..71cbba9 100644
--- a/plumbing/format/packfile/parser.go
+++ b/plumbing/format/packfile/parser.go
@@ -38,15 +38,14 @@ type Observer interface {
 // Parser decodes a packfile and calls any observer associated to it. It is
 // used to generate indexes.
 type Parser struct {
-	storage          storer.EncodedObjectStorer
-	scanner          *Scanner
-	count            uint32
-	oi               []*objectInfo
-	oiByHash         map[plumbing.Hash]*objectInfo
-	oiByOffset       map[int64]*objectInfo
-	hashOffset       map[plumbing.Hash]int64
-	pendingRefDeltas map[plumbing.Hash][]*objectInfo
-	checksum         plumbing.Hash
+	storage    storer.EncodedObjectStorer
+	scanner    *Scanner
+	count      uint32
+	oi         []*objectInfo
+	oiByHash   map[plumbing.Hash]*objectInfo
+	oiByOffset map[int64]*objectInfo
+	hashOffset map[plumbing.Hash]int64
+	checksum   plumbing.Hash
 
 	cache *cache.BufferLRU
 	// delta content by offset, only used if source is not seekable
@@ -78,13 +77,12 @@ func NewParserWithStorage(
 	}
 
 	return &Parser{
-		storage:          storage,
-		scanner:          scanner,
-		ob:               ob,
-		count:            0,
-		cache:            cache.NewBufferLRUDefault(),
-		pendingRefDeltas: make(map[plumbing.Hash][]*objectInfo),
-		deltas:           deltas,
+		storage: storage,
+		scanner: scanner,
+		ob:      ob,
+		count:   0,
+		cache:   cache.NewBufferLRUDefault(),
+		deltas:  deltas,
 	}, nil
 }
 
@@ -150,10 +148,6 @@ func (p *Parser) Parse() (plumbing.Hash, error) {
 		return plumbing.ZeroHash, err
 	}
 
-	if len(p.pendingRefDeltas) > 0 {
-		return plumbing.ZeroHash, ErrReferenceDeltaNotFound
-	}
-
 	if err := p.onFooter(p.checksum); err != nil {
 		return plumbing.ZeroHash, err
 	}
@@ -205,18 +199,21 @@ func (p *Parser) indexObjects() error {
 			parent.Children = append(parent.Children, ota)
 		case plumbing.REFDeltaObject:
 			delta = true
-			parent, ok := p.oiByHash[oh.Reference]
-			if ok {
-				ota = newDeltaObject(oh.Offset, oh.Length, t, parent)
-				parent.Children = append(parent.Children, ota)
-			} else {
-				ota = newBaseObject(oh.Offset, oh.Length, t)
-				p.pendingRefDeltas[oh.Reference] = append(
-					p.pendingRefDeltas[oh.Reference],
-					ota,
-				)
+
+			parent, ok := p.oiByHash[oh.Reference]
+			if !ok {
+				// can't find referenced object in this pack file
+				// this must be a "thin" pack.
+				parent = &objectInfo{ // Placeholder parent
+					SHA1:        oh.Reference,
+					ExternalRef: true, // mark as an external reference that must be resolved
+					Type:        plumbing.AnyObject,
+					DiskType:    plumbing.AnyObject,
+				}
+				p.oiByHash[oh.Reference] = parent
 			}
+			ota = newDeltaObject(oh.Offset, oh.Length, t, parent)
+			parent.Children = append(parent.Children, ota)
+
 		default:
 			ota = newBaseObject(oh.Offset, oh.Length, t)
 		}
@@ -297,16 +294,20 @@ func (p *Parser) resolveDeltas() error {
 	return nil
 }
 
-func (p *Parser) get(o *objectInfo) ([]byte, error) {
-	b, ok := p.cache.Get(o.Offset)
+func (p *Parser) get(o *objectInfo) (b []byte, err error) {
+	var ok bool
+	if !o.ExternalRef { // skip cache check for placeholder parents
+		b, ok = p.cache.Get(o.Offset)
+	}
+
 	// If it's not on the cache and is not a delta we can try to find it in the
-	// storage, if there's one.
+	// storage, if there's one. External refs must enter here.
 	if !ok && p.storage != nil && !o.Type.IsDelta() {
-		var err error
 		e, err := p.storage.EncodedObject(plumbing.AnyObject, o.SHA1)
 		if err != nil {
 			return nil, err
 		}
+		o.Type = e.Type()
 
 		r, err := e.Reader()
		if err != nil {
@@ -323,6 +324,11 @@ func (p *Parser) get(o *objectInfo) ([]byte, error) {
 		return b, nil
 	}
 
+	if o.ExternalRef {
+		// we were not able to resolve a ref in a thin pack
+		return nil, ErrReferenceDeltaNotFound
+	}
+
 	var data []byte
 	if o.DiskType.IsDelta() {
 		base, err := p.get(o.Parent)
@@ -335,7 +341,6 @@ func (p *Parser) get(o *objectInfo) ([]byte, error) {
 			return nil, err
 		}
 	} else {
-		var err error
 		data, err = p.readData(o)
 		if err != nil {
 			return nil, err
@@ -367,14 +372,6 @@ func (p *Parser) resolveObject(
 		return nil, err
 	}
 
-	if pending, ok := p.pendingRefDeltas[o.SHA1]; ok {
-		for _, po := range pending {
-			po.Parent = o
-			o.Children = append(o.Children, po)
-		}
-		delete(p.pendingRefDeltas, o.SHA1)
-	}
-
 	if p.storage != nil {
 		obj := new(plumbing.MemoryObject)
 		obj.SetSize(o.Size())
@@ -401,11 +398,7 @@ func (p *Parser) readData(o *objectInfo) ([]byte, error) {
 		return data, nil
 	}
 
-	if _, err := p.scanner.SeekFromStart(o.Offset); err != nil {
-		return nil, err
-	}
-
-	if _, err := p.scanner.NextObjectHeader(); err != nil {
+	if _, err := p.scanner.SeekObjectHeader(o.Offset); err != nil {
 		return nil, err
 	}
 
@@ -447,10 +440,11 @@ func getSHA1(t plumbing.ObjectType, data []byte) (plumbing.Hash, error) {
 }
 
 type objectInfo struct {
-	Offset   int64
-	Length   int64
-	Type     plumbing.ObjectType
-	DiskType plumbing.ObjectType
+	Offset      int64
+	Length      int64
+	Type        plumbing.ObjectType
+	DiskType    plumbing.ObjectType
+	ExternalRef bool // indicates this is an external reference in a thin pack file
 
 	Crc32 uint32
diff --git a/plumbing/format/packfile/parser_test.go b/plumbing/format/packfile/parser_test.go
index 012a140..6e7c84b 100644
--- a/plumbing/format/packfile/parser_test.go
+++ b/plumbing/format/packfile/parser_test.go
@@ -1,10 +1,13 @@
 package packfile_test
 
 import (
+	"io"
 	"testing"
 
+	git "gopkg.in/src-d/go-git.v4"
 	"gopkg.in/src-d/go-git.v4/plumbing"
 	"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
+	"gopkg.in/src-d/go-git.v4/plumbing/storer"
 
 	. "gopkg.in/check.v1"
 	"gopkg.in/src-d/go-git-fixtures.v3"
@@ -74,6 +77,53 @@ func (s *ParserSuite) TestParserHashes(c *C) {
 	c.Assert(obs.objects, DeepEquals, objs)
 }
 
+func (s *ParserSuite) TestThinPack(c *C) {
+	// Initialize an empty repository
+	fs, err := git.PlainInit(c.MkDir(), true)
+	c.Assert(err, IsNil)
+
+	// Try to parse a thin pack without having the required objects in the
+	// repo to see if the correct errors are returned
+	thinpack := fixtures.ByTag("thinpack").One()
+	scanner := packfile.NewScanner(thinpack.Packfile())
+	parser, err := packfile.NewParserWithStorage(scanner, fs.Storer) // ParserWithStorage writes to the storer all parsed objects!
+	c.Assert(err, IsNil)
+
+	_, err = parser.Parse()
+	c.Assert(err, Equals, plumbing.ErrObjectNotFound)
+
+	// Start over with a clean repo
+	fs, err = git.PlainInit(c.MkDir(), true)
+	c.Assert(err, IsNil)
+
+	// Now unpack a base packfile into our empty repo:
+	f := fixtures.ByURL("https://github.com/spinnaker/spinnaker.git").One()
+	w, err := fs.Storer.(storer.PackfileWriter).PackfileWriter()
+	c.Assert(err, IsNil)
+	_, err = io.Copy(w, f.Packfile())
+	c.Assert(err, IsNil)
+	w.Close()
+
+	// Check that the test object that will come with our thin pack is *not* in the repo
+	_, err = fs.Storer.EncodedObject(plumbing.CommitObject, thinpack.Head)
+	c.Assert(err, Equals, plumbing.ErrObjectNotFound)
+
+	// Now unpack the thin pack:
+	scanner = packfile.NewScanner(thinpack.Packfile())
+	parser, err = packfile.NewParserWithStorage(scanner, fs.Storer) // ParserWithStorage writes to the storer all parsed objects!
+	c.Assert(err, IsNil)
+
+	h, err := parser.Parse()
+	c.Assert(err, IsNil)
+	c.Assert(h, Equals, plumbing.NewHash("1288734cbe0b95892e663221d94b95de1f5d7be8"))
+
+	// Check that our test object is now accessible
+	_, err = fs.Storer.EncodedObject(plumbing.CommitObject, thinpack.Head)
+	c.Assert(err, IsNil)
+}
+
 type observerObject struct {
 	hash  string
 	otype plumbing.ObjectType
diff --git a/plumbing/format/packfile/scanner.go b/plumbing/format/packfile/scanner.go
index 6fc183b..614b0d1 100644
--- a/plumbing/format/packfile/scanner.go
+++ b/plumbing/format/packfile/scanner.go
@@ -138,14 +138,52 @@ func (s *Scanner) readCount() (uint32, error) {
 	return binary.ReadUint32(s.r)
 }
 
+// SeekObjectHeader seeks to specified offset and returns the ObjectHeader
+// for the next object in the reader
+func (s *Scanner) SeekObjectHeader(offset int64) (*ObjectHeader, error) {
+	// if seeking we assume that you are not interested in the header
+	if s.version == 0 {
+		s.version = VersionSupported
+	}
+
+	if _, err := s.r.Seek(offset, io.SeekStart); err != nil {
+		return nil, err
+	}
+
+	h, err := s.nextObjectHeader()
+	if err != nil {
+		return nil, err
+	}
+
+	h.Offset = offset
+	return h, nil
+}
+
 // NextObjectHeader returns the ObjectHeader for the next object in the reader
 func (s *Scanner) NextObjectHeader() (*ObjectHeader, error) {
-	defer s.Flush()
-
 	if err := s.doPending(); err != nil {
 		return nil, err
 	}
 
+	offset, err := s.r.Seek(0, io.SeekCurrent)
+	if err != nil {
+		return nil, err
+	}
+
+	h, err := s.nextObjectHeader()
+	if err != nil {
+		return nil, err
+	}
+
+	h.Offset = offset
+	return h, nil
+}
+
+// nextObjectHeader returns the ObjectHeader for the next object in the reader
+// without the Offset field
+func (s *Scanner) nextObjectHeader() (*ObjectHeader, error) {
+	defer s.Flush()
+
 	s.crc.Reset()
 
 	h := &ObjectHeader{}
@@ -308,7 +346,7 @@ var byteSlicePool = sync.Pool{
 // SeekFromStart sets a new offset from start, returns the old position before
 // the change.
 func (s *Scanner) SeekFromStart(offset int64) (previous int64, err error) {
-	// if seeking we assume that you are not interested on the header
+	// if seeking we assume that you are not interested in the header
 	if s.version == 0 {
 		s.version = VersionSupported
 	}
@@ -385,7 +423,7 @@ type bufferedSeeker struct {
 }
 
 func (r *bufferedSeeker) Seek(offset int64, whence int) (int64, error) {
-	if whence == io.SeekCurrent {
+	if whence == io.SeekCurrent && offset == 0 {
 		current, err := r.r.Seek(offset, whence)
 		if err != nil {
 			return current, err
diff --git a/plumbing/format/packfile/scanner_test.go b/plumbing/format/packfile/scanner_test.go
index 644d0eb..091b457 100644
--- a/plumbing/format/packfile/scanner_test.go
+++ b/plumbing/format/packfile/scanner_test.go
@@ -118,6 +118,23 @@ func (s *ScannerSuite) TestNextObjectHeaderWithOutReadObjectNonSeekable(c *C) {
 	c.Assert(n, Equals, f.PackfileHash)
 }
 
+func (s *ScannerSuite) TestSeekObjectHeader(c *C) {
+	r := fixtures.Basic().One().Packfile()
+	p := NewScanner(r)
+
+	h, err := p.SeekObjectHeader(expectedHeadersOFS[4].Offset)
+	c.Assert(err, IsNil)
+	c.Assert(h, DeepEquals, &expectedHeadersOFS[4])
+}
+
+func (s *ScannerSuite) TestSeekObjectHeaderNonSeekable(c *C) {
+	r := io.MultiReader(fixtures.Basic().One().Packfile())
+	p := NewScanner(r)
+
+	_, err := p.SeekObjectHeader(expectedHeadersOFS[4].Offset)
+	c.Assert(err, Equals, ErrSeekNotSupported)
+}
+
 var expectedHeadersOFS = []ObjectHeader{
 	{Type: plumbing.CommitObject, Offset: 12, Length: 254},
 	{Type: plumbing.OFSDeltaObject, Offset: 186, Length: 93, OffsetReference: 12},
diff --git a/plumbing/object/commit_walker.go b/plumbing/object/commit_walker.go
index 40ad258..0eff059 100644
--- a/plumbing/object/commit_walker.go
+++ b/plumbing/object/commit_walker.go
@@ -1,10 +1,12 @@
 package object
 
 import (
+	"container/list"
 	"io"
 
 	"gopkg.in/src-d/go-git.v4/plumbing"
 	"gopkg.in/src-d/go-git.v4/plumbing/storer"
+	"gopkg.in/src-d/go-git.v4/storage"
 )
 
 type commitPreIterator struct {
@@ -181,3 +183,145 @@ func (w *commitPostIterator) ForEach(cb func(*Commit) error) error {
 }
 
 func (w *commitPostIterator) Close() {}
+
+// commitAllIterator is a commit iterator over all refs.
+type commitAllIterator struct {
+	// currCommit points to the current commit.
+	currCommit *list.Element
+}
+
+// NewCommitAllIter returns a new commit iterator for all refs.
+// repoStorer is a repo Storer used to get commits and references.
+// commitIterFunc is a commit iterator function, used to iterate through ref
+// commits in the chosen order.
+func NewCommitAllIter(repoStorer storage.Storer, commitIterFunc func(*Commit) CommitIter) (CommitIter, error) {
+	commitsPath := list.New()
+	commitsLookup := make(map[plumbing.Hash]*list.Element)
+	head, err := storer.ResolveReference(repoStorer, plumbing.HEAD)
+	if err == nil {
+		err = addReference(repoStorer, commitIterFunc, head, commitsPath, commitsLookup)
+	}
+
+	if err != nil && err != plumbing.ErrReferenceNotFound {
+		return nil, err
+	}
+
+	// add all references along with the HEAD
+	refIter, err := repoStorer.IterReferences()
+	if err != nil {
+		return nil, err
+	}
+	defer refIter.Close()
+
+	for {
+		ref, err := refIter.Next()
+		if err == io.EOF {
+			break
+		}
+
+		if err == plumbing.ErrReferenceNotFound {
+			continue
+		}
+
+		if err != nil {
+			return nil, err
+		}
+
+		if err = addReference(repoStorer, commitIterFunc, ref, commitsPath, commitsLookup); err != nil {
+			return nil, err
+		}
+	}
+
+	return &commitAllIterator{commitsPath.Front()}, nil
+}
+
+func addReference(
+	repoStorer storage.Storer,
+	commitIterFunc func(*Commit) CommitIter,
+	ref *plumbing.Reference,
+	commitsPath *list.List,
+	commitsLookup map[plumbing.Hash]*list.Element) error {
+
+	_, exists := commitsLookup[ref.Hash()]
+	if exists {
+		// we already have it - skip the reference.
+		return nil
+	}
+
+	refCommit, _ := GetCommit(repoStorer, ref.Hash())
+	if refCommit == nil {
+		// if it's not a commit - skip it.
+		return nil
+	}
+
+	var (
+		refCommits []*Commit
+		parent     *list.Element
+	)
+	// collect all ref commits to add
+	commitIter := commitIterFunc(refCommit)
+	for c, e := commitIter.Next(); e == nil; {
+		parent, exists = commitsLookup[c.Hash]
+		if exists {
+			break
+		}
+		refCommits = append(refCommits, c)
+		c, e = commitIter.Next()
+	}
+	commitIter.Close()
+
+	if parent == nil {
+		// no common parent found; add all commits from this ref to the path
+		// (maybe it's a HEAD and we don't have anything yet)
+		for _, c := range refCommits {
+			parent = commitsPath.PushBack(c)
+			commitsLookup[c.Hash] = parent
+		}
+	} else {
+		// add ref's commits to the path in reverse order (from the latest)
+		for i := len(refCommits) - 1; i >= 0; i-- {
+			c := refCommits[i]
+			// insert before found common parent
+			parent = commitsPath.InsertBefore(c, parent)
+			commitsLookup[c.Hash] = parent
+		}
+	}
+
+	return nil
+}
+
+func (it *commitAllIterator) Next() (*Commit, error) {
+	if it.currCommit == nil {
+		return nil, io.EOF
+	}
+
+	c := it.currCommit.Value.(*Commit)
+	it.currCommit = it.currCommit.Next()
+
+	return c, nil
+}
+
+func (it *commitAllIterator) ForEach(cb func(*Commit) error) error {
+	for {
+		c, err := it.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		err = cb(c)
+		if err == storer.ErrStop {
+			break
+		}
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (it *commitAllIterator) Close() {
+	it.currCommit = nil
+}
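A minimal sketch of driving NewCommitAllIter: walking every commit reachable from any ref, pre-order per ref, against a hypothetical repository path. NewCommitPreorderIter is the existing walker from this package.

package main

import (
	"fmt"

	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
)

func main() {
	// Hypothetical repository path.
	repo, err := git.PlainOpen("/path/to/repo")
	if err != nil {
		panic(err)
	}

	// Walk each ref's history in pre-order from its tip; NewCommitAllIter
	// merges the per-ref paths as described in the comments above.
	iter, err := object.NewCommitAllIter(repo.Storer, func(c *object.Commit) object.CommitIter {
		return object.NewCommitPreorderIter(c, nil, nil)
	})
	if err != nil {
		panic(err)
	}

	err = iter.ForEach(func(c *object.Commit) error {
		fmt.Println(c.Hash, c.Author.When.Format("2006-01-02"))
		return nil
	})
	if err != nil {
		panic(err)
	}
}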
diff --git a/plumbing/object/commit_walker_file.go b/plumbing/object/commit_walker_file.go
index 84e738a..6f16e61 100644
--- a/plumbing/object/commit_walker_file.go
+++ b/plumbing/object/commit_walker_file.go
@@ -1,23 +1,30 @@
 package object
 
 import (
-	"gopkg.in/src-d/go-git.v4/plumbing/storer"
 	"io"
+
+	"gopkg.in/src-d/go-git.v4/plumbing"
+
+	"gopkg.in/src-d/go-git.v4/plumbing/storer"
 )
 
 type commitFileIter struct {
 	fileName      string
 	sourceIter    CommitIter
 	currentCommit *Commit
+	checkParent   bool
 }
 
 // NewCommitFileIterFromIter returns a commit iterator which performs diffTree between
 // successive trees returned from the commit iterator from the argument. The purpose of this is
 // to find the commits that explain how the files that match the path came to be.
-func NewCommitFileIterFromIter(fileName string, commitIter CommitIter) CommitIter {
+// If checkParent is true, the function double-checks that the potential parent (the next
+// commit in the path) is one of the parents in the tree (it's used by `git log --all`).
+func NewCommitFileIterFromIter(fileName string, commitIter CommitIter, checkParent bool) CommitIter {
 	iterator := new(commitFileIter)
 	iterator.sourceIter = commitIter
 	iterator.fileName = fileName
+	iterator.checkParent = checkParent
 	return iterator
 }
 
@@ -71,20 +78,14 @@ func (c *commitFileIter) getNextFileCommit() (*Commit, error) {
 			return nil, diffErr
 		}
 
-		foundChangeForFile := false
-		for _, change := range changes {
-			if change.name() == c.fileName {
-				foundChangeForFile = true
-				break
-			}
-		}
+		found := c.hasFileChange(changes, parentCommit)
 
 		// Storing the current-commit in-case a change is found, and
 		// Updating the current-commit for the next-iteration
 		prevCommit := c.currentCommit
 		c.currentCommit = parentCommit
 
-		if foundChangeForFile == true {
+		if found {
 			return prevCommit, nil
 		}
 
@@ -95,6 +96,35 @@ func (c *commitFileIter) getNextFileCommit() (*Commit, error) {
 	}
 }
 
+func (c *commitFileIter) hasFileChange(changes Changes, parent *Commit) bool {
+	for _, change := range changes {
+		if change.name() != c.fileName {
+			continue
+		}
+
+		// filename matches, now check if source iterator contains all commits (from all refs)
+		if c.checkParent {
+			if parent != nil && isParentHash(parent.Hash, c.currentCommit) {
+				return true
+			}
+			continue
+		}
+
+		return true
+	}
+
+	return false
+}
+
+func isParentHash(hash plumbing.Hash, commit *Commit) bool {
+	for _, h := range commit.ParentHashes {
+		if h == hash {
+			return true
+		}
+	}
+	return false
+}
+
 func (c *commitFileIter) ForEach(cb func(*Commit) error) error {
 	for {
 		commit, nextErr := c.Next()
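A short usage sketch of the updated NewCommitFileIterFromIter signature, listing one file's history from HEAD with checkParent disabled; the repository path and file name are assumptions.

package main

import (
	"fmt"
	"strings"

	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
)

func main() {
	// Hypothetical repository path.
	repo, err := git.PlainOpen("/path/to/repo")
	if err != nil {
		panic(err)
	}
	ref, err := repo.Head()
	if err != nil {
		panic(err)
	}
	head, err := repo.CommitObject(ref.Hash())
	if err != nil {
		panic(err)
	}

	// checkParent=false keeps the plain single-history behaviour; pass true
	// when the source iterator mixes commits from all refs, as in `git log --all`.
	iter := object.NewCommitFileIterFromIter(
		"README.md",
		object.NewCommitPreorderIter(head, nil, nil),
		false,
	)
	err = iter.ForEach(func(c *object.Commit) error {
		fmt.Println(c.Hash, strings.Split(c.Message, "\n")[0])
		return nil
	})
	if err != nil {
		panic(err)
	}
}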
diff --git a/plumbing/reference.go b/plumbing/reference.go
index 2f53d4e..08e908f 100644
--- a/plumbing/reference.go
+++ b/plumbing/reference.go
@@ -55,6 +55,36 @@ func (r ReferenceType) String() string {
 // ReferenceName reference name's type
 type ReferenceName string
 
+// NewBranchReferenceName returns a reference name describing a branch based on
+// its short name.
+func NewBranchReferenceName(name string) ReferenceName {
+	return ReferenceName(refHeadPrefix + name)
+}
+
+// NewNoteReferenceName returns a reference name describing a note based on its
+// short name.
+func NewNoteReferenceName(name string) ReferenceName {
+	return ReferenceName(refNotePrefix + name)
+}
+
+// NewRemoteReferenceName returns a reference name describing a remote branch
+// based on its short name and the remote name.
+func NewRemoteReferenceName(remote, name string) ReferenceName {
+	return ReferenceName(refRemotePrefix + fmt.Sprintf("%s/%s", remote, name))
+}
+
+// NewRemoteHEADReferenceName returns a reference name describing the HEAD
+// branch of a remote.
+func NewRemoteHEADReferenceName(remote string) ReferenceName {
+	return ReferenceName(refRemotePrefix + fmt.Sprintf("%s/%s", remote, HEAD))
+}
+
+// NewTagReferenceName returns a reference name describing a tag based on its
+// short name.
+func NewTagReferenceName(name string) ReferenceName {
+	return ReferenceName(refTagPrefix + name)
+}
+
 // IsBranch check if a reference is a branch
 func (r ReferenceName) IsBranch() bool {
 	return strings.HasPrefix(string(r), refHeadPrefix)
diff --git a/plumbing/reference_test.go b/plumbing/reference_test.go
index 47919ef..b3ccf53 100644
--- a/plumbing/reference_test.go
+++ b/plumbing/reference_test.go
@@ -54,6 +54,31 @@ func (s *ReferenceSuite) TestNewHashReference(c *C) {
 	c.Assert(r.Hash(), Equals, NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"))
 }
 
+func (s *ReferenceSuite) TestNewBranchReferenceName(c *C) {
+	r := NewBranchReferenceName("foo")
+	c.Assert(r.String(), Equals, "refs/heads/foo")
+}
+
+func (s *ReferenceSuite) TestNewNoteReferenceName(c *C) {
+	r := NewNoteReferenceName("foo")
+	c.Assert(r.String(), Equals, "refs/notes/foo")
+}
+
+func (s *ReferenceSuite) TestNewRemoteReferenceName(c *C) {
+	r := NewRemoteReferenceName("bar", "foo")
+	c.Assert(r.String(), Equals, "refs/remotes/bar/foo")
+}
+
+func (s *ReferenceSuite) TestNewRemoteHEADReferenceName(c *C) {
+	r := NewRemoteHEADReferenceName("foo")
+	c.Assert(r.String(), Equals, "refs/remotes/foo/HEAD")
+}
+
+func (s *ReferenceSuite) TestNewTagReferenceName(c *C) {
+	r := NewTagReferenceName("foo")
+	c.Assert(r.String(), Equals, "refs/tags/foo")
+}
+
 func (s *ReferenceSuite) TestIsBranch(c *C) {
 	r := ExampleReferenceName
 	c.Assert(r.IsBranch(), Equals, true)
diff --git a/plumbing/revlist/revlist.go b/plumbing/revlist/revlist.go
index 0a9d1e8..7ad71ac 100644
--- a/plumbing/revlist/revlist.go
+++ b/plumbing/revlist/revlist.go
@@ -21,7 +21,20 @@ func Objects(
 	objs,
 	ignore []plumbing.Hash,
 ) ([]plumbing.Hash, error) {
-	ignore, err := objects(s, ignore, nil, true)
+	return ObjectsWithStorageForIgnores(s, s, objs, ignore)
+}
+
+// ObjectsWithStorageForIgnores is the same as Objects, but a
+// secondary storage layer can be provided, to be used to find the
+// full set of objects to be ignored while finding the reachable
+// objects. This is useful when the main `s` storage layer is slow
+// and/or remote, while the ignore list is available somewhere local.
+func ObjectsWithStorageForIgnores(
+	s, ignoreStore storer.EncodedObjectStorer,
+	objs,
+	ignore []plumbing.Hash,
+) ([]plumbing.Hash, error) {
+	ignore, err := objects(ignoreStore, ignore, nil, true)
 	if err != nil {
 		return nil, err
 	}
@@ -114,7 +127,6 @@ func reachableObjects(
 	i := object.NewCommitPreorderIter(commit, seen, ignore)
 	pending := make(map[plumbing.Hash]bool)
 	addPendingParents(pending, visited, commit)
-
 	for {
 		commit, err := i.Next()
 		if err == io.EOF {
diff --git a/plumbing/revlist/revlist_test.go b/plumbing/revlist/revlist_test.go
index dea1c73..ceae727 100644
--- a/plumbing/revlist/revlist_test.go
+++ b/plumbing/revlist/revlist_test.go
@@ -129,6 +129,32 @@ func (s *RevListSuite) TestRevListObjectsTagObject(c *C) {
 	c.Assert(len(hist), Equals, len(expected))
 }
 
+func (s *RevListSuite) TestRevListObjectsWithStorageForIgnores(c *C) {
+	sto := filesystem.NewStorage(
+		fixtures.ByTag("merge-conflict").One().DotGit(),
+		cache.NewObjectLRUDefault())
+
+	// The "merge-conflict" repo has one extra commit in it, with two files
+	// modified in two different subdirs.
+	expected := map[string]bool{
+		"1980fcf55330d9d94c34abee5ab734afecf96aba": true, // commit
+		"73d9cf44e9045254346c73f6646b08f9302c8570": true, // root dir
+		"e8435d512a98586bd2e4fcfcdf04101b0bb1b500": true, // go/
+		"257cc5642cb1a054f08cc83f2d943e56fd3ebe99": true, // haskal.hs
+		"d499a1a0b79b7d87a35155afd0c1cce78b37a91c": true, // example.go
+		"d108adc364fb6f21395d011ae2c8a11d96905b0d": true, // haskal/
+	}
+
+	hist, err := ObjectsWithStorageForIgnores(sto, s.Storer, []plumbing.Hash{plumbing.NewHash("1980fcf55330d9d94c34abee5ab734afecf96aba")}, []plumbing.Hash{plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")})
+	c.Assert(err, IsNil)
+
+	for _, h := range hist {
+		c.Assert(expected[h.String()], Equals, true)
+	}
+
+	c.Assert(len(hist), Equals, len(expected))
+}
+
 // ---
 // | |\
 // | | * b8e471f Creating changelog
diff --git a/plumbing/storer/object.go b/plumbing/storer/object.go
index 2ac9b09..98d1ec3 100644
--- a/plumbing/storer/object.go
+++ b/plumbing/storer/object.go
@@ -222,7 +222,7 @@ type MultiEncodedObjectIter struct {
 }
 
 // NewMultiEncodedObjectIter returns an object iterator for the given slice of
-// objects.
+// EncodedObjectIters.
 func NewMultiEncodedObjectIter(iters []EncodedObjectIter) EncodedObjectIter {
 	return &MultiEncodedObjectIter{iters: iters}
 }
diff --git a/plumbing/storer/reference.go b/plumbing/storer/reference.go
index 5e85a3b..cce72b4 100644
--- a/plumbing/storer/reference.go
+++ b/plumbing/storer/reference.go
@@ -131,9 +131,27 @@ func (iter *ReferenceSliceIter) Next() (*plumbing.Reference, error) {
 // an error happens or the end of the iter is reached. If ErrStop is sent
 // the iteration is stopped but no error is returned. The iterator is closed.
 func (iter *ReferenceSliceIter) ForEach(cb func(*plumbing.Reference) error) error {
+	return forEachReferenceIter(iter, cb)
+}
+
+type bareReferenceIterator interface {
+	Next() (*plumbing.Reference, error)
+	Close()
+}
+
+func forEachReferenceIter(iter bareReferenceIterator, cb func(*plumbing.Reference) error) error {
 	defer iter.Close()
-	for _, r := range iter.series {
-		if err := cb(r); err != nil {
+	for {
+		obj, err := iter.Next()
+		if err != nil {
+			if err == io.EOF {
+				return nil
+			}
+
+			return err
+		}
+
+		if err := cb(obj); err != nil {
 			if err == ErrStop {
 				return nil
 			}
@@ -141,8 +159,6 @@ func (iter *ReferenceSliceIter) ForEach(cb func(*plumbing.Reference) error) erro
 			return err
 		}
 	}
-
-	return nil
 }
 
 // Close releases any resources used by the iterator.
@@ -150,6 +166,52 @@ func (iter *ReferenceSliceIter) Close() {
 	iter.pos = len(iter.series)
 }
 
+// MultiReferenceIter implements ReferenceIter. It iterates over several
+// ReferenceIters.
+//
+// The MultiReferenceIter must be closed with a call to Close() when it is no
+// longer needed.
+type MultiReferenceIter struct {
+	iters []ReferenceIter
+}
+
+// NewMultiReferenceIter returns a reference iterator for the given slice of
+// ReferenceIters.
+func NewMultiReferenceIter(iters []ReferenceIter) ReferenceIter {
+	return &MultiReferenceIter{iters: iters}
+}
+// Next returns the next reference from the iterator; when one iterator
+// reaches io.EOF it is removed and the next one is used.
+func (iter *MultiReferenceIter) Next() (*plumbing.Reference, error) {
+	if len(iter.iters) == 0 {
+		return nil, io.EOF
+	}
+
+	obj, err := iter.iters[0].Next()
+	if err == io.EOF {
+		iter.iters[0].Close()
+		iter.iters = iter.iters[1:]
+		return iter.Next()
+	}
+
+	return obj, err
+}
+
+// ForEach calls the cb function for each reference contained on this iter until
+// an error happens or the end of the iter is reached. If ErrStop is sent
+// the iteration is stopped but no error is returned. The iterator is closed.
+func (iter *MultiReferenceIter) ForEach(cb func(*plumbing.Reference) error) error {
+	return forEachReferenceIter(iter, cb)
+}
+
+// Close releases any resources used by the iterator.
+func (iter *MultiReferenceIter) Close() {
+	for _, i := range iter.iters {
+		i.Close()
+	}
+}
+
 // ResolveReference resolves a SymbolicReference to a HashReference.
 func ResolveReference(s ReferenceStorer, n plumbing.ReferenceName) (*plumbing.Reference, error) {
 	r, err := s.Reference(n)
diff --git a/plumbing/storer/reference_test.go b/plumbing/storer/reference_test.go
index 490ec95..1d02c22 100644
--- a/plumbing/storer/reference_test.go
+++ b/plumbing/storer/reference_test.go
@@ -172,3 +172,26 @@ func (s *ReferenceSuite) TestReferenceFilteredIterForEachStop(c *C) {
 
 	c.Assert(count, Equals, 1)
 }
+
+func (s *ReferenceSuite) TestMultiReferenceIterForEach(c *C) {
+	i := NewMultiReferenceIter(
+		[]ReferenceIter{
+			NewReferenceSliceIter([]*plumbing.Reference{
+				plumbing.NewReferenceFromStrings("foo", "foo"),
+			}),
+			NewReferenceSliceIter([]*plumbing.Reference{
+				plumbing.NewReferenceFromStrings("bar", "bar"),
+			}),
+		},
+	)
+
+	var result []string
+	err := i.ForEach(func(r *plumbing.Reference) error {
+		result = append(result, r.Name().String())
+		return nil
+	})
+
+	c.Assert(err, IsNil)
+	c.Assert(result, HasLen, 2)
+	c.Assert(result, DeepEquals, []string{"foo", "bar"})
+}
diff --git a/plumbing/transport/common.go b/plumbing/transport/common.go
index f7b882b..dcf9391 100644
--- a/plumbing/transport/common.go
+++ b/plumbing/transport/common.go
@@ -19,10 +19,10 @@ import (
 	"fmt"
 	"io"
 	"net/url"
-	"regexp"
 	"strconv"
 	"strings"
 
+	giturl "gopkg.in/src-d/go-git.v4/internal/url"
 	"gopkg.in/src-d/go-git.v4/plumbing"
 	"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
 	"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
@@ -224,34 +224,28 @@ func getPath(u *url.URL) string {
 	return res
 }
 
-var (
-	isSchemeRegExp   = regexp.MustCompile(`^[^:]+://`)
-	scpLikeUrlRegExp = regexp.MustCompile(`^(?:(?P<user>[^@]+)@)?(?P<host>[^:\s]+):(?:(?P<port>[0-9]{1,5})/)?(?P<path>[^\\].*)$`)
-)
-
 func parseSCPLike(endpoint string) (*Endpoint, bool) {
-	if isSchemeRegExp.MatchString(endpoint) || !scpLikeUrlRegExp.MatchString(endpoint) {
+	if giturl.MatchesScheme(endpoint) || !giturl.MatchesScpLike(endpoint) {
 		return nil, false
 	}
 
-	m := scpLikeUrlRegExp.FindStringSubmatch(endpoint)
-
-	port, err := strconv.Atoi(m[3])
+	user, host, portStr, path := giturl.FindScpLikeComponents(endpoint)
+	port, err := strconv.Atoi(portStr)
 	if err != nil {
 		port = 22
 	}
 
 	return &Endpoint{
 		Protocol: "ssh",
-		User:     m[1],
-		Host:     m[2],
+		User:     user,
+		Host:     host,
 		Port:     port,
-		Path:     m[4],
+		Path:     path,
 	}, true
 }
 
 func parseFile(endpoint string) (*Endpoint, bool) {
-	if isSchemeRegExp.MatchString(endpoint) {
+	if giturl.MatchesScheme(endpoint) {
 		return nil, false
 	}
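With the regular expressions moved to internal/url, scp-like endpoints still parse the same way through the public API. A small sketch; the expected field values follow the parsing rules above (port 22 is the fallback when none is given):

package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing/transport"
)

func main() {
	// An scp-like endpoint: no scheme, user@host:path.
	ep, err := transport.NewEndpoint("git@github.com:src-d/go-git.git")
	if err != nil {
		panic(err)
	}

	// Prints: ssh git github.com 22 src-d/go-git.git
	fmt.Println(ep.Protocol, ep.User, ep.Host, ep.Port, ep.Path)
}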
diff --git a/plumbing/transport/http/common.go b/plumbing/transport/http/common.go
index c034846..5d3535e 100644
--- a/plumbing/transport/http/common.go
+++ b/plumbing/transport/http/common.go
@@ -4,6 +4,7 @@ package http
 import (
 	"bytes"
 	"fmt"
+	"net"
 	"net/http"
 	"strconv"
 	"strings"
@@ -151,6 +152,18 @@ func (s *session) ModifyEndpointIfRedirect(res *http.Response) {
 		return
 	}
 
+	h, p, err := net.SplitHostPort(r.URL.Host)
+	if err != nil {
+		h = r.URL.Host
+	}
+	if p != "" {
+		port, err := strconv.Atoi(p)
+		if err == nil {
+			s.endpoint.Port = port
+		}
+	}
+	s.endpoint.Host = h
+
 	s.endpoint.Protocol = r.URL.Scheme
 	s.endpoint.Path = r.URL.Path[:len(r.URL.Path)-len(infoRefsPath)]
 }
@@ -201,7 +214,14 @@ func (a *BasicAuth) String() string {
 	return fmt.Sprintf("%s - %s:%s", a.Name(), a.Username, masked)
 }
 
-// TokenAuth implements the go-git http.AuthMethod and transport.AuthMethod interfaces
+// TokenAuth implements an http.AuthMethod that can be used with http transport
+// to authenticate with HTTP token authentication (also known as bearer
+// authentication).
+//
+// IMPORTANT: If you are looking to use OAuth tokens with popular servers (e.g.
+// GitHub, Bitbucket, GitLab) you should use BasicAuth instead. These servers
+// use basic HTTP authentication, with the OAuth token as user or password.
+// Check the documentation of your git server for details.
 type TokenAuth struct {
 	Token string
 }
diff --git a/plumbing/transport/http/common_test.go b/plumbing/transport/http/common_test.go
index 71eede4..8b300e8 100644
--- a/plumbing/transport/http/common_test.go
+++ b/plumbing/transport/http/common_test.go
@@ -8,6 +8,7 @@ import (
 	"net"
 	"net/http"
 	"net/http/cgi"
+	"net/url"
 	"os"
 	"os/exec"
 	"path/filepath"
@@ -119,6 +120,42 @@ func (s *ClientSuite) TestSetAuthWrongType(c *C) {
 	c.Assert(err, Equals, transport.ErrInvalidAuthMethod)
 }
 
+func (s *ClientSuite) TestModifyEndpointIfRedirect(c *C) {
+	sess := &session{endpoint: nil}
+	u, _ := url.Parse("https://example.com/info/refs")
+	res := &http.Response{Request: &http.Request{URL: u}}
+	c.Assert(func() {
+		sess.ModifyEndpointIfRedirect(res)
+	}, PanicMatches, ".*nil pointer dereference.*")
+
+	sess = &session{endpoint: nil}
+	// no-op - should return and not panic
+	sess.ModifyEndpointIfRedirect(&http.Response{})
+
+	data := []struct {
+		url      string
+		endpoint *transport.Endpoint
+		expected *transport.Endpoint
+	}{
+		{"https://example.com/foo/bar", nil, nil},
+		{"https://example.com/foo.git/info/refs",
+			&transport.Endpoint{},
+			&transport.Endpoint{Protocol: "https", Host: "example.com", Path: "/foo.git"}},
+		{"https://example.com:8080/foo.git/info/refs",
+			&transport.Endpoint{},
+			&transport.Endpoint{Protocol: "https", Host: "example.com", Port: 8080, Path: "/foo.git"}},
+	}
+
+	for _, d := range data {
+		u, _ := url.Parse(d.url)
+		sess := &session{endpoint: d.endpoint}
+		sess.ModifyEndpointIfRedirect(&http.Response{
+			Request: &http.Request{URL: u},
+		})
+		c.Assert(d.endpoint, DeepEquals, d.expected)
+	}
+}
+
 type BaseSuite struct {
 	fixtures.Suite
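Following the advice in the new TokenAuth comment, a hedged sketch of authenticating to an HTTPS remote with an OAuth token via BasicAuth rather than TokenAuth; the URL, username, and token below are placeholders:

package main

import (
	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing/transport/http"
	"gopkg.in/src-d/go-git.v4/storage/memory"
)

func main() {
	// GitHub-style servers expect the OAuth token through basic HTTP
	// authentication, not as a bearer token.
	_, err := git.Clone(memory.NewStorage(), nil, &git.CloneOptions{
		URL: "https://github.com/org/private-repo.git", // placeholder repo
		Auth: &http.BasicAuth{
			Username: "git",           // anything except an empty string
			Password: "<oauth-token>", // the token itself, placeholder
		},
	})
	if err != nil {
		panic(err)
	}
}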
diff --git a/plumbing/transport/ssh/upload_pack_test.go b/plumbing/transport/ssh/upload_pack_test.go
index 87fd4f5..2685ff0 100644
--- a/plumbing/transport/ssh/upload_pack_test.go
+++ b/plumbing/transport/ssh/upload_pack_test.go
@@ -10,6 +10,7 @@ import (
 	"os/exec"
 	"path/filepath"
 	"strings"
+	"sync"
 
 	"gopkg.in/src-d/go-git.v4/plumbing/transport"
 	"gopkg.in/src-d/go-git.v4/plumbing/transport/test"
@@ -97,13 +98,20 @@ func handlerSSH(s ssh.Session) {
 		io.Copy(stdin, s)
 	}()
 
+	var wg sync.WaitGroup
+	wg.Add(2)
+
 	go func() {
-		defer stderr.Close()
+		defer wg.Done()
 		io.Copy(s.Stderr(), stderr)
 	}()
 
-	defer stdout.Close()
-	io.Copy(s, stdout)
+	go func() {
+		defer wg.Done()
+		io.Copy(s, stdout)
+	}()
+
+	wg.Wait()
 
 	if err := cmd.Wait(); err != nil {
 		return
diff --git a/plumbing/transport/test/receive_pack.go b/plumbing/transport/test/receive_pack.go
index 5aea1c0..8dcde8b 100644
--- a/plumbing/transport/test/receive_pack.go
+++ b/plumbing/transport/test/receive_pack.go
@@ -262,11 +262,7 @@ func (s *ReceivePackSuite) receivePackNoCheck(c *C, ep *transport.Endpoint,
 		req.Packfile = s.emptyPackfile()
 	}
 
-	if s, err := r.ReceivePack(context.Background(), req); err != nil {
-		return s, err
-	} else {
-		return s, err
-	}
+	return r.ReceivePack(context.Background(), req)
 }
 
 func (s *ReceivePackSuite) receivePack(c *C, ep *transport.Endpoint,