Diffstat (limited to 'plumbing/format')
-rw-r--r--  plumbing/format/packfile/delta_test.go             |  51
-rw-r--r--  plumbing/format/packfile/encoder_advanced_test.go  |   2
-rw-r--r--  plumbing/format/packfile/encoder_test.go           |   2
-rw-r--r--  plumbing/format/packfile/fsobject.go               |  54
-rw-r--r--  plumbing/format/packfile/packfile.go               |  92
-rw-r--r--  plumbing/format/packfile/packfile_test.go          |  12
-rw-r--r--  plumbing/format/packfile/patch_delta.go            | 210
-rw-r--r--  plumbing/format/packfile/scanner.go                |  15
8 files changed, 405 insertions, 33 deletions
diff --git a/plumbing/format/packfile/delta_test.go b/plumbing/format/packfile/delta_test.go
index 98f53f6..137e485 100644
--- a/plumbing/format/packfile/delta_test.go
+++ b/plumbing/format/packfile/delta_test.go
@@ -1,8 +1,11 @@
 package packfile
 
 import (
+    "bytes"
+    "io/ioutil"
     "math/rand"
 
+    "github.com/go-git/go-git/v5/plumbing"
     . "gopkg.in/check.v1"
 )
 
@@ -97,6 +100,32 @@ func (s *DeltaSuite) TestAddDelta(c *C) {
     }
 }
 
+func (s *DeltaSuite) TestAddDeltaReader(c *C) {
+    for _, t := range s.testCases {
+        baseBuf := genBytes(t.base)
+        baseObj := &plumbing.MemoryObject{}
+        baseObj.Write(baseBuf)
+
+        targetBuf := genBytes(t.target)
+
+        delta := DiffDelta(baseBuf, targetBuf)
+        deltaRC := ioutil.NopCloser(bytes.NewReader(delta))
+
+        c.Log("Executing test case:", t.description)
+
+        resultRC, err := ReaderFromDelta(baseObj, deltaRC)
+        c.Assert(err, IsNil)
+
+        result, err := ioutil.ReadAll(resultRC)
+        c.Assert(err, IsNil)
+
+        err = resultRC.Close()
+        c.Assert(err, IsNil)
+
+        c.Assert(result, DeepEquals, targetBuf)
+    }
+}
+
 func (s *DeltaSuite) TestIncompleteDelta(c *C) {
     for _, t := range s.testCases {
         c.Log("Incomplete delta on:", t.description)
@@ -125,3 +154,25 @@ func (s *DeltaSuite) TestMaxCopySizeDelta(c *C) {
     c.Assert(err, IsNil)
     c.Assert(result, DeepEquals, targetBuf)
 }
+
+func (s *DeltaSuite) TestMaxCopySizeDeltaReader(c *C) {
+    baseBuf := randBytes(maxCopySize)
+    baseObj := &plumbing.MemoryObject{}
+    baseObj.Write(baseBuf)
+
+    targetBuf := baseBuf[0:]
+    targetBuf = append(targetBuf, byte(1))
+
+    delta := DiffDelta(baseBuf, targetBuf)
+    deltaRC := ioutil.NopCloser(bytes.NewReader(delta))
+
+    resultRC, err := ReaderFromDelta(baseObj, deltaRC)
+    c.Assert(err, IsNil)
+
+    result, err := ioutil.ReadAll(resultRC)
+    c.Assert(err, IsNil)
+
+    err = resultRC.Close()
+    c.Assert(err, IsNil)
+    c.Assert(result, DeepEquals, targetBuf)
+}
diff --git a/plumbing/format/packfile/encoder_advanced_test.go b/plumbing/format/packfile/encoder_advanced_test.go
index 95db5c0..15c0fba 100644
--- a/plumbing/format/packfile/encoder_advanced_test.go
+++ b/plumbing/format/packfile/encoder_advanced_test.go
@@ -105,7 +105,7 @@ func (s *EncoderAdvancedSuite) testEncodeDecode(
     _, err = f.Seek(0, io.SeekStart)
     c.Assert(err, IsNil)
 
-    p := NewPackfile(index, fs, f)
+    p := NewPackfile(index, fs, f, 0)
 
     decodeHash, err := p.ID()
     c.Assert(err, IsNil)
diff --git a/plumbing/format/packfile/encoder_test.go b/plumbing/format/packfile/encoder_test.go
index d2db892..c9d49c3 100644
--- a/plumbing/format/packfile/encoder_test.go
+++ b/plumbing/format/packfile/encoder_test.go
@@ -318,7 +318,7 @@ func packfileFromReader(c *C, buf *bytes.Buffer) (*Packfile, func()) {
     index, err := w.Index()
     c.Assert(err, IsNil)
 
-    return NewPackfile(index, fs, file), func() {
+    return NewPackfile(index, fs, file, 0), func() {
         c.Assert(file.Close(), IsNil)
     }
 }
diff --git a/plumbing/format/packfile/fsobject.go b/plumbing/format/packfile/fsobject.go
index c5edaf5..a395d17 100644
--- a/plumbing/format/packfile/fsobject.go
+++ b/plumbing/format/packfile/fsobject.go
@@ -7,19 +7,21 @@ import (
     "github.com/go-git/go-git/v5/plumbing"
     "github.com/go-git/go-git/v5/plumbing/cache"
     "github.com/go-git/go-git/v5/plumbing/format/idxfile"
+    "github.com/go-git/go-git/v5/utils/ioutil"
 )
 
 // FSObject is an object from the packfile on the filesystem.
 type FSObject struct {
-    hash   plumbing.Hash
-    h      *ObjectHeader
-    offset int64
-    size   int64
-    typ    plumbing.ObjectType
-    index  idxfile.Index
-    fs     billy.Filesystem
-    path   string
-    cache  cache.Object
+    hash                 plumbing.Hash
+    h                    *ObjectHeader
+    offset               int64
+    size                 int64
+    typ                  plumbing.ObjectType
+    index                idxfile.Index
+    fs                   billy.Filesystem
+    path                 string
+    cache                cache.Object
+    largeObjectThreshold int64
 }
 
 // NewFSObject creates a new filesystem object.
@@ -32,16 +34,18 @@ func NewFSObject(
     fs billy.Filesystem,
     path string,
     cache cache.Object,
+    largeObjectThreshold int64,
 ) *FSObject {
     return &FSObject{
-        hash:   hash,
-        offset: offset,
-        size:   contentSize,
-        typ:    finalType,
-        index:  index,
-        fs:     fs,
-        path:   path,
-        cache:  cache,
+        hash:                 hash,
+        offset:               offset,
+        size:                 contentSize,
+        typ:                  finalType,
+        index:                index,
+        fs:                   fs,
+        path:                 path,
+        cache:                cache,
+        largeObjectThreshold: largeObjectThreshold,
     }
 }
 
@@ -62,7 +66,21 @@ func (o *FSObject) Reader() (io.ReadCloser, error) {
         return nil, err
     }
 
-    p := NewPackfileWithCache(o.index, nil, f, o.cache)
+    p := NewPackfileWithCache(o.index, nil, f, o.cache, o.largeObjectThreshold)
+    if o.largeObjectThreshold > 0 && o.size > o.largeObjectThreshold {
+        // We have a big object
+        h, err := p.objectHeaderAtOffset(o.offset)
+        if err != nil {
+            return nil, err
+        }
+
+        r, err := p.getReaderDirect(h)
+        if err != nil {
+            _ = f.Close()
+            return nil, err
+        }
+        return ioutil.NewReadCloserWithCloser(r, f.Close), nil
+    }
     r, err := p.getObjectContent(o.offset)
     if err != nil {
         _ = f.Close()
diff --git a/plumbing/format/packfile/packfile.go b/plumbing/format/packfile/packfile.go
index ddd7f62..8dd6041 100644
--- a/plumbing/format/packfile/packfile.go
+++ b/plumbing/format/packfile/packfile.go
@@ -2,6 +2,8 @@ package packfile
 
 import (
     "bytes"
+    "compress/zlib"
+    "fmt"
     "io"
     "os"
 
@@ -35,11 +37,12 @@ const smallObjectThreshold = 16 * 1024
 // Packfile allows retrieving information from inside a packfile.
 type Packfile struct {
     idxfile.Index
-    fs             billy.Filesystem
-    file           billy.File
-    s              *Scanner
-    deltaBaseCache cache.Object
-    offsetToType   map[int64]plumbing.ObjectType
+    fs                   billy.Filesystem
+    file                 billy.File
+    s                    *Scanner
+    deltaBaseCache       cache.Object
+    offsetToType         map[int64]plumbing.ObjectType
+    largeObjectThreshold int64
 }
 
 // NewPackfileWithCache creates a new Packfile with the given object cache.
@@ -50,6 +53,7 @@ func NewPackfileWithCache(
     fs billy.Filesystem,
     file billy.File,
     cache cache.Object,
+    largeObjectThreshold int64,
 ) *Packfile {
     s := NewScanner(file)
     return &Packfile{
@@ -59,6 +63,7 @@ func NewPackfileWithCache(
         s,
         cache,
         make(map[int64]plumbing.ObjectType),
+        largeObjectThreshold,
     }
 }
 
@@ -66,8 +71,8 @@ func NewPackfileWithCache(
 // and packfile idx.
 // If the filesystem is provided, the packfile will return FSObjects, otherwise
 // it will return MemoryObjects.
-func NewPackfile(index idxfile.Index, fs billy.Filesystem, file billy.File) *Packfile {
-    return NewPackfileWithCache(index, fs, file, cache.NewObjectLRUDefault())
+func NewPackfile(index idxfile.Index, fs billy.Filesystem, file billy.File, largeObjectThreshold int64) *Packfile {
+    return NewPackfileWithCache(index, fs, file, cache.NewObjectLRUDefault(), largeObjectThreshold)
 }
 
 // Get retrieves the encoded object in the packfile with the given hash.
@@ -263,6 +268,7 @@ func (p *Packfile) getNextObject(h *ObjectHeader, hash plumbing.Hash) (plumbing.
         p.fs,
         p.file.Name(),
         p.deltaBaseCache,
+        p.largeObjectThreshold,
     ), nil
 }
 
@@ -282,6 +288,50 @@ func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) {
     return obj.Reader()
 }
 
+func asyncReader(p *Packfile) (io.ReadCloser, error) {
+    reader := ioutil.NewReaderUsingReaderAt(p.file, p.s.r.offset)
+    zr := zlibReaderPool.Get().(io.ReadCloser)
+
+    if err := zr.(zlib.Resetter).Reset(reader, nil); err != nil {
+        return nil, fmt.Errorf("zlib reset error: %s", err)
+    }
+
+    return ioutil.NewReadCloserWithCloser(zr, func() error {
+        zlibReaderPool.Put(zr)
+        return nil
+    }), nil
+
+}
+
+func (p *Packfile) getReaderDirect(h *ObjectHeader) (io.ReadCloser, error) {
+    switch h.Type {
+    case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
+        return asyncReader(p)
+    case plumbing.REFDeltaObject:
+        deltaRc, err := asyncReader(p)
+        if err != nil {
+            return nil, err
+        }
+        r, err := p.readREFDeltaObjectContent(h, deltaRc)
+        if err != nil {
+            return nil, err
+        }
+        return r, nil
+    case plumbing.OFSDeltaObject:
+        deltaRc, err := asyncReader(p)
+        if err != nil {
+            return nil, err
+        }
+        r, err := p.readOFSDeltaObjectContent(h, deltaRc)
+        if err != nil {
+            return nil, err
+        }
+        return r, nil
+    default:
+        return nil, ErrInvalidObject.AddDetails("type %q", h.Type)
+    }
+}
+
 func (p *Packfile) getNextMemoryObject(h *ObjectHeader) (plumbing.EncodedObject, error) {
     var obj = new(plumbing.MemoryObject)
     obj.SetSize(h.Length)
@@ -334,6 +384,20 @@ func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plu
     return p.fillREFDeltaObjectContentWithBuffer(obj, ref, buf)
 }
 
+func (p *Packfile) readREFDeltaObjectContent(h *ObjectHeader, deltaRC io.Reader) (io.ReadCloser, error) {
+    var err error
+
+    base, ok := p.cacheGet(h.Reference)
+    if !ok {
+        base, err = p.Get(h.Reference)
+        if err != nil {
+            return nil, err
+        }
+    }
+
+    return ReaderFromDelta(base, deltaRC)
+}
+
 func (p *Packfile) fillREFDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, ref plumbing.Hash, buf *bytes.Buffer) error {
     var err error
 
@@ -364,6 +428,20 @@ func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset
     return p.fillOFSDeltaObjectContentWithBuffer(obj, offset, buf)
 }
 
+func (p *Packfile) readOFSDeltaObjectContent(h *ObjectHeader, deltaRC io.Reader) (io.ReadCloser, error) {
+    hash, err := p.FindHash(h.OffsetReference)
+    if err != nil {
+        return nil, err
+    }
+
+    base, err := p.objectAtOffset(h.OffsetReference, hash)
+    if err != nil {
+        return nil, err
+    }
+
+    return ReaderFromDelta(base, deltaRC)
+}
+
 func (p *Packfile) fillOFSDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, offset int64, buf *bytes.Buffer) error {
     hash, err := p.FindHash(offset)
     if err != nil {
diff --git a/plumbing/format/packfile/packfile_test.go b/plumbing/format/packfile/packfile_test.go
index 60c7c73..6af8817 100644
--- a/plumbing/format/packfile/packfile_test.go
+++ b/plumbing/format/packfile/packfile_test.go
@@ -111,7 +111,7 @@ func (s *PackfileSuite) SetUpTest(c *C) {
     s.idx = idxfile.NewMemoryIndex()
     c.Assert(idxfile.NewDecoder(s.f.Idx()).Decode(s.idx), IsNil)
 
-    s.p = packfile.NewPackfile(s.idx, fixtures.Filesystem, s.f.Packfile())
+    s.p = packfile.NewPackfile(s.idx, fixtures.Filesystem, s.f.Packfile(), 0)
 }
 
 func (s *PackfileSuite) TearDownTest(c *C) {
@@ -122,7 +122,7 @@ func (s *PackfileSuite) TestDecode(c *C) {
     fixtures.Basic().ByTag("packfile").Test(c, func(f *fixtures.Fixture) {
         index := getIndexFromIdxFile(f.Idx())
 
-        p := packfile.NewPackfile(index, fixtures.Filesystem, f.Packfile())
+        p := packfile.NewPackfile(index, fixtures.Filesystem, f.Packfile(), 0)
         defer p.Close()
 
         for _, h := range expectedHashes {
@@ -138,7 +138,7 @@ func (s *PackfileSuite) TestDecodeByTypeRefDelta(c *C) {
 
     index := getIndexFromIdxFile(f.Idx())
 
-    packfile := packfile.NewPackfile(index, fixtures.Filesystem, f.Packfile())
+    packfile := packfile.NewPackfile(index, fixtures.Filesystem, f.Packfile(), 0)
     defer packfile.Close()
 
     iter, err := packfile.GetByType(plumbing.CommitObject)
@@ -171,7 +171,7 @@ func (s *PackfileSuite) TestDecodeByType(c *C) {
         for _, t := range ts {
             index := getIndexFromIdxFile(f.Idx())
 
-            packfile := packfile.NewPackfile(index, fixtures.Filesystem, f.Packfile())
+            packfile := packfile.NewPackfile(index, fixtures.Filesystem, f.Packfile(), 0)
             defer packfile.Close()
 
             iter, err := packfile.GetByType(t)
@@ -189,7 +189,7 @@ func (s *PackfileSuite) TestDecodeByTypeConstructor(c *C) {
     f := fixtures.Basic().ByTag("packfile").One()
     index := getIndexFromIdxFile(f.Idx())
 
-    packfile := packfile.NewPackfile(index, fixtures.Filesystem, f.Packfile())
+    packfile := packfile.NewPackfile(index, fixtures.Filesystem, f.Packfile(), 0)
     defer packfile.Close()
 
     _, err := packfile.GetByType(plumbing.OFSDeltaObject)
@@ -266,7 +266,7 @@ func (s *PackfileSuite) TestSize(c *C) {
 
     index := getIndexFromIdxFile(f.Idx())
 
-    packfile := packfile.NewPackfile(index, fixtures.Filesystem, f.Packfile())
+    packfile := packfile.NewPackfile(index, fixtures.Filesystem, f.Packfile(), 0)
     defer packfile.Close()
 
     // Get the size of binary.jpg, which is not delta-encoded.
diff --git a/plumbing/format/packfile/patch_delta.go b/plumbing/format/packfile/patch_delta.go
index 9e90f30..17da11e 100644
--- a/plumbing/format/packfile/patch_delta.go
+++ b/plumbing/format/packfile/patch_delta.go
@@ -1,9 +1,11 @@
 package packfile
 
 import (
+    "bufio"
     "bytes"
     "errors"
     "io"
+    "math"
 
     "github.com/go-git/go-git/v5/plumbing"
     "github.com/go-git/go-git/v5/utils/ioutil"
@@ -73,6 +75,131 @@ func PatchDelta(src, delta []byte) ([]byte, error) {
     return b.Bytes(), nil
 }
 
+func ReaderFromDelta(base plumbing.EncodedObject, deltaRC io.Reader) (io.ReadCloser, error) {
+    deltaBuf := bufio.NewReaderSize(deltaRC, 1024)
+    srcSz, err := decodeLEB128ByteReader(deltaBuf)
+    if err != nil {
+        if err == io.EOF {
+            return nil, ErrInvalidDelta
+        }
+        return nil, err
+    }
+    if srcSz != uint(base.Size()) {
+        return nil, ErrInvalidDelta
+    }
+
+    targetSz, err := decodeLEB128ByteReader(deltaBuf)
+    if err != nil {
+        if err == io.EOF {
+            return nil, ErrInvalidDelta
+        }
+        return nil, err
+    }
+    remainingTargetSz := targetSz
+
+    dstRd, dstWr := io.Pipe()
+
+    go func() {
+        baseRd, err := base.Reader()
+        if err != nil {
+            _ = dstWr.CloseWithError(ErrInvalidDelta)
+            return
+        }
+        defer baseRd.Close()
+
+        baseBuf := bufio.NewReader(baseRd)
+        basePos := uint(0)
+
+        for {
+            cmd, err := deltaBuf.ReadByte()
+            if err == io.EOF {
+                _ = dstWr.CloseWithError(ErrInvalidDelta)
+                return
+            }
+            if err != nil {
+                _ = dstWr.CloseWithError(err)
+                return
+            }
+
+            if isCopyFromSrc(cmd) {
+                offset, err := decodeOffsetByteReader(cmd, deltaBuf)
+                if err != nil {
+                    _ = dstWr.CloseWithError(err)
+                    return
+                }
+                sz, err := decodeSizeByteReader(cmd, deltaBuf)
+                if err != nil {
+                    _ = dstWr.CloseWithError(err)
+                    return
+                }
+
+                if invalidSize(sz, targetSz) ||
+                    invalidOffsetSize(offset, sz, srcSz) {
+                    _ = dstWr.Close()
+                    return
+                }
+
+                discard := offset - basePos
+                if basePos > offset {
+                    _ = baseRd.Close()
+                    baseRd, err = base.Reader()
+                    if err != nil {
+                        _ = dstWr.CloseWithError(ErrInvalidDelta)
+                        return
+                    }
+                    baseBuf.Reset(baseRd)
+                    discard = offset
+                }
+                for discard > math.MaxInt32 {
+                    n, err := baseBuf.Discard(math.MaxInt32)
+                    if err != nil {
+                        _ = dstWr.CloseWithError(err)
+                        return
+                    }
+                    basePos += uint(n)
+                    discard -= uint(n)
+                }
+                for discard > 0 {
+                    n, err := baseBuf.Discard(int(discard))
+                    if err != nil {
+                        _ = dstWr.CloseWithError(err)
+                        return
+                    }
+                    basePos += uint(n)
+                    discard -= uint(n)
+                }
+                if _, err := io.Copy(dstWr, io.LimitReader(baseBuf, int64(sz))); err != nil {
+                    _ = dstWr.CloseWithError(err)
+                    return
+                }
+                remainingTargetSz -= sz
+                basePos += sz
+            } else if isCopyFromDelta(cmd) {
+                sz := uint(cmd) // cmd is the size itself
+                if invalidSize(sz, targetSz) {
+                    _ = dstWr.CloseWithError(ErrInvalidDelta)
+                    return
+                }
+                if _, err := io.Copy(dstWr, io.LimitReader(deltaBuf, int64(sz))); err != nil {
+                    _ = dstWr.CloseWithError(err)
+                    return
+                }
+
+                remainingTargetSz -= sz
+            } else {
+                _ = dstWr.CloseWithError(ErrDeltaCmd)
+                return
+            }
+            if remainingTargetSz <= 0 {
+                _ = dstWr.Close()
+                return
+            }
+        }
+    }()
+
+    return dstRd, nil
+}
+
 func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
     if len(delta) < deltaSizeMin {
         return ErrInvalidDelta
@@ -161,6 +288,25 @@ func decodeLEB128(input []byte) (uint, []byte) {
     return num, input[sz:]
 }
 
+func decodeLEB128ByteReader(input io.ByteReader) (uint, error) {
+    var num, sz uint
+    for {
+        b, err := input.ReadByte()
+        if err != nil {
+            return 0, err
+        }
+
+        num |= (uint(b) & payload) << (sz * 7) // concats 7 bits chunks
+        sz++
+
+        if uint(b)&continuation == 0 {
+            break
+        }
+    }
+
+    return num, nil
+}
+
 const (
     payload      = 0x7f // 0111 1111
     continuation = 0x80 // 1000 0000
@@ -174,6 +320,40 @@ func isCopyFromDelta(cmd byte) bool {
     return (cmd&0x80) == 0 && cmd != 0
 }
 
+func decodeOffsetByteReader(cmd byte, delta io.ByteReader) (uint, error) {
+    var offset uint
+    if (cmd & 0x01) != 0 {
+        next, err := delta.ReadByte()
+        if err != nil {
+            return 0, err
+        }
+        offset = uint(next)
+    }
+    if (cmd & 0x02) != 0 {
+        next, err := delta.ReadByte()
+        if err != nil {
+            return 0, err
+        }
+        offset |= uint(next) << 8
+    }
+    if (cmd & 0x04) != 0 {
+        next, err := delta.ReadByte()
+        if err != nil {
+            return 0, err
+        }
+        offset |= uint(next) << 16
+    }
+    if (cmd & 0x08) != 0 {
+        next, err := delta.ReadByte()
+        if err != nil {
+            return 0, err
+        }
+        offset |= uint(next) << 24
+    }
+
+    return offset, nil
+}
+
 func decodeOffset(cmd byte, delta []byte) (uint, []byte, error) {
     var offset uint
     if (cmd & 0x01) != 0 {
@@ -208,6 +388,36 @@ func decodeOffset(cmd byte, delta []byte) (uint, []byte, error) {
     return offset, delta, nil
 }
 
+func decodeSizeByteReader(cmd byte, delta io.ByteReader) (uint, error) {
+    var sz uint
+    if (cmd & 0x10) != 0 {
+        next, err := delta.ReadByte()
+        if err != nil {
+            return 0, err
+        }
+        sz = uint(next)
+    }
+    if (cmd & 0x20) != 0 {
+        next, err := delta.ReadByte()
+        if err != nil {
+            return 0, err
+        }
+        sz |= uint(next) << 8
+    }
+    if (cmd & 0x40) != 0 {
+        next, err := delta.ReadByte()
+        if err != nil {
+            return 0, err
+        }
+        sz |= uint(next) << 16
+    }
+    if sz == 0 {
+        sz = 0x10000
+    }
+
+    return sz, nil
+}
+
 func decodeSize(cmd byte, delta []byte) (uint, []byte, error) {
     var sz uint
     if (cmd & 0x10) != 0 {
diff --git a/plumbing/format/packfile/scanner.go b/plumbing/format/packfile/scanner.go
index 6e6a687..5d9e8fb 100644
--- a/plumbing/format/packfile/scanner.go
+++ b/plumbing/format/packfile/scanner.go
@@ -320,6 +320,21 @@ func (s *Scanner) NextObject(w io.Writer) (written int64, crc32 uint32, err erro
     return
 }
 
+// ReadObject returns a reader for the object content and an error
+func (s *Scanner) ReadObject() (io.ReadCloser, error) {
+    s.pendingObject = nil
+    zr := zlibReaderPool.Get().(io.ReadCloser)
+
+    if err := zr.(zlib.Resetter).Reset(s.r, nil); err != nil {
+        return nil, fmt.Errorf("zlib reset error: %s", err)
+    }
+
+    return ioutil.NewReadCloserWithCloser(zr, func() error {
+        zlibReaderPool.Put(zr)
+        return nil
+    }), nil
+}
+
 // ReadRegularObject reads and write a non-deltified object
 // from it zlib stream in an object entry in the packfile.
 func (s *Scanner) copyObject(w io.Writer) (n int64, err error) {
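Usage note: a minimal sketch of how a caller might exercise the largeObjectThreshold argument that this change adds to NewPackfile and NewPackfileWithCache. With a non-zero threshold, objects whose inflated size exceeds it are served through the new streaming path (objectHeaderAtOffset/getReaderDirect, and ReaderFromDelta for deltas) instead of being decompressed fully into memory; passing 0, as the updated tests do, keeps the previous buffered behaviour. The pack/idx file names, the object hash and the 1 MiB threshold below are illustrative assumptions, not values taken from the change.

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"log"

	"github.com/go-git/go-billy/v5/osfs"
	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/format/idxfile"
	"github.com/go-git/go-git/v5/plumbing/format/packfile"
)

func main() {
	// Hypothetical pack/idx pair inside a repository; substitute real paths.
	fs := osfs.New(".git/objects/pack")

	idxFile, err := fs.Open("pack-example.idx")
	if err != nil {
		log.Fatal(err)
	}
	defer idxFile.Close()

	idx := idxfile.NewMemoryIndex()
	if err := idxfile.NewDecoder(idxFile).Decode(idx); err != nil {
		log.Fatal(err)
	}

	packFile, err := fs.Open("pack-example.pack")
	if err != nil {
		log.Fatal(err)
	}

	// Objects larger than 1 MiB go through the streaming path instead of
	// being inflated fully into memory; 0 would keep the old behaviour.
	p := packfile.NewPackfile(idx, fs, packFile, 1024*1024)
	defer p.Close()

	// Hypothetical hash of an object stored in this pack.
	hash := plumbing.NewHash("0123456789abcdef0123456789abcdef01234567")

	obj, err := p.Get(hash)
	if err != nil {
		log.Fatal(err)
	}

	r, err := obj.Reader()
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	// Consume the object as a stream; only small buffers are held at once.
	n, err := io.Copy(ioutil.Discard, r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("streamed %d bytes of %s\n", n, obj.Type())
}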
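A related sketch: the new exported ReaderFromDelta helper can also be used on its own to apply a delta as a stream, which is what the TestAddDeltaReader test above exercises. It parses the delta's LEB128 source/target size header, then serves copy-from-base and copy-from-delta commands through an io.Pipe fed by a goroutine, so the patched result is produced incrementally. The sample contents below are arbitrary; only DiffDelta, ReaderFromDelta and plumbing.MemoryObject come from the package.

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/format/packfile"
)

func main() {
	// Arbitrary example contents: the target is the base plus a small edit.
	baseBuf := bytes.Repeat([]byte("base content\n"), 1024)
	targetBuf := append(append([]byte{}, baseBuf...), []byte("trailing edit\n")...)

	// Wrap the base in an EncodedObject, as ReaderFromDelta expects.
	baseObj := &plumbing.MemoryObject{}
	if _, err := baseObj.Write(baseBuf); err != nil {
		log.Fatal(err)
	}

	// Produce a delta with the existing DiffDelta helper...
	delta := packfile.DiffDelta(baseBuf, targetBuf)

	// ...and re-apply it as a stream: the result arrives through a pipe,
	// so the patching code never materialises the whole target itself.
	deltaRC := ioutil.NopCloser(bytes.NewReader(delta))
	resultRC, err := packfile.ReaderFromDelta(baseObj, deltaRC)
	if err != nil {
		log.Fatal(err)
	}
	defer resultRC.Close()

	result, err := ioutil.ReadAll(resultRC)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("patched stream matches target:", bytes.Equal(result, targetBuf))
}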