aboutsummaryrefslogtreecommitdiffstats
path: root/formats
diff options
context:
space:
mode:
Diffstat (limited to 'formats')
-rw-r--r--formats/packfile/decoder.go180
-rw-r--r--formats/packfile/decoder_test.go36
-rw-r--r--formats/packfile/parser.go154
-rw-r--r--formats/packfile/parser_test.go20
4 files changed, 247 insertions, 143 deletions
diff --git a/formats/packfile/decoder.go b/formats/packfile/decoder.go
index 3da927d..18ec6b9 100644
--- a/formats/packfile/decoder.go
+++ b/formats/packfile/decoder.go
@@ -2,8 +2,6 @@ package packfile
import (
"bytes"
- "io"
- "os"
"gopkg.in/src-d/go-git.v4/core"
)
@@ -32,9 +30,8 @@ var (
// ErrZLib is returned by Decode when there was an error unzipping
// the packfile contents.
ErrZLib = NewError("zlib reading error")
- // ErrDuplicatedObject is returned by Remember if an object appears several
- // times in a packfile.
- ErrDuplicatedObject = NewError("duplicated object")
+ // ErrNotSeeker is returned when a seekable scanner is required but not available
+ ErrNotSeeker = NewError("no seeker capable decode")
// ErrCannotRecall is returned by RecallByOffset or RecallByHash if the object
// to recall cannot be returned.
ErrCannotRecall = NewError("cannot recall object")
@@ -42,33 +39,43 @@ var (
// Decoder reads and decodes packfiles from an input stream.
type Decoder struct {
- p *Parser
- s core.ObjectStorage
- seeker io.Seeker
+ scanner *Scanner
+ storage core.ObjectStorage
offsetToObject map[int64]core.Object
hashToOffset map[core.Hash]int64
}
// NewDecoder returns a new Decoder that reads from r.
-func NewDecoder(s core.ObjectStorage, p *Parser, seeker io.Seeker) *Decoder {
+func NewDecoder(p *Scanner, s core.ObjectStorage) *Decoder {
return &Decoder{
- p: p,
- s: s,
- seeker: seeker,
+ scanner: p,
+ storage: s,
offsetToObject: make(map[int64]core.Object, 0),
hashToOffset: make(map[core.Hash]int64, 0),
}
}
// Decode reads a packfile and stores it in the value pointed to by s.
-func (d *Decoder) Decode() error {
- _, count, err := d.p.Header()
+func (d *Decoder) Decode() (checksum core.Hash, err error) {
+ if err := d.doDecode(); err != nil {
+ return core.ZeroHash, err
+ }
+
+ return d.scanner.Checksum()
+}
+
+func (d *Decoder) doDecode() error {
+ _, count, err := d.scanner.Header()
if err != nil {
return err
}
- tx := d.s.Begin()
- if err := d.readObjects(tx, count); err != nil {
+ if d.storage == nil {
+ return d.readObjects(count, nil)
+ }
+
+ tx := d.storage.Begin()
+ if err := d.readObjects(count, tx); err != nil {
if err := tx.Rollback(); err != nil {
return nil
}
@@ -76,23 +83,27 @@ func (d *Decoder) Decode() error {
return err
}
- return tx.Commit()
+ if err := tx.Commit(); err != nil {
+ return err
+ }
+
+ return nil
}
-func (d *Decoder) readObjects(tx core.TxObjectStorage, count uint32) error {
- // This code has 50-80 µs of overhead per object not counting zlib inflation.
- // Together with zlib inflation, it's 400-410 µs for small objects.
- // That's 1 sec for ~2450 objects, ~4.20 MB, or ~250 ms per MB,
- // of which 12-20 % is _not_ zlib inflation (ie. is our code).
+func (d *Decoder) readObjects(count uint32, tx core.TxObjectStorage) error {
for i := 0; i < int(count); i++ {
obj, err := d.readObject()
if err != nil {
return err
}
+ if tx == nil {
+ continue
+ }
+
_, err = tx.Set(obj)
- if err == io.EOF {
- break
+ if err != nil {
+ return err
}
}
@@ -100,12 +111,12 @@ func (d *Decoder) readObjects(tx core.TxObjectStorage, count uint32) error {
}
func (d *Decoder) readObject() (core.Object, error) {
- h, err := d.p.NextObjectHeader()
+ h, err := d.scanner.NextObjectHeader()
if err != nil {
return nil, err
}
- obj := d.s.NewObject()
+ obj := d.newObject()
obj.SetSize(h.Length)
obj.SetType(h.Type)
@@ -120,7 +131,20 @@ func (d *Decoder) readObject() (core.Object, error) {
err = ErrInvalidObject.AddDetails("type %q", h.Type)
}
- return obj, d.remember(h.Offset, obj)
+ if err != nil {
+ return obj, err
+ }
+
+ d.remember(h.Offset, obj)
+ return obj, nil
+}
+
+func (d *Decoder) newObject() core.Object {
+ if d.storage == nil {
+ return &core.MemoryObject{}
+ }
+
+ return d.storage.NewObject()
}
func (d *Decoder) fillRegularObjectContent(obj core.Object) error {
@@ -129,103 +153,107 @@ func (d *Decoder) fillRegularObjectContent(obj core.Object) error {
return err
}
- _, err = d.p.NextObject(w)
+ _, err = d.scanner.NextObject(w)
return err
}
func (d *Decoder) fillREFDeltaObjectContent(obj core.Object, ref core.Hash) error {
- base, err := d.recallByHash(ref)
- if err != nil {
- return err
- }
- obj.SetType(base.Type())
- if err := d.readAndApplyDelta(obj, base); err != nil {
+ buf := bytes.NewBuffer(nil)
+ if _, err := d.scanner.NextObject(buf); err != nil {
return err
}
- return nil
-}
-
-func (d *Decoder) fillOFSDeltaObjectContent(obj core.Object, offset int64) error {
- base, err := d.recallByOffset(offset)
+ base, err := d.recallByHash(ref)
if err != nil {
return err
}
obj.SetType(base.Type())
- if err := d.readAndApplyDelta(obj, base); err != nil {
- return err
- }
-
- return nil
+ return ApplyDelta(obj, base, buf.Bytes())
}
-// ReadAndApplyDelta reads and apply the base patched with the contents
-// of a zlib compressed diff data in the delta portion of an object
-// entry in the packfile.
-func (d *Decoder) readAndApplyDelta(target, base core.Object) error {
+func (d *Decoder) fillOFSDeltaObjectContent(obj core.Object, offset int64) error {
buf := bytes.NewBuffer(nil)
- if _, err := d.p.NextObject(buf); err != nil {
+ if _, err := d.scanner.NextObject(buf); err != nil {
+ return err
+ }
+
+ base, err := d.recallByOffset(offset)
+ if err != nil {
return err
}
- return ApplyDelta(target, base, buf.Bytes())
+ obj.SetType(base.Type())
+ return ApplyDelta(obj, base, buf.Bytes())
}
-// Remember stores the offset of the object and its hash, but not the
-// object itself. This implementation does not check for already stored
-// offsets, as it is too expensive to build this information from an
-// index every time a get operation is performed on the SeekableReadRecaller.
-func (r *Decoder) remember(o int64, obj core.Object) error {
+// remember stores the offset of the object and its hash. If the scanner is
+// not seekable, the object itself is also kept in memory
+func (d *Decoder) remember(o int64, obj core.Object) {
h := obj.Hash()
- r.hashToOffset[h] = o
- r.offsetToObject[o] = obj
- return nil
+
+ d.hashToOffset[h] = o
+ if !d.scanner.IsSeekable() {
+ d.offsetToObject[o] = obj
+ }
}
-// RecallByHash returns the object for a given hash by looking for it again in
+// recallByHash returns the object for a given hash by looking for it again in
// the io.ReadeSeerker.
-func (r *Decoder) recallByHash(h core.Hash) (core.Object, error) {
- o, ok := r.hashToOffset[h]
+func (d *Decoder) recallByHash(h core.Hash) (core.Object, error) {
+ o, ok := d.hashToOffset[h]
if !ok {
return nil, ErrCannotRecall.AddDetails("hash not found: %s", h)
}
- return r.recallByOffset(o)
+ return d.recallByOffset(o)
}
-// RecallByOffset returns the object for a given offset by looking for it again in
+// recallByOffset returns the object for a given offset by looking for it again in
// the io.ReadeSeerker. For efficiency reasons, this method always find objects by
// offset, even if they have not been remembered or if they have been forgetted.
-func (r *Decoder) recallByOffset(o int64) (obj core.Object, err error) {
- obj, ok := r.offsetToObject[o]
+func (d *Decoder) recallByOffset(o int64) (core.Object, error) {
+ obj, ok := d.offsetToObject[o]
if ok {
return obj, nil
}
- if !ok && r.seeker == nil {
+ if !ok && !d.scanner.IsSeekable() {
return nil, ErrCannotRecall.AddDetails("no object found at offset %d", o)
}
- // remember current offset
- beforeJump, err := r.seeker.Seek(0, os.SEEK_CUR)
+ return d.ReadObjectAt(o)
+}
+
+// ReadObjectAt reads an object at the given location
+func (d *Decoder) ReadObjectAt(offset int64) (core.Object, error) {
+ if !d.scanner.IsSeekable() {
+ return nil, ErrNotSeeker
+ }
+
+ beforeJump, err := d.scanner.Seek(offset)
if err != nil {
return nil, err
}
defer func() {
- // jump back
- _, seekErr := r.seeker.Seek(beforeJump, os.SEEK_SET)
+ _, seekErr := d.scanner.Seek(beforeJump)
if err == nil {
err = seekErr
}
}()
- // jump to requested offset
- _, err = r.seeker.Seek(o, os.SEEK_SET)
- if err != nil {
- return nil, err
- }
+ return d.readObject()
+}
+
+// Index returns an index mapping the hash of each object read to the
+// position where it was read
+func (d *Decoder) Index() map[core.Hash]int64 {
+ return d.hashToOffset
+}
- return r.readObject()
+// Close closes the Scanner; usually this means that the whole reader is read and
+// discarded
+func (d *Decoder) Close() error {
+ return d.scanner.Close()
}
diff --git a/formats/packfile/decoder_test.go b/formats/packfile/decoder_test.go
index 5a95af1..e229f50 100644
--- a/formats/packfile/decoder_test.go
+++ b/formats/packfile/decoder_test.go
@@ -24,9 +24,9 @@ func (s *ReaderSuite) TestReadPackfile(c *C) {
data, _ := base64.StdEncoding.DecodeString(packFileWithEmptyObjects)
f := bytes.NewReader(data)
sto := memory.NewStorage()
- d := NewDecoder(sto.ObjectStorage(), NewParser(f), nil)
+ d := NewDecoder(NewScanner(f), sto.ObjectStorage())
- err := d.Decode()
+ _, err := d.Decode()
c.Assert(err, IsNil)
AssertObjects(c, sto, []string{
@@ -44,21 +44,39 @@ func (s *ReaderSuite) TestReadPackfile(c *C) {
})
}
-func (s *ReaderSuite) TestReadPackfileOFSDelta(c *C) {
- s.testReadPackfileGitFixture(c, "fixtures/git-fixture.ofs-delta", OFSDeltaFormat)
+func (s *ReaderSuite) TestDecodeOFSDelta(c *C) {
+ s.testDecode(c, "fixtures/git-fixture.ofs-delta", true)
+}
+func (s *ReaderSuite) TestDecodeOFSDeltaNoSeekable(c *C) {
+ s.testDecode(c, "fixtures/git-fixture.ofs-delta", false)
}
-func (s *ReaderSuite) TestReadPackfileREFDelta(c *C) {
- s.testReadPackfileGitFixture(c, "fixtures/git-fixture.ref-delta", REFDeltaFormat)
+
+func (s *ReaderSuite) TestDecodeREFDelta(c *C) {
+ s.testDecode(c, "fixtures/git-fixture.ref-delta", true)
}
-func (s *ReaderSuite) testReadPackfileGitFixture(c *C, file string, format Format) {
+func (s *ReaderSuite) TestDecodeREFDeltaNoSeekable(c *C) {
+ s.testDecode(c, "fixtures/git-fixture.ref-delta", false)
+}
+
+func (s *ReaderSuite) testDecode(c *C, file string, seekable bool) {
f, err := os.Open(file)
c.Assert(err, IsNil)
+
+ scanner := NewScanner(f)
+ if !seekable {
+ scanner = NewScannerFromReader(f)
+ }
+
+ s.doTestDecodeWithScanner(c, scanner)
+}
+
+func (s *ReaderSuite) doTestDecodeWithScanner(c *C, scanner *Scanner) {
sto := memory.NewStorage()
- d := NewDecoder(sto.ObjectStorage(), NewParser(f), f)
+ d := NewDecoder(scanner, sto.ObjectStorage())
- err = d.Decode()
+ _, err := d.Decode()
c.Assert(err, IsNil)
AssertObjects(c, sto, []string{
diff --git a/formats/packfile/parser.go b/formats/packfile/parser.go
index c1653c8..94f552a 100644
--- a/formats/packfile/parser.go
+++ b/formats/packfile/parser.go
@@ -19,6 +19,8 @@ var (
// ErrUnsupportedVersion is returned by ReadHeader when the packfile version is
// different than VersionSupported.
ErrUnsupportedVersion = NewError("unsupported packfile version")
+ // ErrSeekNotSupported is returned if seeking is not supported
+ ErrSeekNotSupported = NewError("not seek support")
)
const (
@@ -26,6 +28,8 @@ const (
VersionSupported uint32 = 2
)
+// ObjectHeader contains the information related to the object; this information
+// is collected from the bytes preceding the object's content.
type ObjectHeader struct {
Type core.ObjectType
Offset int64
@@ -36,8 +40,8 @@ type ObjectHeader struct {
// A Parser is a collection of functions to read and process data form a packfile.
// Values from this type are not zero-value safe. See the NewParser function bellow.
-type Parser struct {
- r *trackableReader
+type Scanner struct {
+ r *byteReadSeeker
// pendingObject is used to detect if an object has been read, or still
// is waiting to be read
@@ -45,15 +49,21 @@ type Parser struct {
}
// NewParser returns a new Parser that reads from the packfile represented by r.
-func NewParser(r io.Reader) *Parser {
- return &Parser{r: &trackableReader{Reader: r}}
+func NewScannerFromReader(r io.Reader) *Scanner {
+ s := &trackableReader{Reader: r}
+ return NewScanner(s)
+}
+
+func NewScanner(r io.ReadSeeker) *Scanner {
+ s := &byteReadSeeker{r}
+ return &Scanner{r: s}
}
// Header reads the whole packfile header (signature, version and object count).
// It returns the version and the object count and performs checks on the
// validity of the signature and the version fields.
-func (p *Parser) Header() (version, objects uint32, err error) {
- sig, err := p.readSignature()
+func (s *Scanner) Header() (version, objects uint32, err error) {
+ sig, err := s.readSignature()
if err != nil {
if err == io.EOF {
err = ErrEmptyPackfile
@@ -62,29 +72,29 @@ func (p *Parser) Header() (version, objects uint32, err error) {
return
}
- if !p.isValidSignature(sig) {
+ if !s.isValidSignature(sig) {
err = ErrBadSignature
return
}
- version, err = p.readVersion()
+ version, err = s.readVersion()
if err != nil {
return
}
- if !p.isSupportedVersion(version) {
+ if !s.isSupportedVersion(version) {
err = ErrUnsupportedVersion.AddDetails("%d", version)
return
}
- objects, err = p.readCount()
+ objects, err = s.readCount()
return
}
// readSignature reads an returns the signature field in the packfile.
-func (p *Parser) readSignature() ([]byte, error) {
+func (s *Scanner) readSignature() ([]byte, error) {
var sig = make([]byte, 4)
- if _, err := io.ReadFull(p.r, sig); err != nil {
+ if _, err := io.ReadFull(s.r, sig); err != nil {
return []byte{}, err
}
@@ -92,58 +102,59 @@ func (p *Parser) readSignature() ([]byte, error) {
}
// isValidSignature returns if sig is a valid packfile signature.
-func (p *Parser) isValidSignature(sig []byte) bool {
+func (s *Scanner) isValidSignature(sig []byte) bool {
return bytes.Equal(sig, []byte{'P', 'A', 'C', 'K'})
}
// readVersion reads and returns the version field of a packfile.
-func (p *Parser) readVersion() (uint32, error) {
- return p.readInt32()
+func (s *Scanner) readVersion() (uint32, error) {
+ return s.readInt32()
}
// isSupportedVersion returns whether version v is supported by the parser.
// The current supported version is VersionSupported, defined above.
-func (p *Parser) isSupportedVersion(v uint32) bool {
+func (s *Scanner) isSupportedVersion(v uint32) bool {
return v == VersionSupported
}
// readCount reads and returns the count of objects field of a packfile.
-func (p *Parser) readCount() (uint32, error) {
- return p.readInt32()
+func (s *Scanner) readCount() (uint32, error) {
+ return s.readInt32()
}
// ReadInt32 reads 4 bytes and returns them as a Big Endian int32.
-func (p *Parser) readInt32() (uint32, error) {
+func (s *Scanner) readInt32() (uint32, error) {
var v uint32
- if err := binary.Read(p.r, binary.BigEndian, &v); err != nil {
+ if err := binary.Read(s.r, binary.BigEndian, &v); err != nil {
return 0, err
}
return v, nil
}
-func (p *Parser) NextObjectHeader() (*ObjectHeader, error) {
- if err := p.discardObjectIfNeeded(); err != nil {
+// NextObjectHeader returns the ObjectHeader for the next object in the reader
+func (s *Scanner) NextObjectHeader() (*ObjectHeader, error) {
+ if err := s.discardObjectIfNeeded(); err != nil {
return nil, err
}
h := &ObjectHeader{}
- p.pendingObject = h
+ s.pendingObject = h
var err error
- h.Offset, err = p.r.Offset()
+ h.Offset, err = s.r.Seek(0, io.SeekCurrent)
if err != nil {
return nil, err
}
- h.Type, h.Length, err = p.readObjectTypeAndLength()
+ h.Type, h.Length, err = s.readObjectTypeAndLength()
if err != nil {
return nil, err
}
switch h.Type {
case core.OFSDeltaObject:
- no, err := p.readNegativeOffset()
+ no, err := s.readNegativeOffset()
if err != nil {
return nil, err
}
@@ -151,7 +162,7 @@ func (p *Parser) NextObjectHeader() (*ObjectHeader, error) {
h.OffsetReference = h.Offset + no
case core.REFDeltaObject:
var err error
- h.Reference, err = p.readHash()
+ h.Reference, err = s.readHash()
if err != nil {
return nil, err
}
@@ -160,7 +171,7 @@ func (p *Parser) NextObjectHeader() (*ObjectHeader, error) {
return h, nil
}
-func (s *Parser) discardObjectIfNeeded() error {
+func (s *Scanner) discardObjectIfNeeded() error {
if s.pendingObject == nil {
return nil
}
@@ -183,21 +194,21 @@ func (s *Parser) discardObjectIfNeeded() error {
// ReadObjectTypeAndLength reads and returns the object type and the
// length field from an object entry in a packfile.
-func (p Parser) readObjectTypeAndLength() (core.ObjectType, int64, error) {
- t, c, err := p.readType()
+func (s *Scanner) readObjectTypeAndLength() (core.ObjectType, int64, error) {
+ t, c, err := s.readType()
if err != nil {
return t, 0, err
}
- l, err := p.readLength(c)
+ l, err := s.readLength(c)
return t, l, err
}
-func (p Parser) readType() (core.ObjectType, byte, error) {
+func (s *Scanner) readType() (core.ObjectType, byte, error) {
var c byte
var err error
- if c, err = p.r.ReadByte(); err != nil {
+ if c, err = s.r.ReadByte(); err != nil {
return core.ObjectType(0), 0, err
}
@@ -208,14 +219,14 @@ func (p Parser) readType() (core.ObjectType, byte, error) {
// the length is codified in the last 4 bits of the first byte and in
// the last 7 bits of subsequent bytes. Last byte has a 0 MSB.
-func (p *Parser) readLength(first byte) (int64, error) {
+func (s *Scanner) readLength(first byte) (int64, error) {
length := int64(first & maskFirstLength)
c := first
shift := firstLengthBits
var err error
for moreBytesInLength(c) {
- if c, err = p.r.ReadByte(); err != nil {
+ if c, err = s.r.ReadByte(); err != nil {
return 0, err
}
@@ -226,15 +237,15 @@ func (p *Parser) readLength(first byte) (int64, error) {
return length, nil
}
-func (p *Parser) NextObject(w io.Writer) (written int64, err error) {
- p.pendingObject = nil
- return p.copyObject(w)
+func (s *Scanner) NextObject(w io.Writer) (written int64, err error) {
+ s.pendingObject = nil
+ return s.copyObject(w)
}
// ReadRegularObject reads and write a non-deltified object
// from it zlib stream in an object entry in the packfile.
-func (p *Parser) copyObject(w io.Writer) (int64, error) {
- zr, err := zlib.NewReader(p.r)
+func (s *Scanner) copyObject(w io.Writer) (int64, error) {
+ zr, err := zlib.NewReader(s.r)
if err != nil {
if err != zlib.ErrHeader {
return -1, fmt.Errorf("zlib reading error: %s", err)
@@ -251,14 +262,35 @@ func (p *Parser) copyObject(w io.Writer) (int64, error) {
return io.Copy(w, zr)
}
-func (p *Parser) Checksum() (core.Hash, error) {
- return p.readHash()
+func (s *Scanner) IsSeekable() bool {
+ _, ok := s.r.ReadSeeker.(*trackableReader)
+ return !ok
+}
+
+// Seek sets a new offset from start and returns the position before the change
+func (s *Scanner) Seek(offset int64) (previous int64, err error) {
+ previous, err = s.r.Seek(0, io.SeekCurrent)
+ if err != nil {
+ return -1, err
+ }
+
+ _, err = s.r.Seek(offset, io.SeekStart)
+ return previous, err
+}
+
+func (s *Scanner) Checksum() (core.Hash, error) {
+ err := s.discardObjectIfNeeded()
+ if err != nil {
+ return core.ZeroHash, err
+ }
+
+ return s.readHash()
}
// ReadHash reads a hash.
-func (p *Parser) readHash() (core.Hash, error) {
+func (s *Scanner) readHash() (core.Hash, error) {
var h core.Hash
- if _, err := io.ReadFull(p.r, h[:]); err != nil {
+ if _, err := io.ReadFull(s.r, h[:]); err != nil {
return core.ZeroHash, err
}
@@ -292,18 +324,18 @@ func (p *Parser) readHash() (core.Hash, error) {
// while (ofs >>= 7)
// dheader[--pos] = 128 | (--ofs & 127);
//
-func (p *Parser) readNegativeOffset() (int64, error) {
+func (s *Scanner) readNegativeOffset() (int64, error) {
var c byte
var err error
- if c, err = p.r.ReadByte(); err != nil {
+ if c, err = s.r.ReadByte(); err != nil {
return 0, err
}
var offset = int64(c & maskLength)
for moreBytesInLength(c) {
offset++
- if c, err = p.r.ReadByte(); err != nil {
+ if c, err = s.r.ReadByte(); err != nil {
return 0, err
}
offset = (offset << lengthBits) + int64(c&maskLength)
@@ -312,6 +344,11 @@ func (p *Parser) readNegativeOffset() (int64, error) {
return -offset, nil
}
+func (s *Scanner) Close() error {
+ _, err := io.Copy(ioutil.Discard, s.r)
+ return err
+}
+
func moreBytesInLength(c byte) bool {
return c&maskContinue > 0
}
@@ -342,16 +379,23 @@ func (r *trackableReader) Read(p []byte) (n int, err error) {
return
}
+// Seek only supports io.SeekCurrent, any other operation fails
+func (r *trackableReader) Seek(offset int64, whence int) (int64, error) {
+ if whence != io.SeekCurrent {
+ return -1, ErrSeekNotSupported
+ }
+
+ return r.count, nil
+}
+
+type byteReadSeeker struct {
+ io.ReadSeeker
+}
+
// ReadByte reads a byte.
-func (r *trackableReader) ReadByte() (byte, error) {
+func (r *byteReadSeeker) ReadByte() (byte, error) {
var p [1]byte
- _, err := r.Reader.Read(p[:])
- r.count++
+ _, err := r.ReadSeeker.Read(p[:])
return p[0], err
}
-
-// Offset returns the number of bytes read.
-func (r *trackableReader) Offset() (int64, error) {
- return r.count, nil
-}
diff --git a/formats/packfile/parser_test.go b/formats/packfile/parser_test.go
index a7959a0..0c07daa 100644
--- a/formats/packfile/parser_test.go
+++ b/formats/packfile/parser_test.go
@@ -3,6 +3,7 @@ package packfile
import (
"bytes"
"encoding/base64"
+ "io/ioutil"
. "gopkg.in/check.v1"
"gopkg.in/src-d/go-git.v4/core"
@@ -15,7 +16,7 @@ var _ = Suite(&ScannerSuite{})
func (s *ScannerSuite) TestHeader(c *C) {
data, _ := base64.StdEncoding.DecodeString(packFileWithEmptyObjects)
- p := NewParser(bytes.NewReader(data))
+ p := NewScanner(bytes.NewReader(data))
version, objects, err := p.Header()
c.Assert(err, IsNil)
c.Assert(version, Equals, VersionSupported)
@@ -25,7 +26,8 @@ func (s *ScannerSuite) TestHeader(c *C) {
func (s *ScannerSuite) TestNextObjectHeader(c *C) {
data, _ := base64.StdEncoding.DecodeString(packFileWithEmptyObjects)
- p := NewParser(bytes.NewReader(data))
+ r := bytes.NewReader(data)
+ p := NewScanner(r)
_, objects, err := p.Header()
c.Assert(err, IsNil)
@@ -39,12 +41,17 @@ func (s *ScannerSuite) TestNextObjectHeader(c *C) {
c.Assert(err, IsNil)
c.Assert(n, Equals, h.Length)
}
+
+ n, err := ioutil.ReadAll(r)
+ c.Assert(err, IsNil)
+ c.Assert(n, HasLen, 20)
}
func (s *ScannerSuite) TestNextObjectHeaderWithOutReadObject(c *C) {
data, _ := base64.StdEncoding.DecodeString(packFileWithEmptyObjects)
- p := NewParser(bytes.NewReader(data))
+ r := bytes.NewReader(data)
+ p := NewScanner(r)
_, objects, err := p.Header()
c.Assert(err, IsNil)
@@ -53,6 +60,13 @@ func (s *ScannerSuite) TestNextObjectHeaderWithOutReadObject(c *C) {
c.Assert(err, IsNil)
c.Assert(*h, DeepEquals, expectedHeaders[i])
}
+
+ err = p.discardObjectIfNeeded()
+ c.Assert(err, IsNil)
+
+ n, err := ioutil.ReadAll(r)
+ c.Assert(err, IsNil)
+ c.Assert(n, HasLen, 20)
}
var expectedHeaders = []ObjectHeader{