aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMiguel Molina <miguel@erizocosmi.co>2018-08-07 18:41:19 +0200
committerMiguel Molina <miguel@erizocosmi.co>2018-08-07 18:41:19 +0200
commit6a24b4c1f0cb9e5daf30fa7979f2643a967af1ad (patch)
tree970b68805183e6d6907a27cf6f796091d9dd3346
parentb173cc03edd25af3e868d83037168663ef84a329 (diff)
downloadgo-git-6a24b4c1f0cb9e5daf30fa7979f2643a967af1ad.tar.gz
*: use parser to populate non writable storages and bug fixes
Signed-off-by: Miguel Molina <miguel@erizocosmi.co>
-rw-r--r--common_test.go9
-rw-r--r--plumbing/format/idxfile/writer.go2
-rw-r--r--plumbing/format/idxfile/writer_test.go2
-rw-r--r--plumbing/format/packfile/common.go79
-rw-r--r--plumbing/format/packfile/decoder.go553
-rw-r--r--plumbing/format/packfile/decoder_test.go430
-rw-r--r--plumbing/format/packfile/encoder_advanced_test.go37
-rw-r--r--plumbing/format/packfile/encoder_test.go110
-rw-r--r--plumbing/format/packfile/packfile.go135
-rw-r--r--plumbing/format/packfile/packfile_test.go169
-rw-r--r--plumbing/format/packfile/parser.go130
-rw-r--r--plumbing/format/packfile/parser_test.go2
-rw-r--r--plumbing/object/blob_test.go23
-rw-r--r--plumbing/object/difftree_test.go16
-rw-r--r--plumbing/object/object_test.go5
-rw-r--r--plumbing/transport/test/receive_pack.go8
-rw-r--r--plumbing/transport/test/upload_pack.go5
-rw-r--r--storage/filesystem/object.go77
-rw-r--r--storage/filesystem/object_test.go6
19 files changed, 561 insertions, 1237 deletions
diff --git a/common_test.go b/common_test.go
index f8f4e61..efe1ecc 100644
--- a/common_test.go
+++ b/common_test.go
@@ -113,14 +113,7 @@ func (s *BaseSuite) NewRepositoryFromPackfile(f *fixtures.Fixture) *Repository {
p := f.Packfile()
defer p.Close()
- n := packfile.NewScanner(p)
- d, err := packfile.NewDecoder(n, storer)
- if err != nil {
- panic(err)
- }
-
- _, err = d.Decode()
- if err != nil {
+ if err := packfile.UpdateObjectStorage(storer, p); err != nil {
panic(err)
}
diff --git a/plumbing/format/idxfile/writer.go b/plumbing/format/idxfile/writer.go
index a22cf16..89b79cd 100644
--- a/plumbing/format/idxfile/writer.go
+++ b/plumbing/format/idxfile/writer.go
@@ -74,7 +74,7 @@ func (w *Writer) OnInflatedObjectHeader(t plumbing.ObjectType, objSize int64, po
}
// OnInflatedObjectContent implements packfile.Observer interface.
-func (w *Writer) OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32) error {
+func (w *Writer) OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32, _ []byte) error {
w.Add(h, uint64(pos), crc)
return nil
}
diff --git a/plumbing/format/idxfile/writer_test.go b/plumbing/format/idxfile/writer_test.go
index 780acd9..7c3cceb 100644
--- a/plumbing/format/idxfile/writer_test.go
+++ b/plumbing/format/idxfile/writer_test.go
@@ -52,7 +52,7 @@ func (s *WriterSuite) TestWriterLarge(c *C) {
c.Assert(err, IsNil)
for _, o := range fixture4GbEntries {
- err = writer.OnInflatedObjectContent(plumbing.NewHash(o.hash), o.offset, o.crc)
+ err = writer.OnInflatedObjectContent(plumbing.NewHash(o.hash), o.offset, o.crc, nil)
c.Assert(err, IsNil)
}
diff --git a/plumbing/format/packfile/common.go b/plumbing/format/packfile/common.go
index beb015d..76254f0 100644
--- a/plumbing/format/packfile/common.go
+++ b/plumbing/format/packfile/common.go
@@ -2,9 +2,11 @@ package packfile
import (
"bytes"
+ "errors"
"io"
"sync"
+ "gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
"gopkg.in/src-d/go-git.v4/utils/ioutil"
)
@@ -23,24 +25,24 @@ const (
maskType = uint8(112) // 0111 0000
)
-// UpdateObjectStorage updates the given storer.EncodedObjectStorer with the contents of the
+// UpdateObjectStorage updates the storer with the objects in the given
// packfile.
-func UpdateObjectStorage(s storer.EncodedObjectStorer, packfile io.Reader) error {
- if sw, ok := s.(storer.PackfileWriter); ok {
- return writePackfileToObjectStorage(sw, packfile)
+func UpdateObjectStorage(s storer.Storer, packfile io.Reader) error {
+ if pw, ok := s.(storer.PackfileWriter); ok {
+ return WritePackfileToObjectStorage(pw, packfile)
}
- stream := NewScanner(packfile)
- d, err := NewDecoder(stream, s)
- if err != nil {
- return err
- }
-
- _, err = d.Decode()
+ updater := newPackfileStorageUpdater(s)
+ _, err := NewParser(NewScanner(packfile), updater).Parse()
return err
}
-func writePackfileToObjectStorage(sw storer.PackfileWriter, packfile io.Reader) (err error) {
+// WritePackfileToObjectStorage writes all the packfile objects into the given
+// object storage.
+func WritePackfileToObjectStorage(
+ sw storer.PackfileWriter,
+ packfile io.Reader,
+) (err error) {
w, err := sw.PackfileWriter()
if err != nil {
return err
@@ -56,3 +58,56 @@ var bufPool = sync.Pool{
return bytes.NewBuffer(nil)
},
}
+
+var errMissingObjectContent = errors.New("missing object content")
+
+type packfileStorageUpdater struct {
+ storer.Storer
+ lastSize int64
+ lastType plumbing.ObjectType
+}
+
+func newPackfileStorageUpdater(s storer.Storer) *packfileStorageUpdater {
+ return &packfileStorageUpdater{Storer: s}
+}
+
+func (p *packfileStorageUpdater) OnHeader(count uint32) error {
+ return nil
+}
+
+func (p *packfileStorageUpdater) OnInflatedObjectHeader(
+ t plumbing.ObjectType,
+ objSize int64,
+ pos int64,
+) error {
+ if p.lastSize > 0 || p.lastType != plumbing.InvalidObject {
+ return errMissingObjectContent
+ }
+
+ p.lastType = t
+ p.lastSize = objSize
+ return nil
+}
+
+func (p *packfileStorageUpdater) OnInflatedObjectContent(
+ h plumbing.Hash,
+ pos int64,
+ crc uint32,
+ content []byte,
+) error {
+ obj := new(plumbing.MemoryObject)
+ obj.SetSize(p.lastSize)
+ obj.SetType(p.lastType)
+ if _, err := obj.Write(content); err != nil {
+ return err
+ }
+
+ _, err := p.SetEncodedObject(obj)
+ p.lastSize = 0
+ p.lastType = plumbing.InvalidObject
+ return err
+}
+
+func (p *packfileStorageUpdater) OnFooter(h plumbing.Hash) error {
+ return nil
+}
diff --git a/plumbing/format/packfile/decoder.go b/plumbing/format/packfile/decoder.go
deleted file mode 100644
index 6bb0677..0000000
--- a/plumbing/format/packfile/decoder.go
+++ /dev/null
@@ -1,553 +0,0 @@
-package packfile
-
-import (
- "bytes"
- "io"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/cache"
- "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
-)
-
-// Format specifies if the packfile uses ref-deltas or ofs-deltas.
-type Format int
-
-// Possible values of the Format type.
-const (
- UnknownFormat Format = iota
- OFSDeltaFormat
- REFDeltaFormat
-)
-
-var (
- // ErrMaxObjectsLimitReached is returned by Decode when the number
- // of objects in the packfile is higher than
- // Decoder.MaxObjectsLimit.
- ErrMaxObjectsLimitReached = NewError("max. objects limit reached")
- // ErrInvalidObject is returned by Decode when an invalid object is
- // found in the packfile.
- ErrInvalidObject = NewError("invalid git object")
- // ErrPackEntryNotFound is returned by Decode when a reference in
- // the packfile references and unknown object.
- ErrPackEntryNotFound = NewError("can't find a pack entry")
- // ErrZLib is returned by Decode when there was an error unzipping
- // the packfile contents.
- ErrZLib = NewError("zlib reading error")
- // ErrCannotRecall is returned by RecallByOffset or RecallByHash if the object
- // to recall cannot be returned.
- ErrCannotRecall = NewError("cannot recall object")
- // ErrResolveDeltasNotSupported is returned if a NewDecoder is used with a
- // non-seekable scanner and without a plumbing.ObjectStorage
- ErrResolveDeltasNotSupported = NewError("resolve delta is not supported")
- // ErrNonSeekable is returned if a ReadObjectAt method is called without a
- // seekable scanner
- ErrNonSeekable = NewError("non-seekable scanner")
- // ErrRollback error making Rollback over a transaction after an error
- ErrRollback = NewError("rollback error, during set error")
- // ErrAlreadyDecoded is returned if NewDecoder is called for a second time
- ErrAlreadyDecoded = NewError("packfile was already decoded")
-)
-
-// Decoder reads and decodes packfiles from an input Scanner, if an ObjectStorer
-// was provided the decoded objects are store there. If not the decode object
-// is destroyed. The Offsets and CRCs are calculated whether an
-// ObjectStorer was provided or not.
-type Decoder struct {
- deltaBaseCache cache.Object
-
- s *Scanner
- o storer.EncodedObjectStorer
- tx storer.Transaction
-
- isDecoded bool
-
- // hasBuiltIndex indicates if the index is fully built or not. If it is not,
- // will be built incrementally while decoding.
- hasBuiltIndex bool
- idx idxfile.Index
- writer *idxfile.Writer
-
- offsetToType map[int64]plumbing.ObjectType
- decoderType plumbing.ObjectType
- offsetToHash map[int64]plumbing.Hash
-}
-
-// NewDecoder returns a new Decoder that decodes a Packfile using the given
-// Scanner and stores the objects in the provided EncodedObjectStorer. ObjectStorer can be nil, in this
-// If the passed EncodedObjectStorer is nil, objects are not stored, but
-// offsets on the Packfile and CRCs are calculated.
-//
-// If EncodedObjectStorer is nil and the Scanner is not Seekable, ErrNonSeekable is
-// returned.
-//
-// If the ObjectStorer implements storer.Transactioner, a transaction is created
-// during the Decode execution. If anything fails, Rollback is called
-func NewDecoder(s *Scanner, o storer.EncodedObjectStorer) (*Decoder, error) {
- return NewDecoderForType(s, o, plumbing.AnyObject,
- cache.NewObjectLRUDefault())
-}
-
-// NewDecoderWithCache is a version of NewDecoder where cache can be specified.
-func NewDecoderWithCache(s *Scanner, o storer.EncodedObjectStorer,
- cacheObject cache.Object) (*Decoder, error) {
-
- return NewDecoderForType(s, o, plumbing.AnyObject, cacheObject)
-}
-
-// NewDecoderForType returns a new Decoder but in this case for a specific object type.
-// When an object is read using this Decoder instance and it is not of the same type of
-// the specified one, nil will be returned. This is intended to avoid the content
-// deserialization of all the objects.
-//
-// cacheObject is a cache.Object implementation that is used to speed up the
-// process. If cache is not needed you can pass nil. To create an LRU cache
-// object with the default size you can use the helper cache.ObjectLRUDefault().
-func NewDecoderForType(s *Scanner, o storer.EncodedObjectStorer,
- t plumbing.ObjectType, cacheObject cache.Object) (*Decoder, error) {
-
- if t == plumbing.OFSDeltaObject ||
- t == plumbing.REFDeltaObject ||
- t == plumbing.InvalidObject {
- return nil, plumbing.ErrInvalidType
- }
-
- if !canResolveDeltas(s, o) {
- return nil, ErrResolveDeltasNotSupported
- }
-
- return &Decoder{
- s: s,
- o: o,
- deltaBaseCache: cacheObject,
-
- idx: idxfile.NewMemoryIndex(),
- writer: new(idxfile.Writer),
- offsetToType: make(map[int64]plumbing.ObjectType),
- offsetToHash: make(map[int64]plumbing.Hash),
- decoderType: t,
- }, nil
-}
-
-func canResolveDeltas(s *Scanner, o storer.EncodedObjectStorer) bool {
- return s.IsSeekable || o != nil
-}
-
-// Decode reads a packfile and stores it in the value pointed to by s. The
-// offsets and the CRCs are calculated by this method
-func (d *Decoder) Decode() (checksum plumbing.Hash, err error) {
- defer func() { d.isDecoded = true }()
-
- if d.isDecoded {
- return plumbing.ZeroHash, ErrAlreadyDecoded
- }
-
- if err := d.doDecode(); err != nil {
- return plumbing.ZeroHash, err
- }
-
- checksum, err = d.s.Checksum()
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- if !d.hasBuiltIndex {
- d.writer.OnFooter(checksum)
-
- idx, err := d.writer.Index()
- if err != nil {
- return plumbing.ZeroHash, err
- }
- d.SetIndex(idx)
- }
-
- return checksum, err
-}
-
-func (d *Decoder) fillOffsetsToHashes() error {
- entries, err := d.idx.Entries()
- if err != nil {
- return err
- }
-
- for {
- e, err := entries.Next()
- if err != nil {
- if err == io.EOF {
- break
- }
- return err
- }
-
- d.offsetToHash[int64(e.Offset)] = e.Hash
- }
-
- return entries.Close()
-}
-
-func (d *Decoder) doDecode() error {
- _, count, err := d.s.Header()
- if err != nil {
- return err
- }
-
- if !d.hasBuiltIndex {
- d.writer.OnHeader(count)
- }
-
- if d.hasBuiltIndex && !d.s.IsSeekable {
- if err := d.fillOffsetsToHashes(); err != nil {
- return err
- }
- }
-
- _, isTxStorer := d.o.(storer.Transactioner)
- switch {
- case d.o == nil:
- err = d.decodeObjects(int(count))
- case isTxStorer:
- err = d.decodeObjectsWithObjectStorerTx(int(count))
- default:
- err = d.decodeObjectsWithObjectStorer(int(count))
- }
-
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (d *Decoder) decodeObjects(count int) error {
- for i := 0; i < count; i++ {
- if _, err := d.DecodeObject(); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (d *Decoder) decodeObjectsWithObjectStorer(count int) error {
- for i := 0; i < count; i++ {
- obj, err := d.DecodeObject()
- if err != nil {
- return err
- }
-
- if _, err := d.o.SetEncodedObject(obj); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (d *Decoder) decodeObjectsWithObjectStorerTx(count int) error {
- d.tx = d.o.(storer.Transactioner).Begin()
-
- for i := 0; i < count; i++ {
- obj, err := d.DecodeObject()
- if err != nil {
- return err
- }
-
- if _, err := d.tx.SetEncodedObject(obj); err != nil {
- if rerr := d.tx.Rollback(); rerr != nil {
- return ErrRollback.AddDetails(
- "error: %s, during tx.Set error: %s", rerr, err,
- )
- }
-
- return err
- }
-
- }
-
- return d.tx.Commit()
-}
-
-// DecodeObject reads the next object from the scanner and returns it. This
-// method can be used in replacement of the Decode method, to work in a
-// interactive way. If you created a new decoder instance using NewDecoderForType
-// constructor, if the object decoded is not equals to the specified one, nil will
-// be returned
-func (d *Decoder) DecodeObject() (plumbing.EncodedObject, error) {
- return d.doDecodeObject(d.decoderType)
-}
-
-func (d *Decoder) doDecodeObject(t plumbing.ObjectType) (plumbing.EncodedObject, error) {
- h, err := d.s.NextObjectHeader()
- if err != nil {
- return nil, err
- }
-
- if t == plumbing.AnyObject {
- return d.decodeByHeader(h)
- }
-
- return d.decodeIfSpecificType(h)
-}
-
-func (d *Decoder) decodeIfSpecificType(h *ObjectHeader) (plumbing.EncodedObject, error) {
- var (
- obj plumbing.EncodedObject
- realType plumbing.ObjectType
- err error
- )
- switch h.Type {
- case plumbing.OFSDeltaObject:
- realType, err = d.ofsDeltaType(h.OffsetReference)
- case plumbing.REFDeltaObject:
- realType, err = d.refDeltaType(h.Reference)
- if err == plumbing.ErrObjectNotFound {
- obj, err = d.decodeByHeader(h)
- if err != nil {
- realType = obj.Type()
- }
- }
- default:
- realType = h.Type
- }
-
- if err != nil {
- return nil, err
- }
-
- d.offsetToType[h.Offset] = realType
-
- if d.decoderType == realType {
- if obj != nil {
- return obj, nil
- }
-
- return d.decodeByHeader(h)
- }
-
- return nil, nil
-}
-
-func (d *Decoder) ofsDeltaType(offset int64) (plumbing.ObjectType, error) {
- t, ok := d.offsetToType[offset]
- if !ok {
- return plumbing.InvalidObject, plumbing.ErrObjectNotFound
- }
-
- return t, nil
-}
-
-func (d *Decoder) refDeltaType(ref plumbing.Hash) (plumbing.ObjectType, error) {
- offset, err := d.idx.FindOffset(ref)
- if err != nil {
- return plumbing.InvalidObject, plumbing.ErrObjectNotFound
- }
-
- return d.ofsDeltaType(offset)
-}
-
-func (d *Decoder) decodeByHeader(h *ObjectHeader) (plumbing.EncodedObject, error) {
- obj := d.newObject()
- obj.SetSize(h.Length)
- obj.SetType(h.Type)
-
- var crc uint32
- var err error
- switch h.Type {
- case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
- crc, err = d.fillRegularObjectContent(obj)
- case plumbing.REFDeltaObject:
- crc, err = d.fillREFDeltaObjectContent(obj, h.Reference)
- case plumbing.OFSDeltaObject:
- crc, err = d.fillOFSDeltaObjectContent(obj, h.OffsetReference)
- default:
- err = ErrInvalidObject.AddDetails("type %q", h.Type)
- }
-
- if err != nil {
- return obj, err
- }
-
- if !d.hasBuiltIndex {
- d.writer.Add(obj.Hash(), uint64(h.Offset), crc)
- }
-
- d.offsetToHash[h.Offset] = obj.Hash()
-
- return obj, nil
-}
-
-func (d *Decoder) newObject() plumbing.EncodedObject {
- if d.o == nil {
- return &plumbing.MemoryObject{}
- }
-
- return d.o.NewEncodedObject()
-}
-
-// DecodeObjectAt reads an object at the given location. Every EncodedObject
-// returned is added into a internal index. This is intended to be able to regenerate
-// objects from deltas (offset deltas or reference deltas) without an package index
-// (.idx file). If Decode wasn't called previously objects offset should provided
-// using the SetOffsets method. It decodes the object regardless of the Decoder
-// type.
-func (d *Decoder) DecodeObjectAt(offset int64) (plumbing.EncodedObject, error) {
- if !d.s.IsSeekable {
- return nil, ErrNonSeekable
- }
-
- beforeJump, err := d.s.SeekFromStart(offset)
- if err != nil {
- return nil, err
- }
-
- defer func() {
- _, seekErr := d.s.SeekFromStart(beforeJump)
- if err == nil {
- err = seekErr
- }
- }()
-
- return d.doDecodeObject(plumbing.AnyObject)
-}
-
-func (d *Decoder) fillRegularObjectContent(obj plumbing.EncodedObject) (uint32, error) {
- w, err := obj.Writer()
- if err != nil {
- return 0, err
- }
-
- _, crc, err := d.s.NextObject(w)
- return crc, err
-}
-
-func (d *Decoder) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plumbing.Hash) (uint32, error) {
- buf := bufPool.Get().(*bytes.Buffer)
- buf.Reset()
- _, crc, err := d.s.NextObject(buf)
- if err != nil {
- return 0, err
- }
-
- base, ok := d.cacheGet(ref)
- if !ok {
- base, err = d.recallByHash(ref)
- if err != nil {
- return 0, err
- }
- }
-
- obj.SetType(base.Type())
- err = ApplyDelta(obj, base, buf.Bytes())
- d.cachePut(obj)
- bufPool.Put(buf)
-
- return crc, err
-}
-
-func (d *Decoder) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset int64) (uint32, error) {
- buf := bytes.NewBuffer(nil)
- _, crc, err := d.s.NextObject(buf)
- if err != nil {
- return 0, err
- }
-
- h, ok := d.offsetToHash[offset]
- var base plumbing.EncodedObject
- if ok {
- base, ok = d.cacheGet(h)
- }
-
- if !ok {
- base, err = d.recallByOffset(offset)
- if err != nil {
- return 0, err
- }
-
- d.cachePut(base)
- }
-
- obj.SetType(base.Type())
- err = ApplyDelta(obj, base, buf.Bytes())
- d.cachePut(obj)
-
- return crc, err
-}
-
-func (d *Decoder) cacheGet(h plumbing.Hash) (plumbing.EncodedObject, bool) {
- if d.deltaBaseCache == nil {
- return nil, false
- }
-
- return d.deltaBaseCache.Get(h)
-}
-
-func (d *Decoder) cachePut(obj plumbing.EncodedObject) {
- if d.deltaBaseCache == nil {
- return
- }
-
- d.deltaBaseCache.Put(obj)
-}
-
-func (d *Decoder) recallByOffset(o int64) (plumbing.EncodedObject, error) {
- if d.s.IsSeekable {
- return d.DecodeObjectAt(o)
- }
-
- hash, ok := d.offsetToHash[o]
- if !ok {
- return nil, plumbing.ErrObjectNotFound
- }
-
- return d.recallByHashNonSeekable(hash)
-}
-
-func (d *Decoder) recallByHash(h plumbing.Hash) (plumbing.EncodedObject, error) {
- if d.s.IsSeekable {
- if offset, err := d.idx.FindOffset(h); err == nil {
- return d.DecodeObjectAt(offset)
- }
- }
-
- return d.recallByHashNonSeekable(h)
-}
-
-// recallByHashNonSeekable if we are in a transaction the objects are read from
-// the transaction, if not are directly read from the ObjectStorer
-func (d *Decoder) recallByHashNonSeekable(h plumbing.Hash) (obj plumbing.EncodedObject, err error) {
- if d.tx != nil {
- obj, err = d.tx.EncodedObject(plumbing.AnyObject, h)
- } else if d.o != nil {
- obj, err = d.o.EncodedObject(plumbing.AnyObject, h)
- } else {
- return nil, plumbing.ErrObjectNotFound
- }
-
- if err != plumbing.ErrObjectNotFound {
- return obj, err
- }
-
- return nil, plumbing.ErrObjectNotFound
-}
-
-// SetIndex sets an index for the packfile. It is recommended to set this.
-// The index might be read from a file or reused from a previous Decoder usage
-// (see Index function).
-func (d *Decoder) SetIndex(idx idxfile.Index) {
- d.hasBuiltIndex = true
- d.idx = idx
-}
-
-// Index returns the index for the packfile. If index was set with SetIndex,
-// Index will return it. Otherwise, it will return an index that is built while
-// decoding. If neither SetIndex was called with a full index or Decode called
-// for the whole packfile, then the returned index will be incomplete.
-func (d *Decoder) Index() idxfile.Index {
- return d.idx
-}
-
-// Close closes the Scanner. usually this mean that the whole reader is read and
-// discarded
-func (d *Decoder) Close() error {
- return d.s.Close()
-}
diff --git a/plumbing/format/packfile/decoder_test.go b/plumbing/format/packfile/decoder_test.go
deleted file mode 100644
index d4f7145..0000000
--- a/plumbing/format/packfile/decoder_test.go
+++ /dev/null
@@ -1,430 +0,0 @@
-package packfile_test
-
-import (
- "io"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/cache"
- "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
- "gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
- "gopkg.in/src-d/go-git.v4/storage/filesystem"
- "gopkg.in/src-d/go-git.v4/storage/memory"
-
- . "gopkg.in/check.v1"
- "gopkg.in/src-d/go-billy.v4/memfs"
- "gopkg.in/src-d/go-git-fixtures.v3"
-)
-
-type ReaderSuite struct {
- fixtures.Suite
-}
-
-var _ = Suite(&ReaderSuite{})
-
-func (s *ReaderSuite) TestNewDecodeNonSeekable(c *C) {
- scanner := packfile.NewScanner(nil)
- d, err := packfile.NewDecoder(scanner, nil)
-
- c.Assert(d, IsNil)
- c.Assert(err, NotNil)
-}
-
-func (s *ReaderSuite) TestDecode(c *C) {
- fixtures.Basic().ByTag("packfile").Test(c, func(f *fixtures.Fixture) {
- scanner := packfile.NewScanner(f.Packfile())
- storage := memory.NewStorage()
-
- d, err := packfile.NewDecoder(scanner, storage)
- c.Assert(err, IsNil)
- defer d.Close()
-
- ch, err := d.Decode()
- c.Assert(err, IsNil)
- c.Assert(ch, Equals, f.PackfileHash)
-
- assertObjects(c, storage, expectedHashes)
- })
-}
-
-func (s *ReaderSuite) TestDecodeByTypeRefDelta(c *C) {
- f := fixtures.Basic().ByTag("ref-delta").One()
-
- storage := memory.NewStorage()
- scanner := packfile.NewScanner(f.Packfile())
- d, err := packfile.NewDecoderForType(scanner, storage, plumbing.CommitObject,
- cache.NewObjectLRUDefault())
- c.Assert(err, IsNil)
-
- // Index required to decode by ref-delta.
- d.SetIndex(getIndexFromIdxFile(f.Idx()))
-
- defer d.Close()
-
- _, count, err := scanner.Header()
- c.Assert(err, IsNil)
-
- var i uint32
- for i = 0; i < count; i++ {
- obj, err := d.DecodeObject()
- c.Assert(err, IsNil)
-
- if obj != nil {
- c.Assert(obj.Type(), Equals, plumbing.CommitObject)
- }
- }
-}
-
-func (s *ReaderSuite) TestDecodeByTypeRefDeltaError(c *C) {
- fixtures.Basic().ByTag("ref-delta").Test(c, func(f *fixtures.Fixture) {
- storage := memory.NewStorage()
- scanner := packfile.NewScanner(f.Packfile())
- d, err := packfile.NewDecoderForType(scanner, storage,
- plumbing.CommitObject, cache.NewObjectLRUDefault())
- c.Assert(err, IsNil)
-
- defer d.Close()
-
- _, count, err := scanner.Header()
- c.Assert(err, IsNil)
-
- isError := false
- var i uint32
- for i = 0; i < count; i++ {
- _, err := d.DecodeObject()
- if err != nil {
- isError = true
- break
- }
- }
- c.Assert(isError, Equals, true)
- })
-
-}
-
-func (s *ReaderSuite) TestDecodeByType(c *C) {
- ts := []plumbing.ObjectType{
- plumbing.CommitObject,
- plumbing.TagObject,
- plumbing.TreeObject,
- plumbing.BlobObject,
- }
-
- fixtures.Basic().ByTag("packfile").Test(c, func(f *fixtures.Fixture) {
- for _, t := range ts {
- storage := memory.NewStorage()
- scanner := packfile.NewScanner(f.Packfile())
- d, err := packfile.NewDecoderForType(scanner, storage, t,
- cache.NewObjectLRUDefault())
- c.Assert(err, IsNil)
-
- // when the packfile is ref-delta based, the offsets are required
- if f.Is("ref-delta") {
- d.SetIndex(getIndexFromIdxFile(f.Idx()))
- }
-
- defer d.Close()
-
- _, count, err := scanner.Header()
- c.Assert(err, IsNil)
-
- var i uint32
- for i = 0; i < count; i++ {
- obj, err := d.DecodeObject()
- c.Assert(err, IsNil)
-
- if obj != nil {
- c.Assert(obj.Type(), Equals, t)
- }
- }
- }
- })
-}
-
-func (s *ReaderSuite) TestDecodeByTypeConstructor(c *C) {
- f := fixtures.Basic().ByTag("packfile").One()
- storage := memory.NewStorage()
- scanner := packfile.NewScanner(f.Packfile())
-
- _, err := packfile.NewDecoderForType(scanner, storage,
- plumbing.OFSDeltaObject, cache.NewObjectLRUDefault())
- c.Assert(err, Equals, plumbing.ErrInvalidType)
-
- _, err = packfile.NewDecoderForType(scanner, storage,
- plumbing.REFDeltaObject, cache.NewObjectLRUDefault())
-
- c.Assert(err, Equals, plumbing.ErrInvalidType)
-
- _, err = packfile.NewDecoderForType(scanner, storage, plumbing.InvalidObject,
- cache.NewObjectLRUDefault())
- c.Assert(err, Equals, plumbing.ErrInvalidType)
-}
-
-func (s *ReaderSuite) TestDecodeMultipleTimes(c *C) {
- f := fixtures.Basic().ByTag("packfile").One()
- scanner := packfile.NewScanner(f.Packfile())
- storage := memory.NewStorage()
-
- d, err := packfile.NewDecoder(scanner, storage)
- c.Assert(err, IsNil)
- defer d.Close()
-
- ch, err := d.Decode()
- c.Assert(err, IsNil)
- c.Assert(ch, Equals, f.PackfileHash)
-
- ch, err = d.Decode()
- c.Assert(err, Equals, packfile.ErrAlreadyDecoded)
- c.Assert(ch, Equals, plumbing.ZeroHash)
-}
-
-func (s *ReaderSuite) TestDecodeInMemory(c *C) {
- fixtures.Basic().ByTag("packfile").Test(c, func(f *fixtures.Fixture) {
- scanner := packfile.NewScanner(f.Packfile())
- d, err := packfile.NewDecoder(scanner, memory.NewStorage())
- c.Assert(err, IsNil)
-
- ch, err := d.Decode()
- c.Assert(err, IsNil)
- c.Assert(ch, Equals, f.PackfileHash)
- })
-}
-
-type nonSeekableReader struct {
- r io.Reader
-}
-
-func (nsr nonSeekableReader) Read(b []byte) (int, error) {
- return nsr.r.Read(b)
-}
-
-func (s *ReaderSuite) TestDecodeNoSeekableWithTxStorer(c *C) {
- fixtures.Basic().ByTag("packfile").Test(c, func(f *fixtures.Fixture) {
- reader := nonSeekableReader{
- r: f.Packfile(),
- }
-
- scanner := packfile.NewScanner(reader)
-
- var storage storer.EncodedObjectStorer = memory.NewStorage()
- _, isTxStorer := storage.(storer.Transactioner)
- c.Assert(isTxStorer, Equals, true)
-
- d, err := packfile.NewDecoder(scanner, storage)
- c.Assert(err, IsNil)
- defer d.Close()
-
- ch, err := d.Decode()
- c.Assert(err, IsNil)
- c.Assert(ch, Equals, f.PackfileHash)
-
- assertObjects(c, storage, expectedHashes)
- })
-}
-
-func (s *ReaderSuite) TestDecodeNoSeekableWithoutTxStorer(c *C) {
- fixtures.Basic().ByTag("packfile").Test(c, func(f *fixtures.Fixture) {
- reader := nonSeekableReader{
- r: f.Packfile(),
- }
-
- scanner := packfile.NewScanner(reader)
-
- var storage storer.EncodedObjectStorer
- storage, _ = filesystem.NewStorage(memfs.New())
- _, isTxStorer := storage.(storer.Transactioner)
- c.Assert(isTxStorer, Equals, false)
-
- d, err := packfile.NewDecoder(scanner, storage)
- c.Assert(err, IsNil)
- defer d.Close()
-
- ch, err := d.Decode()
- c.Assert(err, IsNil)
- c.Assert(ch, Equals, f.PackfileHash)
-
- assertObjects(c, storage, expectedHashes)
- })
-}
-
-var expectedHashes = []string{
- "918c48b83bd081e863dbe1b80f8998f058cd8294",
- "af2d6a6954d532f8ffb47615169c8fdf9d383a1a",
- "1669dce138d9b841a518c64b10914d88f5e488ea",
- "a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69",
- "b8e471f58bcbca63b07bda20e428190409c2db47",
- "35e85108805c84807bc66a02d91535e1e24b38b9",
- "b029517f6300c2da0f4b651b8642506cd6aaf45d",
- "32858aad3c383ed1ff0a0f9bdf231d54a00c9e88",
- "d3ff53e0564a9f87d8e84b6e28e5060e517008aa",
- "c192bd6a24ea1ab01d78686e417c8bdc7c3d197f",
- "d5c0f4ab811897cadf03aec358ae60d21f91c50d",
- "49c6bb89b17060d7b4deacb7b338fcc6ea2352a9",
- "cf4aa3b38974fb7d81f367c0830f7d78d65ab86b",
- "9dea2395f5403188298c1dabe8bdafe562c491e3",
- "586af567d0bb5e771e49bdd9434f5e0fb76d25fa",
- "9a48f23120e880dfbe41f7c9b7b708e9ee62a492",
- "5a877e6a906a2743ad6e45d99c1793642aaf8eda",
- "c8f1d8c61f9da76f4cb49fd86322b6e685dba956",
- "a8d315b2b1c615d43042c3a62402b8a54288cf5c",
- "a39771a7651f97faf5c72e08224d857fc35133db",
- "880cd14280f4b9b6ed3986d6671f907d7cc2a198",
- "fb72698cab7617ac416264415f13224dfd7a165e",
- "4d081c50e250fa32ea8b1313cf8bb7c2ad7627fd",
- "eba74343e2f15d62adedfd8c883ee0262b5c8021",
- "c2d30fa8ef288618f65f6eed6e168e0d514886f4",
- "8dcef98b1d52143e1e2dbc458ffe38f925786bf2",
- "aa9b383c260e1d05fbbf6b30a02914555e20c725",
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
- "dbd3641b371024f44d0e469a9c8f5457b0660de1",
- "e8d3ffab552895c19b9fcf7aa264d277cde33881",
- "7e59600739c96546163833214c36459e324bad0a",
-}
-
-func (s *ReaderSuite) TestDecodeCRCs(c *C) {
- f := fixtures.Basic().ByTag("ofs-delta").One()
-
- scanner := packfile.NewScanner(f.Packfile())
- storage := memory.NewStorage()
-
- d, err := packfile.NewDecoder(scanner, storage)
- c.Assert(err, IsNil)
- _, err = d.Decode()
- c.Assert(err, IsNil)
-
- var sum uint64
- iter, err := d.Index().Entries()
- c.Assert(err, IsNil)
-
- for {
- e, err := iter.Next()
- if err == io.EOF {
- break
- }
-
- c.Assert(err, IsNil)
- sum += uint64(e.CRC32)
- }
-
- c.Assert(int(sum), Equals, 78022211966)
-}
-
-func (s *ReaderSuite) TestDecodeObjectAt(c *C) {
- f := fixtures.Basic().One()
- scanner := packfile.NewScanner(f.Packfile())
- d, err := packfile.NewDecoder(scanner, nil)
- c.Assert(err, IsNil)
-
- // when the packfile is ref-delta based, the offsets are required
- if f.Is("ref-delta") {
- d.SetIndex(getIndexFromIdxFile(f.Idx()))
- }
-
- // the objects at reference 186, is a delta, so should be recall,
- // without being read before.
- obj, err := d.DecodeObjectAt(186)
- c.Assert(err, IsNil)
- c.Assert(obj.Hash().String(), Equals, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
-}
-
-func (s *ReaderSuite) TestDecodeObjectAtForType(c *C) {
- f := fixtures.Basic().One()
- scanner := packfile.NewScanner(f.Packfile())
- d, err := packfile.NewDecoderForType(scanner, nil, plumbing.TreeObject,
- cache.NewObjectLRUDefault())
- c.Assert(err, IsNil)
-
- // when the packfile is ref-delta based, the offsets are required
- if f.Is("ref-delta") {
- d.SetIndex(getIndexFromIdxFile(f.Idx()))
- }
-
- // the objects at reference 186, is a delta, so should be recall,
- // without being read before.
- obj, err := d.DecodeObjectAt(186)
- c.Assert(err, IsNil)
- c.Assert(obj.Type(), Equals, plumbing.CommitObject)
- c.Assert(obj.Hash().String(), Equals, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
-}
-
-func (s *ReaderSuite) TestIndex(c *C) {
- f := fixtures.Basic().One()
- scanner := packfile.NewScanner(f.Packfile())
- d, err := packfile.NewDecoder(scanner, nil)
- c.Assert(err, IsNil)
-
- c.Assert(indexEntries(c, d), Equals, 0)
-
- _, err = d.Decode()
- c.Assert(err, IsNil)
-
- c.Assert(indexEntries(c, d), Equals, 31)
-}
-
-func indexEntries(c *C, d *packfile.Decoder) int {
- var count int
- entries, err := d.Index().Entries()
- c.Assert(err, IsNil)
-
- for {
- _, err := entries.Next()
- if err == io.EOF {
- break
- }
-
- c.Assert(err, IsNil)
- count++
- }
-
- return count
-}
-
-func (s *ReaderSuite) TestSetIndex(c *C) {
- f := fixtures.Basic().One()
- scanner := packfile.NewScanner(f.Packfile())
- d, err := packfile.NewDecoder(scanner, nil)
- c.Assert(err, IsNil)
-
- w := new(idxfile.Writer)
- h := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
- w.Add(h, uint64(42), 0)
- w.OnFooter(plumbing.ZeroHash)
-
- var idx idxfile.Index
- idx, err = w.Index()
- c.Assert(err, IsNil)
- d.SetIndex(idx)
-
- idx = d.Index()
- c.Assert(indexEntries(c, d), Equals, 1)
-
- offset, err := idx.FindOffset(h)
- c.Assert(err, IsNil)
- c.Assert(offset, Equals, int64(42))
-}
-
-func assertObjects(c *C, s storer.EncodedObjectStorer, expects []string) {
- i, err := s.IterEncodedObjects(plumbing.AnyObject)
- c.Assert(err, IsNil)
-
- var count int
- err = i.ForEach(func(plumbing.EncodedObject) error { count++; return nil })
- c.Assert(err, IsNil)
- c.Assert(count, Equals, len(expects))
-
- for _, exp := range expects {
- obt, err := s.EncodedObject(plumbing.AnyObject, plumbing.NewHash(exp))
- c.Assert(err, IsNil)
- c.Assert(obt.Hash().String(), Equals, exp)
- }
-}
-
-func getIndexFromIdxFile(r io.Reader) idxfile.Index {
- idxf := idxfile.NewMemoryIndex()
- d := idxfile.NewDecoder(r)
- if err := d.Decode(idxf); err != nil {
- panic(err)
- }
-
- return idxf
-}
diff --git a/plumbing/format/packfile/encoder_advanced_test.go b/plumbing/format/packfile/encoder_advanced_test.go
index 8cc7180..6ffebc2 100644
--- a/plumbing/format/packfile/encoder_advanced_test.go
+++ b/plumbing/format/packfile/encoder_advanced_test.go
@@ -2,14 +2,16 @@ package packfile_test
import (
"bytes"
+ "io"
"math/rand"
"testing"
+ "gopkg.in/src-d/go-billy.v3/memfs"
"gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
. "gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
"gopkg.in/src-d/go-git.v4/storage/filesystem"
- "gopkg.in/src-d/go-git.v4/storage/memory"
. "gopkg.in/check.v1"
"gopkg.in/src-d/go-git-fixtures.v3"
@@ -34,7 +36,6 @@ func (s *EncoderAdvancedSuite) TestEncodeDecode(c *C) {
c.Assert(err, IsNil)
s.testEncodeDecode(c, storage, 10)
})
-
}
func (s *EncoderAdvancedSuite) TestEncodeDecodeNoDeltaCompression(c *C) {
@@ -52,8 +53,11 @@ func (s *EncoderAdvancedSuite) TestEncodeDecodeNoDeltaCompression(c *C) {
})
}
-func (s *EncoderAdvancedSuite) testEncodeDecode(c *C, storage storer.Storer, packWindow uint) {
-
+func (s *EncoderAdvancedSuite) testEncodeDecode(
+ c *C,
+ storage storer.Storer,
+ packWindow uint,
+) {
objIter, err := storage.IterEncodedObjects(plumbing.AnyObject)
c.Assert(err, IsNil)
@@ -80,16 +84,31 @@ func (s *EncoderAdvancedSuite) testEncodeDecode(c *C, storage storer.Storer, pac
encodeHash, err := enc.Encode(hashes, packWindow)
c.Assert(err, IsNil)
- scanner := NewScanner(buf)
- storage = memory.NewStorage()
- d, err := NewDecoder(scanner, storage)
+ f, err := memfs.New().Create("packfile")
+ c.Assert(err, IsNil)
+
+ _, err = f.Write(buf.Bytes())
+ c.Assert(err, IsNil)
+
+ _, err = f.Seek(0, io.SeekStart)
c.Assert(err, IsNil)
- decodeHash, err := d.Decode()
+
+ w := new(idxfile.Writer)
+ _, err = NewParser(NewScanner(f), w).Parse()
+ c.Assert(err, IsNil)
+ index, err := w.Index()
+ c.Assert(err, IsNil)
+
+ _, err = f.Seek(0, io.SeekStart)
c.Assert(err, IsNil)
+ p := NewPackfile(index, f)
+
+ decodeHash, err := p.ID()
+ c.Assert(err, IsNil)
c.Assert(encodeHash, Equals, decodeHash)
- objIter, err = storage.IterEncodedObjects(plumbing.AnyObject)
+ objIter, err = p.GetAll()
c.Assert(err, IsNil)
obtainedObjects := map[plumbing.Hash]bool{}
err = objIter.ForEach(func(o plumbing.EncodedObject) error {
diff --git a/plumbing/format/packfile/encoder_test.go b/plumbing/format/packfile/encoder_test.go
index 84d03fb..7b6dde2 100644
--- a/plumbing/format/packfile/encoder_test.go
+++ b/plumbing/format/packfile/encoder_test.go
@@ -2,8 +2,12 @@ package packfile
import (
"bytes"
+ "io"
+ stdioutil "io/ioutil"
+ "gopkg.in/src-d/go-billy.v3/memfs"
"gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
"gopkg.in/src-d/go-git.v4/storage/memory"
. "gopkg.in/check.v1"
@@ -130,24 +134,20 @@ func (s *EncoderSuite) simpleDeltaTest(c *C) {
})
c.Assert(err, IsNil)
- scanner := NewScanner(s.buf)
-
- storage := memory.NewStorage()
- d, err := NewDecoder(scanner, storage)
- c.Assert(err, IsNil)
-
- decHash, err := d.Decode()
+ p, cleanup := packfileFromReader(c, s.buf)
+ defer cleanup()
+ decHash, err := p.ID()
c.Assert(err, IsNil)
c.Assert(encHash, Equals, decHash)
- decSrc, err := storage.EncodedObject(srcObject.Type(), srcObject.Hash())
+ decSrc, err := p.Get(srcObject.Hash())
c.Assert(err, IsNil)
- c.Assert(decSrc, DeepEquals, srcObject)
+ objectsEqual(c, decSrc, srcObject)
- decTarget, err := storage.EncodedObject(targetObject.Type(), targetObject.Hash())
+ decTarget, err := p.Get(targetObject.Hash())
c.Assert(err, IsNil)
- c.Assert(decTarget, DeepEquals, targetObject)
+ objectsEqual(c, decTarget, targetObject)
}
func (s *EncoderSuite) deltaOverDeltaTest(c *C) {
@@ -173,27 +173,24 @@ func (s *EncoderSuite) deltaOverDeltaTest(c *C) {
})
c.Assert(err, IsNil)
- scanner := NewScanner(s.buf)
- storage := memory.NewStorage()
- d, err := NewDecoder(scanner, storage)
- c.Assert(err, IsNil)
-
- decHash, err := d.Decode()
+ p, cleanup := packfileFromReader(c, s.buf)
+ defer cleanup()
+ decHash, err := p.ID()
c.Assert(err, IsNil)
c.Assert(encHash, Equals, decHash)
- decSrc, err := storage.EncodedObject(srcObject.Type(), srcObject.Hash())
+ decSrc, err := p.Get(srcObject.Hash())
c.Assert(err, IsNil)
- c.Assert(decSrc, DeepEquals, srcObject)
+ objectsEqual(c, decSrc, srcObject)
- decTarget, err := storage.EncodedObject(targetObject.Type(), targetObject.Hash())
+ decTarget, err := p.Get(targetObject.Hash())
c.Assert(err, IsNil)
- c.Assert(decTarget, DeepEquals, targetObject)
+ objectsEqual(c, decTarget, targetObject)
- decOtherTarget, err := storage.EncodedObject(otherTargetObject.Type(), otherTargetObject.Hash())
+ decOtherTarget, err := p.Get(otherTargetObject.Hash())
c.Assert(err, IsNil)
- c.Assert(decOtherTarget, DeepEquals, otherTargetObject)
+ objectsEqual(c, decOtherTarget, otherTargetObject)
}
func (s *EncoderSuite) deltaOverDeltaCyclicTest(c *C) {
@@ -248,29 +245,70 @@ func (s *EncoderSuite) deltaOverDeltaCyclicTest(c *C) {
})
c.Assert(err, IsNil)
- scanner := NewScanner(s.buf)
- storage := memory.NewStorage()
- d, err := NewDecoder(scanner, storage)
+ p, cleanup := packfileFromReader(c, s.buf)
+ defer cleanup()
+ decHash, err := p.ID()
c.Assert(err, IsNil)
- decHash, err := d.Decode()
+ c.Assert(encHash, Equals, decHash)
+
+ decSrc, err := p.Get(o1.Hash())
c.Assert(err, IsNil)
+ objectsEqual(c, decSrc, o1)
- c.Assert(encHash, Equals, decHash)
+ decTarget, err := p.Get(o2.Hash())
+ c.Assert(err, IsNil)
+ objectsEqual(c, decTarget, o2)
+
+ decOtherTarget, err := p.Get(o3.Hash())
+ c.Assert(err, IsNil)
+ objectsEqual(c, decOtherTarget, o3)
+
+ decAnotherTarget, err := p.Get(o4.Hash())
+ c.Assert(err, IsNil)
+ objectsEqual(c, decAnotherTarget, o4)
+}
+
+func objectsEqual(c *C, o1, o2 plumbing.EncodedObject) {
+ c.Assert(o1.Type(), Equals, o2.Type())
+ c.Assert(o1.Hash(), Equals, o2.Hash())
+ c.Assert(o1.Size(), Equals, o2.Size())
- decSrc, err := storage.EncodedObject(o1.Type(), o1.Hash())
+ r1, err := o1.Reader()
c.Assert(err, IsNil)
- c.Assert(decSrc, DeepEquals, o1)
- decTarget, err := storage.EncodedObject(o2.Type(), o2.Hash())
+ b1, err := stdioutil.ReadAll(r1)
c.Assert(err, IsNil)
- c.Assert(decTarget, DeepEquals, o2)
- decOtherTarget, err := storage.EncodedObject(o3.Type(), o3.Hash())
+ r2, err := o2.Reader()
c.Assert(err, IsNil)
- c.Assert(decOtherTarget, DeepEquals, o3)
- decAnotherTarget, err := storage.EncodedObject(o4.Type(), o4.Hash())
+ b2, err := stdioutil.ReadAll(r2)
c.Assert(err, IsNil)
- c.Assert(decAnotherTarget, DeepEquals, o4)
+
+ c.Assert(bytes.Compare(b1, b2), Equals, 0)
+}
+
+func packfileFromReader(c *C, buf *bytes.Buffer) (*Packfile, func()) {
+ file, err := memfs.New().Create("packfile")
+ c.Assert(err, IsNil)
+
+ _, err = file.Write(buf.Bytes())
+ c.Assert(err, IsNil)
+
+ _, err = file.Seek(0, io.SeekStart)
+ c.Assert(err, IsNil)
+
+ scanner := NewScanner(file)
+
+ w := new(idxfile.Writer)
+ _, err = NewParser(scanner, w).Parse()
+ c.Assert(err, IsNil)
+
+ index, err := w.Index()
+ c.Assert(err, IsNil)
+
+ return NewPackfile(index, file), func() {
+ c.Assert(file.Close(), IsNil)
+ }
}
diff --git a/plumbing/format/packfile/packfile.go b/plumbing/format/packfile/packfile.go
index 2e831f2..37743ba 100644
--- a/plumbing/format/packfile/packfile.go
+++ b/plumbing/format/packfile/packfile.go
@@ -3,38 +3,55 @@ package packfile
import (
"bytes"
"io"
+ stdioutil "io/ioutil"
"os"
- billy "gopkg.in/src-d/go-billy.v4"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/cache"
"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
)
+var (
+ // ErrInvalidObject is returned by Decode when an invalid object is
+ // found in the packfile.
+ ErrInvalidObject = NewError("invalid git object")
+ // ErrZLib is returned by Decode when there was an error unzipping
+ // the packfile contents.
+ ErrZLib = NewError("zlib reading error")
+)
+
// Packfile allows retrieving information from inside a packfile.
type Packfile struct {
idxfile.Index
- billy.File
+ file io.ReadSeeker
s *Scanner
deltaBaseCache cache.Object
offsetToType map[int64]plumbing.ObjectType
}
-// NewPackfile returns a packfile representation for the given packfile file
-// and packfile idx.
-func NewPackfile(index idxfile.Index, file billy.File) *Packfile {
+// NewPackfileWithCache creates a new Packfile with the given object cache.
+func NewPackfileWithCache(
+ index idxfile.Index,
+ file io.ReadSeeker,
+ cache cache.Object,
+) *Packfile {
s := NewScanner(file)
-
return &Packfile{
index,
file,
s,
- cache.NewObjectLRUDefault(),
+ cache,
make(map[int64]plumbing.ObjectType),
}
}
+// NewPackfile returns a packfile representation for the given packfile file
+// and packfile idx.
+func NewPackfile(index idxfile.Index, file io.ReadSeeker) *Packfile {
+ return NewPackfileWithCache(index, file, cache.NewObjectLRUDefault())
+}
+
// Get retrieves the encoded object in the packfile with the given hash.
func (p *Packfile) Get(h plumbing.Hash) (plumbing.EncodedObject, error) {
offset, err := p.FindOffset(h)
@@ -334,35 +351,49 @@ func (p *Packfile) cachePut(obj plumbing.EncodedObject) {
// The iterator returned is not thread-safe, it should be used in the same
// thread as the Packfile instance.
func (p *Packfile) GetAll() (storer.EncodedObjectIter, error) {
- entries, err := p.Entries()
- if err != nil {
- return nil, err
- }
+ return p.GetByType(plumbing.AnyObject)
+}
- return &objectIter{
- // Easiest way to provide an object decoder is just to pass a Packfile
- // instance. To not mess with the seeks, it's a new instance with a
- // different scanner but the same cache and offset to hash map for
- // reusing as much cache as possible.
- p: &Packfile{
- p.Index,
- p.File,
- NewScanner(p.File),
- p.deltaBaseCache,
- p.offsetToType,
- },
- iter: entries,
- }, nil
+// GetByType returns all the objects of the given type.
+func (p *Packfile) GetByType(typ plumbing.ObjectType) (storer.EncodedObjectIter, error) {
+ switch typ {
+ case plumbing.AnyObject,
+ plumbing.BlobObject,
+ plumbing.TreeObject,
+ plumbing.CommitObject,
+ plumbing.TagObject:
+ entries, err := p.Entries()
+ if err != nil {
+ return nil, err
+ }
+
+ return &objectIter{
+		// Easiest way to provide an object decoder is just to pass this
+		// same Packfile instance, so the delta base cache and the
+		// offset-to-type map are shared and reused as much as possible.
+		// Note the iterator therefore shares this Packfile's seek position.
+ p: p,
+ iter: entries,
+ typ: typ,
+ }, nil
+ default:
+ return nil, plumbing.ErrInvalidType
+ }
}
// ID returns the ID of the packfile, which is the checksum at the end of it.
func (p *Packfile) ID() (plumbing.Hash, error) {
- if _, err := p.File.Seek(-20, io.SeekEnd); err != nil {
+ prev, err := p.file.Seek(-20, io.SeekEnd)
+ if err != nil {
return plumbing.ZeroHash, err
}
var hash plumbing.Hash
- if _, err := io.ReadFull(p.File, hash[:]); err != nil {
+ if _, err := io.ReadFull(p.file, hash[:]); err != nil {
+ return plumbing.ZeroHash, err
+ }
+
+ if _, err := p.file.Seek(prev, io.SeekStart); err != nil {
return plumbing.ZeroHash, err
}
@@ -371,25 +402,59 @@ func (p *Packfile) ID() (plumbing.Hash, error) {
// Close the packfile and its resources.
func (p *Packfile) Close() error {
- return p.File.Close()
+ closer, ok := p.file.(io.Closer)
+ if !ok {
+ return nil
+ }
+
+ return closer.Close()
}
-type objectDecoder interface {
- nextObject() (plumbing.EncodedObject, error)
+// MemoryObjectFromDisk converts a DiskObject to a MemoryObject.
+func MemoryObjectFromDisk(obj plumbing.EncodedObject) (plumbing.EncodedObject, error) {
+ o2 := new(plumbing.MemoryObject)
+ o2.SetType(obj.Type())
+ o2.SetSize(obj.Size())
+
+ r, err := obj.Reader()
+ if err != nil {
+ return nil, err
+ }
+
+ data, err := stdioutil.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+
+ if _, err := o2.Write(data); err != nil {
+ return nil, err
+ }
+
+ return o2, nil
}
type objectIter struct {
p *Packfile
+ typ plumbing.ObjectType
iter idxfile.EntryIter
}
func (i *objectIter) Next() (plumbing.EncodedObject, error) {
- e, err := i.iter.Next()
- if err != nil {
- return nil, err
- }
+ for {
+ e, err := i.iter.Next()
+ if err != nil {
+ return nil, err
+ }
- return i.p.GetByOffset(int64(e.Offset))
+ obj, err := i.p.GetByOffset(int64(e.Offset))
+ if err != nil {
+ return nil, err
+ }
+
+ if i.typ == plumbing.AnyObject || obj.Type() == i.typ {
+ return obj, nil
+ }
+ }
}
func (i *objectIter) ForEach(f func(plumbing.EncodedObject) error) error {
diff --git a/plumbing/format/packfile/packfile_test.go b/plumbing/format/packfile/packfile_test.go
index e234794..3193bed 100644
--- a/plumbing/format/packfile/packfile_test.go
+++ b/plumbing/format/packfile/packfile_test.go
@@ -1,23 +1,21 @@
-package packfile
+package packfile_test
import (
- "bytes"
"io"
"math"
- "io/ioutil"
-
. "gopkg.in/check.v1"
"gopkg.in/src-d/go-billy.v4/osfs"
fixtures "gopkg.in/src-d/go-git-fixtures.v3"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
- "gopkg.in/src-d/go-git.v4/storage/memory"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
+ "gopkg.in/src-d/go-git.v4/plumbing/storer"
)
type PackfileSuite struct {
fixtures.Suite
- p *Packfile
+ p *packfile.Packfile
idx *idxfile.MemoryIndex
f *fixtures.Fixture
}
@@ -108,60 +106,157 @@ var expectedEntries = map[plumbing.Hash]int64{
plumbing.NewHash("fb72698cab7617ac416264415f13224dfd7a165e"): 84671,
}
-func (s *PackfileSuite) TestContent(c *C) {
- storer := memory.NewStorage()
- decoder, err := NewDecoder(NewScanner(s.f.Packfile()), storer)
- c.Assert(err, IsNil)
+func (s *PackfileSuite) SetUpTest(c *C) {
+ s.f = fixtures.Basic().One()
- _, err = decoder.Decode()
+ f, err := osfs.New("").Open(s.f.Packfile().Name())
c.Assert(err, IsNil)
- iter, err := s.p.GetAll()
+ s.idx = idxfile.NewMemoryIndex()
+ c.Assert(idxfile.NewDecoder(s.f.Idx()).Decode(s.idx), IsNil)
+
+ s.p = packfile.NewPackfile(s.idx, f)
+}
+
+func (s *PackfileSuite) TearDownTest(c *C) {
+ c.Assert(s.p.Close(), IsNil)
+}
+
+func (s *PackfileSuite) TestDecode(c *C) {
+ fixtures.Basic().ByTag("packfile").Test(c, func(f *fixtures.Fixture) {
+ index := getIndexFromIdxFile(f.Idx())
+ p := packfile.NewPackfile(index, f.Packfile())
+ defer p.Close()
+
+ for _, h := range expectedHashes {
+ obj, err := p.Get(plumbing.NewHash(h))
+ c.Assert(err, IsNil)
+ c.Assert(obj.Hash().String(), Equals, h)
+ }
+ })
+}
+
+func (s *PackfileSuite) TestDecodeByTypeRefDelta(c *C) {
+ f := fixtures.Basic().ByTag("ref-delta").One()
+
+ index := getIndexFromIdxFile(f.Idx())
+ packfile := packfile.NewPackfile(index, f.Packfile())
+ defer packfile.Close()
+
+ iter, err := packfile.GetByType(plumbing.CommitObject)
c.Assert(err, IsNil)
+ var count int
for {
- o, err := iter.Next()
+ obj, err := iter.Next()
if err == io.EOF {
break
}
+ count++
c.Assert(err, IsNil)
+ c.Assert(obj.Type(), Equals, plumbing.CommitObject)
+ }
- o2, err := storer.EncodedObject(plumbing.AnyObject, o.Hash())
- c.Assert(err, IsNil)
+ c.Assert(count > 0, Equals, true)
+}
- c.Assert(o.Type(), Equals, o2.Type())
- c.Assert(o.Size(), Equals, o2.Size())
+func (s *PackfileSuite) TestDecodeByType(c *C) {
+ ts := []plumbing.ObjectType{
+ plumbing.CommitObject,
+ plumbing.TagObject,
+ plumbing.TreeObject,
+ plumbing.BlobObject,
+ }
- r, err := o.Reader()
- c.Assert(err, IsNil)
+ fixtures.Basic().ByTag("packfile").Test(c, func(f *fixtures.Fixture) {
+ for _, t := range ts {
+ index := getIndexFromIdxFile(f.Idx())
+ packfile := packfile.NewPackfile(index, f.Packfile())
+ defer packfile.Close()
- c1, err := ioutil.ReadAll(r)
- c.Assert(err, IsNil)
- c.Assert(r.Close(), IsNil)
+ iter, err := packfile.GetByType(t)
+ c.Assert(err, IsNil)
- r, err = o2.Reader()
- c.Assert(err, IsNil)
+ c.Assert(iter.ForEach(func(obj plumbing.EncodedObject) error {
+ c.Assert(obj.Type(), Equals, t)
+ return nil
+ }), IsNil)
+ }
+ })
+}
- c2, err := ioutil.ReadAll(r)
- c.Assert(err, IsNil)
- c.Assert(r.Close(), IsNil)
+func (s *PackfileSuite) TestDecodeByTypeConstructor(c *C) {
+ f := fixtures.Basic().ByTag("packfile").One()
+ index := getIndexFromIdxFile(f.Idx())
+ packfile := packfile.NewPackfile(index, f.Packfile())
+ defer packfile.Close()
- c.Assert(bytes.Compare(c1, c2), Equals, 0)
- }
+ _, err := packfile.GetByType(plumbing.OFSDeltaObject)
+ c.Assert(err, Equals, plumbing.ErrInvalidType)
+
+ _, err = packfile.GetByType(plumbing.REFDeltaObject)
+ c.Assert(err, Equals, plumbing.ErrInvalidType)
+
+ _, err = packfile.GetByType(plumbing.InvalidObject)
+ c.Assert(err, Equals, plumbing.ErrInvalidType)
}
-func (s *PackfileSuite) SetUpTest(c *C) {
- s.f = fixtures.Basic().One()
+var expectedHashes = []string{
+ "918c48b83bd081e863dbe1b80f8998f058cd8294",
+ "af2d6a6954d532f8ffb47615169c8fdf9d383a1a",
+ "1669dce138d9b841a518c64b10914d88f5e488ea",
+ "a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69",
+ "b8e471f58bcbca63b07bda20e428190409c2db47",
+ "35e85108805c84807bc66a02d91535e1e24b38b9",
+ "b029517f6300c2da0f4b651b8642506cd6aaf45d",
+ "32858aad3c383ed1ff0a0f9bdf231d54a00c9e88",
+ "d3ff53e0564a9f87d8e84b6e28e5060e517008aa",
+ "c192bd6a24ea1ab01d78686e417c8bdc7c3d197f",
+ "d5c0f4ab811897cadf03aec358ae60d21f91c50d",
+ "49c6bb89b17060d7b4deacb7b338fcc6ea2352a9",
+ "cf4aa3b38974fb7d81f367c0830f7d78d65ab86b",
+ "9dea2395f5403188298c1dabe8bdafe562c491e3",
+ "586af567d0bb5e771e49bdd9434f5e0fb76d25fa",
+ "9a48f23120e880dfbe41f7c9b7b708e9ee62a492",
+ "5a877e6a906a2743ad6e45d99c1793642aaf8eda",
+ "c8f1d8c61f9da76f4cb49fd86322b6e685dba956",
+ "a8d315b2b1c615d43042c3a62402b8a54288cf5c",
+ "a39771a7651f97faf5c72e08224d857fc35133db",
+ "880cd14280f4b9b6ed3986d6671f907d7cc2a198",
+ "fb72698cab7617ac416264415f13224dfd7a165e",
+ "4d081c50e250fa32ea8b1313cf8bb7c2ad7627fd",
+ "eba74343e2f15d62adedfd8c883ee0262b5c8021",
+ "c2d30fa8ef288618f65f6eed6e168e0d514886f4",
+ "8dcef98b1d52143e1e2dbc458ffe38f925786bf2",
+ "aa9b383c260e1d05fbbf6b30a02914555e20c725",
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
+ "dbd3641b371024f44d0e469a9c8f5457b0660de1",
+ "e8d3ffab552895c19b9fcf7aa264d277cde33881",
+ "7e59600739c96546163833214c36459e324bad0a",
+}
- f, err := osfs.New("").Open(s.f.Packfile().Name())
+func assertObjects(c *C, s storer.EncodedObjectStorer, expects []string) {
+ i, err := s.IterEncodedObjects(plumbing.AnyObject)
c.Assert(err, IsNil)
- s.idx = idxfile.NewMemoryIndex()
- c.Assert(idxfile.NewDecoder(s.f.Idx()).Decode(s.idx), IsNil)
+ var count int
+ err = i.ForEach(func(plumbing.EncodedObject) error { count++; return nil })
+ c.Assert(err, IsNil)
+ c.Assert(count, Equals, len(expects))
- s.p = NewPackfile(s.idx, f)
+ for _, exp := range expects {
+ obt, err := s.EncodedObject(plumbing.AnyObject, plumbing.NewHash(exp))
+ c.Assert(err, IsNil)
+ c.Assert(obt.Hash().String(), Equals, exp)
+ }
}
-func (s *PackfileSuite) TearDownTest(c *C) {
- c.Assert(s.p.Close(), IsNil)
+func getIndexFromIdxFile(r io.Reader) idxfile.Index {
+ idxf := idxfile.NewMemoryIndex()
+ d := idxfile.NewDecoder(r)
+ if err := d.Decode(idxf); err != nil {
+ panic(err)
+ }
+
+ return idxf
}
diff --git a/plumbing/format/packfile/parser.go b/plumbing/format/packfile/parser.go
index 696f5ba..f0a7674 100644
--- a/plumbing/format/packfile/parser.go
+++ b/plumbing/format/packfile/parser.go
@@ -9,6 +9,16 @@ import (
"gopkg.in/src-d/go-git.v4/plumbing/cache"
)
+var (
+ // ErrObjectContentAlreadyRead is returned when the content of the object
+ // was already read, since the content can only be read once.
+ ErrObjectContentAlreadyRead = errors.New("object content was already read")
+
+ // ErrReferenceDeltaNotFound is returned when the reference delta is not
+ // found.
+ ErrReferenceDeltaNotFound = errors.New("reference delta not found")
+)
+
// Observer interface is implemented by index encoders.
type Observer interface {
// OnHeader is called when a new packfile is opened.
@@ -16,7 +26,7 @@ type Observer interface {
// OnInflatedObjectHeader is called for each object header read.
OnInflatedObjectHeader(t plumbing.ObjectType, objSize int64, pos int64) error
// OnInflatedObjectContent is called for each decoded object.
- OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32) error
+ OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32, content []byte) error
// OnFooter is called when decoding is done.
OnFooter(h plumbing.Hash) error
}
@@ -32,41 +42,44 @@ type Parser struct {
hashOffset map[plumbing.Hash]int64
checksum plumbing.Hash
- cache *cache.ObjectLRU
+ cache *cache.ObjectLRU
+ contentCache map[int64][]byte
ob []Observer
}
// NewParser creates a new Parser struct.
func NewParser(scanner *Scanner, ob ...Observer) *Parser {
+ var contentCache map[int64][]byte
+ if !scanner.IsSeekable {
+ contentCache = make(map[int64][]byte)
+ }
+
return &Parser{
- scanner: scanner,
- ob: ob,
- count: 0,
- cache: cache.NewObjectLRUDefault(),
+ scanner: scanner,
+ ob: ob,
+ count: 0,
+ cache: cache.NewObjectLRUDefault(),
+ contentCache: contentCache,
}
}
// Parse start decoding phase of the packfile.
func (p *Parser) Parse() (plumbing.Hash, error) {
- err := p.init()
- if err != nil {
+ if err := p.init(); err != nil {
return plumbing.ZeroHash, err
}
- err = p.firstPass()
- if err != nil {
+ if err := p.firstPass(); err != nil {
return plumbing.ZeroHash, err
}
- err = p.resolveDeltas()
- if err != nil {
+ if err := p.resolveDeltas(); err != nil {
return plumbing.ZeroHash, err
}
for _, o := range p.ob {
- err := o.OnFooter(p.checksum)
- if err != nil {
+ if err := o.OnFooter(p.checksum); err != nil {
return plumbing.ZeroHash, err
}
}
@@ -81,8 +94,7 @@ func (p *Parser) init() error {
}
for _, o := range p.ob {
- err := o.OnHeader(c)
- if err != nil {
+ if err := o.OnHeader(c); err != nil {
return err
}
}
@@ -99,7 +111,7 @@ func (p *Parser) firstPass() error {
buf := new(bytes.Buffer)
for i := uint32(0); i < p.count; i++ {
- buf.Truncate(0)
+ buf.Reset()
oh, err := p.scanner.NextObjectHeader()
if err != nil {
@@ -122,8 +134,7 @@ func (p *Parser) firstPass() error {
}
if !ok {
- // TODO improve error
- return errors.New("Reference delta not found")
+ return ErrReferenceDeltaNotFound
}
ota = newDeltaObject(oh.Offset, oh.Length, t, parent)
@@ -143,35 +154,41 @@ func (p *Parser) firstPass() error {
ota.Length = oh.Length
if !delta {
- ota.Write(buf.Bytes())
+ if _, err := ota.Write(buf.Bytes()); err != nil {
+ return err
+ }
ota.SHA1 = ota.Sum()
+ p.oiByHash[ota.SHA1] = ota
}
p.oiByOffset[oh.Offset] = ota
- p.oiByHash[oh.Reference] = ota
p.oi[i] = ota
}
- checksum, err := p.scanner.Checksum()
- p.checksum = checksum
-
- if err == io.EOF {
- return nil
+ var err error
+ p.checksum, err = p.scanner.Checksum()
+ if err != nil && err != io.EOF {
+ return err
}
- return err
+ return nil
}
func (p *Parser) resolveDeltas() error {
for _, obj := range p.oi {
+ content, err := obj.Content()
+ if err != nil {
+ return err
+ }
+
for _, o := range p.ob {
err := o.OnInflatedObjectHeader(obj.Type, obj.Length, obj.Offset)
if err != nil {
return err
}
- err = o.OnInflatedObjectContent(obj.SHA1, obj.Offset, obj.Crc32)
+ err = o.OnInflatedObjectContent(obj.SHA1, obj.Offset, obj.Crc32, content)
if err != nil {
return err
}
@@ -185,8 +202,7 @@ func (p *Parser) resolveDeltas() error {
}
for _, child := range obj.Children {
- _, err = p.resolveObject(child, base)
- if err != nil {
+ if _, err := p.resolveObject(child, base); err != nil {
return err
}
}
@@ -205,8 +221,7 @@ func (p *Parser) get(o *objectInfo) ([]byte, error) {
}
buf := make([]byte, e.Size())
- _, err = r.Read(buf)
- if err != nil {
+ if _, err = r.Read(buf); err != nil {
return nil, err
}
@@ -254,8 +269,8 @@ func (p *Parser) get(o *objectInfo) ([]byte, error) {
func (p *Parser) resolveObject(
o *objectInfo,
- base []byte) ([]byte, error) {
-
+ base []byte,
+) ([]byte, error) {
if !o.DiskType.IsDelta() {
return nil, nil
}
@@ -278,16 +293,17 @@ func (p *Parser) readData(o *objectInfo) ([]byte, error) {
// TODO: skip header. Header size can be calculated with the offset of the
// next offset in the first pass.
- p.scanner.SeekFromStart(o.Offset)
- _, err := p.scanner.NextObjectHeader()
- if err != nil {
+ if _, err := p.scanner.SeekFromStart(o.Offset); err != nil {
return nil, err
}
- buf.Truncate(0)
+ if _, err := p.scanner.NextObjectHeader(); err != nil {
+ return nil, err
+ }
- _, _, err = p.scanner.NextObject(buf)
- if err != nil {
+ buf.Reset()
+
+ if _, _, err := p.scanner.NextObject(buf); err != nil {
return nil, err
}
@@ -301,9 +317,11 @@ func applyPatchBase(ota *objectInfo, data, base []byte) ([]byte, error) {
}
ota.Type = ota.Parent.Type
- hash := plumbing.ComputeHash(ota.Type, patched)
-
- ota.SHA1 = hash
+ ota.Hasher = plumbing.NewHasher(ota.Type, int64(len(patched)))
+ if _, err := ota.Write(patched); err != nil {
+ return nil, err
+ }
+ ota.SHA1 = ota.Sum()
return patched, nil
}
@@ -323,6 +341,8 @@ type objectInfo struct {
Parent *objectInfo
Children []*objectInfo
SHA1 plumbing.Hash
+
+ content *bytes.Buffer
}
func newBaseObject(offset, length int64, t plumbing.ObjectType) *objectInfo {
@@ -351,6 +371,30 @@ func newDeltaObject(
return obj
}
+func (o *objectInfo) Write(bs []byte) (int, error) {
+ n, err := o.Hasher.Write(bs)
+ if err != nil {
+ return 0, err
+ }
+
+ o.content = bytes.NewBuffer(nil)
+
+ _, _ = o.content.Write(bs)
+ return n, nil
+}
+
+// Content returns the content of the object. This operation can only be done
+// once.
+func (o *objectInfo) Content() ([]byte, error) {
+ if o.content == nil {
+ return nil, ErrObjectContentAlreadyRead
+ }
+
+ r := o.content
+ o.content = nil
+ return r.Bytes(), nil
+}
+
func (o *objectInfo) IsDelta() bool {
return o.Type.IsDelta()
}
diff --git a/plumbing/format/packfile/parser_test.go b/plumbing/format/packfile/parser_test.go
index 87a8804..b18f20f 100644
--- a/plumbing/format/packfile/parser_test.go
+++ b/plumbing/format/packfile/parser_test.go
@@ -103,7 +103,7 @@ func (t *testObserver) OnInflatedObjectHeader(otype plumbing.ObjectType, objSize
return nil
}
-func (t *testObserver) OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32) error {
+func (t *testObserver) OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32, _ []byte) error {
o := t.get(pos)
o.hash = h.String()
o.crc = crc
diff --git a/plumbing/object/blob_test.go b/plumbing/object/blob_test.go
index 5ed9de0..181436d 100644
--- a/plumbing/object/blob_test.go
+++ b/plumbing/object/blob_test.go
@@ -1,6 +1,7 @@
package object
import (
+ "bytes"
"io"
"io/ioutil"
@@ -88,8 +89,26 @@ func (s *BlobsSuite) TestBlobIter(c *C) {
}
c.Assert(err, IsNil)
- c.Assert(b, DeepEquals, blobs[i])
- i += 1
+ c.Assert(b.ID(), Equals, blobs[i].ID())
+ c.Assert(b.Size, Equals, blobs[i].Size)
+ c.Assert(b.Type(), Equals, blobs[i].Type())
+
+ r1, err := b.Reader()
+ c.Assert(err, IsNil)
+
+ b1, err := ioutil.ReadAll(r1)
+ c.Assert(err, IsNil)
+ c.Assert(r1.Close(), IsNil)
+
+ r2, err := blobs[i].Reader()
+ c.Assert(err, IsNil)
+
+ b2, err := ioutil.ReadAll(r2)
+ c.Assert(err, IsNil)
+ c.Assert(r2.Close(), IsNil)
+
+ c.Assert(bytes.Compare(b1, b2), Equals, 0)
+ i++
}
iter.Close()
diff --git a/plumbing/object/difftree_test.go b/plumbing/object/difftree_test.go
index 40af8f2..ff9ecbc 100644
--- a/plumbing/object/difftree_test.go
+++ b/plumbing/object/difftree_test.go
@@ -45,25 +45,17 @@ func (s *DiffTreeSuite) storageFromPackfile(f *fixtures.Fixture) storer.EncodedO
return sto
}
- sto = memory.NewStorage()
+ storer := memory.NewStorage()
pf := f.Packfile()
-
defer pf.Close()
- n := packfile.NewScanner(pf)
- d, err := packfile.NewDecoder(n, sto)
- if err != nil {
- panic(err)
- }
-
- _, err = d.Decode()
- if err != nil {
+ if err := packfile.UpdateObjectStorage(storer, pf); err != nil {
panic(err)
}
- s.cache[f.URL] = sto
- return sto
+ s.cache[f.URL] = storer
+ return storer
}
var _ = Suite(&DiffTreeSuite{})
diff --git a/plumbing/object/object_test.go b/plumbing/object/object_test.go
index 4f0fcb3..68aa1a1 100644
--- a/plumbing/object/object_test.go
+++ b/plumbing/object/object_test.go
@@ -197,8 +197,9 @@ func (s *ObjectsSuite) TestObjectIter(c *C) {
}
c.Assert(err, IsNil)
- c.Assert(o, DeepEquals, objects[i])
- i += 1
+ c.Assert(o.ID(), Equals, objects[i].ID())
+ c.Assert(o.Type(), Equals, objects[i].Type())
+ i++
}
iter.Close()
diff --git a/plumbing/transport/test/receive_pack.go b/plumbing/transport/test/receive_pack.go
index 57f602d..5aea1c0 100644
--- a/plumbing/transport/test/receive_pack.go
+++ b/plumbing/transport/test/receive_pack.go
@@ -262,13 +262,16 @@ func (s *ReceivePackSuite) receivePackNoCheck(c *C, ep *transport.Endpoint,
req.Packfile = s.emptyPackfile()
}
- return r.ReceivePack(context.Background(), req)
+ if s, err := r.ReceivePack(context.Background(), req); err != nil {
+ return s, err
+ } else {
+ return s, err
+ }
}
func (s *ReceivePackSuite) receivePack(c *C, ep *transport.Endpoint,
req *packp.ReferenceUpdateRequest, fixture *fixtures.Fixture,
callAdvertisedReferences bool) {
-
url := ""
if fixture != nil {
url = fixture.URL
@@ -279,7 +282,6 @@ func (s *ReceivePackSuite) receivePack(c *C, ep *transport.Endpoint,
ep.String(), url, callAdvertisedReferences,
)
report, err := s.receivePackNoCheck(c, ep, req, fixture, callAdvertisedReferences)
-
c.Assert(err, IsNil, comment)
if req.Capabilities.Supports(capability.ReportStatus) {
c.Assert(report, NotNil, comment)
diff --git a/plumbing/transport/test/upload_pack.go b/plumbing/transport/test/upload_pack.go
index 70e4e56..8709ac2 100644
--- a/plumbing/transport/test/upload_pack.go
+++ b/plumbing/transport/test/upload_pack.go
@@ -258,11 +258,8 @@ func (s *UploadPackSuite) checkObjectNumber(c *C, r io.Reader, n int) {
b, err := ioutil.ReadAll(r)
c.Assert(err, IsNil)
buf := bytes.NewBuffer(b)
- scanner := packfile.NewScanner(buf)
storage := memory.NewStorage()
- d, err := packfile.NewDecoder(scanner, storage)
- c.Assert(err, IsNil)
- _, err = d.Decode()
+ err = packfile.UpdateObjectStorage(storage, buf)
c.Assert(err, IsNil)
c.Assert(len(storage.Objects), Equals, n)
}
diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go
index b73b309..2032eac 100644
--- a/storage/filesystem/object.go
+++ b/storage/filesystem/object.go
@@ -12,7 +12,6 @@ import (
"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
"gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
- "gopkg.in/src-d/go-git.v4/storage/memory"
"gopkg.in/src-d/go-git.v4/utils/ioutil"
"gopkg.in/src-d/go-billy.v4"
@@ -282,29 +281,34 @@ func (s *ObjectStorage) getFromPackfile(h plumbing.Hash, canBeDelta bool) (
func (s *ObjectStorage) decodeObjectAt(
f billy.File,
idx idxfile.Index,
- offset int64) (plumbing.EncodedObject, error) {
- if _, err := f.Seek(0, io.SeekStart); err != nil {
- return nil, err
+ offset int64,
+) (plumbing.EncodedObject, error) {
+ hash, err := idx.FindHash(offset)
+ if err == nil {
+ obj, ok := s.deltaBaseCache.Get(hash)
+ if ok {
+ return obj, nil
+ }
}
- p := packfile.NewScanner(f)
+ if err != nil && err != plumbing.ErrObjectNotFound {
+ return nil, err
+ }
- d, err := packfile.NewDecoderWithCache(p, memory.NewStorage(),
- s.deltaBaseCache)
+ obj, err := packfile.NewPackfile(idx, f).GetByOffset(offset)
if err != nil {
return nil, err
}
- d.SetIndex(idx)
- obj, err := d.DecodeObjectAt(offset)
- return obj, err
+ return packfile.MemoryObjectFromDisk(obj)
}
func (s *ObjectStorage) decodeDeltaObjectAt(
f billy.File,
idx idxfile.Index,
offset int64,
- hash plumbing.Hash) (plumbing.EncodedObject, error) {
+ hash plumbing.Hash,
+) (plumbing.EncodedObject, error) {
if _, err := f.Seek(0, io.SeekStart); err != nil {
return nil, err
}
@@ -453,22 +457,23 @@ func (it *lazyPackfilesIter) Close() {
}
type packfileIter struct {
- f billy.File
- d *packfile.Decoder
- t plumbing.ObjectType
-
- seen map[plumbing.Hash]struct{}
- position uint32
- total uint32
+ iter storer.EncodedObjectIter
+ seen map[plumbing.Hash]struct{}
}
// NewPackfileIter returns a new EncodedObjectIter for the provided packfile
// and object type.
func NewPackfileIter(
f billy.File,
+ idxFile billy.File,
t plumbing.ObjectType,
) (storer.EncodedObjectIter, error) {
- return newPackfileIter(f, t, make(map[plumbing.Hash]struct{}), nil, nil)
+ idx := idxfile.NewMemoryIndex()
+ if err := idxfile.NewDecoder(idxFile).Decode(idx); err != nil {
+ return nil, err
+ }
+
+ return newPackfileIter(f, t, make(map[plumbing.Hash]struct{}), idx, nil)
}
func newPackfileIter(
@@ -478,47 +483,26 @@ func newPackfileIter(
index idxfile.Index,
cache cache.Object,
) (storer.EncodedObjectIter, error) {
- s := packfile.NewScanner(f)
- _, total, err := s.Header()
+ iter, err := packfile.NewPackfile(index, f).GetByType(t)
if err != nil {
return nil, err
}
- d, err := packfile.NewDecoderForType(s, memory.NewStorage(), t, cache)
- if err != nil {
- return nil, err
- }
-
- d.SetIndex(index)
-
return &packfileIter{
- f: f,
- d: d,
- t: t,
-
- total: total,
- seen: seen,
+ iter: iter,
+ seen: seen,
}, nil
}
func (iter *packfileIter) Next() (plumbing.EncodedObject, error) {
for {
- if iter.position >= iter.total {
- return nil, io.EOF
- }
-
- obj, err := iter.d.DecodeObject()
+ obj, err := iter.iter.Next()
if err != nil {
return nil, err
}
- iter.position++
- if obj == nil {
- continue
- }
-
if _, ok := iter.seen[obj.Hash()]; ok {
- return iter.Next()
+ continue
}
return obj, nil
@@ -531,8 +515,7 @@ func (iter *packfileIter) ForEach(cb func(plumbing.EncodedObject) error) error {
}
func (iter *packfileIter) Close() {
- iter.f.Close()
- iter.d.Close()
+ iter.iter.Close()
}
type objectsIter struct {
diff --git a/storage/filesystem/object_test.go b/storage/filesystem/object_test.go
index ecd6beb..ae11c3b 100644
--- a/storage/filesystem/object_test.go
+++ b/storage/filesystem/object_test.go
@@ -115,7 +115,11 @@ func (s *FsSuite) TestPackfileIter(c *C) {
for _, h := range ph {
f, err := dg.ObjectPack(h)
c.Assert(err, IsNil)
- iter, err := NewPackfileIter(f, t)
+
+ idxf, err := dg.ObjectPackIdx(h)
+ c.Assert(err, IsNil)
+
+ iter, err := NewPackfileIter(f, idxf, t)
c.Assert(err, IsNil)
err = iter.ForEach(func(o plumbing.EncodedObject) error {
c.Assert(o.Type(), Equals, t)