aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--common_test.go8
-rw-r--r--examples/clone/main.go12
-rw-r--r--formats/idxfile/decoder_test.go5
-rw-r--r--formats/packfile/decoder.go68
-rw-r--r--formats/packfile/decoder_test.go93
-rw-r--r--formats/packfile/scanner.go7
-rw-r--r--remote.go8
-rw-r--r--storage/filesystem/internal/dotgit/dotgit.go154
-rw-r--r--storage/filesystem/internal/dotgit/dotgit_test.go45
-rw-r--r--storage/filesystem/object.go12
-rw-r--r--tree_diff_test.go3
11 files changed, 314 insertions, 101 deletions
diff --git a/common_test.go b/common_test.go
index d35abd3..d95ccf5 100644
--- a/common_test.go
+++ b/common_test.go
@@ -55,8 +55,9 @@ func (s *BaseSuite) buildRepositories(c *C) {
defer f.Close()
n := packfile.NewScanner(f)
- d := packfile.NewDecoder(n, r.s.ObjectStorage())
- _, err := d.Decode()
+ d, err := packfile.NewDecoder(n, r.s.ObjectStorage())
+ c.Assert(err, IsNil)
+ _, err = d.Decode()
c.Assert(err, IsNil)
s.Repositories[fixture.URL] = r
@@ -151,7 +152,8 @@ func unpackFixtures(c *C, fixtures ...[]packedFixture) map[string]*Repository {
c.Assert(err, IsNil, comment)
r := packfile.NewScanner(f)
- d := packfile.NewDecoder(r, repos[fixture.url].s.ObjectStorage())
+ d, err := packfile.NewDecoder(r, repos[fixture.url].s.ObjectStorage())
+ c.Assert(err, IsNil, comment)
_, err = d.Decode()
c.Assert(err, IsNil, comment)
c.Assert(f.Close(), IsNil, comment)
diff --git a/examples/clone/main.go b/examples/clone/main.go
index f95dc8f..0fa5fe6 100644
--- a/examples/clone/main.go
+++ b/examples/clone/main.go
@@ -1,6 +1,7 @@
package main
import (
+ "fmt"
"io"
"os"
"path/filepath"
@@ -15,15 +16,15 @@ func main() {
url := os.Args[1]
directory := os.Args[2]
- r := git.NewMemoryRepository()
+ r, err := git.NewFilesystemRepository(directory)
+ checkIfError(err)
// Clone the given repository, using depth we create a shallow clone :
// > git clone <url> --depth 1
color.Blue("git clone %s --depth 1 %s", url, directory)
- err := r.Clone(&git.CloneOptions{
- URL: url,
- Depth: 1,
+ err = r.Clone(&git.CloneOptions{
+ URL: url,
})
checkIfError(err)
@@ -34,6 +35,9 @@ func main() {
commit, err := r.Commit(ref.Hash())
checkIfError(err)
+ fmt.Println(commit)
+ os.Exit(0)
+
// ... we get all the files from the commit
files, err := commit.Files()
checkIfError(err)
diff --git a/formats/idxfile/decoder_test.go b/formats/idxfile/decoder_test.go
index 8ce2bc7..02167a7 100644
--- a/formats/idxfile/decoder_test.go
+++ b/formats/idxfile/decoder_test.go
@@ -42,8 +42,9 @@ func (s *IdxfileSuite) TestDecodeCRCs(c *C) {
scanner := packfile.NewScanner(f.Packfile())
storage := memory.NewStorage()
- pd := packfile.NewDecoder(scanner, storage.ObjectStorage())
- _, err := pd.Decode()
+ pd, err := packfile.NewDecoder(scanner, storage.ObjectStorage())
+ c.Assert(err, IsNil)
+ _, err = pd.Decode()
c.Assert(err, IsNil)
i := &Idxfile{Version: VersionSupported}
diff --git a/formats/packfile/decoder.go b/formats/packfile/decoder.go
index c4b9182..4606a3f 100644
--- a/formats/packfile/decoder.go
+++ b/formats/packfile/decoder.go
@@ -30,11 +30,13 @@ var (
// ErrZLib is returned by Decode when there was an error unzipping
// the packfile contents.
ErrZLib = NewError("zlib reading error")
- // ErrNotSeeker not seeker supported
- ErrNotSeeker = NewError("no seeker capable decode")
// ErrCannotRecall is returned by RecallByOffset or RecallByHash if the object
// to recall cannot be returned.
ErrCannotRecall = NewError("cannot recall object")
+ // ErrNonSeekable is returned when NewDecoder is called with a non-seekable
+ // reader and no core.ObjectStorage, or when ReadObjectAt is called on a
+ // non-seekable scanner
+ ErrNonSeekable = NewError("non-seekable scanner")
)
// Decoder reads and decodes packfiles from an input stream.
@@ -49,16 +51,25 @@ type Decoder struct {
}
// NewDecoder returns a new Decoder that reads from r.
-func NewDecoder(s *Scanner, o core.ObjectStorage) *Decoder {
+func NewDecoder(s *Scanner, o core.ObjectStorage) (*Decoder, error) {
+ if !s.IsSeekable && o == nil {
+ return nil, ErrNonSeekable
+ }
+
+ var tx core.TxObjectStorage
+ if o != nil {
+ tx = o.Begin()
+ }
+
return &Decoder{
s: s,
o: o,
- tx: o.Begin(),
+ tx: tx,
offsetToHash: make(map[int64]core.Hash, 0),
hashToOffset: make(map[core.Hash]int64, 0),
crcs: make(map[core.Hash]uint32, 0),
- }
+ }, nil
}
// Decode reads a packfile and stores it in the value pointed to by s.
@@ -76,6 +87,10 @@ func (d *Decoder) doDecode() error {
return err
}
+ if d.o == nil {
+ return d.readObjects(count)
+ }
+
if err := d.readObjects(count); err != nil {
if err := d.tx.Rollback(); err != nil {
return nil
@@ -89,10 +104,19 @@ func (d *Decoder) doDecode() error {
func (d *Decoder) readObjects(count uint32) error {
for i := 0; i < int(count); i++ {
- _, err := d.ReadObject()
+ obj, err := d.ReadObject()
if err != nil {
return err
}
+
+ if d.o == nil {
+ continue
+ }
+
+ if _, err := d.tx.Set(obj); err != nil {
+ return err
+ }
+
}
return nil
@@ -105,7 +129,7 @@ func (d *Decoder) ReadObject() (core.Object, error) {
return nil, err
}
- obj := d.o.NewObject()
+ obj := d.newObject()
obj.SetSize(h.Length)
obj.SetType(h.Type)
var crc uint32
@@ -128,15 +152,23 @@ func (d *Decoder) ReadObject() (core.Object, error) {
d.setOffset(hash, h.Offset)
d.setCRC(hash, crc)
- if _, err := d.tx.Set(obj); err != nil {
- return nil, err
+ return obj, nil
+}
+
+func (d *Decoder) newObject() core.Object {
+ if d.o == nil {
+ return &core.MemoryObject{}
}
- return obj, nil
+ return d.o.NewObject()
}
// ReadObjectAt reads an object at the given location
func (d *Decoder) ReadObjectAt(offset int64) (core.Object, error) {
+ if !d.s.IsSeekable {
+ return nil, ErrNonSeekable
+ }
+
beforeJump, err := d.s.Seek(offset)
if err != nil {
return nil, err
@@ -204,23 +236,29 @@ func (d *Decoder) setCRC(h core.Hash, crc uint32) {
}
func (d *Decoder) recallByOffset(o int64) (core.Object, error) {
+ if d.s.IsSeekable {
+ return d.ReadObjectAt(o)
+ }
+
if h, ok := d.offsetToHash[o]; ok {
return d.tx.Get(core.AnyObject, h)
}
- return d.ReadObjectAt(o)
+ return nil, core.ErrObjectNotFound
}
func (d *Decoder) recallByHash(h core.Hash) (core.Object, error) {
+ if d.s.IsSeekable {
+ if o, ok := d.hashToOffset[h]; ok {
+ return d.ReadObjectAt(o)
+ }
+ }
+
obj, err := d.tx.Get(core.AnyObject, h)
if err != core.ErrObjectNotFound {
return obj, err
}
- if o, ok := d.hashToOffset[h]; ok {
- return d.ReadObjectAt(o)
- }
-
return nil, core.ErrObjectNotFound
}
diff --git a/formats/packfile/decoder_test.go b/formats/packfile/decoder_test.go
index 9fe6142..cbf727e 100644
--- a/formats/packfile/decoder_test.go
+++ b/formats/packfile/decoder_test.go
@@ -25,56 +25,72 @@ func (s *ReaderSuite) TestDecode(c *C) {
scanner := NewScanner(f.Packfile())
storage := memory.NewStorage()
- d := NewDecoder(scanner, storage.ObjectStorage())
+ d, err := NewDecoder(scanner, storage.ObjectStorage())
+ c.Assert(err, IsNil)
ch, err := d.Decode()
c.Assert(err, IsNil)
c.Assert(ch, Equals, f.PackfileHash)
- AssertObjects(c, storage, []string{
- "918c48b83bd081e863dbe1b80f8998f058cd8294",
- "af2d6a6954d532f8ffb47615169c8fdf9d383a1a",
- "1669dce138d9b841a518c64b10914d88f5e488ea",
- "a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69",
- "b8e471f58bcbca63b07bda20e428190409c2db47",
- "35e85108805c84807bc66a02d91535e1e24b38b9",
- "b029517f6300c2da0f4b651b8642506cd6aaf45d",
- "32858aad3c383ed1ff0a0f9bdf231d54a00c9e88",
- "d3ff53e0564a9f87d8e84b6e28e5060e517008aa",
- "c192bd6a24ea1ab01d78686e417c8bdc7c3d197f",
- "d5c0f4ab811897cadf03aec358ae60d21f91c50d",
- "49c6bb89b17060d7b4deacb7b338fcc6ea2352a9",
- "cf4aa3b38974fb7d81f367c0830f7d78d65ab86b",
- "9dea2395f5403188298c1dabe8bdafe562c491e3",
- "586af567d0bb5e771e49bdd9434f5e0fb76d25fa",
- "9a48f23120e880dfbe41f7c9b7b708e9ee62a492",
- "5a877e6a906a2743ad6e45d99c1793642aaf8eda",
- "c8f1d8c61f9da76f4cb49fd86322b6e685dba956",
- "a8d315b2b1c615d43042c3a62402b8a54288cf5c",
- "a39771a7651f97faf5c72e08224d857fc35133db",
- "880cd14280f4b9b6ed3986d6671f907d7cc2a198",
- "fb72698cab7617ac416264415f13224dfd7a165e",
- "4d081c50e250fa32ea8b1313cf8bb7c2ad7627fd",
- "eba74343e2f15d62adedfd8c883ee0262b5c8021",
- "c2d30fa8ef288618f65f6eed6e168e0d514886f4",
- "8dcef98b1d52143e1e2dbc458ffe38f925786bf2",
- "aa9b383c260e1d05fbbf6b30a02914555e20c725",
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
- "dbd3641b371024f44d0e469a9c8f5457b0660de1",
- "e8d3ffab552895c19b9fcf7aa264d277cde33881",
- "7e59600739c96546163833214c36459e324bad0a",
- })
+ AssertObjects(c, storage, expectedHashes)
+ })
+}
+func (s *ReaderSuite) TestDecodeInMemory(c *C) {
+ fixtures.Basic().Test(c, func(f *fixtures.Fixture) {
+ scanner := NewScanner(f.Packfile())
+ d, err := NewDecoder(scanner, nil)
+ c.Assert(err, IsNil)
+
+ ch, err := d.Decode()
+ c.Assert(err, IsNil)
+ c.Assert(ch, Equals, f.PackfileHash)
})
}
+
+var expectedHashes = []string{
+ "918c48b83bd081e863dbe1b80f8998f058cd8294",
+ "af2d6a6954d532f8ffb47615169c8fdf9d383a1a",
+ "1669dce138d9b841a518c64b10914d88f5e488ea",
+ "a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69",
+ "b8e471f58bcbca63b07bda20e428190409c2db47",
+ "35e85108805c84807bc66a02d91535e1e24b38b9",
+ "b029517f6300c2da0f4b651b8642506cd6aaf45d",
+ "32858aad3c383ed1ff0a0f9bdf231d54a00c9e88",
+ "d3ff53e0564a9f87d8e84b6e28e5060e517008aa",
+ "c192bd6a24ea1ab01d78686e417c8bdc7c3d197f",
+ "d5c0f4ab811897cadf03aec358ae60d21f91c50d",
+ "49c6bb89b17060d7b4deacb7b338fcc6ea2352a9",
+ "cf4aa3b38974fb7d81f367c0830f7d78d65ab86b",
+ "9dea2395f5403188298c1dabe8bdafe562c491e3",
+ "586af567d0bb5e771e49bdd9434f5e0fb76d25fa",
+ "9a48f23120e880dfbe41f7c9b7b708e9ee62a492",
+ "5a877e6a906a2743ad6e45d99c1793642aaf8eda",
+ "c8f1d8c61f9da76f4cb49fd86322b6e685dba956",
+ "a8d315b2b1c615d43042c3a62402b8a54288cf5c",
+ "a39771a7651f97faf5c72e08224d857fc35133db",
+ "880cd14280f4b9b6ed3986d6671f907d7cc2a198",
+ "fb72698cab7617ac416264415f13224dfd7a165e",
+ "4d081c50e250fa32ea8b1313cf8bb7c2ad7627fd",
+ "eba74343e2f15d62adedfd8c883ee0262b5c8021",
+ "c2d30fa8ef288618f65f6eed6e168e0d514886f4",
+ "8dcef98b1d52143e1e2dbc458ffe38f925786bf2",
+ "aa9b383c260e1d05fbbf6b30a02914555e20c725",
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
+ "dbd3641b371024f44d0e469a9c8f5457b0660de1",
+ "e8d3ffab552895c19b9fcf7aa264d277cde33881",
+ "7e59600739c96546163833214c36459e324bad0a",
+}
+
func (s *ReaderSuite) TestDecodeCRCs(c *C) {
f := fixtures.Basic().ByTag("ofs-delta").One()
scanner := NewScanner(f.Packfile())
storage := memory.NewStorage()
- d := NewDecoder(scanner, storage.ObjectStorage())
- _, err := d.Decode()
+ d, err := NewDecoder(scanner, storage.ObjectStorage())
+ c.Assert(err, IsNil)
+ _, err = d.Decode()
c.Assert(err, IsNil)
var sum uint64
@@ -88,9 +104,8 @@ func (s *ReaderSuite) TestDecodeCRCs(c *C) {
func (s *ReaderSuite) TestReadObjectAt(c *C) {
f := fixtures.Basic().One()
scanner := NewScanner(f.Packfile())
- storage := memory.NewStorage()
-
- d := NewDecoder(scanner, storage.ObjectStorage())
+ d, err := NewDecoder(scanner, nil)
+ c.Assert(err, IsNil)
// when the packfile is ref-delta based, the offsets are required
if f.Is("ref-delta") {
diff --git a/formats/packfile/scanner.go b/formats/packfile/scanner.go
index 86092a1..a5e4215 100644
--- a/formats/packfile/scanner.go
+++ b/formats/packfile/scanner.go
@@ -49,6 +49,10 @@ type Scanner struct {
// is waiting to be read
pendingObject *ObjectHeader
version, objects uint32
+
+ // IsSeekable reports whether this scanner supports Seek; for a Scanner to
+ // be seekable, an r implementing io.Seeker is required
+ IsSeekable bool
}
// NewScanner returns a new Scanner based on a reader, if the given reader
@@ -65,7 +69,8 @@ func NewScanner(r io.Reader) *Scanner {
newByteReadSeeker(seeker),
crc,
},
- crc: crc,
+ crc: crc,
+ IsSeekable: ok,
}
}
diff --git a/remote.go b/remote.go
index 8e2d8a8..3e341fa 100644
--- a/remote.go
+++ b/remote.go
@@ -174,8 +174,12 @@ func (r *Remote) updateObjectStorage(reader io.Reader) error {
}
stream := packfile.NewScanner(reader)
- d := packfile.NewDecoder(stream, s)
- _, err := d.Decode()
+ d, err := packfile.NewDecoder(stream, s)
+ if err != nil {
+ return err
+ }
+
+ _, err = d.Decode()
return err
}
diff --git a/storage/filesystem/internal/dotgit/dotgit.go b/storage/filesystem/internal/dotgit/dotgit.go
index cacda68..54113d5 100644
--- a/storage/filesystem/internal/dotgit/dotgit.go
+++ b/storage/filesystem/internal/dotgit/dotgit.go
@@ -8,12 +8,12 @@ import (
"io"
"os"
"strings"
+ "sync/atomic"
"time"
"gopkg.in/src-d/go-git.v4/core"
"gopkg.in/src-d/go-git.v4/formats/idxfile"
"gopkg.in/src-d/go-git.v4/formats/packfile"
- "gopkg.in/src-d/go-git.v4/storage/memory"
"gopkg.in/src-d/go-git.v4/utils/fs"
)
@@ -227,35 +227,35 @@ func isHexAlpha(b byte) bool {
}
type PackWriter struct {
- fs fs.Filesystem
- sr io.ReadCloser
- sw io.WriteCloser
- fw fs.File
- mw io.Writer
+ Notify func(h core.Hash, i idxfile.Idxfile)
+ fs fs.Filesystem
+ fr, fw fs.File
+ synced *syncedReader
checksum core.Hash
index idxfile.Idxfile
result chan error
- Notify func(h core.Hash, i idxfile.Idxfile)
}
func newPackWrite(fs fs.Filesystem) (*PackWriter, error) {
- temp := sha1.Sum([]byte(time.Now().String()))
- filename := fmt.Sprintf(".%x", temp)
+ seed := sha1.Sum([]byte(time.Now().String()))
+ tmp := fs.Join(objectsPath, packPath, fmt.Sprintf("tmp_pack_%x", seed))
- fw, err := fs.Create(fs.Join(objectsPath, packPath, filename))
+ fw, err := fs.Create(tmp)
if err != nil {
return nil, err
}
- sr, sw := io.Pipe()
+ fr, err := fs.Open(tmp)
+ if err != nil {
+ return nil, err
+ }
writer := &PackWriter{
fs: fs,
fw: fw,
- sr: sr,
- sw: sw,
- mw: io.MultiWriter(sw, fw),
+ fr: fr,
+ synced: newSyncedReader(fw, fr),
result: make(chan error),
}
@@ -264,10 +264,12 @@ func newPackWrite(fs fs.Filesystem) (*PackWriter, error) {
}
func (w *PackWriter) buildIndex() {
- defer w.sr.Close()
- o := memory.NewStorage().ObjectStorage()
- s := packfile.NewScanner(w.sr)
- d := packfile.NewDecoder(s, o)
+ s := packfile.NewScanner(w.synced)
+ d, err := packfile.NewDecoder(s, nil)
+ if err != nil {
+ w.result <- err
+ return
+ }
checksum, err := d.Decode()
if err != nil {
@@ -287,8 +289,8 @@ func (w *PackWriter) buildIndex() {
w.result <- err
}
-func (w *PackWriter) Write(p []byte) (int, error) {
- return w.mw.Write(p)
+func (w *PackWriter) Write(p []byte) (n int, err error) {
+ return w.synced.Write(p)
}
func (w *PackWriter) Close() error {
@@ -296,20 +298,18 @@ func (w *PackWriter) Close() error {
close(w.result)
}()
- if err := w.fw.Close(); err != nil {
- return err
- }
-
- if err := w.sw.Close(); err != nil {
- return err
- }
-
- if err := <-w.result; err != nil {
- return err
+ pipe := []func() error{
+ func() error { return <-w.result },
+ w.fr.Close,
+ w.fw.Close,
+ w.synced.Close,
+ w.save,
}
- if err := w.save(); err != nil {
- return err
+ for i, f := range pipe {
+ if err := f(); err != nil {
+ return err
+ }
}
if w.Notify != nil {
@@ -342,3 +342,93 @@ func (w *PackWriter) encodeIdx(writer io.Writer) error {
_, err := e.Encode(&w.index)
return err
}
+
+type syncedReader struct {
+ w io.Writer
+ r io.ReadSeeker
+
+ blocked, done uint32
+ written, read uint64
+ news chan bool
+}
+
+func newSyncedReader(w io.Writer, r io.ReadSeeker) *syncedReader {
+ return &syncedReader{
+ w: w,
+ r: r,
+ news: make(chan bool),
+ }
+}
+
+func (s *syncedReader) Write(p []byte) (n int, err error) {
+ defer func() {
+ written := atomic.AddUint64(&s.written, uint64(n))
+ read := atomic.LoadUint64(&s.read)
+ if written > read {
+ s.wake()
+ }
+ }()
+
+ n, err = s.w.Write(p)
+ return
+}
+
+func (s *syncedReader) Read(p []byte) (n int, err error) {
+ defer func() { atomic.AddUint64(&s.read, uint64(n)) }()
+
+ s.sleep()
+ n, err = s.r.Read(p)
+ if err == io.EOF && !s.isDone() {
+ if n == 0 {
+ return s.Read(p)
+ }
+
+ return n, nil
+ }
+
+ return
+}
+
+func (s *syncedReader) isDone() bool {
+ return atomic.LoadUint32(&s.done) == 1
+}
+
+func (s *syncedReader) isBlocked() bool {
+ return atomic.LoadUint32(&s.blocked) == 1
+}
+
+func (s *syncedReader) wake() {
+ if s.isBlocked() {
+ // fmt.Println("wake")
+ atomic.StoreUint32(&s.blocked, 0)
+ s.news <- true
+ }
+}
+
+func (s *syncedReader) sleep() {
+ read := atomic.LoadUint64(&s.read)
+ written := atomic.LoadUint64(&s.written)
+ if read >= written {
+ atomic.StoreUint32(&s.blocked, 1)
+ // fmt.Println("sleep", read, written)
+ <-s.news
+ }
+
+}
+
+func (s *syncedReader) Seek(offset int64, whence int) (int64, error) {
+ if whence == io.SeekCurrent {
+ return s.r.Seek(offset, whence)
+ }
+
+ p, err := s.r.Seek(offset, whence)
+ s.read = uint64(p)
+
+ return p, err
+}
+
+func (s *syncedReader) Close() error {
+ atomic.StoreUint32(&s.done, 1)
+ close(s.news)
+ return nil
+}
diff --git a/storage/filesystem/internal/dotgit/dotgit_test.go b/storage/filesystem/internal/dotgit/dotgit_test.go
index ca2b5b4..f105c58 100644
--- a/storage/filesystem/internal/dotgit/dotgit_test.go
+++ b/storage/filesystem/internal/dotgit/dotgit_test.go
@@ -7,6 +7,7 @@ import (
"log"
"os"
"path/filepath"
+ "strconv"
"strings"
"testing"
@@ -191,3 +192,47 @@ func (s *SuiteDotGit) TestNewObjectPack(c *C) {
c.Assert(err, IsNil)
c.Assert(stat.Size(), Equals, int64(1940))
}
+
+func (s *SuiteDotGit) TestSyncedReader(c *C) {
+ tmpw, err := ioutil.TempFile("", "example")
+ c.Assert(err, IsNil)
+
+ tmpr, err := os.Open(tmpw.Name())
+ c.Assert(err, IsNil)
+
+ defer func() {
+ tmpw.Close()
+ tmpr.Close()
+ os.Remove(tmpw.Name())
+ }()
+
+ synced := newSyncedReader(tmpw, tmpr)
+
+ go func() {
+ for i := 0; i < 281; i++ {
+ _, err := synced.Write([]byte(strconv.Itoa(i) + "\n"))
+ c.Assert(err, IsNil)
+ }
+
+ synced.Close()
+ }()
+
+ o, err := synced.Seek(1002, io.SeekStart)
+ c.Assert(err, IsNil)
+ c.Assert(o, Equals, int64(1002))
+
+ head := make([]byte, 3)
+ n, err := io.ReadFull(synced, head)
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 3)
+ c.Assert(string(head), Equals, "278")
+
+ o, err = synced.Seek(1010, io.SeekStart)
+ c.Assert(err, IsNil)
+ c.Assert(o, Equals, int64(1010))
+
+ n, err = io.ReadFull(synced, head)
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 3)
+ c.Assert(string(head), Equals, "280")
+}
diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go
index dc21d0b..03939ce 100644
--- a/storage/filesystem/object.go
+++ b/storage/filesystem/object.go
@@ -161,7 +161,11 @@ func (s *ObjectStorage) getFromPackfile(h core.Hash) (core.Object, error) {
defer f.Close()
p := packfile.NewScanner(f)
- d := packfile.NewDecoder(p, memory.NewStorage().ObjectStorage())
+ d, err := packfile.NewDecoder(p, memory.NewStorage().ObjectStorage())
+ if err != nil {
+ return nil, err
+ }
+
d.SetOffsets(s.index[pack])
return d.ReadObjectAt(offset)
}
@@ -285,7 +289,11 @@ func newPackfileIter(
return nil, err
}
- d := packfile.NewDecoder(s, memory.NewStorage().ObjectStorage())
+ d, err := packfile.NewDecoder(s, memory.NewStorage().ObjectStorage())
+ if err != nil {
+ return nil, err
+ }
+
return &packfileIter{f: f, d: d, t: t, total: total, seen: seen}, nil
}
diff --git a/tree_diff_test.go b/tree_diff_test.go
index 9a0b04c..44f6910 100644
--- a/tree_diff_test.go
+++ b/tree_diff_test.go
@@ -49,7 +49,8 @@ func (s *DiffTreeSuite) SetUpSuite(c *C) {
c.Assert(err, IsNil)
r := packfile.NewScanner(f)
- d := packfile.NewDecoder(r, s.repos[fixRepo.url].s.ObjectStorage())
+ d, err := packfile.NewDecoder(r, s.repos[fixRepo.url].s.ObjectStorage())
+ c.Assert(err, IsNil)
_, err = d.Decode()
c.Assert(err, IsNil)