author     Máximo Cuadros <mcuadros@gmail.com>    2016-09-07 02:04:43 +0200
committer  Máximo Cuadros <mcuadros@gmail.com>    2016-09-07 02:04:43 +0200
commit     56adb5be3ad26a0045ea6c6a6d24dafdff15ba1c (patch)
tree       bd8ecbb6674f01f03f97fb15083bed0a3e8e021d
parent     98a22e72a808aa0d5dd62339817404fd9e1c4db6 (diff)
download   go-git-56adb5be3ad26a0045ea6c6a6d24dafdff15ba1c.tar.gz
format: packfile new interface
-rw-r--r--  blame_test.go                                       10
-rw-r--r--  common_test.go                                      18
-rw-r--r--  core/memory.go                                       2
-rw-r--r--  examples/fs_implementation/main.go                   2
-rw-r--r--  examples/fs_implementation/main_test.go              2
-rw-r--r--  formats/packfile/decoder.go                        180
-rw-r--r--  formats/packfile/decoder_test.go                    36
-rw-r--r--  formats/packfile/parser.go                         154
-rw-r--r--  formats/packfile/parser_test.go                     20
-rw-r--r--  references_test.go                                   4
-rw-r--r--  remote.go                                            5
-rw-r--r--  remote_test.go                                      14
-rw-r--r--  repository.go                                        2
-rw-r--r--  storage/filesystem/internal/dotgit/dotgit_test.go    4
-rw-r--r--  storage/filesystem/internal/index/index.go          26
-rw-r--r--  storage/filesystem/internal/index/index_test.go     29
-rw-r--r--  storage/filesystem/object.go                        24
-rw-r--r--  storage/filesystem/object_test.go                   14
-rw-r--r--  storage/memory/storage.go                            1
-rw-r--r--  tree_diff_test.go                                    4
-rw-r--r--  utils/fs/os.go                                      43
-rw-r--r--  utils/fs/os_test.go                                 10
22 files changed, 356 insertions(+), 248 deletions(-)
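The heart of the change is the packfile API: the old Stream/Parser pair becomes a Scanner, the Decoder is now built as NewDecoder(scanner, storage), and Decode returns the pack checksum in addition to an error. A minimal sketch of the new call pattern, mirroring the updated tests (the fixture path is illustrative):

```go
package example

import (
	"os"

	"gopkg.in/src-d/go-git.v4/formats/packfile"
	"gopkg.in/src-d/go-git.v4/storage/memory"
)

// decodePack shows the new interface: a Scanner wraps the input and the
// Decoder takes (scanner, storage) instead of (storage, parser, seeker).
// The fixture path is illustrative.
func decodePack() error {
	f, err := os.Open("fixtures/git-fixture.ofs-delta")
	if err != nil {
		return err
	}
	defer f.Close()

	sto := memory.NewStorage()

	scanner := packfile.NewScanner(f) // *os.File is an io.ReadSeeker
	d := packfile.NewDecoder(scanner, sto.ObjectStorage())

	// Decode now also returns the packfile checksum.
	checksum, err := d.Decode()
	if err != nil {
		return err
	}
	_ = checksum

	return nil
}
```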
diff --git a/blame_test.go b/blame_test.go
index a8f7ae1..c4cdc85 100644
--- a/blame_test.go
+++ b/blame_test.go
@@ -1,8 +1,6 @@
package git
import (
- "bytes"
- "io/ioutil"
"os"
"gopkg.in/src-d/go-git.v4/core"
@@ -26,15 +24,11 @@ func (s *BlameCommon) SetUpSuite(c *C) {
f, err := os.Open(fixRepo.packfile)
c.Assert(err, IsNil)
- data, err := ioutil.ReadAll(f)
- c.Assert(err, IsNil)
-
- stream := packfile.NewStream(bytes.NewReader(data))
+ stream := packfile.NewScanner(f)
d := packfile.NewDecoder(stream, r.s.ObjectStorage())
- err = d.Decode()
+ _, err = d.Decode()
c.Assert(err, IsNil)
-
c.Assert(f.Close(), IsNil)
s.repos[fixRepo.url] = r
diff --git a/common_test.go b/common_test.go
index f054f23..5670826 100644
--- a/common_test.go
+++ b/common_test.go
@@ -1,10 +1,8 @@
package git
import (
- "bytes"
"errors"
"io"
- "io/ioutil"
"os"
"testing"
@@ -23,10 +21,17 @@ type BaseSuite struct {
}
func (s *BaseSuite) SetUpSuite(c *C) {
+ s.installMockProtocol(c)
+ s.buildRepository(c)
+}
+
+func (s *BaseSuite) installMockProtocol(c *C) {
clients.InstallProtocol("mock", func(end common.Endpoint) common.GitUploadPackService {
return &MockGitUploadPackService{endpoint: end}
})
+}
+func (s *BaseSuite) buildRepository(c *C) {
s.Repository = NewMemoryRepository()
err := s.Repository.Clone(&CloneOptions{URL: RepositoryFixture})
c.Assert(err, IsNil)
@@ -117,14 +122,9 @@ func unpackFixtures(c *C, fixtures ...[]packedFixture) map[string]*Repository {
f, err := os.Open(fixture.packfile)
c.Assert(err, IsNil, comment)
- // increase memory consumption to speed up tests
- data, err := ioutil.ReadAll(f)
- c.Assert(err, IsNil)
- memStream := bytes.NewReader(data)
- r := packfile.NewStream(memStream)
-
+ r := packfile.NewScanner(f)
d := packfile.NewDecoder(r, repos[fixture.url].s.ObjectStorage())
- err = d.Decode()
+ _, err = d.Decode()
c.Assert(err, IsNil, comment)
c.Assert(f.Close(), IsNil, comment)
diff --git a/core/memory.go b/core/memory.go
index 3d8063d..97b1b9b 100644
--- a/core/memory.go
+++ b/core/memory.go
@@ -1,4 +1,4 @@
-package core
+package core
import (
"bytes"
diff --git a/examples/fs_implementation/main.go b/examples/fs_implementation/main.go
index 626cc8a..69ee0a0 100644
--- a/examples/fs_implementation/main.go
+++ b/examples/fs_implementation/main.go
@@ -1,5 +1,6 @@
package main
+/*
import (
"fmt"
"io"
@@ -108,3 +109,4 @@ func (fs *CustomFS) ReadDir(path string) ([]os.FileInfo, error) {
func (fs *CustomFS) Join(elem ...string) string {
return strings.Join(elem, separator)
}
+*/
diff --git a/examples/fs_implementation/main_test.go b/examples/fs_implementation/main_test.go
index ed56cf1..7a7607a 100644
--- a/examples/fs_implementation/main_test.go
+++ b/examples/fs_implementation/main_test.go
@@ -1,5 +1,6 @@
package main
+/*
import (
"io/ioutil"
"os"
@@ -193,3 +194,4 @@ func TestReadDir(t *testing.T) {
}
}
}
+*/
diff --git a/formats/packfile/decoder.go b/formats/packfile/decoder.go
index 3da927d..18ec6b9 100644
--- a/formats/packfile/decoder.go
+++ b/formats/packfile/decoder.go
@@ -2,8 +2,6 @@ package packfile
import (
"bytes"
- "io"
- "os"
"gopkg.in/src-d/go-git.v4/core"
)
@@ -32,9 +30,8 @@ var (
// ErrZLib is returned by Decode when there was an error unzipping
// the packfile contents.
ErrZLib = NewError("zlib reading error")
- // ErrDuplicatedObject is returned by Remember if an object appears several
- // times in a packfile.
- ErrDuplicatedObject = NewError("duplicated object")
+ // ErrNotSeeker not seeker supported
+ ErrNotSeeker = NewError("no seeker capable decode")
// ErrCannotRecall is returned by RecallByOffset or RecallByHash if the object
// to recall cannot be returned.
ErrCannotRecall = NewError("cannot recall object")
@@ -42,33 +39,43 @@ var (
// Decoder reads and decodes packfiles from an input stream.
type Decoder struct {
- p *Parser
- s core.ObjectStorage
- seeker io.Seeker
+ scanner *Scanner
+ storage core.ObjectStorage
offsetToObject map[int64]core.Object
hashToOffset map[core.Hash]int64
}
// NewDecoder returns a new Decoder that reads from r.
-func NewDecoder(s core.ObjectStorage, p *Parser, seeker io.Seeker) *Decoder {
+func NewDecoder(p *Scanner, s core.ObjectStorage) *Decoder {
return &Decoder{
- p: p,
- s: s,
- seeker: seeker,
+ scanner: p,
+ storage: s,
offsetToObject: make(map[int64]core.Object, 0),
hashToOffset: make(map[core.Hash]int64, 0),
}
}
// Decode reads a packfile and stores it in the value pointed to by s.
-func (d *Decoder) Decode() error {
- _, count, err := d.p.Header()
+func (d *Decoder) Decode() (checksum core.Hash, err error) {
+ if err := d.doDecode(); err != nil {
+ return core.ZeroHash, err
+ }
+
+ return d.scanner.Checksum()
+}
+
+func (d *Decoder) doDecode() error {
+ _, count, err := d.scanner.Header()
if err != nil {
return err
}
- tx := d.s.Begin()
- if err := d.readObjects(tx, count); err != nil {
+ if d.storage == nil {
+ return d.readObjects(count, nil)
+ }
+
+ tx := d.storage.Begin()
+ if err := d.readObjects(count, tx); err != nil {
if err := tx.Rollback(); err != nil {
return nil
}
@@ -76,23 +83,27 @@ func (d *Decoder) Decode() error {
return err
}
- return tx.Commit()
+ if err := tx.Commit(); err != nil {
+ return err
+ }
+
+ return nil
}
-func (d *Decoder) readObjects(tx core.TxObjectStorage, count uint32) error {
- // This code has 50-80 µs of overhead per object not counting zlib inflation.
- // Together with zlib inflation, it's 400-410 µs for small objects.
- // That's 1 sec for ~2450 objects, ~4.20 MB, or ~250 ms per MB,
- // of which 12-20 % is _not_ zlib inflation (ie. is our code).
+func (d *Decoder) readObjects(count uint32, tx core.TxObjectStorage) error {
for i := 0; i < int(count); i++ {
obj, err := d.readObject()
if err != nil {
return err
}
+ if tx == nil {
+ continue
+ }
+
_, err = tx.Set(obj)
- if err == io.EOF {
- break
+ if err != nil {
+ return err
}
}
@@ -100,12 +111,12 @@ func (d *Decoder) readObjects(tx core.TxObjectStorage, count uint32) error {
}
func (d *Decoder) readObject() (core.Object, error) {
- h, err := d.p.NextObjectHeader()
+ h, err := d.scanner.NextObjectHeader()
if err != nil {
return nil, err
}
- obj := d.s.NewObject()
+ obj := d.newObject()
obj.SetSize(h.Length)
obj.SetType(h.Type)
@@ -120,7 +131,20 @@ func (d *Decoder) readObject() (core.Object, error) {
err = ErrInvalidObject.AddDetails("type %q", h.Type)
}
- return obj, d.remember(h.Offset, obj)
+ if err != nil {
+ return obj, err
+ }
+
+ d.remember(h.Offset, obj)
+ return obj, nil
+}
+
+func (d *Decoder) newObject() core.Object {
+ if d.storage == nil {
+ return &core.MemoryObject{}
+ }
+
+ return d.storage.NewObject()
}
func (d *Decoder) fillRegularObjectContent(obj core.Object) error {
@@ -129,103 +153,107 @@ func (d *Decoder) fillRegularObjectContent(obj core.Object) error {
return err
}
- _, err = d.p.NextObject(w)
+ _, err = d.scanner.NextObject(w)
return err
}
func (d *Decoder) fillREFDeltaObjectContent(obj core.Object, ref core.Hash) error {
- base, err := d.recallByHash(ref)
- if err != nil {
- return err
- }
- obj.SetType(base.Type())
- if err := d.readAndApplyDelta(obj, base); err != nil {
+ buf := bytes.NewBuffer(nil)
+ if _, err := d.scanner.NextObject(buf); err != nil {
return err
}
- return nil
-}
-
-func (d *Decoder) fillOFSDeltaObjectContent(obj core.Object, offset int64) error {
- base, err := d.recallByOffset(offset)
+ base, err := d.recallByHash(ref)
if err != nil {
return err
}
obj.SetType(base.Type())
- if err := d.readAndApplyDelta(obj, base); err != nil {
- return err
- }
-
- return nil
+ return ApplyDelta(obj, base, buf.Bytes())
}
-// ReadAndApplyDelta reads and apply the base patched with the contents
-// of a zlib compressed diff data in the delta portion of an object
-// entry in the packfile.
-func (d *Decoder) readAndApplyDelta(target, base core.Object) error {
+func (d *Decoder) fillOFSDeltaObjectContent(obj core.Object, offset int64) error {
buf := bytes.NewBuffer(nil)
- if _, err := d.p.NextObject(buf); err != nil {
+ if _, err := d.scanner.NextObject(buf); err != nil {
+ return err
+ }
+
+ base, err := d.recallByOffset(offset)
+ if err != nil {
return err
}
- return ApplyDelta(target, base, buf.Bytes())
+ obj.SetType(base.Type())
+ return ApplyDelta(obj, base, buf.Bytes())
}
-// Remember stores the offset of the object and its hash, but not the
-// object itself. This implementation does not check for already stored
-// offsets, as it is too expensive to build this information from an
-// index every time a get operation is performed on the SeekableReadRecaller.
-func (r *Decoder) remember(o int64, obj core.Object) error {
+// remember stores the offset of the object and its hash and the object itself.
+// If a seeker was not provided to the decoder, the objects are stored in memory
+func (d *Decoder) remember(o int64, obj core.Object) {
h := obj.Hash()
- r.hashToOffset[h] = o
- r.offsetToObject[o] = obj
- return nil
+
+ d.hashToOffset[h] = o
+ if !d.scanner.IsSeekable() {
+ d.offsetToObject[o] = obj
+ }
}
-// RecallByHash returns the object for a given hash by looking for it again in
+// recallByHash returns the object for a given hash by looking for it again in
// the io.ReadeSeerker.
-func (r *Decoder) recallByHash(h core.Hash) (core.Object, error) {
- o, ok := r.hashToOffset[h]
+func (d *Decoder) recallByHash(h core.Hash) (core.Object, error) {
+ o, ok := d.hashToOffset[h]
if !ok {
return nil, ErrCannotRecall.AddDetails("hash not found: %s", h)
}
- return r.recallByOffset(o)
+ return d.recallByOffset(o)
}
-// RecallByOffset returns the object for a given offset by looking for it again in
+// recallByOffset returns the object for a given offset by looking for it again in
// the io.ReadeSeerker. For efficiency reasons, this method always find objects by
// offset, even if they have not been remembered or if they have been forgetted.
-func (r *Decoder) recallByOffset(o int64) (obj core.Object, err error) {
- obj, ok := r.offsetToObject[o]
+func (d *Decoder) recallByOffset(o int64) (core.Object, error) {
+ obj, ok := d.offsetToObject[o]
if ok {
return obj, nil
}
- if !ok && r.seeker == nil {
+ if !ok && !d.scanner.IsSeekable() {
return nil, ErrCannotRecall.AddDetails("no object found at offset %d", o)
}
- // remember current offset
- beforeJump, err := r.seeker.Seek(0, os.SEEK_CUR)
+ return d.ReadObjectAt(o)
+}
+
+// ReadObjectAt reads an object at the given location
+func (d *Decoder) ReadObjectAt(offset int64) (core.Object, error) {
+ if !d.scanner.IsSeekable() {
+ return nil, ErrNotSeeker
+ }
+
+ beforeJump, err := d.scanner.Seek(offset)
if err != nil {
return nil, err
}
defer func() {
- // jump back
- _, seekErr := r.seeker.Seek(beforeJump, os.SEEK_SET)
+ _, seekErr := d.scanner.Seek(beforeJump)
if err == nil {
err = seekErr
}
}()
- // jump to requested offset
- _, err = r.seeker.Seek(o, os.SEEK_SET)
- if err != nil {
- return nil, err
- }
+ return d.readObject()
+}
+
+// Index returns an index of the objects read by hash and the position where
+// was read
+func (d *Decoder) Index() map[core.Hash]int64 {
+ return d.hashToOffset
+}
- return r.readObject()
+// Close close the Scanner, usually this mean that the whole reader is read and
+// discarded
+func (d *Decoder) Close() error {
+ return d.scanner.Close()
}
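Besides the constructor change, the Decoder gains ReadObjectAt, Index and Close, which the filesystem storage further down relies on for random access into a seekable packfile. A hedged sketch of how those methods compose when no backing storage is given (the helper name and path are illustrative, not part of the commit):

```go
package example

import (
	"os"

	"gopkg.in/src-d/go-git.v4/core"
	"gopkg.in/src-d/go-git.v4/formats/packfile"
)

// readByHash decodes a seekable packfile with no backing storage and then
// fetches one object by hash through the new Index/ReadObjectAt methods.
// A sketch only; the path and helper name are illustrative.
func readByHash(path string, h core.Hash) (core.Object, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	scanner := packfile.NewScanner(f) // seekable, so IsSeekable() is true
	d := packfile.NewDecoder(scanner, nil)

	// Decode walks the whole pack, filling the hash -> offset index.
	if _, err := d.Decode(); err != nil {
		return nil, err
	}

	// Index exposes the offsets recorded while decoding.
	offset, ok := d.Index()[h]
	if !ok {
		return nil, core.ErrObjectNotFound
	}

	// ReadObjectAt seeks to the offset, reads one object and seeks back.
	return d.ReadObjectAt(offset)
}
```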
diff --git a/formats/packfile/decoder_test.go b/formats/packfile/decoder_test.go
index 5a95af1..e229f50 100644
--- a/formats/packfile/decoder_test.go
+++ b/formats/packfile/decoder_test.go
@@ -24,9 +24,9 @@ func (s *ReaderSuite) TestReadPackfile(c *C) {
data, _ := base64.StdEncoding.DecodeString(packFileWithEmptyObjects)
f := bytes.NewReader(data)
sto := memory.NewStorage()
- d := NewDecoder(sto.ObjectStorage(), NewParser(f), nil)
+ d := NewDecoder(NewScanner(f), sto.ObjectStorage())
- err := d.Decode()
+ _, err := d.Decode()
c.Assert(err, IsNil)
AssertObjects(c, sto, []string{
@@ -44,21 +44,39 @@ func (s *ReaderSuite) TestReadPackfile(c *C) {
})
}
-func (s *ReaderSuite) TestReadPackfileOFSDelta(c *C) {
- s.testReadPackfileGitFixture(c, "fixtures/git-fixture.ofs-delta", OFSDeltaFormat)
+func (s *ReaderSuite) TestDecodeOFSDelta(c *C) {
+ s.testDecode(c, "fixtures/git-fixture.ofs-delta", true)
+}
+func (s *ReaderSuite) TestDecodeOFSDeltaNoSeekable(c *C) {
+ s.testDecode(c, "fixtures/git-fixture.ofs-delta", false)
}
-func (s *ReaderSuite) TestReadPackfileREFDelta(c *C) {
- s.testReadPackfileGitFixture(c, "fixtures/git-fixture.ref-delta", REFDeltaFormat)
+
+func (s *ReaderSuite) TestDecodeREFDelta(c *C) {
+ s.testDecode(c, "fixtures/git-fixture.ref-delta", true)
}
-func (s *ReaderSuite) testReadPackfileGitFixture(c *C, file string, format Format) {
+func (s *ReaderSuite) TestDecodeREFDeltaNoSeekable(c *C) {
+ s.testDecode(c, "fixtures/git-fixture.ref-delta", false)
+}
+
+func (s *ReaderSuite) testDecode(c *C, file string, seekable bool) {
f, err := os.Open(file)
c.Assert(err, IsNil)
+
+ scanner := NewScanner(f)
+ if !seekable {
+ scanner = NewScannerFromReader(f)
+ }
+
+ s.doTestDecodeWithScanner(c, scanner)
+}
+
+func (s *ReaderSuite) doTestDecodeWithScanner(c *C, scanner *Scanner) {
sto := memory.NewStorage()
- d := NewDecoder(sto.ObjectStorage(), NewParser(f), f)
+ d := NewDecoder(scanner, sto.ObjectStorage())
- err = d.Decode()
+ _, err := d.Decode()
c.Assert(err, IsNil)
AssertObjects(c, sto, []string{
diff --git a/formats/packfile/parser.go b/formats/packfile/parser.go
index c1653c8..94f552a 100644
--- a/formats/packfile/parser.go
+++ b/formats/packfile/parser.go
@@ -19,6 +19,8 @@ var (
// ErrUnsupportedVersion is returned by ReadHeader when the packfile version is
// different than VersionSupported.
ErrUnsupportedVersion = NewError("unsupported packfile version")
+ // ErrSeekNotSupported returned if seek is not support
+ ErrSeekNotSupported = NewError("not seek support")
)
const (
@@ -26,6 +28,8 @@ const (
VersionSupported uint32 = 2
)
+// ObjectHeader contains the information related to the object, this information
+// is collected from the previous bytes to the content of the object.
type ObjectHeader struct {
Type core.ObjectType
Offset int64
@@ -36,8 +40,8 @@ type ObjectHeader struct {
// A Parser is a collection of functions to read and process data form a packfile.
// Values from this type are not zero-value safe. See the NewParser function bellow.
-type Parser struct {
- r *trackableReader
+type Scanner struct {
+ r *byteReadSeeker
// pendingObject is used to detect if an object has been read, or still
// is waiting to be read
@@ -45,15 +49,21 @@ type Parser struct {
}
// NewParser returns a new Parser that reads from the packfile represented by r.
-func NewParser(r io.Reader) *Parser {
- return &Parser{r: &trackableReader{Reader: r}}
+func NewScannerFromReader(r io.Reader) *Scanner {
+ s := &trackableReader{Reader: r}
+ return NewScanner(s)
+}
+
+func NewScanner(r io.ReadSeeker) *Scanner {
+ s := &byteReadSeeker{r}
+ return &Scanner{r: s}
}
// Header reads the whole packfile header (signature, version and object count).
// It returns the version and the object count and performs checks on the
// validity of the signature and the version fields.
-func (p *Parser) Header() (version, objects uint32, err error) {
- sig, err := p.readSignature()
+func (s *Scanner) Header() (version, objects uint32, err error) {
+ sig, err := s.readSignature()
if err != nil {
if err == io.EOF {
err = ErrEmptyPackfile
@@ -62,29 +72,29 @@ func (p *Parser) Header() (version, objects uint32, err error) {
return
}
- if !p.isValidSignature(sig) {
+ if !s.isValidSignature(sig) {
err = ErrBadSignature
return
}
- version, err = p.readVersion()
+ version, err = s.readVersion()
if err != nil {
return
}
- if !p.isSupportedVersion(version) {
+ if !s.isSupportedVersion(version) {
err = ErrUnsupportedVersion.AddDetails("%d", version)
return
}
- objects, err = p.readCount()
+ objects, err = s.readCount()
return
}
// readSignature reads an returns the signature field in the packfile.
-func (p *Parser) readSignature() ([]byte, error) {
+func (s *Scanner) readSignature() ([]byte, error) {
var sig = make([]byte, 4)
- if _, err := io.ReadFull(p.r, sig); err != nil {
+ if _, err := io.ReadFull(s.r, sig); err != nil {
return []byte{}, err
}
@@ -92,58 +102,59 @@ func (p *Parser) readSignature() ([]byte, error) {
}
// isValidSignature returns if sig is a valid packfile signature.
-func (p *Parser) isValidSignature(sig []byte) bool {
+func (s *Scanner) isValidSignature(sig []byte) bool {
return bytes.Equal(sig, []byte{'P', 'A', 'C', 'K'})
}
// readVersion reads and returns the version field of a packfile.
-func (p *Parser) readVersion() (uint32, error) {
- return p.readInt32()
+func (s *Scanner) readVersion() (uint32, error) {
+ return s.readInt32()
}
// isSupportedVersion returns whether version v is supported by the parser.
// The current supported version is VersionSupported, defined above.
-func (p *Parser) isSupportedVersion(v uint32) bool {
+func (s *Scanner) isSupportedVersion(v uint32) bool {
return v == VersionSupported
}
// readCount reads and returns the count of objects field of a packfile.
-func (p *Parser) readCount() (uint32, error) {
- return p.readInt32()
+func (s *Scanner) readCount() (uint32, error) {
+ return s.readInt32()
}
// ReadInt32 reads 4 bytes and returns them as a Big Endian int32.
-func (p *Parser) readInt32() (uint32, error) {
+func (s *Scanner) readInt32() (uint32, error) {
var v uint32
- if err := binary.Read(p.r, binary.BigEndian, &v); err != nil {
+ if err := binary.Read(s.r, binary.BigEndian, &v); err != nil {
return 0, err
}
return v, nil
}
-func (p *Parser) NextObjectHeader() (*ObjectHeader, error) {
- if err := p.discardObjectIfNeeded(); err != nil {
+// NextObjectHeader returns the ObjectHeader for the next object in the reader
+func (s *Scanner) NextObjectHeader() (*ObjectHeader, error) {
+ if err := s.discardObjectIfNeeded(); err != nil {
return nil, err
}
h := &ObjectHeader{}
- p.pendingObject = h
+ s.pendingObject = h
var err error
- h.Offset, err = p.r.Offset()
+ h.Offset, err = s.r.Seek(0, io.SeekCurrent)
if err != nil {
return nil, err
}
- h.Type, h.Length, err = p.readObjectTypeAndLength()
+ h.Type, h.Length, err = s.readObjectTypeAndLength()
if err != nil {
return nil, err
}
switch h.Type {
case core.OFSDeltaObject:
- no, err := p.readNegativeOffset()
+ no, err := s.readNegativeOffset()
if err != nil {
return nil, err
}
@@ -151,7 +162,7 @@ func (p *Parser) NextObjectHeader() (*ObjectHeader, error) {
h.OffsetReference = h.Offset + no
case core.REFDeltaObject:
var err error
- h.Reference, err = p.readHash()
+ h.Reference, err = s.readHash()
if err != nil {
return nil, err
}
@@ -160,7 +171,7 @@ func (p *Parser) NextObjectHeader() (*ObjectHeader, error) {
return h, nil
}
-func (s *Parser) discardObjectIfNeeded() error {
+func (s *Scanner) discardObjectIfNeeded() error {
if s.pendingObject == nil {
return nil
}
@@ -183,21 +194,21 @@ func (s *Parser) discardObjectIfNeeded() error {
// ReadObjectTypeAndLength reads and returns the object type and the
// length field from an object entry in a packfile.
-func (p Parser) readObjectTypeAndLength() (core.ObjectType, int64, error) {
- t, c, err := p.readType()
+func (s *Scanner) readObjectTypeAndLength() (core.ObjectType, int64, error) {
+ t, c, err := s.readType()
if err != nil {
return t, 0, err
}
- l, err := p.readLength(c)
+ l, err := s.readLength(c)
return t, l, err
}
-func (p Parser) readType() (core.ObjectType, byte, error) {
+func (s *Scanner) readType() (core.ObjectType, byte, error) {
var c byte
var err error
- if c, err = p.r.ReadByte(); err != nil {
+ if c, err = s.r.ReadByte(); err != nil {
return core.ObjectType(0), 0, err
}
@@ -208,14 +219,14 @@ func (p Parser) readType() (core.ObjectType, byte, error) {
// the length is codified in the last 4 bits of the first byte and in
// the last 7 bits of subsequent bytes. Last byte has a 0 MSB.
-func (p *Parser) readLength(first byte) (int64, error) {
+func (s *Scanner) readLength(first byte) (int64, error) {
length := int64(first & maskFirstLength)
c := first
shift := firstLengthBits
var err error
for moreBytesInLength(c) {
- if c, err = p.r.ReadByte(); err != nil {
+ if c, err = s.r.ReadByte(); err != nil {
return 0, err
}
@@ -226,15 +237,15 @@ func (p *Parser) readLength(first byte) (int64, error) {
return length, nil
}
-func (p *Parser) NextObject(w io.Writer) (written int64, err error) {
- p.pendingObject = nil
- return p.copyObject(w)
+func (s *Scanner) NextObject(w io.Writer) (written int64, err error) {
+ s.pendingObject = nil
+ return s.copyObject(w)
}
// ReadRegularObject reads and write a non-deltified object
// from it zlib stream in an object entry in the packfile.
-func (p *Parser) copyObject(w io.Writer) (int64, error) {
- zr, err := zlib.NewReader(p.r)
+func (s *Scanner) copyObject(w io.Writer) (int64, error) {
+ zr, err := zlib.NewReader(s.r)
if err != nil {
if err != zlib.ErrHeader {
return -1, fmt.Errorf("zlib reading error: %s", err)
@@ -251,14 +262,35 @@ func (p *Parser) copyObject(w io.Writer) (int64, error) {
return io.Copy(w, zr)
}
-func (p *Parser) Checksum() (core.Hash, error) {
- return p.readHash()
+func (s *Scanner) IsSeekable() bool {
+ _, ok := s.r.ReadSeeker.(*trackableReader)
+ return !ok
+}
+
+// Seek sets a new offset from start, returns the old position before the change
+func (s *Scanner) Seek(offset int64) (previous int64, err error) {
+ previous, err = s.r.Seek(0, io.SeekCurrent)
+ if err != nil {
+ return -1, err
+ }
+
+ _, err = s.r.Seek(offset, io.SeekStart)
+ return previous, err
+}
+
+func (s *Scanner) Checksum() (core.Hash, error) {
+ err := s.discardObjectIfNeeded()
+ if err != nil {
+ return core.ZeroHash, err
+ }
+
+ return s.readHash()
}
// ReadHash reads a hash.
-func (p *Parser) readHash() (core.Hash, error) {
+func (s *Scanner) readHash() (core.Hash, error) {
var h core.Hash
- if _, err := io.ReadFull(p.r, h[:]); err != nil {
+ if _, err := io.ReadFull(s.r, h[:]); err != nil {
return core.ZeroHash, err
}
@@ -292,18 +324,18 @@ func (p *Parser) readHash() (core.Hash, error) {
// while (ofs >>= 7)
// dheader[--pos] = 128 | (--ofs & 127);
//
-func (p *Parser) readNegativeOffset() (int64, error) {
+func (s *Scanner) readNegativeOffset() (int64, error) {
var c byte
var err error
- if c, err = p.r.ReadByte(); err != nil {
+ if c, err = s.r.ReadByte(); err != nil {
return 0, err
}
var offset = int64(c & maskLength)
for moreBytesInLength(c) {
offset++
- if c, err = p.r.ReadByte(); err != nil {
+ if c, err = s.r.ReadByte(); err != nil {
return 0, err
}
offset = (offset << lengthBits) + int64(c&maskLength)
@@ -312,6 +344,11 @@ func (p *Parser) readNegativeOffset() (int64, error) {
return -offset, nil
}
+func (s *Scanner) Close() error {
+ _, err := io.Copy(ioutil.Discard, s.r)
+ return err
+}
+
func moreBytesInLength(c byte) bool {
return c&maskContinue > 0
}
@@ -342,16 +379,23 @@ func (r *trackableReader) Read(p []byte) (n int, err error) {
return
}
+// Seek only supports io.SeekCurrent, any other operation fails
+func (r *trackableReader) Seek(offset int64, whence int) (int64, error) {
+ if whence != io.SeekCurrent {
+ return -1, ErrSeekNotSupported
+ }
+
+ return r.count, nil
+}
+
+type byteReadSeeker struct {
+ io.ReadSeeker
+}
+
// ReadByte reads a byte.
-func (r *trackableReader) ReadByte() (byte, error) {
+func (r *byteReadSeeker) ReadByte() (byte, error) {
var p [1]byte
- _, err := r.Reader.Read(p[:])
- r.count++
+ _, err := r.ReadSeeker.Read(p[:])
return p[0], err
}
-
-// Offset returns the number of bytes read.
-func (r *trackableReader) Offset() (int64, error) {
- return r.count, nil
-}
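The Scanner keeps the Parser's reading sequence (Header, then NextObjectHeader/NextObject per entry, then Checksum) and adds seek support when built from an io.ReadSeeker. A minimal sketch of walking a packfile with it, following the scanner tests; the helper below is illustrative:

```go
package example

import (
	"bytes"
	"io"

	"gopkg.in/src-d/go-git.v4/formats/packfile"
)

// listHeaders walks every entry of a packfile with the Scanner and returns
// the collected object headers. A sketch; inflated contents are discarded.
func listHeaders(r io.ReadSeeker) ([]packfile.ObjectHeader, error) {
	s := packfile.NewScanner(r)

	_, count, err := s.Header()
	if err != nil {
		return nil, err
	}

	headers := make([]packfile.ObjectHeader, 0, count)
	for i := uint32(0); i < count; i++ {
		h, err := s.NextObjectHeader()
		if err != nil {
			return nil, err
		}
		headers = append(headers, *h)

		// NextObject inflates the zlib stream of the current entry.
		if _, err := s.NextObject(bytes.NewBuffer(nil)); err != nil {
			return nil, err
		}
	}

	// The 20-byte trailer read by Checksum is the pack checksum.
	if _, err := s.Checksum(); err != nil {
		return nil, err
	}

	return headers, nil
}
```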
diff --git a/formats/packfile/parser_test.go b/formats/packfile/parser_test.go
index a7959a0..0c07daa 100644
--- a/formats/packfile/parser_test.go
+++ b/formats/packfile/parser_test.go
@@ -3,6 +3,7 @@ package packfile
import (
"bytes"
"encoding/base64"
+ "io/ioutil"
. "gopkg.in/check.v1"
"gopkg.in/src-d/go-git.v4/core"
@@ -15,7 +16,7 @@ var _ = Suite(&ScannerSuite{})
func (s *ScannerSuite) TestHeader(c *C) {
data, _ := base64.StdEncoding.DecodeString(packFileWithEmptyObjects)
- p := NewParser(bytes.NewReader(data))
+ p := NewScanner(bytes.NewReader(data))
version, objects, err := p.Header()
c.Assert(err, IsNil)
c.Assert(version, Equals, VersionSupported)
@@ -25,7 +26,8 @@ func (s *ScannerSuite) TestHeader(c *C) {
func (s *ScannerSuite) TestNextObjectHeader(c *C) {
data, _ := base64.StdEncoding.DecodeString(packFileWithEmptyObjects)
- p := NewParser(bytes.NewReader(data))
+ r := bytes.NewReader(data)
+ p := NewScanner(r)
_, objects, err := p.Header()
c.Assert(err, IsNil)
@@ -39,12 +41,17 @@ func (s *ScannerSuite) TestNextObjectHeader(c *C) {
c.Assert(err, IsNil)
c.Assert(n, Equals, h.Length)
}
+
+ n, err := ioutil.ReadAll(r)
+ c.Assert(err, IsNil)
+ c.Assert(n, HasLen, 20)
}
func (s *ScannerSuite) TestNextObjectHeaderWithOutReadObject(c *C) {
data, _ := base64.StdEncoding.DecodeString(packFileWithEmptyObjects)
- p := NewParser(bytes.NewReader(data))
+ r := bytes.NewReader(data)
+ p := NewScanner(r)
_, objects, err := p.Header()
c.Assert(err, IsNil)
@@ -53,6 +60,13 @@ func (s *ScannerSuite) TestNextObjectHeaderWithOutReadObject(c *C) {
c.Assert(err, IsNil)
c.Assert(*h, DeepEquals, expectedHeaders[i])
}
+
+ err = p.discardObjectIfNeeded()
+ c.Assert(err, IsNil)
+
+ n, err := ioutil.ReadAll(r)
+ c.Assert(err, IsNil)
+ c.Assert(n, HasLen, 20)
}
var expectedHeaders = []ObjectHeader{
diff --git a/references_test.go b/references_test.go
index 3907b25..bb7a1cf 100644
--- a/references_test.go
+++ b/references_test.go
@@ -26,9 +26,9 @@ func (s *ReferencesSuite) SetUpSuite(c *C) {
f, err := os.Open(fix.packfile)
defer f.Close()
c.Assert(err, IsNil)
- r := packfile.NewSeekable(f)
+ r := packfile.NewScanner(f)
d := packfile.NewDecoder(r, s.repos[fix.url].s.ObjectStorage())
- err = d.Decode()
+ _, err = d.Decode()
c.Assert(err, IsNil)
}
}
diff --git a/remote.go b/remote.go
index 2605d39..bfa7bc9 100644
--- a/remote.go
+++ b/remote.go
@@ -161,10 +161,11 @@ func (r *Remote) buildRequest(
}
func (r *Remote) updateObjectStorage(reader io.Reader) error {
- stream := packfile.NewStream(reader)
+ stream := packfile.NewScannerFromReader(reader)
d := packfile.NewDecoder(stream, r.s.ObjectStorage())
- return d.Decode()
+ _, err := d.Decode()
+ return err
}
func (r *Remote) updateLocalReferenceStorage(specs []config.RefSpec, refs []*core.Reference) error {
diff --git a/remote_test.go b/remote_test.go
index 9bafcd1..e19f7b8 100644
--- a/remote_test.go
+++ b/remote_test.go
@@ -14,6 +14,10 @@ type RemoteSuite struct {
var _ = Suite(&RemoteSuite{})
+func (s *RemoteSuite) SetUpSuite(c *C) {
+ s.installMockProtocol(c)
+}
+
func (s *RemoteSuite) TestConnect(c *C) {
r := newRemote(nil, &config.RemoteConfig{Name: "foo", URL: RepositoryFixture})
@@ -35,16 +39,6 @@ func (s *RemoteSuite) TestnewRemoteInvalidSchemaEndpoint(c *C) {
c.Assert(err, NotNil)
}
-/*
-func (s *RemoteSuite) TestNewAuthenticatedRemote(c *C) {
- a := &http.BasicAuth{}
- r, err := NewAuthenticatedRemote("foo", RepositoryFixture, a)
- c.Assert(err, IsNil)
- c.Assert(r.Name, Equals, "foo")
- c.Assert(r.Endpoint.String(), Equals, RepositoryFixture)
- c.Assert(r.Auth, Equals, a)
-}*/
-
func (s *RemoteSuite) TestInfo(c *C) {
r := newRemote(nil, &config.RemoteConfig{Name: "foo", URL: RepositoryFixture})
r.upSrv = &MockGitUploadPackService{}
diff --git a/repository.go b/repository.go
index 049c7db..5f5527e 100644
--- a/repository.go
+++ b/repository.go
@@ -33,7 +33,7 @@ func NewMemoryRepository() *Repository {
// based on a fs.OS, if you want to use a custom one you need to use the function
// NewRepository and build you filesystem.Storage
func NewFilesystemRepository(path string) (*Repository, error) {
- s, err := filesystem.NewStorage(fs.NewOS(), path)
+ s, err := filesystem.NewStorage(fs.NewOS(path))
if err != nil {
return nil, err
}
diff --git a/storage/filesystem/internal/dotgit/dotgit_test.go b/storage/filesystem/internal/dotgit/dotgit_test.go
index 3f0a0eb..0097821 100644
--- a/storage/filesystem/internal/dotgit/dotgit_test.go
+++ b/storage/filesystem/internal/dotgit/dotgit_test.go
@@ -54,7 +54,7 @@ func (s *SuiteDotGit) SetUpSuite(c *C) {
path, err := tgz.Extract(init.tgz)
c.Assert(err, IsNil, com)
- s.fixtures[init.name] = fs.NewOSClient(filepath.Join(path, ".git"))
+ s.fixtures[init.name] = fs.NewOS(filepath.Join(path, ".git"))
}
}
@@ -233,7 +233,7 @@ func (s *SuiteDotGit) TestNewObjectPack(c *C) {
log.Fatal(err)
}
- dot := New(fs.NewOSClient(dir))
+ dot := New(fs.NewOS(dir))
r, err := os.Open("../../../../formats/packfile/fixtures/git-fixture.ofs-delta")
c.Assert(err, IsNil)
diff --git a/storage/filesystem/internal/index/index.go b/storage/filesystem/internal/index/index.go
index 70b77c5..2fd2a55 100644
--- a/storage/filesystem/internal/index/index.go
+++ b/storage/filesystem/internal/index/index.go
@@ -1,7 +1,6 @@
package index
import (
- "fmt"
"io"
"gopkg.in/src-d/go-git.v4/core"
@@ -13,6 +12,10 @@ import (
// Objects are identified by their hash.
type Index map[core.Hash]int64
+func New() Index {
+ return make(Index)
+}
+
// Decode decodes a idxfile into the Index
func (i *Index) Decode(r io.Reader) error {
d := idxfile.NewDecoder(r)
@@ -30,23 +33,16 @@ func (i *Index) Decode(r io.Reader) error {
// NewFrompackfile returns a new index from a packfile reader.
func NewFromPackfile(r io.Reader) (Index, core.Hash, error) {
- index := make(Index)
+ p := packfile.NewScannerFromReader(r)
+ d := packfile.NewDecoder(p, nil)
- p := packfile.NewParser(r)
- _, count, err := p.Header()
+ checksum, err := d.Decode()
if err != nil {
return nil, core.ZeroHash, err
}
- for i := 0; i < int(count); i++ {
- h, err := p.NextObjectHeader()
- if err = index.Set(core.ZeroHash, h.Offset); err != nil {
- return nil, core.ZeroHash, err
- }
- }
-
- hash, err := p.Checksum()
- return index, hash, err
+ index := Index(d.Index())
+ return index, checksum, p.Close()
}
// Get returns the offset that an object has the packfile.
@@ -61,9 +57,9 @@ func (i Index) Get(h core.Hash) (int64, error) {
// Set adds a new hash-offset pair to the index, or substitutes an existing one.
func (i Index) Set(h core.Hash, o int64) error {
- if _, ok := i[h]; ok {
+ /*if _, ok := i[h]; ok {
return fmt.Errorf("index.Set failed: duplicated key: %s", h)
- }
+ }*/
i[h] = o
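With NewFromIdx gone, idx files are read by building an empty Index with New and calling Decode on it, while NewFromPackfile now also returns the pack checksum. A small sketch of both paths; the helpers are illustrative and assume they live inside the index package:

```go
package index

import "os"

// fromPackfile builds an Index by decoding the whole packfile; the second
// result of NewFromPackfile is the pack checksum. Illustrative helper only.
func fromPackfile(path string) (Index, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	idx, _, err := NewFromPackfile(f)
	return idx, err
}

// fromIdx replaces the removed NewFromIdx: build an empty Index with New and
// Decode the .idx file into it. Illustrative helper only.
func fromIdx(path string) (Index, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	idx := New()
	return idx, idx.Decode(f)
}
```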
diff --git a/storage/filesystem/internal/index/index_test.go b/storage/filesystem/internal/index/index_test.go
index 4ddfc25..5261012 100644
--- a/storage/filesystem/internal/index/index_test.go
+++ b/storage/filesystem/internal/index/index_test.go
@@ -1,6 +1,7 @@
package index
import (
+ "io/ioutil"
"os"
"testing"
@@ -16,6 +17,22 @@ type SuiteIndex struct{}
var _ = Suite(&SuiteIndex{})
+func (s *SuiteIndex) TestNewFromPackfile(c *C) {
+ path := "../../../../formats/packfile/fixtures/spinnaker-spinnaker.pack"
+
+ pack, err := os.Open(path)
+ c.Assert(err, IsNil)
+
+ _, checksum, err := NewFromPackfile(pack)
+ c.Assert(err, IsNil)
+
+ leftover, err := ioutil.ReadAll(pack)
+ c.Assert(err, IsNil)
+ c.Assert(leftover, HasLen, 0)
+
+ c.Assert(checksum.String(), Equals, "da4c488bbbdc4e599c7c97d01753bb3144fccd9c")
+}
+
func (s *SuiteIndex) TestNewFromIdx(c *C) {
for i, test := range [...]struct {
idxPath string
@@ -33,7 +50,9 @@ func (s *SuiteIndex) TestNewFromIdx(c *C) {
idx, err := os.Open(test.idxPath)
c.Assert(err, IsNil, com)
- index, err := NewFromIdx(idx)
+ index := New()
+ err = index.Decode(idx)
+
if test.errRegexp != "" {
c.Assert(err, ErrorMatches, test.errRegexp, com)
} else {
@@ -78,7 +97,9 @@ func (s *SuiteIndex) TestGet(c *C) {
idx, err := os.Open(test.idx)
c.Assert(err, IsNil, com)
- index, err := NewFromIdx(idx)
+ index := New()
+ err = index.Decode(idx)
+
c.Assert(err, IsNil, com)
obt, err := index.Get(test.hash)
@@ -106,7 +127,9 @@ func (s *SuiteIndex) BenchmarkFromIdx(c *C) {
for i := 0; i < c.N; i++ {
c.StartTimer()
- index, _ := NewFromIdx(idx)
+ index := New()
+ index.Decode(idx)
+
c.StopTimer()
indexes = append(indexes, index)
}
diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go
index aef30ec..c6d5359 100644
--- a/storage/filesystem/object.go
+++ b/storage/filesystem/object.go
@@ -3,7 +3,6 @@ package filesystem
import (
"fmt"
"io"
- "os"
"gopkg.in/src-d/go-git.v4/core"
"gopkg.in/src-d/go-git.v4/formats/objfile"
@@ -94,7 +93,7 @@ func (s *ObjectStorage) getFromUnpacked(t core.ObjectType, h core.Hash) (obj cor
// Get returns the object with the given hash, by searching for it in
// the packfile.
-func (s *ObjectStorage) getFromPackfile(t core.ObjectType, h core.Hash) (obj core.Object, err error) {
+func (s *ObjectStorage) getFromPackfile(t core.ObjectType, h core.Hash) (core.Object, error) {
offset, err := s.index.Get(h)
if err != nil {
return nil, err
@@ -117,23 +116,18 @@ func (s *ObjectStorage) getFromPackfile(t core.ObjectType, h core.Hash) (obj cor
}
}()
- _, err = f.Seek(offset, os.SEEK_SET)
- if err != nil {
- return nil, err
- }
-
- r := packfile.NewSeekable(f)
- r.HashToOffset = map[core.Hash]int64(s.index)
- p := packfile.NewParser(r)
+ p := packfile.NewScanner(f)
+ d := packfile.NewDecoder(p, nil)
- obj = s.NewObject()
- err = p.FillObject(obj)
+ obj, err := d.ReadObjectAt(offset)
if err != nil {
return nil, err
}
+
if core.AnyObject != t && obj.Type() != t {
return nil, core.ErrObjectNotFound
}
+
return obj, nil
}
@@ -213,7 +207,8 @@ func buildIndexFromPackfile(dir *dotgit.DotGit) (index.Index, error) {
}
}()
- return index.NewFromPackfile(f)
+ index, _, err := index.NewFromPackfile(f)
+ return index, err
}
func buildIndexFromIdxfile(fs fs.Filesystem, path string) (index.Index, error) {
@@ -229,7 +224,8 @@ func buildIndexFromIdxfile(fs fs.Filesystem, path string) (index.Index, error) {
}
}()
- return index.NewFromIdx(f)
+ i := index.New()
+ return i, i.Decode(f)
}
func (o *ObjectStorage) Begin() core.TxObjectStorage {
diff --git a/storage/filesystem/object_test.go b/storage/filesystem/object_test.go
index e9cfa4c..142bad9 100644
--- a/storage/filesystem/object_test.go
+++ b/storage/filesystem/object_test.go
@@ -91,7 +91,7 @@ func (s *FsSuite) TestHashNotFound(c *C) {
func (s *FsSuite) newObjectStorage(c *C, fixtureName string) core.ObjectStorage {
path := fixture(fixtureName, c)
- fs := fs.NewOSClient(filepath.Join(path, ".git/"))
+ fs := fs.NewOS(filepath.Join(path, ".git/"))
store, err := NewStorage(fs)
c.Assert(err, IsNil)
@@ -110,7 +110,7 @@ func (s *FsSuite) TestGetCompareWithMemoryStorage(c *C) {
i, fixId, path)
gitPath := filepath.Join(path, ".git/")
- fs := fs.NewOSClient(gitPath)
+ fs := fs.NewOS(gitPath)
memSto, err := memStorageFromGitDir(fs, gitPath)
c.Assert(err, IsNil, com)
@@ -142,10 +142,10 @@ func memStorageFromGitDir(fs fs.Filesystem, path string) (core.ObjectStorage, er
}
sto := memory.NewStorage()
- r := packfile.NewStream(f)
+ r := packfile.NewScanner(f)
d := packfile.NewDecoder(r, sto.ObjectStorage())
- err = d.Decode()
+ _, err = d.Decode()
if err != nil {
return nil, err
}
@@ -241,7 +241,7 @@ func (s *FsSuite) TestIterCompareWithMemoryStorage(c *C) {
i, fixId, path)
gitPath := filepath.Join(path, ".git/")
- fs := fs.NewOSClient(gitPath)
+ fs := fs.NewOS(gitPath)
memSto, err := memStorageFromDirPath(fs, gitPath)
c.Assert(err, IsNil, com)
@@ -287,9 +287,9 @@ func memStorageFromDirPath(fs fs.Filesystem, path string) (core.ObjectStorage, e
sto := memory.NewStorage()
- r := packfile.NewStream(f)
+ r := packfile.NewScanner(f)
d := packfile.NewDecoder(r, sto.ObjectStorage())
- err = d.Decode()
+ _, err = d.Decode()
if err != nil {
return nil, err
}
diff --git a/storage/memory/storage.go b/storage/memory/storage.go
index 216336c..1bf99fc 100644
--- a/storage/memory/storage.go
+++ b/storage/memory/storage.go
@@ -130,6 +130,7 @@ func (o *ObjectStorage) Set(obj core.Object) (core.Hash, error) {
case core.TagObject:
o.Tags[h] = o.Objects[h]
default:
+ fmt.Println(obj.Type())
return h, ErrUnsupportedObjectType
}
diff --git a/tree_diff_test.go b/tree_diff_test.go
index e177c26..9a0b04c 100644
--- a/tree_diff_test.go
+++ b/tree_diff_test.go
@@ -48,9 +48,9 @@ func (s *DiffTreeSuite) SetUpSuite(c *C) {
f, err := os.Open(fixRepo.packfile)
c.Assert(err, IsNil)
- r := packfile.NewSeekable(f)
+ r := packfile.NewScanner(f)
d := packfile.NewDecoder(r, s.repos[fixRepo.url].s.ObjectStorage())
- err = d.Decode()
+ _, err = d.Decode()
c.Assert(err, IsNil)
c.Assert(f.Close(), IsNil)
diff --git a/utils/fs/os.go b/utils/fs/os.go
index 40942ba..1ae4204 100644
--- a/utils/fs/os.go
+++ b/utils/fs/os.go
@@ -7,26 +7,21 @@ import (
"path/filepath"
)
-// NewOS returns a new OS.
-func NewOS() Filesystem {
- return &OSClient{}
-}
-
// OSClient a filesystem based on OSClient
-type OSClient struct {
+type OS struct {
RootDir string
}
// NewOSClient returns a new OSClient
-func NewOSClient(rootDir string) *OSClient {
- return &OSClient{
+func NewOS(rootDir string) *OS {
+ return &OS{
RootDir: rootDir,
}
}
// Create creates a new GlusterFSFile
-func (c *OSClient) Create(filename string) (File, error) {
- fullpath := path.Join(c.RootDir, filename)
+func (fs *OS) Create(filename string) (File, error) {
+ fullpath := path.Join(fs.RootDir, filename)
dir := filepath.Dir(fullpath)
if dir != "." {
@@ -48,8 +43,8 @@ func (c *OSClient) Create(filename string) (File, error) {
// ReadDir returns the filesystem info for all the archives under the specified
// path.
-func (c *OSClient) ReadDir(path string) ([]FileInfo, error) {
- fullpath := c.Join(c.RootDir, path)
+func (fs *OS) ReadDir(path string) ([]FileInfo, error) {
+ fullpath := fs.Join(fs.RootDir, path)
l, err := ioutil.ReadDir(fullpath)
if err != nil {
@@ -64,20 +59,20 @@ func (c *OSClient) ReadDir(path string) ([]FileInfo, error) {
return s, nil
}
-func (c *OSClient) Rename(from, to string) error {
+func (fs *OS) Rename(from, to string) error {
if !filepath.IsAbs(from) {
- from = c.Join(c.RootDir, from)
+ from = fs.Join(fs.RootDir, from)
}
if !filepath.IsAbs(to) {
- to = c.Join(c.RootDir, to)
+ to = fs.Join(fs.RootDir, to)
}
return os.Rename(from, to)
}
-func (c *OSClient) Open(filename string) (File, error) {
- fullpath := c.Join(c.RootDir, filename)
+func (fs *OS) Open(filename string) (File, error) {
+ fullpath := fs.Join(fs.RootDir, filename)
f, err := os.Open(fullpath)
if err != nil {
@@ -90,22 +85,22 @@ func (c *OSClient) Open(filename string) (File, error) {
}, nil
}
-func (c *OSClient) Stat(filename string) (FileInfo, error) {
- fullpath := c.Join(c.RootDir, filename)
+func (fs *OS) Stat(filename string) (FileInfo, error) {
+ fullpath := fs.Join(fs.RootDir, filename)
return os.Stat(fullpath)
}
// Join joins the specified elements using the filesystem separator.
-func (c *OSClient) Join(elem ...string) string {
+func (fs *OS) Join(elem ...string) string {
return filepath.Join(elem...)
}
-func (c *OSClient) Dir(path string) Filesystem {
- return NewOSClient(c.Join(c.RootDir, path))
+func (fs *OS) Dir(path string) Filesystem {
+ return NewOS(fs.Join(fs.RootDir, path))
}
-func (c *OSClient) Base() string {
- return c.RootDir
+func (fs *OS) Base() string {
+ return fs.RootDir
}
type OSFile struct {
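The two constructors NewOS and NewOSClient collapse into a single NewOS(rootDir) returning the renamed OS type, which is why NewFilesystemRepository above now passes the path directly. A minimal usage sketch (the directory and the Stat call are illustrative):

```go
package example

import "gopkg.in/src-d/go-git.v4/utils/fs"

// openGitDir shows the renamed constructor: the root directory is now an
// argument of NewOS instead of being set after construction. A sketch only;
// the "config" lookup is illustrative.
func openGitDir(path string) (*fs.OS, error) {
	osFS := fs.NewOS(path) // was fs.NewOSClient(path) / fs.NewOS()

	// All paths are resolved relative to the root given above.
	if _, err := osFS.Stat("config"); err != nil {
		return nil, err
	}

	return osFS, nil
}
```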
diff --git a/utils/fs/os_test.go b/utils/fs/os_test.go
index c148265..acc6bdd 100644
--- a/utils/fs/os_test.go
+++ b/utils/fs/os_test.go
@@ -16,16 +16,16 @@ var _ = Suite(&WritersSuite{})
func (s *WritersSuite) TestOSClient_Create(c *C) {
path := getTempDir()
- client := NewOSClient(path)
+ client := NewOS(path)
f, err := client.Create("foo")
c.Assert(err, IsNil)
- c.Assert(f.(*OSFile).file.Name(), Equals, f.GetFilename())
+ c.Assert(f.(*OSFile).file.Name(), Equals, f.Filename())
}
func (s *WritersSuite) TestOSClient_Write(c *C) {
path := getTempDir()
- client := NewOSClient(path)
+ client := NewOS(path)
f, err := client.Create("foo")
c.Assert(err, IsNil)
@@ -39,14 +39,14 @@ func (s *WritersSuite) TestOSClient_Write(c *C) {
func (s *WritersSuite) TestOSClient_Close(c *C) {
path := getTempDir()
- client := NewOSClient(path)
+ client := NewOS(path)
f, err := client.Create("foo")
c.Assert(err, IsNil)
f.Write([]byte("foo"))
c.Assert(f.Close(), IsNil)
- wrote, _ := ioutil.ReadFile(f.GetFilename())
+ wrote, _ := ioutil.ReadFile(f.Filename())
c.Assert(wrote, DeepEquals, []byte("foo"))
}