path: root/plumbing/format/objfile
author     Máximo Cuadros <mcuadros@gmail.com>   2016-11-08 23:46:38 +0100
committer  GitHub <noreply@github.com>           2016-11-08 23:46:38 +0100
commit     ac095bb12c4d29722b60ba9f20590fa7cfa6bc7d (patch)
tree       223f36f336ba3414b1e45cac8af6c4744a5d7ef6 /plumbing/format/objfile
parent     e523701393598f4fa241dd407af9ff8925507a1a (diff)
download   go-git-ac095bb12c4d29722b60ba9f20590fa7cfa6bc7d.tar.gz
new plumbing package (#118)
* plumbing: now core was renamed to plumbing, and formats and clients moved inside
Diffstat (limited to 'plumbing/format/objfile')
-rw-r--r--  plumbing/format/objfile/common_test.go    69
-rw-r--r--  plumbing/format/objfile/reader.go         118
-rw-r--r--  plumbing/format/objfile/reader_test.go     67
-rw-r--r--  plumbing/format/objfile/writer.go         109
-rw-r--r--  plumbing/format/objfile/writer_test.go     80
5 files changed, 443 insertions, 0 deletions
diff --git a/plumbing/format/objfile/common_test.go b/plumbing/format/objfile/common_test.go
new file mode 100644
index 0000000..7c8b75c
--- /dev/null
+++ b/plumbing/format/objfile/common_test.go
@@ -0,0 +1,69 @@
+package objfile
+
+import (
+ "encoding/base64"
+ "testing"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/src-d/go-git.v4/plumbing"
+)
+
+type objfileFixture struct {
+ hash string // hash of data
+ t plumbing.ObjectType // object type
+ content string // base64-encoded content
+ data string // base64-encoded objfile data
+}
+
+var objfileFixtures = []objfileFixture{
+ {
+ "e69de29bb2d1d6434b8b29ae775ad8c2e48c5391",
+ plumbing.BlobObject,
+ base64.StdEncoding.EncodeToString([]byte("")),
+ "eAFLyslPUjBgAAAJsAHw",
+ },
+ {
+ "a8a940627d132695a9769df883f85992f0ff4a43",
+ plumbing.BlobObject,
+ base64.StdEncoding.EncodeToString([]byte("this is a test")),
+ "eAFLyslPUjA0YSjJyCxWAKJEhZLU4hIAUDYHOg==",
+ },
+ {
+ "4dc2174801ac4a3d36886210fd086fbe134cf7b2",
+ plumbing.BlobObject,
+ base64.StdEncoding.EncodeToString([]byte("this\nis\n\n\na\nmultiline\n\ntest.\n")),
+ "eAFLyslPUjCyZCjJyCzmAiIurkSu3NKcksyczLxULq6S1OISPS4A1I8LMQ==",
+ },
+ {
+ "13e6f47dd57798bfdc728d91f5c6d7f40c5bb5fc",
+ plumbing.BlobObject,
+ base64.StdEncoding.EncodeToString([]byte("this tests\r\nCRLF\r\nencoded files.\r\n")),
+ "eAFLyslPUjA2YSjJyCxWKEktLinm5XIO8nHj5UrNS85PSU1RSMvMSS3W4+UCABp3DNE=",
+ },
+ {
+ "72a7bc4667ab068e954172437b993d9fbaa137cb",
+ plumbing.BlobObject,
+ base64.StdEncoding.EncodeToString([]byte("test@example.com")),
+ "eAFLyslPUjA0YyhJLS5xSK1IzC3ISdVLzs8FAGVtCIA=",
+ },
+ {
+ "bb2b40e85ec0455d1de72daff71583f0dd72a33f",
+ plumbing.BlobObject,
+ base64.StdEncoding.EncodeToString([]byte("package main\r\n\r\nimport (\r\n\t\"fmt\"\r\n\t\"io\"\r\n\t\"os\"\r\n\r\n\t\"gopkg.in/src-d/go-git.v3\"\r\n)\r\n\r\nfunc main() {\r\n\tfmt.Printf(\"Retrieving %q ...\\n\", os.Args[2])\r\n\tr, err := git.NewRepository(os.Args[2], nil)\r\n\tif err != nil {\r\n\t\tpanic(err)\r\n\t}\r\n\r\n\tif err := r.Pull(\"origin\", \"refs/heads/master\"); err != nil {\r\n\t\tpanic(err)\r\n\t}\r\n\r\n\tdumpCommits(r)\r\n}\r\n\r\nfunc dumpCommits(r *git.Repository) {\r\n\titer := r.Commits()\r\n\tdefer iter.Close()\r\n\r\n\tfor {\r\n\t\tcommit, err := iter.Next()\r\n\t\tif err != nil {\r\n\t\t\tif err == io.EOF {\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\r\n\t\t\tpanic(err)\r\n\t\t}\r\n\r\n\t\tfmt.Println(commit)\r\n\t}\r\n}\r\n")),
+ "eAGNUU1LAzEU9JpC/0NcEFJps2ARQdmDFD3W0qt6SHez8dHdZH1JqyL+d/Oy/aDgQVh47LzJTGayatyKX99MzzpVrpXRvFVgh4PhANrOYeBiOGBZ3YaMJrg0nI+D/o3r1kaCzT2Wkyo3bmIgyO00rkfEqDe2TIJixL/jgagjFwg21CJb6oCgt2ANv3jnUsoXm4258/IejX++eo0CDMdcI/LbgpPuXH8sdec8BIdf4sgccwsN0aFO9POCgGTIOmWhFFGE9j/p1jtWFEW52DSNyByCAXLPUNc+f9Oq8nmrfNCYje7+o1lt2m7m2haCF2SVnFL6kw2/pBzHEH0rEH0oI8q9BF220nWEaSdnjfNaRDDCtcM+WZnsDgUl4lx/BuKxv6rYY0XBwcmHp8deh7EVarWmQ7uC2Glre/TweI0VvTk5xaTx+wWX66Gs",
+ },
+ {
+ "e94db0f9ffca44dc7bade6a3591f544183395a7c",
+ plumbing.TreeObject,
+ "MTAwNjQ0IFRlc3QgMS50eHQAqKlAYn0TJpWpdp34g/hZkvD/SkMxMDA2NDQgVGVzdCAyLnR4dABNwhdIAaxKPTaIYhD9CG++E0z3sjEwMDY0NCBUZXN0IDMudHh0ABPm9H3Vd5i/3HKNkfXG1/QMW7X8MTAwNjQ0IFRlc3QgNC50eHQAcqe8RmerBo6VQXJDe5k9n7qhN8sxMDA2NDQgVGVzdCA1LnR4dAC7K0DoXsBFXR3nLa/3FYPw3XKjPw==",
+ "eAErKUpNVTC0NGAwNDAwMzFRCEktLlEw1CupKGFYsdIhqVZYberKsrk/mn9ETvrw38sZWZURWJXvIXEPxjVetmYdSQJ/OfL3Cft834SsyhisSvjZl9qr5TP23ynqnfj12PUvPNFb/yCrMgGrKlq+xy19NVvfVMci5+qZtvN3LTQ/jazKFKxqt7bDi7gDrrGyz3XXfxdt/nC3aLE9AA2STmk=",
+ },
+ {
+ "9d7f8a56eaf92469dee8a856e716a03387ddb076",
+ plumbing.CommitObject,
+ "dHJlZSBlOTRkYjBmOWZmY2E0NGRjN2JhZGU2YTM1OTFmNTQ0MTgzMzk1YTdjCmF1dGhvciBKb3NodWEgU2pvZGluZyA8am9zaHVhLnNqb2RpbmdAc2NqYWxsaWFuY2UuY29tPiAxNDU2NTMxNTgzIC0wODAwCmNvbW1pdHRlciBKb3NodWEgU2pvZGluZyA8am9zaHVhLnNqb2RpbmdAc2NqYWxsaWFuY2UuY29tPiAxNDU2NTMxNTgzIC0wODAwCgpUZXN0IENvbW1pdAo=",
+ "eAGtjksOgjAUAF33FO8CktZ+aBNjTNy51Qs8Xl8FAjSh5f4SvILLmcVkKM/zUOEi3amuzMDBxE6mkBKhMZHaDiM71DaoZI1RXutgsSWBW+3zCs9c+g3hNeY4LB+4jgc35cf3QiNO04ALcUN5voEy1lmtrNdwll5Ksdt9oPIfUuLNpcLjCIov3ApFmQ==",
+ },
+}
+
+func Test(t *testing.T) { TestingT(t) }
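
The base64 "data" fields above are plain zlib streams laid out as "<type> <size>\x00<content>", which is exactly what the reader and writer below parse and produce. A standalone sketch (standard library only, illustrative rather than part of this change) that inflates the "this is a test" fixture to make that layout visible:

    package main

    import (
        "bytes"
        "compress/zlib"
        "encoding/base64"
        "fmt"
        "io/ioutil"
    )

    func main() {
        // objfile data of the "this is a test" blob fixture above.
        data, _ := base64.StdEncoding.DecodeString("eAFLyslPUjA0YSjJyCxWAKJEhZLU4hIAUDYHOg==")

        zr, err := zlib.NewReader(bytes.NewReader(data))
        if err != nil {
            panic(err)
        }
        defer zr.Close()

        raw, err := ioutil.ReadAll(zr)
        if err != nil {
            panic(err)
        }

        // Prints "blob 14\x00this is a test": type, space, size, NUL, content.
        fmt.Printf("%q\n", raw)
    }
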
diff --git a/plumbing/format/objfile/reader.go b/plumbing/format/objfile/reader.go
new file mode 100644
index 0000000..e7e119c
--- /dev/null
+++ b/plumbing/format/objfile/reader.go
@@ -0,0 +1,118 @@
+package objfile
+
+import (
+ "compress/zlib"
+ "errors"
+ "io"
+ "strconv"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
+)
+
+var (
+ ErrClosed = errors.New("objfile: already closed")
+ ErrHeader = errors.New("objfile: invalid header")
+ ErrNegativeSize = errors.New("objfile: negative object size")
+)
+
+// Reader reads and decodes compressed objfile data from a provided io.Reader.
+// Reader implements io.ReadCloser. Close should be called when finished with
+// the Reader. Close will not close the underlying io.Reader.
+type Reader struct {
+ multi io.Reader
+ zlib io.ReadCloser
+ hasher plumbing.Hasher
+}
+
+// NewReader returns a new Reader reading from r.
+func NewReader(r io.Reader) (*Reader, error) {
+ zlib, err := zlib.NewReader(r)
+ if err != nil {
+ return nil, packfile.ErrZLib.AddDetails(err.Error())
+ }
+
+ return &Reader{
+ zlib: zlib,
+ }, nil
+}
+
+// Header reads the type and size of the object and prepares the reader for
+// reading the object's contents.
+func (r *Reader) Header() (t plumbing.ObjectType, size int64, err error) {
+ var raw []byte
+ raw, err = r.readUntil(' ')
+ if err != nil {
+ return
+ }
+
+ t, err = plumbing.ParseObjectType(string(raw))
+ if err != nil {
+ return
+ }
+
+ raw, err = r.readUntil(0)
+ if err != nil {
+ return
+ }
+
+ size, err = strconv.ParseInt(string(raw), 10, 64)
+ if err != nil {
+ err = ErrHeader
+ return
+ }
+
+ defer r.prepareForRead(t, size)
+ return
+}
+
+// readUntil reads one byte at a time from the zlib stream until it encounters
+// delim or an error.
+func (r *Reader) readUntil(delim byte) ([]byte, error) {
+ var buf [1]byte
+ value := make([]byte, 0, 16)
+ for {
+ if n, err := r.zlib.Read(buf[:]); err != nil && (err != io.EOF || n == 0) {
+ if err == io.EOF {
+ return nil, ErrHeader
+ }
+ return nil, err
+ }
+
+ if buf[0] == delim {
+ return value, nil
+ }
+
+ value = append(value, buf[0])
+ }
+}
+
+func (r *Reader) prepareForRead(t plumbing.ObjectType, size int64) {
+ r.hasher = plumbing.NewHasher(t, size)
+ r.multi = io.TeeReader(r.zlib, r.hasher)
+}
+
+// Read reads len(p) bytes into p from the object data stream. It returns
+// the number of bytes read (0 <= n <= len(p)) and any error encountered. Even
+// if Read returns n < len(p), it may use all of p as scratch space during the
+// call.
+//
+// If Read encounters the end of the data stream it will return err == io.EOF,
+// either in the current call if n > 0 or in a subsequent call.
+func (r *Reader) Read(p []byte) (n int, err error) {
+ return r.multi.Read(p)
+}
+
+// Hash returns the hash of the object data stream that has been read so far.
+func (r *Reader) Hash() plumbing.Hash {
+ return r.hasher.Sum()
+}
+
+// Close releases any resources consumed by the Reader. Calling Close does not
+// close the wrapped io.Reader originally passed to NewReader.
+func (r *Reader) Close() error {
+ if err := r.zlib.Close(); err != nil {
+ return err
+ }
+
+ return nil
+}
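
For reference, a hedged sketch of the intended call sequence against this Reader (NewReader, Header, read to EOF, Hash, Close), fed with the "this is a test" fixture from common_test.go; the error handling and printing are illustrative:

    package main

    import (
        "bytes"
        "encoding/base64"
        "fmt"
        "io/ioutil"

        "gopkg.in/src-d/go-git.v4/plumbing"
        "gopkg.in/src-d/go-git.v4/plumbing/format/objfile"
    )

    func main() {
        // objfile data and expected hash of the "this is a test" blob fixture.
        data, _ := base64.StdEncoding.DecodeString("eAFLyslPUjA0YSjJyCxWAKJEhZLU4hIAUDYHOg==")
        expected := plumbing.NewHash("a8a940627d132695a9769df883f85992f0ff4a43")

        r, err := objfile.NewReader(bytes.NewReader(data))
        if err != nil {
            panic(err)
        }

        t, size, err := r.Header() // plumbing.BlobObject, 14
        if err != nil {
            panic(err)
        }

        // Drain the stream so the internal hasher sees every content byte.
        content, err := ioutil.ReadAll(r)
        if err != nil {
            panic(err)
        }

        fmt.Println(t, size, string(content), r.Hash() == expected)
        _ = r.Close()
    }
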
diff --git a/plumbing/format/objfile/reader_test.go b/plumbing/format/objfile/reader_test.go
new file mode 100644
index 0000000..715792d
--- /dev/null
+++ b/plumbing/format/objfile/reader_test.go
@@ -0,0 +1,67 @@
+package objfile
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "io/ioutil"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/src-d/go-git.v4/plumbing"
+)
+
+type SuiteReader struct{}
+
+var _ = Suite(&SuiteReader{})
+
+func (s *SuiteReader) TestReadObjfile(c *C) {
+ for k, fixture := range objfileFixtures {
+ com := fmt.Sprintf("test %d: ", k)
+ hash := plumbing.NewHash(fixture.hash)
+ content, _ := base64.StdEncoding.DecodeString(fixture.content)
+ data, _ := base64.StdEncoding.DecodeString(fixture.data)
+
+ testReader(c, bytes.NewReader(data), hash, fixture.t, content, com)
+ }
+}
+
+func testReader(c *C, source io.Reader, hash plumbing.Hash, t plumbing.ObjectType, content []byte, com string) {
+ r, err := NewReader(source)
+ c.Assert(err, IsNil)
+
+ typ, size, err := r.Header()
+ c.Assert(err, IsNil)
+ c.Assert(typ, Equals, t)
+ c.Assert(content, HasLen, int(size))
+
+ rc, err := ioutil.ReadAll(r)
+ c.Assert(err, IsNil)
+ c.Assert(rc, DeepEquals, content, Commentf("%scontent=%s, expected=%s", com, base64.StdEncoding.EncodeToString(rc), base64.StdEncoding.EncodeToString(content)))
+
+ c.Assert(r.Hash(), Equals, hash) // Test Hash() before close
+ c.Assert(r.Close(), IsNil)
+
+}
+
+func (s *SuiteReader) TestReadEmptyObjfile(c *C) {
+ source := bytes.NewReader([]byte{})
+ _, err := NewReader(source)
+ c.Assert(err, NotNil)
+}
+
+func (s *SuiteReader) TestReadGarbage(c *C) {
+ source := bytes.NewReader([]byte("!@#$RO!@NROSADfinq@o#irn@oirfn"))
+ _, err := NewReader(source)
+ c.Assert(err, NotNil)
+}
+
+func (s *SuiteReader) TestReadCorruptZLib(c *C) {
+ data, _ := base64.StdEncoding.DecodeString("eAFLysaalPUjBgAAAJsAHw")
+ source := bytes.NewReader(data)
+ r, err := NewReader(source)
+ c.Assert(err, IsNil)
+
+ _, _, err = r.Header()
+ c.Assert(err, NotNil)
+}
diff --git a/plumbing/format/objfile/writer.go b/plumbing/format/objfile/writer.go
new file mode 100644
index 0000000..44563d2
--- /dev/null
+++ b/plumbing/format/objfile/writer.go
@@ -0,0 +1,109 @@
+package objfile
+
+import (
+ "compress/zlib"
+ "errors"
+ "io"
+ "strconv"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+)
+
+var (
+ ErrOverflow = errors.New("objfile: declared data length exceeded (overflow)")
+)
+
+// Writer writes and encodes data in compressed objfile format to a provided
+// io.Writer. Close should be called when finished with the Writer. Close will
+// not close the underlying io.Writer.
+type Writer struct {
+ raw io.Writer
+ zlib io.WriteCloser
+ hasher plumbing.Hasher
+ multi io.Writer
+
+ closed bool
+ pending int64 // number of unwritten bytes
+}
+
+// NewWriter returns a new Writer writing to w.
+//
+// The returned Writer implements io.WriteCloser. Close should be called when
+// finished with the Writer. Close will not close the underlying io.Writer.
+func NewWriter(w io.Writer) *Writer {
+ return &Writer{
+ raw: w,
+ zlib: zlib.NewWriter(w),
+ }
+}
+
+// WriteHeader writes the type and the size and prepares to accept the object's
+// contents. If an invalid t is provided, plumbing.ErrInvalidType is returned. If a
+// negative size is provided, ErrNegativeSize is returned.
+func (w *Writer) WriteHeader(t plumbing.ObjectType, size int64) error {
+ if !t.Valid() {
+ return plumbing.ErrInvalidType
+ }
+ if size < 0 {
+ return ErrNegativeSize
+ }
+
+ b := t.Bytes()
+ b = append(b, ' ')
+ b = append(b, []byte(strconv.FormatInt(size, 10))...)
+ b = append(b, 0)
+
+ defer w.prepareForWrite(t, size)
+ _, err := w.zlib.Write(b)
+
+ return err
+}
+
+func (w *Writer) prepareForWrite(t plumbing.ObjectType, size int64) {
+ w.pending = size
+
+ w.hasher = plumbing.NewHasher(t, size)
+ w.multi = io.MultiWriter(w.zlib, w.hasher)
+}
+
+// Write writes the object's contents. Write returns the error ErrOverflow if
+// more than size bytes are written after WriteHeader.
+func (w *Writer) Write(p []byte) (n int, err error) {
+ if w.closed {
+ return 0, ErrClosed
+ }
+
+ overwrite := false
+ if int64(len(p)) > w.pending {
+ p = p[0:w.pending]
+ overwrite = true
+ }
+
+ n, err = w.multi.Write(p)
+ w.pending -= int64(n)
+ if err == nil && overwrite {
+ err = ErrOverflow
+ return
+ }
+
+ return
+}
+
+// Hash returns the hash of the object data stream that has been written so far.
+// It can be called before or after Close.
+func (w *Writer) Hash() plumbing.Hash {
+ return w.hasher.Sum() // Not yet closed, return hash of data written so far
+}
+
+// Close releases any resources consumed by the Writer.
+//
+// Calling Close does not close the wrapped io.Writer originally passed to
+// NewWriter.
+func (w *Writer) Close() error {
+ if err := w.zlib.Close(); err != nil {
+ return err
+ }
+
+ w.closed = true
+ return nil
+}
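
And a matching sketch of using the Writer on its own to compute a loose-object hash without keeping the compressed bytes; writing to ioutil.Discard is an illustrative choice, the calls themselves follow the API above:

    package main

    import (
        "fmt"
        "io/ioutil"

        "gopkg.in/src-d/go-git.v4/plumbing"
        "gopkg.in/src-d/go-git.v4/plumbing/format/objfile"
    )

    func main() {
        content := []byte("this is a test")

        // The compressed output is discarded; only the hash of the
        // "<type> <size>\x00<content>" stream is of interest here.
        w := objfile.NewWriter(ioutil.Discard)
        if err := w.WriteHeader(plumbing.BlobObject, int64(len(content))); err != nil {
            panic(err)
        }
        if _, err := w.Write(content); err != nil {
            panic(err)
        }

        // Hash may be called before or after Close; per the fixtures this
        // prints a8a940627d132695a9769df883f85992f0ff4a43.
        fmt.Println(w.Hash())
        _ = w.Close()
    }
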
diff --git a/plumbing/format/objfile/writer_test.go b/plumbing/format/objfile/writer_test.go
new file mode 100644
index 0000000..46dbea6
--- /dev/null
+++ b/plumbing/format/objfile/writer_test.go
@@ -0,0 +1,80 @@
+package objfile
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "io"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/src-d/go-git.v4/plumbing"
+)
+
+type SuiteWriter struct{}
+
+var _ = Suite(&SuiteWriter{})
+
+func (s *SuiteWriter) TestWriteObjfile(c *C) {
+ for k, fixture := range objfileFixtures {
+ buffer := bytes.NewBuffer(nil)
+
+ com := fmt.Sprintf("test %d: ", k)
+ hash := plumbing.NewHash(fixture.hash)
+ content, _ := base64.StdEncoding.DecodeString(fixture.content)
+
+ // Write the data out to the buffer
+ testWriter(c, buffer, hash, fixture.t, content)
+
+ // Read the data back in from the buffer to be sure it matches
+ testReader(c, buffer, hash, fixture.t, content, com)
+ }
+}
+
+func testWriter(c *C, dest io.Writer, hash plumbing.Hash, t plumbing.ObjectType, content []byte) {
+ size := int64(len(content))
+ w := NewWriter(dest)
+
+ err := w.WriteHeader(t, size)
+ c.Assert(err, IsNil)
+
+ written, err := io.Copy(w, bytes.NewReader(content))
+ c.Assert(err, IsNil)
+ c.Assert(written, Equals, size)
+
+ c.Assert(w.Hash(), Equals, hash)
+ c.Assert(w.Close(), IsNil)
+}
+
+func (s *SuiteWriter) TestWriteOverflow(c *C) {
+ buf := bytes.NewBuffer(nil)
+ w := NewWriter(buf)
+
+ err := w.WriteHeader(plumbing.BlobObject, 8)
+ c.Assert(err, IsNil)
+
+ n, err := w.Write([]byte("1234"))
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 4)
+
+ n, err = w.Write([]byte("56789"))
+ c.Assert(err, Equals, ErrOverflow)
+ c.Assert(n, Equals, 4)
+}
+
+func (s *SuiteWriter) TestNewWriterInvalidType(c *C) {
+ buf := bytes.NewBuffer(nil)
+ w := NewWriter(buf)
+
+ err := w.WriteHeader(plumbing.InvalidObject, 8)
+ c.Assert(err, Equals, plumbing.ErrInvalidType)
+}
+
+func (s *SuiteWriter) TestNewWriterInvalidSize(c *C) {
+ buf := bytes.NewBuffer(nil)
+ w := NewWriter(buf)
+
+ err := w.WriteHeader(plumbing.BlobObject, -1)
+ c.Assert(err, Equals, ErrNegativeSize)
+ err = w.WriteHeader(plumbing.BlobObject, -1651860)
+ c.Assert(err, Equals, ErrNegativeSize)
+}