author    Máximo Cuadros <mcuadros@gmail.com>  2016-11-08 23:46:38 +0100
committer GitHub <noreply@github.com>          2016-11-08 23:46:38 +0100
commit    ac095bb12c4d29722b60ba9f20590fa7cfa6bc7d (patch)
tree      223f36f336ba3414b1e45cac8af6c4744a5d7ef6 /formats
parent    e523701393598f4fa241dd407af9ff8925507a1a (diff)
download  go-git-ac095bb12c4d29722b60ba9f20590fa7cfa6bc7d.tar.gz
new plumbing package (#118)
* plumbing: now core was renamed to plumbing, and formats and clients moved inside
Diffstat (limited to 'formats')
-rw-r--r-- formats/config/common.go            |  97
-rw-r--r-- formats/config/common_test.go       |  86
-rw-r--r-- formats/config/decoder.go           |  37
-rw-r--r-- formats/config/decoder_test.go      |  90
-rw-r--r-- formats/config/doc.go               | 199
-rw-r--r-- formats/config/encoder.go           |  75
-rw-r--r-- formats/config/encoder_test.go      |  21
-rw-r--r-- formats/config/fixtures_test.go     |  90
-rw-r--r-- formats/config/option.go            |  83
-rw-r--r-- formats/config/option_test.go       |  33
-rw-r--r-- formats/config/section.go           |  87
-rw-r--r-- formats/config/section_test.go      |  71
-rw-r--r-- formats/idxfile/decoder.go          | 148
-rw-r--r-- formats/idxfile/decoder_test.go     |  69
-rw-r--r-- formats/idxfile/doc.go              | 132
-rw-r--r-- formats/idxfile/encoder.go          | 131
-rw-r--r-- formats/idxfile/encoder_test.go     |  48
-rw-r--r-- formats/idxfile/idxfile.go          |  62
-rw-r--r-- formats/index/decoder.go            | 446
-rw-r--r-- formats/index/decoder_test.go       | 196
-rw-r--r-- formats/index/doc.go                | 302
-rw-r--r-- formats/index/encoder.go            | 141
-rw-r--r-- formats/index/encoder_test.go       |  78
-rw-r--r-- formats/index/index.go              | 108
-rw-r--r-- formats/objfile/common_test.go      |  69
-rw-r--r-- formats/objfile/reader.go           | 118
-rw-r--r-- formats/objfile/reader_test.go      |  67
-rw-r--r-- formats/objfile/writer.go           | 109
-rw-r--r-- formats/objfile/writer_test.go      |  80
-rw-r--r-- formats/packfile/decoder.go         | 306
-rw-r--r-- formats/packfile/decoder_test.go    | 182
-rw-r--r-- formats/packfile/delta.go           | 181
-rw-r--r-- formats/packfile/doc.go             | 168
-rw-r--r-- formats/packfile/error.go           |  30
-rw-r--r-- formats/packfile/scanner.go         | 418
-rw-r--r-- formats/packfile/scanner_test.go    | 189
-rw-r--r-- formats/packp/advrefs/advrefs.go    |  58
-rw-r--r-- formats/packp/advrefs/advrefs_test.go | 315
-rw-r--r-- formats/packp/advrefs/decoder.go    | 288
-rw-r--r-- formats/packp/advrefs/decoder_test.go | 500
-rw-r--r-- formats/packp/advrefs/encoder.go    | 155
-rw-r--r-- formats/packp/advrefs/encoder_test.go | 249
-rw-r--r-- formats/packp/capabilities.go       | 136
-rw-r--r-- formats/packp/capabilities_test.go  |  46
-rw-r--r-- formats/packp/doc.go                | 724
-rw-r--r-- formats/packp/pktline/encoder.go    | 123
-rw-r--r-- formats/packp/pktline/encoder_test.go | 249
-rw-r--r-- formats/packp/pktline/scanner.go    | 133
-rw-r--r-- formats/packp/pktline/scanner_test.go | 225
-rw-r--r-- formats/packp/ulreq/decoder.go      | 287
-rw-r--r-- formats/packp/ulreq/decoder_test.go | 541
-rw-r--r-- formats/packp/ulreq/encoder.go      | 140
-rw-r--r-- formats/packp/ulreq/encoder_test.go | 268
-rw-r--r-- formats/packp/ulreq/ulreq.go        |  56
-rw-r--r-- formats/packp/ulreq/ulreq_test.go   |  91
55 files changed, 0 insertions, 9331 deletions
diff --git a/formats/config/common.go b/formats/config/common.go
deleted file mode 100644
index d2f1e5c..0000000
--- a/formats/config/common.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package config
-
-// New creates a new config instance.
-func New() *Config {
- return &Config{}
-}
-
-type Config struct {
- Comment *Comment
- Sections Sections
- Includes Includes
-}
-
-type Includes []*Include
-
-// Include is a reference to an included configuration.
-type Include struct {
- Path string
- Config *Config
-}
-
-type Comment string
-
-const (
- NoSubsection = ""
-)
-
-func (c *Config) Section(name string) *Section {
- for i := len(c.Sections) - 1; i >= 0; i-- {
- s := c.Sections[i]
- if s.IsName(name) {
- return s
- }
- }
-
- s := &Section{Name: name}
- c.Sections = append(c.Sections, s)
- return s
-}
-
-// AddOption is a convenience method to add an option to a given
-// section and subsection.
-//
-// Use the NoSubsection constant for the subsection argument
-// if no subsection is wanted.
-func (s *Config) AddOption(section string, subsection string, key string, value string) *Config {
- if subsection == "" {
- s.Section(section).AddOption(key, value)
- } else {
- s.Section(section).Subsection(subsection).AddOption(key, value)
- }
-
- return s
-}
-
-// SetOption is a convenience method to set an option to a given
-// section and subsection.
-//
-// Use the NoSubsection constant for the subsection argument
-// if no subsection is wanted.
-func (s *Config) SetOption(section string, subsection string, key string, value string) *Config {
- if subsection == "" {
- s.Section(section).SetOption(key, value)
- } else {
- s.Section(section).Subsection(subsection).SetOption(key, value)
- }
-
- return s
-}
-
-func (c *Config) RemoveSection(name string) *Config {
- result := Sections{}
- for _, s := range c.Sections {
- if !s.IsName(name) {
- result = append(result, s)
- }
- }
-
- c.Sections = result
- return c
-}
-
-func (c *Config) RemoveSubsection(section string, subsection string) *Config {
- for _, s := range c.Sections {
- if s.IsName(section) {
- result := Subsections{}
- for _, ss := range s.Subsections {
- if !ss.IsName(subsection) {
- result = append(result, ss)
- }
- }
- s.Subsections = result
- }
- }
-
- return c
-}
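
The file above was the whole public surface of the config model. As a quick orientation, a minimal usage sketch of that API, assuming the pre-move import path gopkg.in/src-d/go-git.v4/formats/config used elsewhere in this tree:

    package main

    import (
        "fmt"

        "gopkg.in/src-d/go-git.v4/formats/config"
    )

    func main() {
        // Section and Subsection create entries on demand, so chained
        // writes never fail.
        cfg := config.New().
            AddOption("core", config.NoSubsection, "filemode", "false").
            SetOption("branch", "devel", "remote", "origin")

        fmt.Println(cfg.Section("core").Option("filemode"))                     // false
        fmt.Println(cfg.Section("branch").Subsection("devel").Option("remote")) // origin
    }
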
diff --git a/formats/config/common_test.go b/formats/config/common_test.go
deleted file mode 100644
index 365b53f..0000000
--- a/formats/config/common_test.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package config
-
-import (
- "testing"
-
- . "gopkg.in/check.v1"
-)
-
-func Test(t *testing.T) { TestingT(t) }
-
-type CommonSuite struct{}
-
-var _ = Suite(&CommonSuite{})
-
-func (s *CommonSuite) TestConfig_SetOption(c *C) {
- obtained := New().SetOption("section", NoSubsection, "key1", "value1")
- expected := &Config{
- Sections: []*Section{
- {
- Name: "section",
- Options: []*Option{
- {Key: "key1", Value: "value1"},
- },
- },
- },
- }
- c.Assert(obtained, DeepEquals, expected)
- obtained = obtained.SetOption("section", NoSubsection, "key1", "value1")
- c.Assert(obtained, DeepEquals, expected)
-
- obtained = New().SetOption("section", "subsection", "key1", "value1")
- expected = &Config{
- Sections: []*Section{
- {
- Name: "section",
- Subsections: []*Subsection{
- {
- Name: "subsection",
- Options: []*Option{
- {Key: "key1", Value: "value1"},
- },
- },
- },
- },
- },
- }
- c.Assert(obtained, DeepEquals, expected)
- obtained = obtained.SetOption("section", "subsection", "key1", "value1")
- c.Assert(obtained, DeepEquals, expected)
-}
-
-func (s *CommonSuite) TestConfig_AddOption(c *C) {
- obtained := New().AddOption("section", NoSubsection, "key1", "value1")
- expected := &Config{
- Sections: []*Section{
- {
- Name: "section",
- Options: []*Option{
- {Key: "key1", Value: "value1"},
- },
- },
- },
- }
- c.Assert(obtained, DeepEquals, expected)
-}
-
-func (s *CommonSuite) TestConfig_RemoveSection(c *C) {
- sect := New().
- AddOption("section1", NoSubsection, "key1", "value1").
- AddOption("section2", NoSubsection, "key1", "value1")
- expected := New().
- AddOption("section1", NoSubsection, "key1", "value1")
- c.Assert(sect.RemoveSection("other"), DeepEquals, sect)
- c.Assert(sect.RemoveSection("section2"), DeepEquals, expected)
-}
-
-func (s *CommonSuite) TestConfig_RemoveSubsection(c *C) {
- sect := New().
- AddOption("section1", "sub1", "key1", "value1").
- AddOption("section1", "sub2", "key1", "value1")
- expected := New().
- AddOption("section1", "sub1", "key1", "value1")
- c.Assert(sect.RemoveSubsection("section1", "other"), DeepEquals, sect)
- c.Assert(sect.RemoveSubsection("other", "other"), DeepEquals, sect)
- c.Assert(sect.RemoveSubsection("section1", "sub2"), DeepEquals, expected)
-}
diff --git a/formats/config/decoder.go b/formats/config/decoder.go
deleted file mode 100644
index 0f02ce1..0000000
--- a/formats/config/decoder.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package config
-
-import (
- "io"
-
- "github.com/src-d/gcfg"
-)
-
-// A Decoder reads and decodes config files from an input stream.
-type Decoder struct {
- io.Reader
-}
-
-// NewDecoder returns a new decoder that reads from r.
-func NewDecoder(r io.Reader) *Decoder {
- return &Decoder{r}
-}
-
-// Decode reads the whole config from its input and stores it in the
-// value pointed to by config.
-func (d *Decoder) Decode(config *Config) error {
- cb := func(s string, ss string, k string, v string, bv bool) error {
- if ss == "" && k == "" {
- config.Section(s)
- return nil
- }
-
- if ss != "" && k == "" {
- config.Section(s).Subsection(ss)
- return nil
- }
-
- config.AddOption(s, ss, k, v)
- return nil
- }
- return gcfg.ReadWithCallback(d, cb)
-}
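
Decode is a thin wrapper over gcfg's callback reader: every (section, subsection, key, value) tuple it reports is replayed onto the Config API above. A sketch of feeding it a raw string, under the same assumed import path:

    package main

    import (
        "fmt"
        "strings"

        "gopkg.in/src-d/go-git.v4/formats/config"
    )

    func main() {
        raw := "[core]\n\trepositoryformatversion = 0\n"

        cfg := config.New()
        if err := config.NewDecoder(strings.NewReader(raw)).Decode(cfg); err != nil {
            panic(err)
        }

        fmt.Println(cfg.Section("core").Option("repositoryformatversion")) // 0
    }
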
diff --git a/formats/config/decoder_test.go b/formats/config/decoder_test.go
deleted file mode 100644
index 412549f..0000000
--- a/formats/config/decoder_test.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package config
-
-import (
- "bytes"
-
- . "gopkg.in/check.v1"
-)
-
-type DecoderSuite struct{}
-
-var _ = Suite(&DecoderSuite{})
-
-func (s *DecoderSuite) TestDecode(c *C) {
- for idx, fixture := range fixtures {
- r := bytes.NewReader([]byte(fixture.Raw))
- d := NewDecoder(r)
- cfg := &Config{}
- err := d.Decode(cfg)
- c.Assert(err, IsNil, Commentf("decoder error for fixture: %d", idx))
- c.Assert(cfg, DeepEquals, fixture.Config, Commentf("bad result for fixture: %d", idx))
- }
-}
-
-func (s *DecoderSuite) TestDecodeFailsWithIdentBeforeSection(c *C) {
- t := `
- key=value
- [section]
- key=value
- `
- decodeFails(c, t)
-}
-
-func (s *DecoderSuite) TestDecodeFailsWithEmptySectionName(c *C) {
- t := `
- []
- key=value
- `
- decodeFails(c, t)
-}
-
-func (s *DecoderSuite) TestDecodeFailsWithEmptySubsectionName(c *C) {
- t := `
- [remote ""]
- key=value
- `
- decodeFails(c, t)
-}
-
-func (s *DecoderSuite) TestDecodeFailsWithBadSubsectionName(c *C) {
- t := `
- [remote origin"]
- key=value
- `
- decodeFails(c, t)
- t = `
- [remote "origin]
- key=value
- `
- decodeFails(c, t)
-}
-
-func (s *DecoderSuite) TestDecodeFailsWithTrailingGarbage(c *C) {
- t := `
- [remote]garbage
- key=value
- `
- decodeFails(c, t)
- t = `
- [remote "origin"]garbage
- key=value
- `
- decodeFails(c, t)
-}
-
-func (s *DecoderSuite) TestDecodeFailsWithGarbage(c *C) {
- decodeFails(c, "---")
- decodeFails(c, "????")
- decodeFails(c, "[sect\nkey=value")
- decodeFails(c, "sect]\nkey=value")
- decodeFails(c, `[section]key="value`)
- decodeFails(c, `[section]key=value"`)
-}
-
-func decodeFails(c *C, text string) {
- r := bytes.NewReader([]byte(text))
- d := NewDecoder(r)
- cfg := &Config{}
- err := d.Decode(cfg)
- c.Assert(err, NotNil)
-}
diff --git a/formats/config/doc.go b/formats/config/doc.go
deleted file mode 100644
index dd77fbc..0000000
--- a/formats/config/doc.go
+++ /dev/null
@@ -1,199 +0,0 @@
-// Package config implements decoding/encoding of git config files.
-package config
-
-/*
-
-CONFIGURATION FILE
-------------------
-
-The Git configuration file contains a number of variables that affect
-the Git commands' behavior. The `.git/config` file in each repository
-is used to store the configuration for that repository, and
-`$HOME/.gitconfig` is used to store a per-user configuration as
-fallback values for the `.git/config` file. The file `/etc/gitconfig`
-can be used to store a system-wide default configuration.
-
-The configuration variables are used by both the Git plumbing
-and the porcelains. The variables are divided into sections, wherein
-the fully qualified variable name of the variable itself is the last
-dot-separated segment and the section name is everything before the last
-dot. The variable names are case-insensitive, allow only alphanumeric
-characters and `-`, and must start with an alphabetic character. Some
-variables may appear multiple times; we say then that the variable is
-multivalued.
-
-Syntax
-~~~~~~
-
-The syntax is fairly flexible and permissive; whitespaces are mostly
-ignored. The '#' and ';' characters begin comments that run to the end
-of the line, and blank lines are ignored.
-
-The file consists of sections and variables. A section begins with
-the name of the section in square brackets and continues until the next
-section begins. Section names are case-insensitive. Only alphanumeric
-characters, `-` and `.` are allowed in section names. Each variable
-must belong to some section, which means that there must be a section
-header before the first setting of a variable.
-
-Sections can be further divided into subsections. To begin a subsection
-put its name in double quotes, separated by space from the section name,
-in the section header, like in the example below:
-
---------
- [section "subsection"]
-
---------
-
-Subsection names are case sensitive and can contain any characters except
-newline (doublequote `"` and backslash can be included by escaping them
-as `\"` and `\\`, respectively). Section headers cannot span multiple
-lines. Variables may belong directly to a section or to a given subsection.
-You can have `[section]` if you have `[section "subsection"]`, but you
-don't need to.
-
-There is also a deprecated `[section.subsection]` syntax. With this
-syntax, the subsection name is converted to lower-case and is also
-compared case sensitively. These subsection names follow the same
-restrictions as section names.
-
-All the other lines (and the remainder of the line after the section
-header) are recognized as setting variables, in the form
-'name = value' (or just 'name', which is a short-hand to say that
-the variable is the boolean "true").
-The variable names are case-insensitive, allow only alphanumeric characters
-and `-`, and must start with an alphabetic character.
-
-A line that defines a value can be continued to the next line by
-ending it with a `\`; the backslash and the end-of-line are
-stripped. Leading whitespaces after 'name =', the remainder of the
-line after the first comment character '#' or ';', and trailing
-whitespaces of the line are discarded unless they are enclosed in
-double quotes. Internal whitespaces within the value are retained
-verbatim.
-
-Inside double quotes, double quote `"` and backslash `\` characters
-must be escaped: use `\"` for `"` and `\\` for `\`.
-
-The following escape sequences (beside `\"` and `\\`) are recognized:
-`\n` for newline character (NL), `\t` for horizontal tabulation (HT, TAB)
-and `\b` for backspace (BS). Other char escape sequences (including octal
-escape sequences) are invalid.
-
-
-Includes
-~~~~~~~~
-
-You can include one config file from another by setting the special
-`include.path` variable to the name of the file to be included. The
-variable takes a pathname as its value, and is subject to tilde
-expansion.
-
-The included file is expanded immediately, as if its contents had been
-found at the location of the include directive. If the value of the
-`include.path` variable is a relative path, the path is considered to be
-relative to the configuration file in which the include directive was
-found. See below for examples.
-
-
-Example
-~~~~~~~
-
- # Core variables
- [core]
- ; Don't trust file modes
- filemode = false
-
- # Our diff algorithm
- [diff]
- external = /usr/local/bin/diff-wrapper
- renames = true
-
- [branch "devel"]
- remote = origin
- merge = refs/heads/devel
-
- # Proxy settings
- [core]
- gitProxy="ssh" for "kernel.org"
- gitProxy=default-proxy ; for the rest
-
- [include]
- path = /path/to/foo.inc ; include by absolute path
- path = foo ; expand "foo" relative to the current file
- path = ~/foo ; expand "foo" in your `$HOME` directory
-
-
-Values
-~~~~~~
-
-Values of many variables are treated as a simple string, but there
-are variables that take values of specific types and there are rules
-as to how to spell them.
-
-boolean::
-
- When a variable is said to take a boolean value, many
- synonyms are accepted for 'true' and 'false'; these are all
- case-insensitive.
-
- true;; Boolean true can be spelled as `yes`, `on`, `true`,
- or `1`. Also, a variable defined without `= <value>`
- is taken as true.
-
- false;; Boolean false can be spelled as `no`, `off`,
- `false`, or `0`.
-+
-When converting a value to the canonical form using the `--bool` type
-specifier, 'git config' will ensure that the output is "true" or
-"false" (spelled in lowercase).
-
-integer::
- The value for many variables that specify various sizes can
- be suffixed with `k`, `M`,... to mean "scale the number by
- 1024", "by 1024x1024", etc.
-
-color::
- The value for a variable that takes a color is a list of
- colors (at most two, one for foreground and one for background)
- and attributes (as many as you want), separated by spaces.
-+
-The basic colors accepted are `normal`, `black`, `red`, `green`, `yellow`,
-`blue`, `magenta`, `cyan` and `white`. The first color given is the
-foreground; the second is the background.
-+
-Colors may also be given as numbers between 0 and 255; these use ANSI
-256-color mode (but note that not all terminals may support this). If
-your terminal supports it, you may also specify 24-bit RGB values as
-hex, like `#ff0ab3`.
-+
-
-From: https://git-scm.com/docs/git-config
-The accepted attributes are `bold`, `dim`, `ul`, `blink`, `reverse`,
-`italic`, and `strike` (for crossed-out or "strikethrough" letters).
-The position of any attributes with respect to the colors
-(before, after, or in between), doesn't matter. Specific attributes may
-be turned off by prefixing them with `no` or `no-` (e.g., `noreverse`,
-`no-ul`, etc).
-+
-For git's pre-defined color slots, the attributes are meant to be reset
-at the beginning of each item in the colored output. So setting
-`color.decorate.branch` to `black` will paint that branch name in a
-plain `black`, even if the previous thing on the same output line (e.g.
-opening parenthesis before the list of branch names in `log --decorate`
-output) is set to be painted with `bold` or some other attribute.
-However, custom log formats may do more complicated and layered
-coloring, and the negated forms may be useful there.
-
-pathname::
- A variable that takes a pathname value can be given a
- string that begins with "`~/`" or "`~user/`", and the usual
- tilde expansion happens to such a string: `~/`
- is expanded to the value of `$HOME`, and `~user/` to the
- specified user's home directory.
-
-From:
-https://raw.githubusercontent.com/git/git/659889482ac63411daea38b2c3d127842ea04e4d/Documentation/config.txt
-
-*/
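
The boolean rules quoted in the doc above are mechanical enough to sketch directly. This helper is hypothetical (the config package itself stores values as raw strings and does not normalize them); it just restates the spec in Go:

    package main

    import (
        "fmt"
        "strings"
    )

    // parseBool follows the git-config boolean rules: yes/on/true/1 and
    // no/off/false/0, compared case-insensitively; a variable defined
    // without `= <value>` counts as true.
    func parseBool(v string, hasValue bool) (bool, error) {
        if !hasValue {
            return true, nil
        }
        switch strings.ToLower(v) {
        case "yes", "on", "true", "1":
            return true, nil
        case "no", "off", "false", "0":
            return false, nil
        }
        return false, fmt.Errorf("invalid boolean value %q", v)
    }

    func main() {
        v, _ := parseBool("Yes", true)
        fmt.Println(v) // true
    }
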
diff --git a/formats/config/encoder.go b/formats/config/encoder.go
deleted file mode 100644
index 88bdf65..0000000
--- a/formats/config/encoder.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package config
-
-import (
- "fmt"
- "io"
-)
-
-// An Encoder writes config files to an output stream.
-type Encoder struct {
- w io.Writer
-}
-
-// NewEncoder returns a new encoder that writes to w.
-func NewEncoder(w io.Writer) *Encoder {
- return &Encoder{w}
-}
-
-// Encode writes the config in git config format to the stream of the encoder.
-func (e *Encoder) Encode(cfg *Config) error {
- for _, s := range cfg.Sections {
- if err := e.encodeSection(s); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (e *Encoder) encodeSection(s *Section) error {
- if len(s.Options) > 0 {
- if err := e.printf("[%s]\n", s.Name); err != nil {
- return err
- }
-
- if err := e.encodeOptions(s.Options); err != nil {
- return err
- }
- }
-
- for _, ss := range s.Subsections {
- if err := e.encodeSubsection(s.Name, ss); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (e *Encoder) encodeSubsection(sectionName string, s *Subsection) error {
- //TODO: escape
- if err := e.printf("[%s \"%s\"]\n", sectionName, s.Name); err != nil {
- return err
- }
-
- if err := e.encodeOptions(s.Options); err != nil {
- return err
- }
-
- return nil
-}
-
-func (e *Encoder) encodeOptions(opts Options) error {
- for _, o := range opts {
- if err := e.printf("\t%s = %s\n", o.Key, o.Value); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (e *Encoder) printf(msg string, args ...interface{}) error {
- _, err := fmt.Fprintf(e.w, msg, args...)
- return err
-}
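
Encode walks sections in order, emitting a [name] header only when a section has direct options, then each subsection as [name "sub"]. A round-trip sketch under the same assumed import path:

    package main

    import (
        "bytes"
        "fmt"

        "gopkg.in/src-d/go-git.v4/formats/config"
    )

    func main() {
        cfg := config.New().
            AddOption("core", config.NoSubsection, "filemode", "false").
            AddOption("remote", "origin", "url", "https://github.com/src-d/go-git")

        var buf bytes.Buffer
        if err := config.NewEncoder(&buf).Encode(cfg); err != nil {
            panic(err)
        }

        fmt.Print(buf.String())
        // [core]
        //         filemode = false
        // [remote "origin"]
        //         url = https://github.com/src-d/go-git
    }
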
diff --git a/formats/config/encoder_test.go b/formats/config/encoder_test.go
deleted file mode 100644
index 5335b83..0000000
--- a/formats/config/encoder_test.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package config
-
-import (
- "bytes"
-
- . "gopkg.in/check.v1"
-)
-
-type EncoderSuite struct{}
-
-var _ = Suite(&EncoderSuite{})
-
-func (s *EncoderSuite) TestEncode(c *C) {
- for idx, fixture := range fixtures {
- buf := &bytes.Buffer{}
- e := NewEncoder(buf)
- err := e.Encode(fixture.Config)
- c.Assert(err, IsNil, Commentf("encoder error for fixture: %d", idx))
- c.Assert(buf.String(), Equals, fixture.Text, Commentf("bad result for fixture: %d", idx))
- }
-}
diff --git a/formats/config/fixtures_test.go b/formats/config/fixtures_test.go
deleted file mode 100644
index 12ff288..0000000
--- a/formats/config/fixtures_test.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package config
-
-type Fixture struct {
- Text string
- Raw string
- Config *Config
-}
-
-var fixtures = []*Fixture{
- {
- Raw: "",
- Text: "",
- Config: New(),
- },
- {
- Raw: ";Comments only",
- Text: "",
- Config: New(),
- },
- {
- Raw: "#Comments only",
- Text: "",
- Config: New(),
- },
- {
- Raw: "[core]\nrepositoryformatversion=0",
- Text: "[core]\n\trepositoryformatversion = 0\n",
- Config: New().AddOption("core", "", "repositoryformatversion", "0"),
- },
- {
- Raw: "[core]\n\trepositoryformatversion = 0\n",
- Text: "[core]\n\trepositoryformatversion = 0\n",
- Config: New().AddOption("core", "", "repositoryformatversion", "0"),
- },
- {
- Raw: ";Commment\n[core]\n;Comment\nrepositoryformatversion = 0\n",
- Text: "[core]\n\trepositoryformatversion = 0\n",
- Config: New().AddOption("core", "", "repositoryformatversion", "0"),
- },
- {
- Raw: "#Commment\n#Comment\n[core]\n#Comment\nrepositoryformatversion = 0\n",
- Text: "[core]\n\trepositoryformatversion = 0\n",
- Config: New().AddOption("core", "", "repositoryformatversion", "0"),
- },
- {
- Raw: `
- [sect1]
- opt1 = value1
- [sect1 "subsect1"]
- opt2 = value2
- `,
- Text: `[sect1]
- opt1 = value1
-[sect1 "subsect1"]
- opt2 = value2
-`,
- Config: New().
- AddOption("sect1", "", "opt1", "value1").
- AddOption("sect1", "subsect1", "opt2", "value2"),
- },
- {
- Raw: `
- [sect1]
- opt1 = value1
- [sect1 "subsect1"]
- opt2 = value2
- [sect1]
- opt1 = value1b
- [sect1 "subsect1"]
- opt2 = value2b
- [sect1 "subsect2"]
- opt2 = value2
- `,
- Text: `[sect1]
- opt1 = value1
- opt1 = value1b
-[sect1 "subsect1"]
- opt2 = value2
- opt2 = value2b
-[sect1 "subsect2"]
- opt2 = value2
-`,
- Config: New().
- AddOption("sect1", "", "opt1", "value1").
- AddOption("sect1", "", "opt1", "value1b").
- AddOption("sect1", "subsect1", "opt2", "value2").
- AddOption("sect1", "subsect1", "opt2", "value2b").
- AddOption("sect1", "subsect2", "opt2", "value2"),
- },
-}
diff --git a/formats/config/option.go b/formats/config/option.go
deleted file mode 100644
index dbb401c..0000000
--- a/formats/config/option.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package config
-
-import (
- "strings"
-)
-
-type Option struct {
-	// Key is the option key, with its original case preserved.
-	// Use IsKey to compare keys regardless of case.
- Key string
-	// Value is the original value as a string; it may not be normalized.
- Value string
-}
-
-type Options []*Option
-
-// IsKey returns true if the given key matches
-// this options' key in a case-insensitive comparison.
-func (o *Option) IsKey(key string) bool {
- return strings.ToLower(o.Key) == strings.ToLower(key)
-}
-
-// Get gets the value for the given key if set,
-// otherwise it returns the empty string.
-//
-// Note that there is no difference between an unset key and a
-// key set to the empty string.
-//
-// This matches git behaviour since git v1.8.1-rc1: if there are
-// multiple definitions of a key, the last one wins.
-//
-// See: http://article.gmane.org/gmane.linux.kernel/1407184
-//
-// In order to get all possible values for the same key,
-// use GetAll.
-func (opts Options) Get(key string) string {
- for i := len(opts) - 1; i >= 0; i-- {
- o := opts[i]
- if o.IsKey(key) {
- return o.Value
- }
- }
- return ""
-}
-
-// GetAll returns all possible values for the same key.
-func (opts Options) GetAll(key string) []string {
- result := []string{}
- for _, o := range opts {
- if o.IsKey(key) {
- result = append(result, o.Value)
- }
- }
- return result
-}
-
-func (opts Options) withoutOption(key string) Options {
- result := Options{}
- for _, o := range opts {
- if !o.IsKey(key) {
- result = append(result, o)
- }
- }
- return result
-}
-
-func (opts Options) withAddedOption(key string, value string) Options {
- return append(opts, &Option{key, value})
-}
-
-func (opts Options) withSettedOption(key string, value string) Options {
- for i := len(opts) - 1; i >= 0; i-- {
- o := opts[i]
- if o.IsKey(key) {
- result := make(Options, len(opts))
- copy(result, opts)
- result[i] = &Option{key, value}
- return result
- }
- }
-
- return opts.withAddedOption(key, value)
-}
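
The Get/GetAll split mirrors git's multivalued variables: Get scans from the end so the last definition wins, while GetAll preserves every definition in order. A sketch:

    package main

    import (
        "fmt"

        "gopkg.in/src-d/go-git.v4/formats/config"
    )

    func main() {
        opts := config.Options{
            {Key: "url", Value: "first"},
            {Key: "URL", Value: "second"},
        }

        // keys compare case-insensitively; the last definition wins,
        // matching git behaviour since v1.8.1-rc1
        fmt.Println(opts.Get("url"))    // second
        fmt.Println(opts.GetAll("url")) // [first second]
    }
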
diff --git a/formats/config/option_test.go b/formats/config/option_test.go
deleted file mode 100644
index 8588de1..0000000
--- a/formats/config/option_test.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package config
-
-import (
- . "gopkg.in/check.v1"
-)
-
-type OptionSuite struct{}
-
-var _ = Suite(&OptionSuite{})
-
-func (s *OptionSuite) TestOptions_GetAll(c *C) {
- o := Options{
- &Option{"k", "v"},
- &Option{"ok", "v1"},
- &Option{"K", "v2"},
- }
- c.Assert(o.GetAll("k"), DeepEquals, []string{"v", "v2"})
- c.Assert(o.GetAll("K"), DeepEquals, []string{"v", "v2"})
- c.Assert(o.GetAll("ok"), DeepEquals, []string{"v1"})
-	c.Assert(o.GetAll("nonexistent"), DeepEquals, []string{})
-
- o = Options{}
- c.Assert(o.GetAll("k"), DeepEquals, []string{})
-}
-
-func (s *OptionSuite) TestOption_IsKey(c *C) {
- c.Assert((&Option{Key: "key"}).IsKey("key"), Equals, true)
- c.Assert((&Option{Key: "key"}).IsKey("KEY"), Equals, true)
- c.Assert((&Option{Key: "KEY"}).IsKey("key"), Equals, true)
- c.Assert((&Option{Key: "key"}).IsKey("other"), Equals, false)
- c.Assert((&Option{Key: "key"}).IsKey(""), Equals, false)
- c.Assert((&Option{Key: ""}).IsKey("key"), Equals, false)
-}
diff --git a/formats/config/section.go b/formats/config/section.go
deleted file mode 100644
index 1844913..0000000
--- a/formats/config/section.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package config
-
-import "strings"
-
-type Section struct {
- Name string
- Options Options
- Subsections Subsections
-}
-
-type Subsection struct {
- Name string
- Options Options
-}
-
-type Sections []*Section
-
-type Subsections []*Subsection
-
-func (s *Section) IsName(name string) bool {
- return strings.ToLower(s.Name) == strings.ToLower(name)
-}
-
-func (s *Section) Option(key string) string {
- return s.Options.Get(key)
-}
-
-func (s *Section) AddOption(key string, value string) *Section {
- s.Options = s.Options.withAddedOption(key, value)
- return s
-}
-
-func (s *Section) SetOption(key string, value string) *Section {
- s.Options = s.Options.withSettedOption(key, value)
- return s
-}
-
-func (s *Section) RemoveOption(key string) *Section {
- s.Options = s.Options.withoutOption(key)
- return s
-}
-
-func (s *Section) Subsection(name string) *Subsection {
- for i := len(s.Subsections) - 1; i >= 0; i-- {
- ss := s.Subsections[i]
- if ss.IsName(name) {
- return ss
- }
- }
-
- ss := &Subsection{Name: name}
- s.Subsections = append(s.Subsections, ss)
- return ss
-}
-
-func (s *Section) HasSubsection(name string) bool {
- for _, ss := range s.Subsections {
- if ss.IsName(name) {
- return true
- }
- }
-
- return false
-}
-
-func (s *Subsection) IsName(name string) bool {
- return s.Name == name
-}
-
-func (s *Subsection) Option(key string) string {
- return s.Options.Get(key)
-}
-
-func (s *Subsection) AddOption(key string, value string) *Subsection {
- s.Options = s.Options.withAddedOption(key, value)
- return s
-}
-
-func (s *Subsection) SetOption(key string, value string) *Subsection {
- s.Options = s.Options.withSettedOption(key, value)
- return s
-}
-
-func (s *Subsection) RemoveOption(key string) *Subsection {
- s.Options = s.Options.withoutOption(key)
- return s
-}
diff --git a/formats/config/section_test.go b/formats/config/section_test.go
deleted file mode 100644
index cfd9f3f..0000000
--- a/formats/config/section_test.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package config
-
-import (
- . "gopkg.in/check.v1"
-)
-
-type SectionSuite struct{}
-
-var _ = Suite(&SectionSuite{})
-
-func (s *SectionSuite) TestSection_Option(c *C) {
- sect := &Section{
- Options: []*Option{
- {Key: "key1", Value: "value1"},
- {Key: "key2", Value: "value2"},
- {Key: "key1", Value: "value3"},
- },
- }
- c.Assert(sect.Option("otherkey"), Equals, "")
- c.Assert(sect.Option("key2"), Equals, "value2")
- c.Assert(sect.Option("key1"), Equals, "value3")
-}
-
-func (s *SectionSuite) TestSubsection_Option(c *C) {
- sect := &Subsection{
- Options: []*Option{
- {Key: "key1", Value: "value1"},
- {Key: "key2", Value: "value2"},
- {Key: "key1", Value: "value3"},
- },
- }
- c.Assert(sect.Option("otherkey"), Equals, "")
- c.Assert(sect.Option("key2"), Equals, "value2")
- c.Assert(sect.Option("key1"), Equals, "value3")
-}
-
-func (s *SectionSuite) TestSection_RemoveOption(c *C) {
- sect := &Section{
- Options: []*Option{
- {Key: "key1", Value: "value1"},
- {Key: "key2", Value: "value2"},
- {Key: "key1", Value: "value3"},
- },
- }
- c.Assert(sect.RemoveOption("otherkey"), DeepEquals, sect)
-
- expected := &Section{
- Options: []*Option{
- {Key: "key2", Value: "value2"},
- },
- }
- c.Assert(sect.RemoveOption("key1"), DeepEquals, expected)
-}
-
-func (s *SectionSuite) TestSubsection_RemoveOption(c *C) {
- sect := &Subsection{
- Options: []*Option{
- {Key: "key1", Value: "value1"},
- {Key: "key2", Value: "value2"},
- {Key: "key1", Value: "value3"},
- },
- }
- c.Assert(sect.RemoveOption("otherkey"), DeepEquals, sect)
-
- expected := &Subsection{
- Options: []*Option{
- {Key: "key2", Value: "value2"},
- },
- }
- c.Assert(sect.RemoveOption("key1"), DeepEquals, expected)
-}
diff --git a/formats/idxfile/decoder.go b/formats/idxfile/decoder.go
deleted file mode 100644
index 884d32b..0000000
--- a/formats/idxfile/decoder.go
+++ /dev/null
@@ -1,148 +0,0 @@
-package idxfile
-
-import (
- "bytes"
- "errors"
- "io"
-
- "gopkg.in/src-d/go-git.v4/core"
- "gopkg.in/src-d/go-git.v4/utils/binary"
-)
-
-var (
- // ErrUnsupportedVersion is returned by Decode when the idx file version
- // is not supported.
-	ErrUnsupportedVersion = errors.New("Unsupported version")
- // ErrMalformedIdxFile is returned by Decode when the idx file is corrupted.
- ErrMalformedIdxFile = errors.New("Malformed IDX file")
-)
-
-// A Decoder reads and decodes idx files from an input stream.
-type Decoder struct {
- io.Reader
-}
-
-// NewDecoder returns a new decoder that reads from r.
-func NewDecoder(r io.Reader) *Decoder {
- return &Decoder{r}
-}
-
-// Decode reads the whole idx object from its input and stores it in the
-// value pointed to by idx.
-func (d *Decoder) Decode(idx *Idxfile) error {
- if err := validateHeader(d); err != nil {
- return err
- }
-
- flow := []func(*Idxfile, io.Reader) error{
- readVersion,
- readFanout,
- readObjectNames,
- readCRC32,
- readOffsets,
- readChecksums,
- }
-
- for _, f := range flow {
- if err := f(idx, d); err != nil {
- return err
- }
- }
-
- if !idx.isValid() {
- return ErrMalformedIdxFile
- }
-
- return nil
-}
-
-func validateHeader(r io.Reader) error {
-	var h = make([]byte, 4)
-	if _, err := io.ReadFull(r, h); err != nil {
- return err
- }
-
- if !bytes.Equal(h, idxHeader) {
- return ErrMalformedIdxFile
- }
-
- return nil
-}
-
-func readVersion(idx *Idxfile, r io.Reader) error {
- v, err := binary.ReadUint32(r)
- if err != nil {
- return err
- }
-
- if v > VersionSupported {
- return ErrUnsupportedVersion
- }
-
- idx.Version = v
- return nil
-}
-
-func readFanout(idx *Idxfile, r io.Reader) error {
- var err error
- for i := 0; i < 255; i++ {
- idx.Fanout[i], err = binary.ReadUint32(r)
- if err != nil {
- return err
- }
- }
-
- idx.ObjectCount, err = binary.ReadUint32(r)
- return err
-}
-
-func readObjectNames(idx *Idxfile, r io.Reader) error {
- c := int(idx.ObjectCount)
- for i := 0; i < c; i++ {
- var ref core.Hash
- if _, err := r.Read(ref[:]); err != nil {
- return err
- }
-
- idx.Entries = append(idx.Entries, Entry{Hash: ref})
- }
-
- return nil
-}
-
-func readCRC32(idx *Idxfile, r io.Reader) error {
- c := int(idx.ObjectCount)
- for i := 0; i < c; i++ {
- if err := binary.Read(r, &idx.Entries[i].CRC32); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func readOffsets(idx *Idxfile, r io.Reader) error {
- c := int(idx.ObjectCount)
- for i := 0; i < c; i++ {
- o, err := binary.ReadUint32(r)
- if err != nil {
- return err
- }
-
- idx.Entries[i].Offset = uint64(o)
- }
-
- return nil
-}
-
-func readChecksums(idx *Idxfile, r io.Reader) error {
- if _, err := r.Read(idx.PackfileChecksum[:]); err != nil {
- return err
- }
-
- if _, err := r.Read(idx.IdxChecksum[:]); err != nil {
- return err
- }
-
- return nil
-}
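
Decode runs the fixed flow above (version, fanout, object names, CRC32s, offsets, checksums) against a v2 idx stream. A sketch of reading an idx file from disk; the path is hypothetical:

    package main

    import (
        "fmt"
        "os"

        "gopkg.in/src-d/go-git.v4/formats/idxfile"
    )

    func main() {
        f, err := os.Open("pack.idx") // any version-2 .idx file
        if err != nil {
            panic(err)
        }
        defer f.Close()

        idx := &idxfile.Idxfile{}
        if err := idxfile.NewDecoder(f).Decode(idx); err != nil {
            panic(err)
        }

        for _, e := range idx.Entries {
            fmt.Printf("%s offset=%d crc32=%d\n", e.Hash, e.Offset, e.CRC32)
        }
    }
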
diff --git a/formats/idxfile/decoder_test.go b/formats/idxfile/decoder_test.go
deleted file mode 100644
index 18546d2..0000000
--- a/formats/idxfile/decoder_test.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package idxfile
-
-import (
- "bytes"
- "fmt"
- "testing"
-
- . "gopkg.in/check.v1"
- "gopkg.in/src-d/go-git.v4/fixtures"
- "gopkg.in/src-d/go-git.v4/formats/packfile"
- "gopkg.in/src-d/go-git.v4/storage/memory"
-)
-
-func Test(t *testing.T) { TestingT(t) }
-
-type IdxfileSuite struct {
- fixtures.Suite
-}
-
-var _ = Suite(&IdxfileSuite{})
-
-func (s *IdxfileSuite) TestDecode(c *C) {
- f := fixtures.Basic().One()
-
- d := NewDecoder(f.Idx())
- idx := &Idxfile{}
- err := d.Decode(idx)
- c.Assert(err, IsNil)
-
- c.Assert(idx.Entries, HasLen, 31)
- c.Assert(idx.Entries[0].Hash.String(), Equals, "1669dce138d9b841a518c64b10914d88f5e488ea")
- c.Assert(idx.Entries[0].Offset, Equals, uint64(615))
- c.Assert(idx.Entries[0].CRC32, Equals, uint32(3645019190))
-
- c.Assert(fmt.Sprintf("%x", idx.IdxChecksum), Equals, "fb794f1ec720b9bc8e43257451bd99c4be6fa1c9")
- c.Assert(fmt.Sprintf("%x", idx.PackfileChecksum), Equals, f.PackfileHash.String())
-}
-
-func (s *IdxfileSuite) TestDecodeCRCs(c *C) {
- f := fixtures.Basic().ByTag("ofs-delta").One()
-
- scanner := packfile.NewScanner(f.Packfile())
- storage := memory.NewStorage()
-
- pd, err := packfile.NewDecoder(scanner, storage)
- c.Assert(err, IsNil)
- _, err = pd.Decode()
- c.Assert(err, IsNil)
-
- i := &Idxfile{Version: VersionSupported}
-
- offsets := pd.Offsets()
- for h, crc := range pd.CRCs() {
- i.Add(h, uint64(offsets[h]), crc)
- }
-
- buf := bytes.NewBuffer(nil)
- e := NewEncoder(buf)
- _, err = e.Encode(i)
- c.Assert(err, IsNil)
-
- idx := &Idxfile{}
-
- d := NewDecoder(buf)
- err = d.Decode(idx)
- c.Assert(err, IsNil)
-
- c.Assert(idx.Entries, DeepEquals, i.Entries)
-}
diff --git a/formats/idxfile/doc.go b/formats/idxfile/doc.go
deleted file mode 100644
index 8a76853..0000000
--- a/formats/idxfile/doc.go
+++ /dev/null
@@ -1,132 +0,0 @@
-// Package idxfile implements an encoder/decoder of idx files
-package idxfile
-
-/*
-== Original (version 1) pack-*.idx files have the following format:
-
- - The header consists of 256 4-byte network byte order
- integers. N-th entry of this table records the number of
- objects in the corresponding pack, the first byte of whose
- object name is less than or equal to N. This is called the
- 'first-level fan-out' table.
-
- - The header is followed by sorted 24-byte entries, one entry
- per object in the pack. Each entry is:
-
- 4-byte network byte order integer, recording where the
- object is stored in the packfile as the offset from the
- beginning.
-
- 20-byte object name.
-
- - The file is concluded with a trailer:
-
- A copy of the 20-byte SHA1 checksum at the end of
- corresponding packfile.
-
- 20-byte SHA1-checksum of all of the above.
-
-Pack Idx file:
-
- -- +--------------------------------+
-fanout | fanout[0] = 2 (for example) |-.
-table +--------------------------------+ |
- | fanout[1] | |
- +--------------------------------+ |
- | fanout[2] | |
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
- | fanout[255] = total objects |---.
- -- +--------------------------------+ | |
-main | offset | | |
-index | object name 00XXXXXXXXXXXXXXXX | | |
-table +--------------------------------+ | |
- | offset | | |
- | object name 00XXXXXXXXXXXXXXXX | | |
- +--------------------------------+<+ |
- .-| offset | |
- | | object name 01XXXXXXXXXXXXXXXX | |
- | +--------------------------------+ |
- | | offset | |
- | | object name 01XXXXXXXXXXXXXXXX | |
- | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
- | | offset | |
- | | object name FFXXXXXXXXXXXXXXXX | |
- --| +--------------------------------+<--+
-trailer | | packfile checksum |
- | +--------------------------------+
- | | idxfile checksum |
- | +--------------------------------+
- .-------.
- |
-Pack file entry: <+
-
- packed object header:
- 1-byte size extension bit (MSB)
- type (next 3 bit)
- size0 (lower 4-bit)
- n-byte sizeN (as long as MSB is set, each 7-bit)
- size0..sizeN form 4+7+7+..+7 bit integer, size0
- is the least significant part, and sizeN is the
- most significant part.
- packed object data:
- If it is not DELTA, then deflated bytes (the size above
- is the size before compression).
- If it is REF_DELTA, then
- 20-byte base object name SHA1 (the size above is the
- size of the delta data that follows).
- delta data, deflated.
- If it is OFS_DELTA, then
- n-byte offset (see below) interpreted as a negative
- offset from the type-byte of the header of the
- ofs-delta entry (the size above is the size of
- the delta data that follows).
- delta data, deflated.
-
- offset encoding:
- n bytes with MSB set in all but the last one.
- The offset is then the number constructed by
- concatenating the lower 7 bit of each byte, and
- for n >= 2 adding 2^7 + 2^14 + ... + 2^(7*(n-1))
- to the result.
-
-
-
-== Version 2 pack-*.idx files support packs larger than 4 GiB, and
- have some other reorganizations. They have the format:
-
- - A 4-byte magic number '\377tOc' which is an unreasonable
- fanout[0] value.
-
- - A 4-byte version number (= 2)
-
- - A 256-entry fan-out table just like v1.
-
- - A table of sorted 20-byte SHA1 object names. These are
- packed together without offset values to reduce the cache
- footprint of the binary search for a specific object name.
-
- - A table of 4-byte CRC32 values of the packed object data.
- This is new in v2 so compressed data can be copied directly
- from pack to pack during repacking without undetected
- data corruption.
-
- - A table of 4-byte offset values (in network byte order).
- These are usually 31-bit pack file offsets, but large
- offsets are encoded as an index into the next table with
- the msbit set.
-
- - A table of 8-byte offset entries (empty for pack files less
- than 2 GiB). Pack files are organized with heavily used
- objects toward the front, so most object references should
- not need to refer to this table.
-
- - The same trailer as a v1 pack file:
-
- A copy of the 20-byte SHA1 checksum at the end of
- corresponding packfile.
-
- 20-byte SHA1-checksum of all of the above.
-
-From:
-https://www.kernel.org/pub/software/scm/git/docs/v1.7.5/technical/pack-protocol.txt
-*/
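
The OFS_DELTA offset encoding described above is compact but easy to get wrong. A standalone sketch of the decoding rule: take the lower 7 bits of each byte, and for multi-byte values add the 2^7 + 2^14 + ... correction:

    package main

    import (
        "bytes"
        "fmt"
        "io"
    )

    func decodeOffset(r io.ByteReader) (int64, error) {
        b, err := r.ReadByte()
        if err != nil {
            return 0, err
        }
        offset := int64(b & 0x7f)
        for b&0x80 != 0 {
            offset++ // accumulates the 2^7 + 2^14 + ... correction
            if b, err = r.ReadByte(); err != nil {
                return 0, err
            }
            offset = (offset << 7) + int64(b&0x7f)
        }
        return offset, nil
    }

    func main() {
        // 0x91 0x2e: concatenated 7-bit values give 2222, plus 2^7 = 2350
        off, _ := decodeOffset(bytes.NewReader([]byte{0x91, 0x2e}))
        fmt.Println(off) // 2350
    }
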
diff --git a/formats/idxfile/encoder.go b/formats/idxfile/encoder.go
deleted file mode 100644
index 164414a..0000000
--- a/formats/idxfile/encoder.go
+++ /dev/null
@@ -1,131 +0,0 @@
-package idxfile
-
-import (
- "crypto/sha1"
- "hash"
- "io"
- "sort"
-
- "gopkg.in/src-d/go-git.v4/utils/binary"
-)
-
-// An Encoder writes idx files to an output stream.
-type Encoder struct {
- io.Writer
- hash hash.Hash
-}
-
-// NewEncoder returns a new encoder that writes to w.
-func NewEncoder(w io.Writer) *Encoder {
- h := sha1.New()
- mw := io.MultiWriter(w, h)
- return &Encoder{mw, h}
-}
-
-// Encode writes the idx in an idx file format to the stream of the encoder.
-func (e *Encoder) Encode(idx *Idxfile) (int, error) {
- idx.Entries.Sort()
-
- flow := []func(*Idxfile) (int, error){
- e.encodeHeader,
- e.encodeFanout,
- e.encodeHashes,
- e.encodeCRC32,
- e.encodeOffsets,
- e.encodeChecksums,
- }
-
- sz := 0
- for _, f := range flow {
- i, err := f(idx)
- sz += i
-
- if err != nil {
- return sz, err
- }
- }
-
- return sz, nil
-}
-
-func (e *Encoder) encodeHeader(idx *Idxfile) (int, error) {
- c, err := e.Write(idxHeader)
- if err != nil {
- return c, err
- }
-
- return c + 4, binary.WriteUint32(e, idx.Version)
-}
-
-func (e *Encoder) encodeFanout(idx *Idxfile) (int, error) {
- fanout := idx.calculateFanout()
- for _, c := range fanout {
- if err := binary.WriteUint32(e, c); err != nil {
- return 0, err
- }
- }
-
- return 1024, nil
-}
-
-func (e *Encoder) encodeHashes(idx *Idxfile) (int, error) {
- sz := 0
- for _, ent := range idx.Entries {
- i, err := e.Write(ent.Hash[:])
- sz += i
-
- if err != nil {
- return sz, err
- }
- }
-
- return sz, nil
-}
-
-func (e *Encoder) encodeCRC32(idx *Idxfile) (int, error) {
- sz := 0
- for _, ent := range idx.Entries {
- err := binary.Write(e, ent.CRC32)
- sz += 4
-
- if err != nil {
- return sz, err
- }
- }
-
- return sz, nil
-}
-
-func (e *Encoder) encodeOffsets(idx *Idxfile) (int, error) {
- sz := 0
- for _, ent := range idx.Entries {
- if err := binary.WriteUint32(e, uint32(ent.Offset)); err != nil {
- return sz, err
- }
-
- sz += 4
-
- }
-
- return sz, nil
-}
-
-func (e *Encoder) encodeChecksums(idx *Idxfile) (int, error) {
- if _, err := e.Write(idx.PackfileChecksum[:]); err != nil {
- return 0, err
- }
-
- copy(idx.IdxChecksum[:], e.hash.Sum(nil)[:20])
- if _, err := e.Write(idx.IdxChecksum[:]); err != nil {
- return 0, err
- }
-
- return 40, nil
-}
-
-type EntryList []Entry
-
-func (p EntryList) Len() int { return len(p) }
-func (p EntryList) Less(i, j int) bool { return p[i].Hash.String() < p[j].Hash.String() }
-func (p EntryList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
-func (p EntryList) Sort() { sort.Sort(p) }
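
NewEncoder's io.MultiWriter is what makes encodeChecksums work: every byte written to the stream also feeds the SHA1 state, so the trailing idx checksum covers the entire file without buffering it. The pattern in isolation:

    package main

    import (
        "bytes"
        "crypto/sha1"
        "fmt"
        "io"
    )

    func main() {
        var buf bytes.Buffer
        h := sha1.New()
        w := io.MultiWriter(&buf, h)

        // every write reaches the destination and the hash at once
        io.WriteString(w, "payload")

        // the trailer therefore checksums all bytes written so far
        buf.Write(h.Sum(nil))
        fmt.Printf("%d bytes, checksum %x\n", buf.Len(), h.Sum(nil))
    }
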
diff --git a/formats/idxfile/encoder_test.go b/formats/idxfile/encoder_test.go
deleted file mode 100644
index d9d83eb..0000000
--- a/formats/idxfile/encoder_test.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package idxfile
-
-import (
- "bytes"
- "io/ioutil"
-
- . "gopkg.in/check.v1"
- "gopkg.in/src-d/go-git.v4/core"
- "gopkg.in/src-d/go-git.v4/fixtures"
-)
-
-func (s *IdxfileSuite) TestEncode(c *C) {
- expected := &Idxfile{}
- expected.Add(core.NewHash("4bfc730165c370df4a012afbb45ba3f9c332c0d4"), 82, 82)
- expected.Add(core.NewHash("8fa2238efdae08d83c12ee176fae65ff7c99af46"), 42, 42)
-
- buf := bytes.NewBuffer(nil)
- e := NewEncoder(buf)
- _, err := e.Encode(expected)
- c.Assert(err, IsNil)
-
- idx := &Idxfile{}
- d := NewDecoder(buf)
- err = d.Decode(idx)
- c.Assert(err, IsNil)
-
- c.Assert(idx.Entries, DeepEquals, expected.Entries)
-}
-
-func (s *IdxfileSuite) TestDecodeEncode(c *C) {
- fixtures.ByTag("packfile").Test(c, func(f *fixtures.Fixture) {
- expected, err := ioutil.ReadAll(f.Idx())
- c.Assert(err, IsNil)
-
- idx := &Idxfile{}
- d := NewDecoder(bytes.NewBuffer(expected))
- err = d.Decode(idx)
- c.Assert(err, IsNil)
-
- result := bytes.NewBuffer(nil)
- e := NewEncoder(result)
- size, err := e.Encode(idx)
- c.Assert(err, IsNil)
-
- c.Assert(size, Equals, len(expected))
- c.Assert(result.Bytes(), DeepEquals, expected)
- })
-}
diff --git a/formats/idxfile/idxfile.go b/formats/idxfile/idxfile.go
deleted file mode 100644
index 8549d3f..0000000
--- a/formats/idxfile/idxfile.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package idxfile
-
-import "gopkg.in/src-d/go-git.v4/core"
-
-const (
- // VersionSupported is the only idx version supported.
- VersionSupported = 2
-)
-
-var (
- idxHeader = []byte{255, 't', 'O', 'c'}
-)
-
-// An Idxfile represents an idx file in memory.
-type Idxfile struct {
- Version uint32
- Fanout [255]uint32
- ObjectCount uint32
- Entries EntryList
- PackfileChecksum [20]byte
- IdxChecksum [20]byte
-}
-
-// An Entry represents data about an object in the packfile: its hash,
-// offset and CRC32 checksum.
-type Entry struct {
- Hash core.Hash
- CRC32 uint32
- Offset uint64
-}
-
-func (idx *Idxfile) Add(h core.Hash, offset uint64, crc32 uint32) {
- idx.Entries = append(idx.Entries, Entry{
- Hash: h,
- Offset: offset,
- CRC32: crc32,
- })
-}
-
-func (idx *Idxfile) isValid() bool {
- fanout := idx.calculateFanout()
- for k, c := range idx.Fanout {
- if fanout[k] != c {
- return false
- }
- }
-
- return true
-}
-
-func (idx *Idxfile) calculateFanout() [256]uint32 {
- fanout := [256]uint32{}
- for _, e := range idx.Entries {
- fanout[e.Hash[0]]++
- }
-
- for i := 1; i < 256; i++ {
- fanout[i] += fanout[i-1]
- }
-
- return fanout
-}
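
calculateFanout builds the cumulative table the idx format requires: slot i holds the number of objects whose first hash byte is <= i, so slot 255 is the total object count. The same computation in isolation, with made-up sample data:

    package main

    import "fmt"

    func main() {
        // first byte of each object hash (sample data)
        firstBytes := []byte{0x00, 0x00, 0x01, 0xff}

        var fanout [256]uint32
        for _, b := range firstBytes {
            fanout[b]++
        }
        for i := 1; i < 256; i++ {
            fanout[i] += fanout[i-1] // make counts cumulative
        }

        fmt.Println(fanout[0], fanout[1], fanout[255]) // 2 3 4
    }
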
diff --git a/formats/index/decoder.go b/formats/index/decoder.go
deleted file mode 100644
index f3d4343..0000000
--- a/formats/index/decoder.go
+++ /dev/null
@@ -1,446 +0,0 @@
-package index
-
-import (
- "bytes"
- "crypto/sha1"
- "errors"
- "hash"
- "io"
- "io/ioutil"
- "strconv"
- "time"
-
- "gopkg.in/src-d/go-git.v4/core"
- "gopkg.in/src-d/go-git.v4/utils/binary"
-)
-
-var (
- // DecodeVersionSupported is the range of supported index versions
- DecodeVersionSupported = struct{ Min, Max uint32 }{Min: 2, Max: 4}
-
-	// ErrMalformedSignature is returned by Decode when the index
-	// header is malformed
-	ErrMalformedSignature = errors.New("malformed index signature file")
-	// ErrInvalidChecksum is returned by Decode if the SHA1 hash
-	// mismatches the read content
- ErrInvalidChecksum = errors.New("invalid checksum")
-
- errUnknownExtension = errors.New("unknown extension")
-)
-
-const (
- entryHeaderLength = 62
- entryExtended = 0x4000
- entryValid = 0x8000
- nameMask = 0xfff
- intentToAddMask = 1 << 13
- skipWorkTreeMask = 1 << 14
-)
-
-// A Decoder reads and decodes idx files from an input stream.
-type Decoder struct {
- r io.Reader
- hash hash.Hash
- lastEntry *Entry
-}
-
-// NewDecoder returns a new decoder that reads from r.
-func NewDecoder(r io.Reader) *Decoder {
- h := sha1.New()
- return &Decoder{
- r: io.TeeReader(r, h),
- hash: h,
- }
-}
-
-// Decode reads the whole index object from its input and stores it in the
-// value pointed to by idx.
-func (d *Decoder) Decode(idx *Index) error {
- var err error
- idx.Version, err = validateHeader(d.r)
- if err != nil {
- return err
- }
-
- entryCount, err := binary.ReadUint32(d.r)
- if err != nil {
- return err
- }
-
- if err := d.readEntries(idx, int(entryCount)); err != nil {
- return err
- }
-
- return d.readExtensions(idx)
-}
-
-func (d *Decoder) readEntries(idx *Index, count int) error {
- for i := 0; i < count; i++ {
- e, err := d.readEntry(idx)
- if err != nil {
- return err
- }
-
- d.lastEntry = e
- idx.Entries = append(idx.Entries, *e)
- }
-
- return nil
-}
-
-func (d *Decoder) readEntry(idx *Index) (*Entry, error) {
- e := &Entry{}
-
- var msec, mnsec, sec, nsec uint32
- var flags uint16
-
- flow := []interface{}{
- &sec, &nsec,
- &msec, &mnsec,
- &e.Dev,
- &e.Inode,
- &e.Mode,
- &e.UID,
- &e.GID,
- &e.Size,
- &e.Hash,
- &flags,
- }
-
- if err := binary.Read(d.r, flow...); err != nil {
- return nil, err
- }
-
- read := entryHeaderLength
- e.CreatedAt = time.Unix(int64(sec), int64(nsec))
- e.ModifiedAt = time.Unix(int64(msec), int64(mnsec))
- e.Stage = Stage(flags>>12) & 0x3
-
- if flags&entryExtended != 0 {
- extended, err := binary.ReadUint16(d.r)
- if err != nil {
- return nil, err
- }
-
- read += 2
- e.IntentToAdd = extended&intentToAddMask != 0
- e.SkipWorktree = extended&skipWorkTreeMask != 0
- }
-
- if err := d.readEntryName(idx, e, flags); err != nil {
- return nil, err
- }
-
- return e, d.padEntry(idx, e, read)
-}
-
-func (d *Decoder) readEntryName(idx *Index, e *Entry, flags uint16) error {
- var name string
- var err error
-
- switch idx.Version {
- case 2, 3:
- len := flags & nameMask
- name, err = d.doReadEntryName(len)
- case 4:
- name, err = d.doReadEntryNameV4()
- default:
- return ErrUnsupportedVersion
- }
-
- if err != nil {
- return err
- }
-
- e.Name = name
- return nil
-}
-
-func (d *Decoder) doReadEntryNameV4() (string, error) {
- l, err := binary.ReadVariableWidthInt(d.r)
- if err != nil {
- return "", err
- }
-
- var base string
- if d.lastEntry != nil {
- base = d.lastEntry.Name[:len(d.lastEntry.Name)-int(l)]
- }
-
- name, err := binary.ReadUntil(d.r, '\x00')
- if err != nil {
- return "", err
- }
-
- return base + string(name), nil
-}
-
-func (d *Decoder) doReadEntryName(len uint16) (string, error) {
- name := make([]byte, len)
- if err := binary.Read(d.r, &name); err != nil {
- return "", err
- }
-
- return string(name), nil
-}
-
-// Index entries are padded out to the next 8 byte alignment
-// for historical reasons related to how C Git read the files.
-func (d *Decoder) padEntry(idx *Index, e *Entry, read int) error {
- if idx.Version == 4 {
- return nil
- }
-
- entrySize := read + len(e.Name)
- padLen := 8 - entrySize%8
- if _, err := io.CopyN(ioutil.Discard, d.r, int64(padLen)); err != nil {
- return err
- }
-
- return nil
-}
-
-// TODO: support 'Split index' and 'Untracked cache' extensions, taking into
-// account that they are not supported by jgit or libgit
-func (d *Decoder) readExtensions(idx *Index) error {
- var expected []byte
- var err error
-
- var header [4]byte
- for {
- expected = d.hash.Sum(nil)
-
- var n int
- if n, err = io.ReadFull(d.r, header[:]); err != nil {
- if n == 0 {
- err = io.EOF
- }
-
- break
- }
-
- err = d.readExtension(idx, header[:])
- if err != nil {
- break
- }
- }
-
- if err != errUnknownExtension {
- return err
- }
-
- return d.readChecksum(expected, header)
-}
-
-func (d *Decoder) readExtension(idx *Index, header []byte) error {
- switch {
- case bytes.Equal(header, treeExtSignature):
- r, err := d.getExtensionReader()
- if err != nil {
- return err
- }
-
- idx.Cache = &Tree{}
- d := &treeExtensionDecoder{r}
- if err := d.Decode(idx.Cache); err != nil {
- return err
- }
- case bytes.Equal(header, resolveUndoExtSignature):
- r, err := d.getExtensionReader()
- if err != nil {
- return err
- }
-
- idx.ResolveUndo = &ResolveUndo{}
- d := &resolveUndoDecoder{r}
- if err := d.Decode(idx.ResolveUndo); err != nil {
- return err
- }
- default:
- return errUnknownExtension
- }
-
- return nil
-}
-
-func (d *Decoder) getExtensionReader() (io.Reader, error) {
- len, err := binary.ReadUint32(d.r)
- if err != nil {
- return nil, err
- }
-
- return &io.LimitedReader{R: d.r, N: int64(len)}, nil
-}
-
-func (d *Decoder) readChecksum(expected []byte, alreadyRead [4]byte) error {
- var h core.Hash
- copy(h[:4], alreadyRead[:])
-
- if err := binary.Read(d.r, h[4:]); err != nil {
- return err
- }
-
-	if !bytes.Equal(h[:], expected) {
- return ErrInvalidChecksum
- }
-
- return nil
-}
-
-func validateHeader(r io.Reader) (version uint32, err error) {
- var s = make([]byte, 4)
- if _, err := io.ReadFull(r, s); err != nil {
- return 0, err
- }
-
- if !bytes.Equal(s, indexSignature) {
- return 0, ErrMalformedSignature
- }
-
- version, err = binary.ReadUint32(r)
- if err != nil {
- return 0, err
- }
-
- if version < DecodeVersionSupported.Min || version > DecodeVersionSupported.Max {
- return 0, ErrUnsupportedVersion
- }
-
- return
-}
-
-type treeExtensionDecoder struct {
- r io.Reader
-}
-
-func (d *treeExtensionDecoder) Decode(t *Tree) error {
- for {
- e, err := d.readEntry()
- if err != nil {
- if err == io.EOF {
- return nil
- }
-
- return err
- }
-
- if e == nil {
- continue
- }
-
- t.Entries = append(t.Entries, *e)
- }
-}
-
-func (d *treeExtensionDecoder) readEntry() (*TreeEntry, error) {
- e := &TreeEntry{}
-
- path, err := binary.ReadUntil(d.r, '\x00')
- if err != nil {
- return nil, err
- }
-
- e.Path = string(path)
-
- count, err := binary.ReadUntil(d.r, ' ')
- if err != nil {
- return nil, err
- }
-
- i, err := strconv.Atoi(string(count))
- if err != nil {
- return nil, err
- }
-
-	// An entry can be in an invalidated state, represented by a
-	// negative number in the entry_count field.
- if i == -1 {
- return nil, nil
- }
-
- e.Entries = i
- trees, err := binary.ReadUntil(d.r, '\n')
- if err != nil {
- return nil, err
- }
-
- i, err = strconv.Atoi(string(trees))
- if err != nil {
- return nil, err
- }
-
- e.Trees = i
-
- if err := binary.Read(d.r, &e.Hash); err != nil {
- return nil, err
- }
-
- return e, nil
-}
-
-type resolveUndoDecoder struct {
- r io.Reader
-}
-
-func (d *resolveUndoDecoder) Decode(ru *ResolveUndo) error {
- for {
- e, err := d.readEntry()
- if err != nil {
- if err == io.EOF {
- return nil
- }
-
- return err
- }
-
- ru.Entries = append(ru.Entries, *e)
- }
-}
-
-func (d *resolveUndoDecoder) readEntry() (*ResolveUndoEntry, error) {
- e := &ResolveUndoEntry{
- Stages: make(map[Stage]core.Hash, 0),
- }
-
- path, err := binary.ReadUntil(d.r, '\x00')
- if err != nil {
- return nil, err
- }
-
- e.Path = string(path)
-
- for i := 0; i < 3; i++ {
- if err := d.readStage(e, Stage(i+1)); err != nil {
- return nil, err
- }
- }
-
- for s := range e.Stages {
- var hash core.Hash
- if err := binary.Read(d.r, hash[:]); err != nil {
- return nil, err
- }
-
- e.Stages[s] = hash
- }
-
- return e, nil
-}
-
-func (d *resolveUndoDecoder) readStage(e *ResolveUndoEntry, s Stage) error {
- ascii, err := binary.ReadUntil(d.r, '\x00')
- if err != nil {
- return err
- }
-
- stage, err := strconv.ParseInt(string(ascii), 8, 64)
- if err != nil {
- return err
- }
-
- if stage != 0 {
- e.Stages[s] = core.ZeroHash
- }
-
- return nil
-}
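
Decode validates the DIRC header, reads the fixed-width entry records (with v4 prefix compression handled in doReadEntryNameV4), then loops over extensions until the stream ends at the checksum. A sketch of reading a repository's index, under the same assumed import prefix and with Index's fields as used by the decoder above:

    package main

    import (
        "fmt"
        "os"

        "gopkg.in/src-d/go-git.v4/formats/index"
    )

    func main() {
        f, err := os.Open(".git/index")
        if err != nil {
            panic(err)
        }
        defer f.Close()

        idx := &index.Index{}
        if err := index.NewDecoder(f).Decode(idx); err != nil {
            panic(err)
        }

        fmt.Println("version:", idx.Version)
        for _, e := range idx.Entries {
            fmt.Println(e.Hash, e.Name)
        }
    }
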
diff --git a/formats/index/decoder_test.go b/formats/index/decoder_test.go
deleted file mode 100644
index a05417d..0000000
--- a/formats/index/decoder_test.go
+++ /dev/null
@@ -1,196 +0,0 @@
-package index
-
-import (
- "testing"
-
- . "gopkg.in/check.v1"
- "gopkg.in/src-d/go-git.v4/core"
- "gopkg.in/src-d/go-git.v4/fixtures"
-)
-
-func Test(t *testing.T) { TestingT(t) }
-
-type IdxfileSuite struct {
- fixtures.Suite
-}
-
-var _ = Suite(&IdxfileSuite{})
-
-func (s *IdxfileSuite) TestDecode(c *C) {
- f, err := fixtures.Basic().One().DotGit().Open("index")
- c.Assert(err, IsNil)
-
- idx := &Index{}
- d := NewDecoder(f)
- err = d.Decode(idx)
- c.Assert(err, IsNil)
-
- c.Assert(idx.Version, Equals, uint32(2))
- c.Assert(idx.Entries, HasLen, 9)
-}
-
-func (s *IdxfileSuite) TestDecodeEntries(c *C) {
- f, err := fixtures.Basic().One().DotGit().Open("index")
- c.Assert(err, IsNil)
-
- idx := &Index{}
- d := NewDecoder(f)
- err = d.Decode(idx)
- c.Assert(err, IsNil)
-
- c.Assert(idx.Entries, HasLen, 9)
-
- e := idx.Entries[0]
- c.Assert(e.CreatedAt.Unix(), Equals, int64(1473350251))
- c.Assert(e.CreatedAt.Nanosecond(), Equals, 12059307)
- c.Assert(e.ModifiedAt.Unix(), Equals, int64(1473350251))
- c.Assert(e.ModifiedAt.Nanosecond(), Equals, 12059307)
- c.Assert(e.Dev, Equals, uint32(38))
- c.Assert(e.Inode, Equals, uint32(1715795))
- c.Assert(e.UID, Equals, uint32(1000))
- c.Assert(e.GID, Equals, uint32(100))
- c.Assert(e.Size, Equals, uint32(189))
- c.Assert(e.Hash.String(), Equals, "32858aad3c383ed1ff0a0f9bdf231d54a00c9e88")
- c.Assert(e.Name, Equals, ".gitignore")
- c.Assert(e.Mode.String(), Equals, "-rw-r--r--")
-
- e = idx.Entries[1]
- c.Assert(e.Name, Equals, "CHANGELOG")
-}
-
-func (s *IdxfileSuite) TestDecodeCacheTree(c *C) {
- f, err := fixtures.Basic().One().DotGit().Open("index")
- c.Assert(err, IsNil)
-
- idx := &Index{}
- d := NewDecoder(f)
- err = d.Decode(idx)
- c.Assert(err, IsNil)
-
- c.Assert(idx.Entries, HasLen, 9)
- c.Assert(idx.Cache.Entries, HasLen, 5)
-
- for i, expected := range expectedEntries {
- c.Assert(idx.Cache.Entries[i].Path, Equals, expected.Path)
- c.Assert(idx.Cache.Entries[i].Entries, Equals, expected.Entries)
- c.Assert(idx.Cache.Entries[i].Trees, Equals, expected.Trees)
- c.Assert(idx.Cache.Entries[i].Hash.String(), Equals, expected.Hash.String())
- }
-
-}
-
-var expectedEntries = []TreeEntry{
- {Path: "", Entries: 9, Trees: 4, Hash: core.NewHash("a8d315b2b1c615d43042c3a62402b8a54288cf5c")},
- {Path: "go", Entries: 1, Trees: 0, Hash: core.NewHash("a39771a7651f97faf5c72e08224d857fc35133db")},
- {Path: "php", Entries: 1, Trees: 0, Hash: core.NewHash("586af567d0bb5e771e49bdd9434f5e0fb76d25fa")},
- {Path: "json", Entries: 2, Trees: 0, Hash: core.NewHash("5a877e6a906a2743ad6e45d99c1793642aaf8eda")},
- {Path: "vendor", Entries: 1, Trees: 0, Hash: core.NewHash("cf4aa3b38974fb7d81f367c0830f7d78d65ab86b")},
-}
-
-func (s *IdxfileSuite) TestDecodeMergeConflict(c *C) {
- f, err := fixtures.Basic().ByTag("merge-conflict").One().DotGit().Open("index")
- c.Assert(err, IsNil)
-
- idx := &Index{}
- d := NewDecoder(f)
- err = d.Decode(idx)
- c.Assert(err, IsNil)
-
- c.Assert(idx.Version, Equals, uint32(2))
- c.Assert(idx.Entries, HasLen, 13)
-
- expected := []struct {
- Stage Stage
- Hash string
- }{
- {AncestorMode, "880cd14280f4b9b6ed3986d6671f907d7cc2a198"},
- {OurMode, "d499a1a0b79b7d87a35155afd0c1cce78b37a91c"},
- {TheirMode, "14f8e368114f561c38e134f6e68ea6fea12d77ed"},
- }
-
-	// staged files
- for i, e := range idx.Entries[4:7] {
- c.Assert(e.Stage, Equals, expected[i].Stage)
- c.Assert(e.CreatedAt.Unix(), Equals, int64(0))
- c.Assert(e.CreatedAt.Nanosecond(), Equals, 0)
- c.Assert(e.ModifiedAt.Unix(), Equals, int64(0))
- c.Assert(e.ModifiedAt.Nanosecond(), Equals, 0)
- c.Assert(e.Dev, Equals, uint32(0))
- c.Assert(e.Inode, Equals, uint32(0))
- c.Assert(e.UID, Equals, uint32(0))
- c.Assert(e.GID, Equals, uint32(0))
- c.Assert(e.Size, Equals, uint32(0))
- c.Assert(e.Hash.String(), Equals, expected[i].Hash)
- c.Assert(e.Name, Equals, "go/example.go")
- }
-
-}
-
-func (s *IdxfileSuite) TestDecodeExtendedV3(c *C) {
- f, err := fixtures.Basic().ByTag("intent-to-add").One().DotGit().Open("index")
- c.Assert(err, IsNil)
-
- idx := &Index{}
- d := NewDecoder(f)
- err = d.Decode(idx)
- c.Assert(err, IsNil)
-
- c.Assert(idx.Version, Equals, uint32(3))
- c.Assert(idx.Entries, HasLen, 11)
-
- c.Assert(idx.Entries[6].Name, Equals, "intent-to-add")
- c.Assert(idx.Entries[6].IntentToAdd, Equals, true)
- c.Assert(idx.Entries[6].SkipWorktree, Equals, false)
-}
-
-func (s *IdxfileSuite) TestDecodeResolveUndo(c *C) {
- f, err := fixtures.Basic().ByTag("resolve-undo").One().DotGit().Open("index")
- c.Assert(err, IsNil)
-
- idx := &Index{}
- d := NewDecoder(f)
- err = d.Decode(idx)
- c.Assert(err, IsNil)
-
- c.Assert(idx.Version, Equals, uint32(2))
- c.Assert(idx.Entries, HasLen, 8)
-
- ru := idx.ResolveUndo
- c.Assert(ru.Entries, HasLen, 2)
- c.Assert(ru.Entries[0].Path, Equals, "go/example.go")
- c.Assert(ru.Entries[0].Stages, HasLen, 3)
- c.Assert(ru.Entries[0].Stages[AncestorMode], Not(Equals), core.ZeroHash)
- c.Assert(ru.Entries[0].Stages[OurMode], Not(Equals), core.ZeroHash)
- c.Assert(ru.Entries[0].Stages[TheirMode], Not(Equals), core.ZeroHash)
- c.Assert(ru.Entries[1].Path, Equals, "haskal/haskal.hs")
- c.Assert(ru.Entries[1].Stages, HasLen, 2)
- c.Assert(ru.Entries[1].Stages[OurMode], Not(Equals), core.ZeroHash)
- c.Assert(ru.Entries[1].Stages[TheirMode], Not(Equals), core.ZeroHash)
-}
-
-func (s *IdxfileSuite) TestDecodeV4(c *C) {
- f, err := fixtures.Basic().ByTag("index-v4").One().DotGit().Open("index")
- c.Assert(err, IsNil)
-
- idx := &Index{}
- d := NewDecoder(f)
- err = d.Decode(idx)
- c.Assert(err, IsNil)
-
- c.Assert(idx.Version, Equals, uint32(4))
- c.Assert(idx.Entries, HasLen, 11)
-
- names := []string{
- ".gitignore", "CHANGELOG", "LICENSE", "binary.jpg", "go/example.go",
- "haskal/haskal.hs", "intent-to-add", "json/long.json",
- "json/short.json", "php/crappy.php", "vendor/foo.go",
- }
-
- for i, e := range idx.Entries {
- c.Assert(e.Name, Equals, names[i])
- }
-
- c.Assert(idx.Entries[6].Name, Equals, "intent-to-add")
- c.Assert(idx.Entries[6].IntentToAdd, Equals, true)
- c.Assert(idx.Entries[6].SkipWorktree, Equals, false)
-}
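
For reference, the Decoder API exercised by these tests can be driven directly. A minimal sketch, using the pre-move import path from this diff; the index path is illustrative:

package main

import (
	"fmt"
	"os"

	"gopkg.in/src-d/go-git.v4/formats/index"
)

func main() {
	f, err := os.Open(".git/index") // illustrative path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	idx := &index.Index{}
	if err := index.NewDecoder(f).Decode(idx); err != nil {
		panic(err)
	}

	fmt.Println("index version:", idx.Version)
	for _, e := range idx.Entries {
		fmt.Printf("%s %s %s\n", e.Hash, e.Mode, e.Name)
	}
}
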
diff --git a/formats/index/doc.go b/formats/index/doc.go
deleted file mode 100644
index 00466af..0000000
--- a/formats/index/doc.go
+++ /dev/null
@@ -1,302 +0,0 @@
-// Package index implements an encoder/decoder of index format files
-package index
-
-/*
-Git index format
-================
-
-== The Git index file has the following format
-
- All binary numbers are in network byte order. Version 2 is described
- here unless stated otherwise.
-
- - A 12-byte header consisting of
-
- 4-byte signature:
- The signature is { 'D', 'I', 'R', 'C' } (stands for "dircache")
-
- 4-byte version number:
- The current supported versions are 2, 3 and 4.
-
- 32-bit number of index entries.
-
- - A number of sorted index entries (see below).
-
- - Extensions
-
- Extensions are identified by signature. Optional extensions can
- be ignored if Git does not understand them.
-
- Git currently supports cached tree and resolve undo extensions.
-
- 4-byte extension signature. If the first byte is 'A'..'Z' the
- extension is optional and can be ignored.
-
- 32-bit size of the extension
-
- Extension data
-
- - 160-bit SHA-1 over the content of the index file before this
- checksum.
-
-== Index entry
-
- Index entries are sorted in ascending order on the name field,
- interpreted as a string of unsigned bytes (i.e. memcmp() order, no
- localization, no special casing of directory separator '/'). Entries
- with the same name are sorted by their stage field.
-
- 32-bit ctime seconds, the last time a file's metadata changed
- this is stat(2) data
-
- 32-bit ctime nanosecond fractions
- this is stat(2) data
-
- 32-bit mtime seconds, the last time a file's data changed
- this is stat(2) data
-
- 32-bit mtime nanosecond fractions
- this is stat(2) data
-
- 32-bit dev
- this is stat(2) data
-
- 32-bit ino
- this is stat(2) data
-
- 32-bit mode, split into (high to low bits)
-
- 4-bit object type
- valid values in binary are 1000 (regular file), 1010 (symbolic link)
- and 1110 (gitlink)
-
- 3-bit unused
-
- 9-bit unix permission. Only 0755 and 0644 are valid for regular files.
- Symbolic links and gitlinks have value 0 in this field.
-
- 32-bit uid
- this is stat(2) data
-
- 32-bit gid
- this is stat(2) data
-
- 32-bit file size
- This is the on-disk size from stat(2), truncated to 32-bit.
-
- 160-bit SHA-1 for the represented object
-
- A 16-bit 'flags' field split into (high to low bits)
-
- 1-bit assume-valid flag
-
- 1-bit extended flag (must be zero in version 2)
-
- 2-bit stage (during merge)
-
- 12-bit name length if the length is less than 0xFFF; otherwise 0xFFF
- is stored in this field.
-
- (Version 3 or later) A 16-bit field, only applicable if the
- "extended flag" above is 1, split into (high to low bits).
-
- 1-bit reserved for future
-
- 1-bit skip-worktree flag (used by sparse checkout)
-
- 1-bit intent-to-add flag (used by "git add -N")
-
- 13-bit unused, must be zero
-
- Entry path name (variable length) relative to top level directory
- (without leading slash). '/' is used as path separator. The special
- path components ".", ".." and ".git" (without quotes) are disallowed.
- Trailing slash is also disallowed.
-
- The exact encoding is undefined, but the '.' and '/' characters
- are encoded in 7-bit ASCII and the encoding cannot contain a NUL
- byte (iow, this is a UNIX pathname).
-
- (Version 4) In version 4, the entry path name is prefix-compressed
- relative to the path name for the previous entry (the very first
- entry is encoded as if the path name for the previous entry is an
- empty string). At the beginning of an entry, an integer N in the
- variable width encoding (the same encoding as the offset is encoded
- for OFS_DELTA pack entries; see pack-format.txt) is stored, followed
- by a NUL-terminated string S. Removing N bytes from the end of the
- path name for the previous entry, and replacing it with the string S
- yields the path name for this entry.
-
- 1-8 nul bytes as necessary to pad the entry to a multiple of eight bytes
- while keeping the name NUL-terminated.
-
- (Version 4) In version 4, the padding after the pathname does not
- exist.
-
- Interpretation of index entries in split index mode is completely
- different. See below for details.
-
-== Extensions
-
-=== Cached tree
-
- Cached tree extension contains pre-computed hashes for trees that can
- be derived from the index. It helps speed up tree object generation
- from index for a new commit.
-
- When a path is updated in index, the path must be invalidated and
- removed from tree cache.
-
- The signature for this extension is { 'T', 'R', 'E', 'E' }.
-
- A series of entries fill the entire extension; each of which
- consists of:
-
- - NUL-terminated path component (relative to its parent directory);
-
- - ASCII decimal number of entries in the index that is covered by the
- tree this entry represents (entry_count);
-
- - A space (ASCII 32);
-
- - ASCII decimal number that represents the number of subtrees this
- tree has;
-
- - A newline (ASCII 10); and
-
- - 160-bit object name for the object that would result from writing
- this span of index as a tree.
-
- An entry can be in an invalidated state and is represented by having
- a negative number in the entry_count field. In this case, there is no
- object name and the next entry starts immediately after the newline.
- When writing an invalid entry, -1 should always be used as entry_count.
-
- The entries are written out in the top-down, depth-first order. The
- first entry represents the root level of the repository, followed by the
- first subtree--let's call this A--of the root level (with its name
- relative to the root level), followed by the first subtree of A (with
- its name relative to A), ...
-
-=== Resolve undo
-
- A conflict is represented in the index as a set of higher stage entries.
- When a conflict is resolved (e.g. with "git add path"), these higher
- stage entries will be removed and a stage-0 entry with proper resolution
- is added.
-
- When these higher stage entries are removed, they are saved in the
- resolve undo extension, so that conflicts can be recreated (e.g. with
- "git checkout -m"), in case users want to redo a conflict resolution
- from scratch.
-
- The signature for this extension is { 'R', 'E', 'U', 'C' }.
-
- A series of entries fill the entire extension; each of which
- consists of:
-
- - NUL-terminated pathname the entry describes (relative to the root of
- the repository, i.e. full pathname);
-
- - Three NUL-terminated ASCII octal numbers, entry mode of entries in
- stage 1 to 3 (a missing stage is represented by "0" in this field);
- and
-
- - At most three 160-bit object names of the entry in stages from 1 to 3
- (nothing is written for a missing stage).
-
-=== Split index
-
- In split index mode, the majority of index entries could be stored
- in a separate file. This extension records the changes to be made on
- top of that to produce the final index.
-
- The signature for this extension is { 'l', 'i', 'n', 'k' }.
-
- The extension consists of:
-
- - 160-bit SHA-1 of the shared index file. The shared index file path
- is $GIT_DIR/sharedindex.<SHA-1>. If all 160 bits are zero, the
- index does not require a shared index file.
-
- - An ewah-encoded delete bitmap, each bit represents an entry in the
- shared index. If a bit is set, its corresponding entry in the
- shared index will be removed from the final index. Note, because
- a delete operation changes index entry positions, but we do need
- original positions in replace phase, it's best to just mark
- entries for removal, then do a mass deletion after replacement.
-
- - An ewah-encoded replace bitmap, each bit represents an entry in
- the shared index. If a bit is set, its corresponding entry in the
- shared index will be replaced with an entry in this index
- file. All replaced entries are stored in sorted order in this
- index. The first "1" bit in the replace bitmap corresponds to the
- first index entry, the second "1" bit to the second entry and so
- on. Replaced entries may have empty path names to save space.
-
- The remaining index entries after replaced ones will be added to the
- final index. These added entries are also sorted by entry name then
- stage.
-
-== Untracked cache
-
- Untracked cache saves the untracked file list and necessary data to
- verify the cache. The signature for this extension is { 'U', 'N',
- 'T', 'R' }.
-
- The extension starts with
-
- - A sequence of NUL-terminated strings, preceded by the size of the
- sequence in variable width encoding. Each string describes the
- environment where the cache can be used.
-
- - Stat data of $GIT_DIR/info/exclude. See "Index entry" section from
- ctime field until "file size".
-
- - Stat data of core.excludesfile
-
- - 32-bit dir_flags (see struct dir_struct)
-
- - 160-bit SHA-1 of $GIT_DIR/info/exclude. Null SHA-1 means the file
- does not exist.
-
- - 160-bit SHA-1 of core.excludesfile. Null SHA-1 means the file does
- not exist.
-
- - NUL-terminated string of per-dir exclude file name. This usually
- is ".gitignore".
-
- - The number of following directory blocks, variable width
- encoding. If this number is zero, the extension ends here with a
- following NUL.
-
- - A number of directory blocks in depth-first-search order, each
- consists of
-
- - The number of untracked entries, variable width encoding.
-
- - The number of sub-directory blocks, variable width encoding.
-
- - The directory name terminated by NUL.
-
- - A number of untracked file/dir names terminated by NUL.
-
-The remaining data of each directory block is grouped by type:
-
- - An ewah bitmap, the n-th bit marks whether the n-th directory has
- valid untracked cache entries.
-
- - An ewah bitmap, the n-th bit records "check-only" bit of
- read_directory_recursive() for the n-th directory.
-
- - An ewah bitmap, the n-th bit indicates whether SHA-1 and stat data
- is valid for the n-th directory and exists in the next data.
-
- - An array of stat data. The n-th data corresponds with the n-th
- "one" bit in the previous ewah bitmap.
-
- - An array of SHA-1. The n-th SHA-1 corresponds with the n-th "one" bit
- in the previous ewah bitmap.
-
- - One NUL.
-*/
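
The 12-byte header described above is easy to verify by hand; a minimal sketch using only the standard library (the index path is illustrative):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"os"
)

func main() {
	f, err := os.Open(".git/index") // illustrative path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	var hdr [12]byte
	if _, err := io.ReadFull(f, hdr[:]); err != nil {
		panic(err)
	}

	// 4-byte signature "DIRC", then version and entry count in
	// network byte order, per the format description above.
	if !bytes.Equal(hdr[:4], []byte("DIRC")) {
		panic("not an index file: bad signature")
	}

	version := binary.BigEndian.Uint32(hdr[4:8])
	entries := binary.BigEndian.Uint32(hdr[8:12])
	fmt.Printf("index v%d with %d entries\n", version, entries)
}
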
diff --git a/formats/index/encoder.go b/formats/index/encoder.go
deleted file mode 100644
index 94fbc68..0000000
--- a/formats/index/encoder.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package index
-
-import (
- "bytes"
- "crypto/sha1"
- "errors"
- "hash"
- "io"
- "time"
-
- "gopkg.in/src-d/go-git.v4/utils/binary"
-)
-
-var (
-	// EncodeVersionSupported is the only index version supported by the encoder
-	EncodeVersionSupported uint32 = 2
-
-	// ErrInvalidTimestamp is returned by Encode when an Index contains an
-	// Entry with negative timestamp values
-	ErrInvalidTimestamp = errors.New("negative timestamps are not allowed")
-)
-
-// An Encoder writes an Index to an output stream.
-type Encoder struct {
- w io.Writer
- hash hash.Hash
-}
-
-// NewEncoder returns a new encoder that writes to w.
-func NewEncoder(w io.Writer) *Encoder {
- h := sha1.New()
- mw := io.MultiWriter(w, h)
- return &Encoder{mw, h}
-}
-
-// Encode writes the Index to the stream of the encoder.
-func (e *Encoder) Encode(idx *Index) error {
- // TODO: support versions v3 and v4
- // TODO: support extensions
- if idx.Version != EncodeVersionSupported {
- return ErrUnsupportedVersion
- }
-
- if err := e.encodeHeader(idx); err != nil {
- return err
- }
-
- if err := e.encodeEntries(idx); err != nil {
- return err
- }
-
- return e.encodeFooter()
-}
-
-func (e *Encoder) encodeHeader(idx *Index) error {
- return binary.Write(e.w,
- indexSignature,
- idx.Version,
- uint32(len(idx.Entries)),
- )
-}
-
-func (e *Encoder) encodeEntries(idx *Index) error {
- for _, entry := range idx.Entries {
- if err := e.encodeEntry(&entry); err != nil {
- return err
- }
-
- wrote := entryHeaderLength + len(entry.Name)
- if err := e.padEntry(wrote); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (e *Encoder) encodeEntry(entry *Entry) error {
- if entry.IntentToAdd || entry.SkipWorktree {
- return ErrUnsupportedVersion
- }
-
- sec, nsec, err := e.timeToUint32(&entry.CreatedAt)
- if err != nil {
- return err
- }
-
- msec, mnsec, err := e.timeToUint32(&entry.ModifiedAt)
- if err != nil {
- return err
- }
-
- flags := uint16(entry.Stage&0x3) << 12
- if l := len(entry.Name); l < nameMask {
- flags |= uint16(l)
- } else {
- flags |= nameMask
- }
-
- flow := []interface{}{
- sec, nsec,
- msec, mnsec,
- entry.Dev,
- entry.Inode,
- entry.Mode,
- entry.UID,
- entry.GID,
- entry.Size,
- entry.Hash[:],
- flags,
- }
-
- if err := binary.Write(e.w, flow...); err != nil {
- return err
- }
-
- return binary.Write(e.w, []byte(entry.Name))
-}
-
-func (e *Encoder) timeToUint32(t *time.Time) (uint32, uint32, error) {
- if t.IsZero() {
- return 0, 0, nil
- }
-
- if t.Unix() < 0 || t.UnixNano() < 0 {
- return 0, 0, ErrInvalidTimestamp
- }
-
- return uint32(t.Unix()), uint32(t.Nanosecond()), nil
-}
-
-func (e *Encoder) padEntry(wrote int) error {
- padLen := 8 - wrote%8
-
- _, err := e.w.Write(bytes.Repeat([]byte{'\x00'}, padLen))
- return err
-}
-
-func (e *Encoder) encodeFooter() error {
- return binary.Write(e.w, e.hash.Sum(nil))
-}
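
Per the format notes in doc.go, the fixed part of a v2 entry is 62 bytes (ten 32-bit stat/mode fields, a 20-byte SHA-1 and a 16-bit flags field), so padEntry always emits between 1 and 8 NUL bytes. A quick sanity check of the arithmetic, assuming entryHeaderLength is that 62-byte figure:

package main

import "fmt"

func main() {
	const entryHeaderLength = 62 // assumed: 10*4 + 20 + 2 bytes, per doc.go
	for _, name := range []string{".gitignore", "CHANGELOG", "go/example.go"} {
		wrote := entryHeaderLength + len(name)
		pad := 8 - wrote%8 // 1..8 NUL bytes, keeps the name NUL-terminated
		fmt.Printf("%-14s wrote=%d pad=%d total=%d\n", name, wrote, pad, wrote+pad)
	}
}
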
diff --git a/formats/index/encoder_test.go b/formats/index/encoder_test.go
deleted file mode 100644
index 3085988..0000000
--- a/formats/index/encoder_test.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package index
-
-import (
- "bytes"
- "strings"
- "time"
-
- . "gopkg.in/check.v1"
- "gopkg.in/src-d/go-git.v4/core"
-)
-
-func (s *IdxfileSuite) TestEncode(c *C) {
- idx := &Index{
- Version: 2,
- Entries: []Entry{{
- CreatedAt: time.Now(),
- ModifiedAt: time.Now(),
- Dev: 4242,
- Inode: 424242,
- UID: 84,
- GID: 8484,
- Size: 42,
- Stage: TheirMode,
- Hash: core.NewHash("e25b29c8946e0e192fae2edc1dabf7be71e8ecf3"),
- Name: "foo",
- }, {
- CreatedAt: time.Now(),
- ModifiedAt: time.Now(),
- Name: strings.Repeat(" ", 20),
- Size: 82,
- }},
- }
-
- buf := bytes.NewBuffer(nil)
- e := NewEncoder(buf)
- err := e.Encode(idx)
- c.Assert(err, IsNil)
-
- output := &Index{}
- d := NewDecoder(buf)
- err = d.Decode(output)
- c.Assert(err, IsNil)
-
- c.Assert(idx, DeepEquals, output)
-}
-
-func (s *IdxfileSuite) TestEncodeUnsuportedVersion(c *C) {
- idx := &Index{Version: 3}
-
- buf := bytes.NewBuffer(nil)
- e := NewEncoder(buf)
- err := e.Encode(idx)
- c.Assert(err, Equals, ErrUnsupportedVersion)
-}
-
-func (s *IdxfileSuite) TestEncodeWithIntentToAddUnsuportedVersion(c *C) {
- idx := &Index{
- Version: 2,
- Entries: []Entry{{IntentToAdd: true}},
- }
-
- buf := bytes.NewBuffer(nil)
- e := NewEncoder(buf)
- err := e.Encode(idx)
- c.Assert(err, Equals, ErrUnsupportedVersion)
-}
-
-func (s *IdxfileSuite) TestEncodeWithSkipWorktreeUnsuportedVersion(c *C) {
- idx := &Index{
- Version: 2,
- Entries: []Entry{{SkipWorktree: true}},
- }
-
- buf := bytes.NewBuffer(nil)
- e := NewEncoder(buf)
- err := e.Encode(idx)
- c.Assert(err, Equals, ErrUnsupportedVersion)
-}
diff --git a/formats/index/index.go b/formats/index/index.go
deleted file mode 100644
index 35a5391..0000000
--- a/formats/index/index.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package index
-
-import (
- "errors"
- "os"
- "time"
-
- "gopkg.in/src-d/go-git.v4/core"
-)
-
-var (
-	// ErrUnsupportedVersion is returned by Decode when the index file
-	// version is not supported.
-	ErrUnsupportedVersion = errors.New("unsupported version")
-
- indexSignature = []byte{'D', 'I', 'R', 'C'}
- treeExtSignature = []byte{'T', 'R', 'E', 'E'}
- resolveUndoExtSignature = []byte{'R', 'E', 'U', 'C'}
-)
-
-// Stage during merge
-type Stage int
-
-const (
- // Merged is the default stage, fully merged
- Merged Stage = 1
- // AncestorMode is the base revision
- AncestorMode Stage = 1
- // OurMode is the first tree revision, ours
- OurMode Stage = 2
- // TheirMode is the second tree revision, theirs
- TheirMode Stage = 3
-)
-
-// Index contains the information about which objects are currently checked out
-// in the worktree, along with metadata about the working files. Changes in the
-// worktree are detected using this Index. The Index is also used during merges
-type Index struct {
- Version uint32
- Entries []Entry
- Cache *Tree
- ResolveUndo *ResolveUndo
-}
-
-// Entry represents a single file (or stage of a file) in the cache. An entry
-// represents exactly one stage of a file. If a file path is unmerged then
-// multiple Entry instances may appear for the same path name.
-type Entry struct {
- // Hash is the SHA1 of the represented file
- Hash core.Hash
- // Name is the Entry path name relative to top level directory
- Name string
- // CreatedAt time when the tracked path was created
- CreatedAt time.Time
- // ModifiedAt time when the tracked path was changed
- ModifiedAt time.Time
- // Dev and Inode of the tracked path
- Dev, Inode uint32
- // Mode of the path
- Mode os.FileMode
- // UID and GID, userid and group id of the owner
- UID, GID uint32
- // Size is the length in bytes for regular files
- Size uint32
-	// Stage on a merge defines which stage this entry represents
- // https://git-scm.com/book/en/v2/Git-Tools-Advanced-Merging
- Stage Stage
- // SkipWorktree used in sparse checkouts
- // https://git-scm.com/docs/git-read-tree#_sparse_checkout
- SkipWorktree bool
-	// IntentToAdd records only the fact that the path will be added later
- // https://git-scm.com/docs/git-add ("git add -N")
- IntentToAdd bool
-}
-
-// Tree contains pre-computed hashes for trees that can be derived from the
-// index. It helps speed up tree object generation from index for a new commit.
-type Tree struct {
- Entries []TreeEntry
-}
-
-// TreeEntry entry of a cached Tree
-type TreeEntry struct {
- // Path component (relative to its parent directory)
- Path string
- // Entries is the number of entries in the index that is covered by the tree
- // this entry represents
- Entries int
- // Trees is the number that represents the number of subtrees this tree has
- Trees int
- // Hash object name for the object that would result from writing this span
- // of index as a tree.
- Hash core.Hash
-}
-
-// ResolveUndo when a conflict is resolved (e.g. with "git add path"), these
-// higher stage entries will be removed and a stage-0 entry with proper
-// resolution is added. When these higher stage entries are removed, they are
-// saved in the resolve undo extension
-type ResolveUndo struct {
- Entries []ResolveUndoEntry
-}
-
-// ResolveUndoEntry contains the information about a conflict when is resolved
-type ResolveUndoEntry struct {
- Path string
- Stages map[Stage]core.Hash
-}
diff --git a/formats/objfile/common_test.go b/formats/objfile/common_test.go
deleted file mode 100644
index 682dfbb..0000000
--- a/formats/objfile/common_test.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package objfile
-
-import (
- "encoding/base64"
- "testing"
-
- . "gopkg.in/check.v1"
- "gopkg.in/src-d/go-git.v4/core"
-)
-
-type objfileFixture struct {
- hash string // hash of data
- t core.ObjectType // object type
- content string // base64-encoded content
- data string // base64-encoded objfile data
-}
-
-var objfileFixtures = []objfileFixture{
- {
- "e69de29bb2d1d6434b8b29ae775ad8c2e48c5391",
- core.BlobObject,
- base64.StdEncoding.EncodeToString([]byte("")),
- "eAFLyslPUjBgAAAJsAHw",
- },
- {
- "a8a940627d132695a9769df883f85992f0ff4a43",
- core.BlobObject,
- base64.StdEncoding.EncodeToString([]byte("this is a test")),
- "eAFLyslPUjA0YSjJyCxWAKJEhZLU4hIAUDYHOg==",
- },
- {
- "4dc2174801ac4a3d36886210fd086fbe134cf7b2",
- core.BlobObject,
- base64.StdEncoding.EncodeToString([]byte("this\nis\n\n\na\nmultiline\n\ntest.\n")),
- "eAFLyslPUjCyZCjJyCzmAiIurkSu3NKcksyczLxULq6S1OISPS4A1I8LMQ==",
- },
- {
- "13e6f47dd57798bfdc728d91f5c6d7f40c5bb5fc",
- core.BlobObject,
- base64.StdEncoding.EncodeToString([]byte("this tests\r\nCRLF\r\nencoded files.\r\n")),
- "eAFLyslPUjA2YSjJyCxWKEktLinm5XIO8nHj5UrNS85PSU1RSMvMSS3W4+UCABp3DNE=",
- },
- {
- "72a7bc4667ab068e954172437b993d9fbaa137cb",
- core.BlobObject,
- base64.StdEncoding.EncodeToString([]byte("test@example.com")),
- "eAFLyslPUjA0YyhJLS5xSK1IzC3ISdVLzs8FAGVtCIA=",
- },
- {
- "bb2b40e85ec0455d1de72daff71583f0dd72a33f",
- core.BlobObject,
- base64.StdEncoding.EncodeToString([]byte("package main\r\n\r\nimport (\r\n\t\"fmt\"\r\n\t\"io\"\r\n\t\"os\"\r\n\r\n\t\"gopkg.in/src-d/go-git.v3\"\r\n)\r\n\r\nfunc main() {\r\n\tfmt.Printf(\"Retrieving %q ...\\n\", os.Args[2])\r\n\tr, err := git.NewRepository(os.Args[2], nil)\r\n\tif err != nil {\r\n\t\tpanic(err)\r\n\t}\r\n\r\n\tif err := r.Pull(\"origin\", \"refs/heads/master\"); err != nil {\r\n\t\tpanic(err)\r\n\t}\r\n\r\n\tdumpCommits(r)\r\n}\r\n\r\nfunc dumpCommits(r *git.Repository) {\r\n\titer := r.Commits()\r\n\tdefer iter.Close()\r\n\r\n\tfor {\r\n\t\tcommit, err := iter.Next()\r\n\t\tif err != nil {\r\n\t\t\tif err == io.EOF {\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\r\n\t\t\tpanic(err)\r\n\t\t}\r\n\r\n\t\tfmt.Println(commit)\r\n\t}\r\n}\r\n")),
- "eAGNUU1LAzEU9JpC/0NcEFJps2ARQdmDFD3W0qt6SHez8dHdZH1JqyL+d/Oy/aDgQVh47LzJTGayatyKX99MzzpVrpXRvFVgh4PhANrOYeBiOGBZ3YaMJrg0nI+D/o3r1kaCzT2Wkyo3bmIgyO00rkfEqDe2TIJixL/jgagjFwg21CJb6oCgt2ANv3jnUsoXm4258/IejX++eo0CDMdcI/LbgpPuXH8sdec8BIdf4sgccwsN0aFO9POCgGTIOmWhFFGE9j/p1jtWFEW52DSNyByCAXLPUNc+f9Oq8nmrfNCYje7+o1lt2m7m2haCF2SVnFL6kw2/pBzHEH0rEH0oI8q9BF220nWEaSdnjfNaRDDCtcM+WZnsDgUl4lx/BuKxv6rYY0XBwcmHp8deh7EVarWmQ7uC2Glre/TweI0VvTk5xaTx+wWX66Gs",
- },
- {
- "e94db0f9ffca44dc7bade6a3591f544183395a7c",
- core.TreeObject,
- "MTAwNjQ0IFRlc3QgMS50eHQAqKlAYn0TJpWpdp34g/hZkvD/SkMxMDA2NDQgVGVzdCAyLnR4dABNwhdIAaxKPTaIYhD9CG++E0z3sjEwMDY0NCBUZXN0IDMudHh0ABPm9H3Vd5i/3HKNkfXG1/QMW7X8MTAwNjQ0IFRlc3QgNC50eHQAcqe8RmerBo6VQXJDe5k9n7qhN8sxMDA2NDQgVGVzdCA1LnR4dAC7K0DoXsBFXR3nLa/3FYPw3XKjPw==",
- "eAErKUpNVTC0NGAwNDAwMzFRCEktLlEw1CupKGFYsdIhqVZYberKsrk/mn9ETvrw38sZWZURWJXvIXEPxjVetmYdSQJ/OfL3Cft834SsyhisSvjZl9qr5TP23ynqnfj12PUvPNFb/yCrMgGrKlq+xy19NVvfVMci5+qZtvN3LTQ/jazKFKxqt7bDi7gDrrGyz3XXfxdt/nC3aLE9AA2STmk=",
- },
- {
- "9d7f8a56eaf92469dee8a856e716a03387ddb076",
- core.CommitObject,
- "dHJlZSBlOTRkYjBmOWZmY2E0NGRjN2JhZGU2YTM1OTFmNTQ0MTgzMzk1YTdjCmF1dGhvciBKb3NodWEgU2pvZGluZyA8am9zaHVhLnNqb2RpbmdAc2NqYWxsaWFuY2UuY29tPiAxNDU2NTMxNTgzIC0wODAwCmNvbW1pdHRlciBKb3NodWEgU2pvZGluZyA8am9zaHVhLnNqb2RpbmdAc2NqYWxsaWFuY2UuY29tPiAxNDU2NTMxNTgzIC0wODAwCgpUZXN0IENvbW1pdAo=",
- "eAGtjksOgjAUAF33FO8CktZ+aBNjTNy51Qs8Xl8FAjSh5f4SvILLmcVkKM/zUOEi3amuzMDBxE6mkBKhMZHaDiM71DaoZI1RXutgsSWBW+3zCs9c+g3hNeY4LB+4jgc35cf3QiNO04ALcUN5voEy1lmtrNdwll5Ksdt9oPIfUuLNpcLjCIov3ApFmQ==",
- },
-}
-
-func Test(t *testing.T) { TestingT(t) }
diff --git a/formats/objfile/reader.go b/formats/objfile/reader.go
deleted file mode 100644
index 5c319f6..0000000
--- a/formats/objfile/reader.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package objfile
-
-import (
- "compress/zlib"
- "errors"
- "io"
- "strconv"
-
- "gopkg.in/src-d/go-git.v4/core"
- "gopkg.in/src-d/go-git.v4/formats/packfile"
-)
-
-var (
- ErrClosed = errors.New("objfile: already closed")
- ErrHeader = errors.New("objfile: invalid header")
- ErrNegativeSize = errors.New("objfile: negative object size")
-)
-
-// Reader reads and decodes compressed objfile data from a provided io.Reader.
-// Reader implements io.ReadCloser. Close should be called when finished with
-// the Reader. Close will not close the underlying io.Reader.
-type Reader struct {
- multi io.Reader
- zlib io.ReadCloser
- hasher core.Hasher
-}
-
-// NewReader returns a new Reader reading from r.
-func NewReader(r io.Reader) (*Reader, error) {
- zlib, err := zlib.NewReader(r)
- if err != nil {
- return nil, packfile.ErrZLib.AddDetails(err.Error())
- }
-
- return &Reader{
- zlib: zlib,
- }, nil
-}
-
-// Header reads the type and the size of the object, and prepares the reader for reading
-func (r *Reader) Header() (t core.ObjectType, size int64, err error) {
- var raw []byte
- raw, err = r.readUntil(' ')
- if err != nil {
- return
- }
-
- t, err = core.ParseObjectType(string(raw))
- if err != nil {
- return
- }
-
- raw, err = r.readUntil(0)
- if err != nil {
- return
- }
-
- size, err = strconv.ParseInt(string(raw), 10, 64)
- if err != nil {
- err = ErrHeader
- return
- }
-
- defer r.prepareForRead(t, size)
- return
-}
-
-// readUntil reads one byte at a time from r until it encounters delim or an
-// error.
-func (r *Reader) readUntil(delim byte) ([]byte, error) {
- var buf [1]byte
- value := make([]byte, 0, 16)
- for {
- if n, err := r.zlib.Read(buf[:]); err != nil && (err != io.EOF || n == 0) {
- if err == io.EOF {
- return nil, ErrHeader
- }
- return nil, err
- }
-
- if buf[0] == delim {
- return value, nil
- }
-
- value = append(value, buf[0])
- }
-}
-
-func (r *Reader) prepareForRead(t core.ObjectType, size int64) {
- r.hasher = core.NewHasher(t, size)
- r.multi = io.TeeReader(r.zlib, r.hasher)
-}
-
-// Read reads len(p) bytes into p from the object data stream. It returns
-// the number of bytes read (0 <= n <= len(p)) and any error encountered. Even
-// if Read returns n < len(p), it may use all of p as scratch space during the
-// call.
-//
-// If Read encounters the end of the data stream it will return err == io.EOF,
-// either in the current call if n > 0 or in a subsequent call.
-func (r *Reader) Read(p []byte) (n int, err error) {
- return r.multi.Read(p)
-}
-
-// Hash returns the hash of the object data stream that has been read so far.
-func (r *Reader) Hash() core.Hash {
- return r.hasher.Sum()
-}
-
-// Close releases any resources consumed by the Reader. Calling Close does not
-// close the wrapped io.Reader originally passed to NewReader.
-func (r *Reader) Close() error {
- if err := r.zlib.Close(); err != nil {
- return err
- }
-
- return nil
-}
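
A minimal sketch of reading a loose object with this Reader (pre-move import path; the object path is illustrative):

package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"gopkg.in/src-d/go-git.v4/formats/objfile"
)

func main() {
	// Any loose object file works here; this path is illustrative.
	f, err := os.Open(".git/objects/e6/9de29bb2d1d6434b8b29ae775ad8c2e48c5391")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	r, err := objfile.NewReader(f)
	if err != nil {
		panic(err)
	}
	defer r.Close()

	t, size, err := r.Header()
	if err != nil {
		panic(err)
	}

	content, err := ioutil.ReadAll(r)
	if err != nil {
		panic(err)
	}

	fmt.Printf("%s, %d bytes, hash %s\n", t, size, r.Hash())
	_ = content
}
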
diff --git a/formats/objfile/reader_test.go b/formats/objfile/reader_test.go
deleted file mode 100644
index a383fd2..0000000
--- a/formats/objfile/reader_test.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package objfile
-
-import (
- "bytes"
- "encoding/base64"
- "fmt"
- "io"
- "io/ioutil"
-
- . "gopkg.in/check.v1"
- "gopkg.in/src-d/go-git.v4/core"
-)
-
-type SuiteReader struct{}
-
-var _ = Suite(&SuiteReader{})
-
-func (s *SuiteReader) TestReadObjfile(c *C) {
- for k, fixture := range objfileFixtures {
- com := fmt.Sprintf("test %d: ", k)
- hash := core.NewHash(fixture.hash)
- content, _ := base64.StdEncoding.DecodeString(fixture.content)
- data, _ := base64.StdEncoding.DecodeString(fixture.data)
-
- testReader(c, bytes.NewReader(data), hash, fixture.t, content, com)
- }
-}
-
-func testReader(c *C, source io.Reader, hash core.Hash, t core.ObjectType, content []byte, com string) {
- r, err := NewReader(source)
- c.Assert(err, IsNil)
-
- typ, size, err := r.Header()
- c.Assert(err, IsNil)
- c.Assert(typ, Equals, t)
- c.Assert(content, HasLen, int(size))
-
- rc, err := ioutil.ReadAll(r)
- c.Assert(err, IsNil)
-	c.Assert(rc, DeepEquals, content, Commentf("%scontent=%s, expected=%s", com, base64.StdEncoding.EncodeToString(rc), base64.StdEncoding.EncodeToString(content)))
-
- c.Assert(r.Hash(), Equals, hash) // Test Hash() before close
- c.Assert(r.Close(), IsNil)
-
-}
-
-func (s *SuiteReader) TestReadEmptyObjfile(c *C) {
- source := bytes.NewReader([]byte{})
- _, err := NewReader(source)
- c.Assert(err, NotNil)
-}
-
-func (s *SuiteReader) TestReadGarbage(c *C) {
- source := bytes.NewReader([]byte("!@#$RO!@NROSADfinq@o#irn@oirfn"))
- _, err := NewReader(source)
- c.Assert(err, NotNil)
-}
-
-func (s *SuiteReader) TestReadCorruptZLib(c *C) {
- data, _ := base64.StdEncoding.DecodeString("eAFLysaalPUjBgAAAJsAHw")
- source := bytes.NewReader(data)
- r, err := NewReader(source)
- c.Assert(err, IsNil)
-
- _, _, err = r.Header()
- c.Assert(err, NotNil)
-}
diff --git a/formats/objfile/writer.go b/formats/objfile/writer.go
deleted file mode 100644
index d2f2314..0000000
--- a/formats/objfile/writer.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package objfile
-
-import (
- "compress/zlib"
- "errors"
- "io"
- "strconv"
-
- "gopkg.in/src-d/go-git.v4/core"
-)
-
-var (
- ErrOverflow = errors.New("objfile: declared data length exceeded (overflow)")
-)
-
-// Writer writes and encodes data in compressed objfile format to a provided
-// io.Writer. Close should be called when finished with the Writer. Close will
-// not close the underlying io.Writer.
-type Writer struct {
- raw io.Writer
- zlib io.WriteCloser
- hasher core.Hasher
- multi io.Writer
-
- closed bool
- pending int64 // number of unwritten bytes
-}
-
-// NewWriter returns a new Writer writing to w.
-//
-// The returned Writer implements io.WriteCloser. Close should be called when
-// finished with the Writer. Close will not close the underlying io.Writer.
-func NewWriter(w io.Writer) *Writer {
- return &Writer{
- raw: w,
- zlib: zlib.NewWriter(w),
- }
-}
-
-// WriteHeader writes the type and the size and prepares to accept the object's
-// contents. If an invalid t is provided, core.ErrInvalidType is returned. If a
-// negative size is provided, ErrNegativeSize is returned.
-func (w *Writer) WriteHeader(t core.ObjectType, size int64) error {
- if !t.Valid() {
- return core.ErrInvalidType
- }
- if size < 0 {
- return ErrNegativeSize
- }
-
- b := t.Bytes()
- b = append(b, ' ')
- b = append(b, []byte(strconv.FormatInt(size, 10))...)
- b = append(b, 0)
-
- defer w.prepareForWrite(t, size)
- _, err := w.zlib.Write(b)
-
- return err
-}
-
-func (w *Writer) prepareForWrite(t core.ObjectType, size int64) {
- w.pending = size
-
- w.hasher = core.NewHasher(t, size)
- w.multi = io.MultiWriter(w.zlib, w.hasher)
-}
-
-// Write writes the object's contents. Write returns the error ErrOverflow if
-// more than size bytes are written after WriteHeader.
-func (w *Writer) Write(p []byte) (n int, err error) {
- if w.closed {
- return 0, ErrClosed
- }
-
-	overflow := false
-	if int64(len(p)) > w.pending {
-		p = p[0:w.pending]
-		overflow = true
-	}
-
-	n, err = w.multi.Write(p)
-	w.pending -= int64(n)
-	if err == nil && overflow {
- err = ErrOverflow
- return
- }
-
- return
-}
-
-// Hash returns the hash of the object data stream that has been written so far.
-// It can be called before or after Close.
-func (w *Writer) Hash() core.Hash {
- return w.hasher.Sum() // Not yet closed, return hash of data written so far
-}
-
-// Close releases any resources consumed by the Writer.
-//
-// Calling Close does not close the wrapped io.Writer originally passed to
-// NewWriter.
-func (w *Writer) Close() error {
- if err := w.zlib.Close(); err != nil {
- return err
- }
-
- w.closed = true
- return nil
-}
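
And the mirror image for writing, again only a sketch with the pre-move import paths:

package main

import (
	"bytes"
	"fmt"

	"gopkg.in/src-d/go-git.v4/core"
	"gopkg.in/src-d/go-git.v4/formats/objfile"
)

func main() {
	content := []byte("this is a test")

	buf := bytes.NewBuffer(nil)
	w := objfile.NewWriter(buf)

	if err := w.WriteHeader(core.BlobObject, int64(len(content))); err != nil {
		panic(err)
	}
	if _, err := w.Write(content); err != nil {
		panic(err)
	}

	fmt.Println("hash:", w.Hash()) // a8a940... per the fixtures above
	if err := w.Close(); err != nil {
		panic(err)
	}
}
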
diff --git a/formats/objfile/writer_test.go b/formats/objfile/writer_test.go
deleted file mode 100644
index 18bba79..0000000
--- a/formats/objfile/writer_test.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package objfile
-
-import (
- "bytes"
- "encoding/base64"
- "fmt"
- "io"
-
- . "gopkg.in/check.v1"
- "gopkg.in/src-d/go-git.v4/core"
-)
-
-type SuiteWriter struct{}
-
-var _ = Suite(&SuiteWriter{})
-
-func (s *SuiteWriter) TestWriteObjfile(c *C) {
- for k, fixture := range objfileFixtures {
- buffer := bytes.NewBuffer(nil)
-
- com := fmt.Sprintf("test %d: ", k)
- hash := core.NewHash(fixture.hash)
- content, _ := base64.StdEncoding.DecodeString(fixture.content)
-
- // Write the data out to the buffer
- testWriter(c, buffer, hash, fixture.t, content)
-
- // Read the data back in from the buffer to be sure it matches
- testReader(c, buffer, hash, fixture.t, content, com)
- }
-}
-
-func testWriter(c *C, dest io.Writer, hash core.Hash, t core.ObjectType, content []byte) {
- size := int64(len(content))
- w := NewWriter(dest)
-
- err := w.WriteHeader(t, size)
- c.Assert(err, IsNil)
-
- written, err := io.Copy(w, bytes.NewReader(content))
- c.Assert(err, IsNil)
- c.Assert(written, Equals, size)
-
- c.Assert(w.Hash(), Equals, hash)
- c.Assert(w.Close(), IsNil)
-}
-
-func (s *SuiteWriter) TestWriteOverflow(c *C) {
- buf := bytes.NewBuffer(nil)
- w := NewWriter(buf)
-
- err := w.WriteHeader(core.BlobObject, 8)
- c.Assert(err, IsNil)
-
- n, err := w.Write([]byte("1234"))
- c.Assert(err, IsNil)
- c.Assert(n, Equals, 4)
-
- n, err = w.Write([]byte("56789"))
- c.Assert(err, Equals, ErrOverflow)
- c.Assert(n, Equals, 4)
-}
-
-func (s *SuiteWriter) TestNewWriterInvalidType(c *C) {
- buf := bytes.NewBuffer(nil)
- w := NewWriter(buf)
-
- err := w.WriteHeader(core.InvalidObject, 8)
- c.Assert(err, Equals, core.ErrInvalidType)
-}
-
-func (s *SuiteWriter) TestNewWriterInvalidSize(c *C) {
- buf := bytes.NewBuffer(nil)
- w := NewWriter(buf)
-
- err := w.WriteHeader(core.BlobObject, -1)
- c.Assert(err, Equals, ErrNegativeSize)
- err = w.WriteHeader(core.BlobObject, -1651860)
- c.Assert(err, Equals, ErrNegativeSize)
-}
diff --git a/formats/packfile/decoder.go b/formats/packfile/decoder.go
deleted file mode 100644
index e96980a..0000000
--- a/formats/packfile/decoder.go
+++ /dev/null
@@ -1,306 +0,0 @@
-package packfile
-
-import (
- "bytes"
-
- "gopkg.in/src-d/go-git.v4/core"
-)
-
-// Format specifies if the packfile uses ref-deltas or ofs-deltas.
-type Format int
-
-// Possible values of the Format type.
-const (
- UnknownFormat Format = iota
- OFSDeltaFormat
- REFDeltaFormat
-)
-
-var (
- // ErrMaxObjectsLimitReached is returned by Decode when the number
- // of objects in the packfile is higher than
- // Decoder.MaxObjectsLimit.
- ErrMaxObjectsLimitReached = NewError("max. objects limit reached")
- // ErrInvalidObject is returned by Decode when an invalid object is
- // found in the packfile.
- ErrInvalidObject = NewError("invalid git object")
- // ErrPackEntryNotFound is returned by Decode when a reference in
-	// the packfile references an unknown object.
- ErrPackEntryNotFound = NewError("can't find a pack entry")
- // ErrZLib is returned by Decode when there was an error unzipping
- // the packfile contents.
- ErrZLib = NewError("zlib reading error")
-	// ErrCannotRecall is returned by recallByOffset or recallByHash if the object
- // to recall cannot be returned.
- ErrCannotRecall = NewError("cannot recall object")
-	// ErrNonSeekable is returned if NewDecoder is used with a non-seekable
-	// reader and without a core.ObjectStorer, or if ReadObjectAt is called
-	// without a seekable scanner
-	ErrNonSeekable = NewError("non-seekable scanner")
-	// ErrRollback is returned when Rollback on a transaction fails after a
-	// SetObject error
-	ErrRollback = NewError("rollback error, during set error")
-)
-
-// Decoder reads and decodes packfiles from an input stream.
-type Decoder struct {
- s *Scanner
- o core.ObjectStorer
- tx core.Transaction
-
- offsetToHash map[int64]core.Hash
- hashToOffset map[core.Hash]int64
- crcs map[core.Hash]uint32
-}
-
-// NewDecoder returns a new Decoder that reads from s; o may be nil only when
-// s is seekable.
-func NewDecoder(s *Scanner, o core.ObjectStorer) (*Decoder, error) {
- if !s.IsSeekable && o == nil {
- return nil, ErrNonSeekable
- }
-
- return &Decoder{
- s: s,
- o: o,
-
- offsetToHash: make(map[int64]core.Hash, 0),
- hashToOffset: make(map[core.Hash]int64, 0),
- crcs: make(map[core.Hash]uint32, 0),
- }, nil
-}
-
-// Decode reads the whole packfile from the scanner and returns its checksum.
-func (d *Decoder) Decode() (checksum core.Hash, err error) {
- if err := d.doDecode(); err != nil {
- return core.ZeroHash, err
- }
-
- return d.s.Checksum()
-}
-
-func (d *Decoder) doDecode() error {
- _, count, err := d.s.Header()
- if err != nil {
- return err
- }
-
- _, isTxStorer := d.o.(core.Transactioner)
- switch {
- case d.o == nil:
- return d.readObjects(int(count))
- case isTxStorer:
- return d.readObjectsWithObjectStorerTx(int(count))
- default:
- return d.readObjectsWithObjectStorer(int(count))
- }
-}
-
-func (d *Decoder) readObjects(count int) error {
- for i := 0; i < count; i++ {
- if _, err := d.ReadObject(); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (d *Decoder) readObjectsWithObjectStorer(count int) error {
- for i := 0; i < count; i++ {
- obj, err := d.ReadObject()
- if err != nil {
- return err
- }
-
- if _, err := d.o.SetObject(obj); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (d *Decoder) readObjectsWithObjectStorerTx(count int) error {
- tx := d.o.(core.Transactioner).Begin()
-
- for i := 0; i < count; i++ {
- obj, err := d.ReadObject()
- if err != nil {
- return err
- }
-
- if _, err := tx.SetObject(obj); err != nil {
- if rerr := d.tx.Rollback(); rerr != nil {
- return ErrRollback.AddDetails(
- "error: %s, during tx.Set error: %s", rerr, err,
- )
- }
-
- return err
- }
-
- }
-
- return tx.Commit()
-}
-
-// ReadObject reads an object from the stream and returns it
-func (d *Decoder) ReadObject() (core.Object, error) {
- h, err := d.s.NextObjectHeader()
- if err != nil {
- return nil, err
- }
-
- obj := d.newObject()
- obj.SetSize(h.Length)
- obj.SetType(h.Type)
- var crc uint32
- switch h.Type {
- case core.CommitObject, core.TreeObject, core.BlobObject, core.TagObject:
- crc, err = d.fillRegularObjectContent(obj)
- case core.REFDeltaObject:
- crc, err = d.fillREFDeltaObjectContent(obj, h.Reference)
- case core.OFSDeltaObject:
- crc, err = d.fillOFSDeltaObjectContent(obj, h.OffsetReference)
- default:
- err = ErrInvalidObject.AddDetails("type %q", h.Type)
- }
-
- if err != nil {
- return obj, err
- }
-
- hash := obj.Hash()
- d.setOffset(hash, h.Offset)
- d.setCRC(hash, crc)
-
- return obj, nil
-}
-
-func (d *Decoder) newObject() core.Object {
- if d.o == nil {
- return &core.MemoryObject{}
- }
-
- return d.o.NewObject()
-}
-
-// ReadObjectAt reads an object at the given location
-func (d *Decoder) ReadObjectAt(offset int64) (core.Object, error) {
- if !d.s.IsSeekable {
- return nil, ErrNonSeekable
- }
-
- beforeJump, err := d.s.Seek(offset)
- if err != nil {
- return nil, err
- }
-
- defer func() {
- _, seekErr := d.s.Seek(beforeJump)
- if err == nil {
- err = seekErr
- }
- }()
-
- return d.ReadObject()
-}
-
-func (d *Decoder) fillRegularObjectContent(obj core.Object) (uint32, error) {
- w, err := obj.Writer()
- if err != nil {
- return 0, err
- }
-
- _, crc, err := d.s.NextObject(w)
- return crc, err
-}
-
-func (d *Decoder) fillREFDeltaObjectContent(obj core.Object, ref core.Hash) (uint32, error) {
- buf := bytes.NewBuffer(nil)
- _, crc, err := d.s.NextObject(buf)
- if err != nil {
- return 0, err
- }
-
- base, err := d.recallByHash(ref)
- if err != nil {
- return 0, err
- }
-
- obj.SetType(base.Type())
- return crc, ApplyDelta(obj, base, buf.Bytes())
-}
-
-func (d *Decoder) fillOFSDeltaObjectContent(obj core.Object, offset int64) (uint32, error) {
- buf := bytes.NewBuffer(nil)
- _, crc, err := d.s.NextObject(buf)
- if err != nil {
- return 0, err
- }
-
- base, err := d.recallByOffset(offset)
- if err != nil {
- return 0, err
- }
-
- obj.SetType(base.Type())
- return crc, ApplyDelta(obj, base, buf.Bytes())
-}
-
-func (d *Decoder) setOffset(h core.Hash, offset int64) {
- d.offsetToHash[offset] = h
- d.hashToOffset[h] = offset
-}
-
-func (d *Decoder) setCRC(h core.Hash, crc uint32) {
- d.crcs[h] = crc
-}
-
-func (d *Decoder) recallByOffset(o int64) (core.Object, error) {
- if d.s.IsSeekable {
- return d.ReadObjectAt(o)
- }
-
- if h, ok := d.offsetToHash[o]; ok {
- return d.tx.Object(core.AnyObject, h)
- }
-
- return nil, core.ErrObjectNotFound
-}
-
-func (d *Decoder) recallByHash(h core.Hash) (core.Object, error) {
- if d.s.IsSeekable {
- if o, ok := d.hashToOffset[h]; ok {
- return d.ReadObjectAt(o)
- }
- }
-
- obj, err := d.tx.Object(core.AnyObject, h)
- if err != core.ErrObjectNotFound {
- return obj, err
- }
-
- return nil, core.ErrObjectNotFound
-}
-
-// SetOffsets sets the hash-to-offset map, required to use ReadObjectAt
-// without decoding the full packfile
-func (d *Decoder) SetOffsets(offsets map[core.Hash]int64) {
- d.hashToOffset = offsets
-}
-
-// Offsets returns the offsets of the objects read so far
-func (d *Decoder) Offsets() map[core.Hash]int64 {
- return d.hashToOffset
-}
-
-// CRCs returns the CRC-32 for each object read
-func (d *Decoder) CRCs() map[core.Hash]uint32 {
- return d.crcs
-}
-
-// Close closes the Scanner; usually this means that the whole reader has been
-// read and discarded
-func (d *Decoder) Close() error {
- return d.s.Close()
-}
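
Putting the Scanner and Decoder together, a minimal sketch that decodes a packfile into in-memory storage (pre-move import paths; the pack path is illustrative):

package main

import (
	"fmt"
	"os"

	"gopkg.in/src-d/go-git.v4/formats/packfile"
	"gopkg.in/src-d/go-git.v4/storage/memory"
)

func main() {
	f, err := os.Open("objects/pack/pack-0123.pack") // illustrative path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	scanner := packfile.NewScanner(f)
	storage := memory.NewStorage()

	d, err := packfile.NewDecoder(scanner, storage)
	if err != nil {
		panic(err)
	}
	defer d.Close()

	checksum, err := d.Decode()
	if err != nil {
		panic(err)
	}

	fmt.Println("pack checksum:", checksum)
}
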
diff --git a/formats/packfile/decoder_test.go b/formats/packfile/decoder_test.go
deleted file mode 100644
index aa178d7..0000000
--- a/formats/packfile/decoder_test.go
+++ /dev/null
@@ -1,182 +0,0 @@
-package packfile
-
-import (
- "io"
- "testing"
-
- "gopkg.in/src-d/go-git.v4/core"
- "gopkg.in/src-d/go-git.v4/fixtures"
- "gopkg.in/src-d/go-git.v4/formats/idxfile"
- "gopkg.in/src-d/go-git.v4/storage/memory"
-
- . "gopkg.in/check.v1"
-)
-
-func Test(t *testing.T) { TestingT(t) }
-
-type ReaderSuite struct {
- fixtures.Suite
-}
-
-var _ = Suite(&ReaderSuite{})
-
-func (s *ReaderSuite) TestNewDecodeNonSeekable(c *C) {
- scanner := NewScanner(nil)
- d, err := NewDecoder(scanner, nil)
-
- c.Assert(d, IsNil)
- c.Assert(err, NotNil)
-}
-
-func (s *ReaderSuite) TestDecode(c *C) {
- fixtures.Basic().ByTag("packfile").Test(c, func(f *fixtures.Fixture) {
- scanner := NewScanner(f.Packfile())
- storage := memory.NewStorage()
-
- d, err := NewDecoder(scanner, storage)
- c.Assert(err, IsNil)
- defer d.Close()
-
- ch, err := d.Decode()
- c.Assert(err, IsNil)
- c.Assert(ch, Equals, f.PackfileHash)
-
- assertObjects(c, storage, expectedHashes)
- })
-}
-
-func (s *ReaderSuite) TestDecodeInMemory(c *C) {
- fixtures.Basic().ByTag("packfile").Test(c, func(f *fixtures.Fixture) {
- scanner := NewScanner(f.Packfile())
- d, err := NewDecoder(scanner, nil)
- c.Assert(err, IsNil)
-
- ch, err := d.Decode()
- c.Assert(err, IsNil)
- c.Assert(ch, Equals, f.PackfileHash)
- })
-}
-
-var expectedHashes = []string{
- "918c48b83bd081e863dbe1b80f8998f058cd8294",
- "af2d6a6954d532f8ffb47615169c8fdf9d383a1a",
- "1669dce138d9b841a518c64b10914d88f5e488ea",
- "a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69",
- "b8e471f58bcbca63b07bda20e428190409c2db47",
- "35e85108805c84807bc66a02d91535e1e24b38b9",
- "b029517f6300c2da0f4b651b8642506cd6aaf45d",
- "32858aad3c383ed1ff0a0f9bdf231d54a00c9e88",
- "d3ff53e0564a9f87d8e84b6e28e5060e517008aa",
- "c192bd6a24ea1ab01d78686e417c8bdc7c3d197f",
- "d5c0f4ab811897cadf03aec358ae60d21f91c50d",
- "49c6bb89b17060d7b4deacb7b338fcc6ea2352a9",
- "cf4aa3b38974fb7d81f367c0830f7d78d65ab86b",
- "9dea2395f5403188298c1dabe8bdafe562c491e3",
- "586af567d0bb5e771e49bdd9434f5e0fb76d25fa",
- "9a48f23120e880dfbe41f7c9b7b708e9ee62a492",
- "5a877e6a906a2743ad6e45d99c1793642aaf8eda",
- "c8f1d8c61f9da76f4cb49fd86322b6e685dba956",
- "a8d315b2b1c615d43042c3a62402b8a54288cf5c",
- "a39771a7651f97faf5c72e08224d857fc35133db",
- "880cd14280f4b9b6ed3986d6671f907d7cc2a198",
- "fb72698cab7617ac416264415f13224dfd7a165e",
- "4d081c50e250fa32ea8b1313cf8bb7c2ad7627fd",
- "eba74343e2f15d62adedfd8c883ee0262b5c8021",
- "c2d30fa8ef288618f65f6eed6e168e0d514886f4",
- "8dcef98b1d52143e1e2dbc458ffe38f925786bf2",
- "aa9b383c260e1d05fbbf6b30a02914555e20c725",
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
- "dbd3641b371024f44d0e469a9c8f5457b0660de1",
- "e8d3ffab552895c19b9fcf7aa264d277cde33881",
- "7e59600739c96546163833214c36459e324bad0a",
-}
-
-func (s *ReaderSuite) TestDecodeCRCs(c *C) {
- f := fixtures.Basic().ByTag("ofs-delta").One()
-
- scanner := NewScanner(f.Packfile())
- storage := memory.NewStorage()
-
- d, err := NewDecoder(scanner, storage)
- c.Assert(err, IsNil)
- _, err = d.Decode()
- c.Assert(err, IsNil)
-
- var sum uint64
- for _, crc := range d.CRCs() {
- sum += uint64(crc)
- }
-
- c.Assert(int(sum), Equals, 78022211966)
-}
-
-func (s *ReaderSuite) TestReadObjectAt(c *C) {
- f := fixtures.Basic().One()
- scanner := NewScanner(f.Packfile())
- d, err := NewDecoder(scanner, nil)
- c.Assert(err, IsNil)
-
- // when the packfile is ref-delta based, the offsets are required
- if f.Is("ref-delta") {
- offsets := getOffsetsFromIdx(f.Idx())
- d.SetOffsets(offsets)
- }
-
-	// the object at offset 186 is a delta, so it should be recalled
-	// without having been read before.
- obj, err := d.ReadObjectAt(186)
- c.Assert(err, IsNil)
- c.Assert(obj.Hash().String(), Equals, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
-}
-
-func (s *ReaderSuite) TestOffsets(c *C) {
- f := fixtures.Basic().One()
- scanner := NewScanner(f.Packfile())
- d, err := NewDecoder(scanner, nil)
- c.Assert(err, IsNil)
-
- c.Assert(d.Offsets(), HasLen, 0)
-
- _, err = d.Decode()
- c.Assert(err, IsNil)
-
- c.Assert(d.Offsets(), HasLen, 31)
-}
-
-func (s *ReaderSuite) TestSetOffsets(c *C) {
- f := fixtures.Basic().One()
- scanner := NewScanner(f.Packfile())
- d, err := NewDecoder(scanner, nil)
- c.Assert(err, IsNil)
-
- h := core.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
- d.SetOffsets(map[core.Hash]int64{h: 42})
-
- o := d.Offsets()
- c.Assert(o, HasLen, 1)
- c.Assert(o[h], Equals, int64(42))
-}
-
-func assertObjects(c *C, s *memory.Storage, expects []string) {
- c.Assert(len(expects), Equals, len(s.Objects))
- for _, exp := range expects {
- obt, err := s.Object(core.AnyObject, core.NewHash(exp))
- c.Assert(err, IsNil)
- c.Assert(obt.Hash().String(), Equals, exp)
- }
-}
-
-func getOffsetsFromIdx(r io.Reader) map[core.Hash]int64 {
- idx := &idxfile.Idxfile{}
- err := idxfile.NewDecoder(r).Decode(idx)
- if err != nil {
- panic(err)
- }
-
- offsets := make(map[core.Hash]int64)
- for _, e := range idx.Entries {
- offsets[e.Hash] = int64(e.Offset)
- }
-
- return offsets
-}
diff --git a/formats/packfile/delta.go b/formats/packfile/delta.go
deleted file mode 100644
index d08f969..0000000
--- a/formats/packfile/delta.go
+++ /dev/null
@@ -1,181 +0,0 @@
-package packfile
-
-import (
- "io/ioutil"
-
- "gopkg.in/src-d/go-git.v4/core"
-)
-
-// See https://github.com/git/git/blob/49fa3dc76179e04b0833542fa52d0f287a4955ac/delta.h
-// https://github.com/git/git/blob/c2c5f6b1e479f2c38e0e01345350620944e3527f/patch-delta.c,
-// and https://github.com/tarruda/node-git-core/blob/master/src/js/delta.js
-// for details about the delta format.
-
-const deltaSizeMin = 4
-
-// ApplyDelta writes to target the result of applying the modification deltas in delta to base.
-func ApplyDelta(target, base core.Object, delta []byte) error {
- r, err := base.Reader()
- if err != nil {
- return err
- }
-
- w, err := target.Writer()
- if err != nil {
- return err
- }
-
- src, err := ioutil.ReadAll(r)
- if err != nil {
- return err
- }
-
- dst := PatchDelta(src, delta)
- target.SetSize(int64(len(dst)))
-
- if _, err := w.Write(dst); err != nil {
- return err
- }
-
- return nil
-}
-
-// PatchDelta returns the result of applying the modification deltas in delta to src.
-func PatchDelta(src, delta []byte) []byte {
- if len(delta) < deltaSizeMin {
- return nil
- }
-
- srcSz, delta := decodeLEB128(delta)
- if srcSz != uint(len(src)) {
- return nil
- }
-
- targetSz, delta := decodeLEB128(delta)
- remainingTargetSz := targetSz
-
- var dest []byte
- var cmd byte
- for {
- cmd = delta[0]
- delta = delta[1:]
- if isCopyFromSrc(cmd) {
- var offset, sz uint
- offset, delta = decodeOffset(cmd, delta)
- sz, delta = decodeSize(cmd, delta)
- if invalidSize(sz, targetSz) ||
- invalidOffsetSize(offset, sz, srcSz) {
- break
- }
- dest = append(dest, src[offset:offset+sz]...)
- remainingTargetSz -= sz
- } else if isCopyFromDelta(cmd) {
- sz := uint(cmd) // cmd is the size itself
- if invalidSize(sz, targetSz) {
- break
- }
- dest = append(dest, delta[0:sz]...)
- remainingTargetSz -= sz
- delta = delta[sz:]
- } else {
- return nil
- }
-
- if remainingTargetSz <= 0 {
- break
- }
- }
-
- return dest
-}
-
-// Decodes a number encoded as an unsigned LEB128 at the start of some
-// binary data and returns the decoded number and the rest of the
-// stream.
-//
-// This must be called twice on the delta data buffer, first to get the
-// expected source buffer size, and again to get the target buffer size.
-func decodeLEB128(input []byte) (uint, []byte) {
- var num, sz uint
- var b byte
- for {
- b = input[sz]
-		num |= (uint(b) & payload) << (sz * 7) // concatenates 7-bit chunks
- sz++
-
- if uint(b)&continuation == 0 || sz == uint(len(input)) {
- break
- }
- }
-
- return num, input[sz:]
-}
-
-const (
- payload = 0x7f // 0111 1111
- continuation = 0x80 // 1000 0000
-)
-
-func isCopyFromSrc(cmd byte) bool {
- return (cmd & 0x80) != 0
-}
-
-func isCopyFromDelta(cmd byte) bool {
- return (cmd&0x80) == 0 && cmd != 0
-}
-
-func decodeOffset(cmd byte, delta []byte) (uint, []byte) {
- var offset uint
- if (cmd & 0x01) != 0 {
- offset = uint(delta[0])
- delta = delta[1:]
- }
- if (cmd & 0x02) != 0 {
- offset |= uint(delta[0]) << 8
- delta = delta[1:]
- }
- if (cmd & 0x04) != 0 {
- offset |= uint(delta[0]) << 16
- delta = delta[1:]
- }
- if (cmd & 0x08) != 0 {
- offset |= uint(delta[0]) << 24
- delta = delta[1:]
- }
-
- return offset, delta
-}
-
-func decodeSize(cmd byte, delta []byte) (uint, []byte) {
- var sz uint
- if (cmd & 0x10) != 0 {
- sz = uint(delta[0])
- delta = delta[1:]
- }
- if (cmd & 0x20) != 0 {
- sz |= uint(delta[0]) << 8
- delta = delta[1:]
- }
- if (cmd & 0x40) != 0 {
- sz |= uint(delta[0]) << 16
- delta = delta[1:]
- }
- if sz == 0 {
- sz = 0x10000
- }
-
- return sz, delta
-}
-
-func invalidSize(sz, targetSz uint) bool {
- return sz > targetSz
-}
-
-func invalidOffsetSize(offset, sz, srcSz uint) bool {
- return sumOverflows(offset, sz) ||
- offset+sz > srcSz
-}
-
-func sumOverflows(a, b uint) bool {
- return a+b < a
-}
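
The size header of a delta uses the unsigned LEB128 scheme implemented by the unexported decodeLEB128 above: the low 7 bits of each byte are payload, and the high bit marks a continuation. A self-contained re-implementation with a worked example (not part of the package):

package main

import "fmt"

// decodeLEB128 mirrors the helper above: little-endian base-128,
// low 7 bits of each byte are payload, MSB is the continuation flag.
func decodeLEB128(input []byte) (uint, []byte) {
	var num, sz uint
	for {
		b := input[sz]
		num |= (uint(b) & 0x7f) << (sz * 7)
		sz++
		if b&0x80 == 0 || sz == uint(len(input)) {
			break
		}
	}
	return num, input[sz:]
}

func main() {
	// 0xe5 0x8e 0x26 encodes 624485:
	// 0x65 | (0x0e << 7) | (0x26 << 14) = 0x98765.
	n, rest := decodeLEB128([]byte{0xe5, 0x8e, 0x26, 0xff})
	fmt.Println(n, rest) // 624485 [255]
}
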
diff --git a/formats/packfile/doc.go b/formats/packfile/doc.go
deleted file mode 100644
index 0b173ca..0000000
--- a/formats/packfile/doc.go
+++ /dev/null
@@ -1,168 +0,0 @@
-// Package packfile implements an encoder/decoder of the packfile format
-package packfile
-
-/*
-GIT pack format
-===============
-
-== pack-*.pack files have the following format:
-
- - A header appears at the beginning and consists of the following:
-
- 4-byte signature:
- The signature is: {'P', 'A', 'C', 'K'}
-
- 4-byte version number (network byte order):
- GIT currently accepts version number 2 or 3 but
- generates version 2 only.
-
- 4-byte number of objects contained in the pack (network byte order)
-
- Observation: we cannot have more than 4G versions ;-) and
- more than 4G objects in a pack.
-
- - The header is followed by number of object entries, each of
- which looks like this:
-
- (undeltified representation)
- n-byte type and length (3-bit type, (n-1)*7+4-bit length)
- compressed data
-
- (deltified representation)
- n-byte type and length (3-bit type, (n-1)*7+4-bit length)
- 20-byte base object name
- compressed delta data
-
- Observation: length of each object is encoded in a variable
- length format and is not constrained to 32-bit or anything.
-
- - The trailer records 20-byte SHA1 checksum of all of the above.
-
-== Original (version 1) pack-*.idx files have the following format:
-
- - The header consists of 256 4-byte network byte order
- integers. N-th entry of this table records the number of
- objects in the corresponding pack, the first byte of whose
- object name is less than or equal to N. This is called the
- 'first-level fan-out' table.
-
- - The header is followed by sorted 24-byte entries, one entry
- per object in the pack. Each entry is:
-
- 4-byte network byte order integer, recording where the
- object is stored in the packfile as the offset from the
- beginning.
-
- 20-byte object name.
-
- - The file is concluded with a trailer:
-
- A copy of the 20-byte SHA1 checksum at the end of
- corresponding packfile.
-
- 20-byte SHA1-checksum of all of the above.
-
-Pack Idx file:
-
- -- +--------------------------------+
-fanout | fanout[0] = 2 (for example) |-.
-table +--------------------------------+ |
- | fanout[1] | |
- +--------------------------------+ |
- | fanout[2] | |
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
- | fanout[255] = total objects |---.
- -- +--------------------------------+ | |
-main | offset | | |
-index | object name 00XXXXXXXXXXXXXXXX | | |
-table +--------------------------------+ | |
- | offset | | |
- | object name 00XXXXXXXXXXXXXXXX | | |
- +--------------------------------+<+ |
- .-| offset | |
- | | object name 01XXXXXXXXXXXXXXXX | |
- | +--------------------------------+ |
- | | offset | |
- | | object name 01XXXXXXXXXXXXXXXX | |
- | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
- | | offset | |
- | | object name FFXXXXXXXXXXXXXXXX | |
- --| +--------------------------------+<--+
-trailer | | packfile checksum |
- | +--------------------------------+
- | | idxfile checksum |
- | +--------------------------------+
- .-------.
- |
-Pack file entry: <+
-
- packed object header:
- 1-byte size extension bit (MSB)
- type (next 3 bit)
- size0 (lower 4-bit)
- n-byte sizeN (as long as MSB is set, each 7-bit)
- size0..sizeN form 4+7+7+..+7 bit integer, size0
- is the least significant part, and sizeN is the
- most significant part.
- packed object data:
- If it is not DELTA, then deflated bytes (the size above
- is the size before compression).
- If it is REF_DELTA, then
- 20-byte base object name SHA1 (the size above is the
- size of the delta data that follows).
- delta data, deflated.
- If it is OFS_DELTA, then
- n-byte offset (see below) interpreted as a negative
- offset from the type-byte of the header of the
- ofs-delta entry (the size above is the size of
- the delta data that follows).
- delta data, deflated.
-
- offset encoding:
- n bytes with MSB set in all but the last one.
- The offset is then the number constructed by
- concatenating the lower 7 bit of each byte, and
- for n >= 2 adding 2^7 + 2^14 + ... + 2^(7*(n-1))
- to the result.
-
-
-
-== Version 2 pack-*.idx files support packs larger than 4 GiB, and
- have some other reorganizations. They have the format:
-
- - A 4-byte magic number '\377tOc' which is an unreasonable
- fanout[0] value.
-
- - A 4-byte version number (= 2)
-
- - A 256-entry fan-out table just like v1.
-
- - A table of sorted 20-byte SHA1 object names. These are
- packed together without offset values to reduce the cache
- footprint of the binary search for a specific object name.
-
- - A table of 4-byte CRC32 values of the packed object data.
- This is new in v2 so compressed data can be copied directly
- from pack to pack during repacking without undetected
- data corruption.
-
- - A table of 4-byte offset values (in network byte order).
- These are usually 31-bit pack file offsets, but large
- offsets are encoded as an index into the next table with
- the msbit set.
-
- - A table of 8-byte offset entries (empty for pack files less
- than 2 GiB). Pack files are organized with heavily used
- objects toward the front, so most object references should
- not need to refer to this table.
-
- - The same trailer as a v1 pack file:
-
-	A copy of the 20-byte SHA1 checksum at the end of the
-	corresponding packfile.
-
- 20-byte SHA1-checksum of all of the above.
-
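A sketch of how the two v2 offset tables combine (illustrative only;
offsets32 and offsets64 stand for the two tables described above):

    // resolveOffset returns the packfile offset of entry i. When the
    // MSB of the 4-byte value is set, the remaining 31 bits index the
    // 8-byte large-offset table instead.
    func resolveOffset(offsets32 []uint32, offsets64 []uint64, i int) uint64 {
        o := offsets32[i]
        if o&0x80000000 == 0 {
            return uint64(o) // plain 31-bit offset
        }
        return offsets64[o&0x7fffffff]
    }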
-From:
-https://www.kernel.org/pub/software/scm/git/docs/v1.7.5/technical/pack-protocol.txt
-*/
diff --git a/formats/packfile/error.go b/formats/packfile/error.go
deleted file mode 100644
index c0b9163..0000000
--- a/formats/packfile/error.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package packfile
-
-import "fmt"
-
-// Error specifies errors returned during packfile parsing.
-type Error struct {
- reason, details string
-}
-
-// NewError returns a new error.
-func NewError(reason string) *Error {
- return &Error{reason: reason}
-}
-
-// Error returns a text representation of the error.
-func (e *Error) Error() string {
- if e.details == "" {
- return e.reason
- }
-
- return fmt.Sprintf("%s: %s", e.reason, e.details)
-}
-
-// AddDetails returns a copy of the error with details formatted from the given arguments.
-func (e *Error) AddDetails(format string, args ...interface{}) *Error {
- return &Error{
- reason: e.reason,
- details: fmt.Sprintf(format, args...),
- }
-}
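A brief usage sketch for this error type (hypothetical caller; the values
are made up for illustration):

    // ErrBadVersion is a reusable base error.
    var ErrBadVersion = NewError("unsupported packfile version")

    func checkVersion(v uint32) error {
        if v != 2 {
            // yields "unsupported packfile version: 3" for v == 3
            return ErrBadVersion.AddDetails("%d", v)
        }
        return nil
    }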
diff --git a/formats/packfile/scanner.go b/formats/packfile/scanner.go
deleted file mode 100644
index 69cc7d0..0000000
--- a/formats/packfile/scanner.go
+++ /dev/null
@@ -1,418 +0,0 @@
-package packfile
-
-import (
- "bufio"
- "bytes"
- "compress/zlib"
- "fmt"
- "hash"
- "hash/crc32"
- "io"
- "io/ioutil"
-
- "gopkg.in/src-d/go-git.v4/core"
- "gopkg.in/src-d/go-git.v4/utils/binary"
-)
-
-var (
- // ErrEmptyPackfile is returned by ReadHeader when no data is found in the packfile
- ErrEmptyPackfile = NewError("empty packfile")
- // ErrBadSignature is returned by ReadHeader when the signature in the packfile is incorrect.
- ErrBadSignature = NewError("malformed pack file signature")
- // ErrUnsupportedVersion is returned by ReadHeader when the packfile version is
- // different than VersionSupported.
- ErrUnsupportedVersion = NewError("unsupported packfile version")
-	// ErrSeekNotSupported is returned when the underlying reader does not support seeking
-	ErrSeekNotSupported = NewError("seek not supported")
-)
-
-const (
- // VersionSupported is the packfile version supported by this parser.
- VersionSupported uint32 = 2
-)
-
-// ObjectHeader contains the information related to the object; this
-// information is collected from the bytes preceding the object's content.
-type ObjectHeader struct {
- Type core.ObjectType
- Offset int64
- Length int64
- Reference core.Hash
- OffsetReference int64
-}
-
-type Scanner struct {
- r reader
- crc hash.Hash32
-
-	// pendingObject is used to detect if an object has been read, or
-	// is still waiting to be read
- pendingObject *ObjectHeader
- version, objects uint32
-
-	// IsSeekable reports whether this scanner can Seek; for a Scanner to
-	// be seekable, an r implementing io.Seeker is required
- IsSeekable bool
-}
-
-// NewScanner returns a new Scanner based on a reader; if the given reader
-// implements io.ReadSeeker, the Scanner will also be seekable
-func NewScanner(r io.Reader) *Scanner {
- seeker, ok := r.(io.ReadSeeker)
- if !ok {
- seeker = &trackableReader{Reader: r}
- }
-
- crc := crc32.NewIEEE()
- return &Scanner{
- r: &teeReader{
- newByteReadSeeker(seeker),
- crc,
- },
- crc: crc,
- IsSeekable: ok,
- }
-}
-
-// Header reads the whole packfile header (signature, version and object count).
-// It returns the version and the object count and performs checks on the
-// validity of the signature and the version fields.
-func (s *Scanner) Header() (version, objects uint32, err error) {
- if s.version != 0 {
- return s.version, s.objects, nil
- }
-
- sig, err := s.readSignature()
- if err != nil {
- if err == io.EOF {
- err = ErrEmptyPackfile
- }
-
- return
- }
-
- if !s.isValidSignature(sig) {
- err = ErrBadSignature
- return
- }
-
- version, err = s.readVersion()
- s.version = version
- if err != nil {
- return
- }
-
- if !s.isSupportedVersion(version) {
- err = ErrUnsupportedVersion.AddDetails("%d", version)
- return
- }
-
- objects, err = s.readCount()
- s.objects = objects
- return
-}
-
-// readSignature reads and returns the signature field in the packfile.
-func (s *Scanner) readSignature() ([]byte, error) {
- var sig = make([]byte, 4)
- if _, err := io.ReadFull(s.r, sig); err != nil {
- return []byte{}, err
- }
-
- return sig, nil
-}
-
-// isValidSignature reports whether sig is a valid packfile signature.
-func (s *Scanner) isValidSignature(sig []byte) bool {
- return bytes.Equal(sig, []byte{'P', 'A', 'C', 'K'})
-}
-
-// readVersion reads and returns the version field of a packfile.
-func (s *Scanner) readVersion() (uint32, error) {
- return binary.ReadUint32(s.r)
-}
-
-// isSupportedVersion returns whether version v is supported by the parser.
-// The current supported version is VersionSupported, defined above.
-func (s *Scanner) isSupportedVersion(v uint32) bool {
- return v == VersionSupported
-}
-
-// readCount reads and returns the count of objects field of a packfile.
-func (s *Scanner) readCount() (uint32, error) {
- return binary.ReadUint32(s.r)
-}
-
-// NextObjectHeader returns the ObjectHeader for the next object in the reader
-func (s *Scanner) NextObjectHeader() (*ObjectHeader, error) {
- if err := s.doPending(); err != nil {
- return nil, err
- }
-
- s.crc.Reset()
-
- h := &ObjectHeader{}
- s.pendingObject = h
-
- var err error
- h.Offset, err = s.r.Seek(0, io.SeekCurrent)
- if err != nil {
- return nil, err
- }
-
- h.Type, h.Length, err = s.readObjectTypeAndLength()
- if err != nil {
- return nil, err
- }
-
- switch h.Type {
- case core.OFSDeltaObject:
- no, err := binary.ReadVariableWidthInt(s.r)
- if err != nil {
- return nil, err
- }
-
- h.OffsetReference = h.Offset - no
- case core.REFDeltaObject:
- var err error
- h.Reference, err = binary.ReadHash(s.r)
- if err != nil {
- return nil, err
- }
- }
-
- return h, nil
-}
-
-func (s *Scanner) doPending() error {
- if s.version == 0 {
- var err error
- s.version, s.objects, err = s.Header()
- if err != nil {
- return err
- }
- }
-
- return s.discardObjectIfNeeded()
-}
-
-func (s *Scanner) discardObjectIfNeeded() error {
- if s.pendingObject == nil {
- return nil
- }
-
- h := s.pendingObject
- n, _, err := s.NextObject(ioutil.Discard)
- if err != nil {
- return err
- }
-
- if n != h.Length {
- return fmt.Errorf(
- "error discarding object, discarded %d, expected %d",
- n, h.Length,
- )
- }
-
- return nil
-}
-
-// readObjectTypeAndLength reads and returns the object type and the
-// length field from an object entry in a packfile.
-func (s *Scanner) readObjectTypeAndLength() (core.ObjectType, int64, error) {
- t, c, err := s.readType()
- if err != nil {
- return t, 0, err
- }
-
- l, err := s.readLength(c)
-
- return t, l, err
-}
-
-const (
- maskType = uint8(112) // 0111 0000
- maskFirstLength = uint8(15) // 0000 1111
-	maskContinue    = uint8(128) // 1000 0000
- firstLengthBits = uint8(4) // the first byte has 4 bits to store the length
- maskLength = uint8(127) // 0111 1111
-	lengthBits      = uint8(7)   // subsequent bytes have 7 bits to store the length
-)
-
-func (s *Scanner) readType() (core.ObjectType, byte, error) {
- var c byte
- var err error
- if c, err = s.r.ReadByte(); err != nil {
- return core.ObjectType(0), 0, err
- }
-
- typ := parseType(c)
-
- return typ, c, nil
-}
-
-func parseType(b byte) core.ObjectType {
- return core.ObjectType((b & maskType) >> firstLengthBits)
-}
-
-// the length is encoded in the low 4 bits of the first byte and in
-// the low 7 bits of subsequent bytes. The last byte has a 0 MSB.
-func (s *Scanner) readLength(first byte) (int64, error) {
- length := int64(first & maskFirstLength)
-
- c := first
- shift := firstLengthBits
- var err error
- for c&maskContinue > 0 {
- if c, err = s.r.ReadByte(); err != nil {
- return 0, err
- }
-
- length += int64(c&maskLength) << shift
- shift += lengthBits
- }
-
- return length, nil
-}
-
-// NextObject writes the content of the next object into the writer, and
-// returns the number of bytes written, the CRC32 of the content and an error, if any
-func (s *Scanner) NextObject(w io.Writer) (written int64, crc32 uint32, err error) {
- defer s.crc.Reset()
-
- s.pendingObject = nil
- written, err = s.copyObject(w)
- crc32 = s.crc.Sum32()
- return
-}
-
-// copyObject reads and writes a non-deltified object
-// from its zlib stream in an object entry in the packfile.
-func (s *Scanner) copyObject(w io.Writer) (int64, error) {
- zr, err := zlib.NewReader(s.r)
- if err != nil {
- return -1, fmt.Errorf("zlib reading error: %s", err)
- }
-
- defer func() {
- closeErr := zr.Close()
- if err == nil {
- err = closeErr
- }
- }()
-
- return io.Copy(w, zr)
-}
-
-// Seek sets a new offset from start, and returns the old position before the change
-func (s *Scanner) Seek(offset int64) (previous int64, err error) {
-	// if seeking, we assume that you are not interested in the header
- if s.version == 0 {
- s.version = VersionSupported
- }
-
- previous, err = s.r.Seek(0, io.SeekCurrent)
- if err != nil {
- return -1, err
- }
-
- _, err = s.r.Seek(offset, io.SeekStart)
- return previous, err
-}
-
-// Checksum returns the checksum of the packfile
-func (s *Scanner) Checksum() (core.Hash, error) {
- err := s.discardObjectIfNeeded()
- if err != nil {
- return core.ZeroHash, err
- }
-
- return binary.ReadHash(s.r)
-}
-
-// Close reads the reader until io.EOF
-func (s *Scanner) Close() error {
- _, err := io.Copy(ioutil.Discard, s.r)
- return err
-}
-
-type trackableReader struct {
- count int64
- io.Reader
-}
-
-// Read reads up to len(p) bytes into p.
-func (r *trackableReader) Read(p []byte) (n int, err error) {
- n, err = r.Reader.Read(p)
- r.count += int64(n)
-
- return
-}
-
-// Seek only supports io.SeekCurrent; any other operation fails
-func (r *trackableReader) Seek(offset int64, whence int) (int64, error) {
- if whence != io.SeekCurrent {
- return -1, ErrSeekNotSupported
- }
-
- return r.count, nil
-}
-
-func newByteReadSeeker(r io.ReadSeeker) *bufferedSeeker {
- return &bufferedSeeker{
- r: r,
- Reader: *bufio.NewReader(r),
- }
-}
-
-type bufferedSeeker struct {
- r io.ReadSeeker
- bufio.Reader
-}
-
-func (r *bufferedSeeker) Seek(offset int64, whence int) (int64, error) {
- if whence == io.SeekCurrent {
- current, err := r.r.Seek(offset, whence)
- if err != nil {
- return current, err
- }
-
- return current - int64(r.Buffered()), nil
- }
-
- defer r.Reader.Reset(r.r)
- return r.r.Seek(offset, whence)
-}
-
-type reader interface {
- io.Reader
- io.ByteReader
- io.Seeker
-}
-
-type teeReader struct {
- reader
- w hash.Hash32
-}
-
-func (r *teeReader) Read(p []byte) (n int, err error) {
- n, err = r.reader.Read(p)
- if n > 0 {
- if n, err := r.w.Write(p[:n]); err != nil {
- return n, err
- }
- }
- return
-}
-
-func (r *teeReader) ReadByte() (b byte, err error) {
- b, err = r.reader.ReadByte()
- if err == nil {
- _, err := r.w.Write([]byte{b})
- if err != nil {
- return 0, err
- }
- }
-
- return
-}
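Putting the scanner together, a minimal sketch of the intended read loop
(illustrative only; the packfile reader r is assumed to come from elsewhere):

    import (
        "fmt"
        "io"
        "io/ioutil"
    )

    func dumpObjects(r io.Reader) error {
        s := NewScanner(r)
        _, objects, err := s.Header()
        if err != nil {
            return err
        }
        for i := uint32(0); i < objects; i++ {
            h, err := s.NextObjectHeader()
            if err != nil {
                return err
            }
            fmt.Printf("offset=%d type=%v length=%d\n", h.Offset, h.Type, h.Length)
            // consume the object body; its CRC32 is also returned here
            if _, _, err := s.NextObject(ioutil.Discard); err != nil {
                return err
            }
        }
        _, err = s.Checksum() // trailing pack checksum
        return err
    }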
diff --git a/formats/packfile/scanner_test.go b/formats/packfile/scanner_test.go
deleted file mode 100644
index 5f80da0..0000000
--- a/formats/packfile/scanner_test.go
+++ /dev/null
@@ -1,189 +0,0 @@
-package packfile
-
-import (
- "bytes"
- "io"
-
- . "gopkg.in/check.v1"
- "gopkg.in/src-d/go-git.v4/core"
- "gopkg.in/src-d/go-git.v4/fixtures"
-)
-
-type ScannerSuite struct {
- fixtures.Suite
-}
-
-var _ = Suite(&ScannerSuite{})
-
-func (s *ScannerSuite) TestHeader(c *C) {
- r := fixtures.Basic().One().Packfile()
- p := NewScanner(r)
-
- version, objects, err := p.Header()
- c.Assert(err, IsNil)
- c.Assert(version, Equals, VersionSupported)
- c.Assert(objects, Equals, uint32(31))
-}
-
-func (s *ScannerSuite) TestNextObjectHeaderWithoutHeader(c *C) {
- r := fixtures.Basic().One().Packfile()
- p := NewScanner(r)
-
- h, err := p.NextObjectHeader()
- c.Assert(err, IsNil)
- c.Assert(h, DeepEquals, &expectedHeadersOFS[0])
-
- version, objects, err := p.Header()
- c.Assert(err, IsNil)
- c.Assert(version, Equals, VersionSupported)
- c.Assert(objects, Equals, uint32(31))
-}
-
-func (s *ScannerSuite) TestNextObjectHeaderREFDelta(c *C) {
- s.testNextObjectHeader(c, "ref-delta", expectedHeadersREF)
-}
-
-func (s *ScannerSuite) TestNextObjectHeaderOFSDelta(c *C) {
- s.testNextObjectHeader(c, "ofs-delta", expectedHeadersOFS)
-}
-
-func (s *ScannerSuite) testNextObjectHeader(c *C, tag string, expected []ObjectHeader) {
- r := fixtures.Basic().ByTag(tag).One().Packfile()
- p := NewScanner(r)
-
- _, objects, err := p.Header()
- c.Assert(err, IsNil)
-
- for i := 0; i < int(objects); i++ {
- h, err := p.NextObjectHeader()
- c.Assert(err, IsNil)
- c.Assert(*h, DeepEquals, expected[i])
-
- buf := bytes.NewBuffer(nil)
- n, _, err := p.NextObject(buf)
- c.Assert(err, IsNil)
- c.Assert(n, Equals, h.Length)
- }
-
- n, err := p.Checksum()
- c.Assert(err, IsNil)
- c.Assert(n, HasLen, 20)
-}
-
-func (s *ScannerSuite) TestNextObjectHeaderWithOutReadObject(c *C) {
- f := fixtures.Basic().ByTag("ref-delta").One()
- r := f.Packfile()
- p := NewScanner(r)
-
- _, objects, err := p.Header()
- c.Assert(err, IsNil)
-
- for i := 0; i < int(objects); i++ {
-		h, err := p.NextObjectHeader()
- c.Assert(err, IsNil)
- c.Assert(*h, DeepEquals, expectedHeadersREF[i])
- }
-
- err = p.discardObjectIfNeeded()
- c.Assert(err, IsNil)
-
- n, err := p.Checksum()
- c.Assert(err, IsNil)
- c.Assert(n, Equals, f.PackfileHash)
-}
-
-func (s *ScannerSuite) TestNextObjectHeaderWithOutReadObjectNonSeekable(c *C) {
- f := fixtures.Basic().ByTag("ref-delta").One()
- r := io.MultiReader(f.Packfile())
- p := NewScanner(r)
-
- _, objects, err := p.Header()
- c.Assert(err, IsNil)
-
- for i := 0; i < int(objects); i++ {
-		h, err := p.NextObjectHeader()
- c.Assert(err, IsNil)
- c.Assert(*h, DeepEquals, expectedHeadersREF[i])
- }
-
- err = p.discardObjectIfNeeded()
- c.Assert(err, IsNil)
-
- n, err := p.Checksum()
- c.Assert(err, IsNil)
- c.Assert(n, Equals, f.PackfileHash)
-}
-
-var expectedHeadersOFS = []ObjectHeader{
- {Type: core.CommitObject, Offset: 12, Length: 254},
- {Type: core.OFSDeltaObject, Offset: 186, Length: 93, OffsetReference: 12},
- {Type: core.CommitObject, Offset: 286, Length: 242},
- {Type: core.CommitObject, Offset: 449, Length: 242},
- {Type: core.CommitObject, Offset: 615, Length: 333},
- {Type: core.CommitObject, Offset: 838, Length: 332},
- {Type: core.CommitObject, Offset: 1063, Length: 244},
- {Type: core.CommitObject, Offset: 1230, Length: 243},
- {Type: core.CommitObject, Offset: 1392, Length: 187},
- {Type: core.BlobObject, Offset: 1524, Length: 189},
- {Type: core.BlobObject, Offset: 1685, Length: 18},
- {Type: core.BlobObject, Offset: 1713, Length: 1072},
- {Type: core.BlobObject, Offset: 2351, Length: 76110},
- {Type: core.BlobObject, Offset: 78050, Length: 2780},
- {Type: core.BlobObject, Offset: 78882, Length: 217848},
- {Type: core.BlobObject, Offset: 80725, Length: 706},
- {Type: core.BlobObject, Offset: 80998, Length: 11488},
- {Type: core.BlobObject, Offset: 84032, Length: 78},
- {Type: core.TreeObject, Offset: 84115, Length: 272},
- {Type: core.OFSDeltaObject, Offset: 84375, Length: 43, OffsetReference: 84115},
- {Type: core.TreeObject, Offset: 84430, Length: 38},
- {Type: core.TreeObject, Offset: 84479, Length: 75},
- {Type: core.TreeObject, Offset: 84559, Length: 38},
- {Type: core.TreeObject, Offset: 84608, Length: 34},
- {Type: core.BlobObject, Offset: 84653, Length: 9},
- {Type: core.OFSDeltaObject, Offset: 84671, Length: 6, OffsetReference: 84375},
- {Type: core.OFSDeltaObject, Offset: 84688, Length: 9, OffsetReference: 84375},
- {Type: core.OFSDeltaObject, Offset: 84708, Length: 6, OffsetReference: 84375},
- {Type: core.OFSDeltaObject, Offset: 84725, Length: 5, OffsetReference: 84115},
- {Type: core.OFSDeltaObject, Offset: 84741, Length: 8, OffsetReference: 84375},
- {Type: core.OFSDeltaObject, Offset: 84760, Length: 4, OffsetReference: 84741},
-}
-
-var expectedHeadersREF = []ObjectHeader{
- {Type: core.CommitObject, Offset: 12, Length: 254},
- {Type: core.REFDeltaObject, Offset: 186, Length: 93,
- Reference: core.NewHash("e8d3ffab552895c19b9fcf7aa264d277cde33881")},
- {Type: core.CommitObject, Offset: 304, Length: 242},
- {Type: core.CommitObject, Offset: 467, Length: 242},
- {Type: core.CommitObject, Offset: 633, Length: 333},
- {Type: core.CommitObject, Offset: 856, Length: 332},
- {Type: core.CommitObject, Offset: 1081, Length: 243},
- {Type: core.CommitObject, Offset: 1243, Length: 244},
- {Type: core.CommitObject, Offset: 1410, Length: 187},
- {Type: core.BlobObject, Offset: 1542, Length: 189},
- {Type: core.BlobObject, Offset: 1703, Length: 18},
- {Type: core.BlobObject, Offset: 1731, Length: 1072},
- {Type: core.BlobObject, Offset: 2369, Length: 76110},
- {Type: core.TreeObject, Offset: 78068, Length: 38},
- {Type: core.BlobObject, Offset: 78117, Length: 2780},
- {Type: core.TreeObject, Offset: 79049, Length: 75},
- {Type: core.BlobObject, Offset: 79129, Length: 217848},
- {Type: core.BlobObject, Offset: 80972, Length: 706},
- {Type: core.TreeObject, Offset: 81265, Length: 38},
- {Type: core.BlobObject, Offset: 81314, Length: 11488},
- {Type: core.TreeObject, Offset: 84752, Length: 34},
- {Type: core.BlobObject, Offset: 84797, Length: 78},
- {Type: core.TreeObject, Offset: 84880, Length: 271},
- {Type: core.REFDeltaObject, Offset: 85141, Length: 6,
- Reference: core.NewHash("a8d315b2b1c615d43042c3a62402b8a54288cf5c")},
- {Type: core.REFDeltaObject, Offset: 85176, Length: 37,
- Reference: core.NewHash("fb72698cab7617ac416264415f13224dfd7a165e")},
- {Type: core.BlobObject, Offset: 85244, Length: 9},
- {Type: core.REFDeltaObject, Offset: 85262, Length: 9,
- Reference: core.NewHash("fb72698cab7617ac416264415f13224dfd7a165e")},
- {Type: core.REFDeltaObject, Offset: 85300, Length: 6,
- Reference: core.NewHash("fb72698cab7617ac416264415f13224dfd7a165e")},
- {Type: core.TreeObject, Offset: 85335, Length: 110},
- {Type: core.REFDeltaObject, Offset: 85448, Length: 8,
- Reference: core.NewHash("eba74343e2f15d62adedfd8c883ee0262b5c8021")},
- {Type: core.TreeObject, Offset: 85485, Length: 73},
-}
diff --git a/formats/packp/advrefs/advrefs.go b/formats/packp/advrefs/advrefs.go
deleted file mode 100644
index ab4bcf5..0000000
--- a/formats/packp/advrefs/advrefs.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Package advrefs implements encoding and decoding advertised-refs
-// messages from a git-upload-pack command.
-package advrefs
-
-import (
- "gopkg.in/src-d/go-git.v4/core"
- "gopkg.in/src-d/go-git.v4/formats/packp"
-)
-
-const (
- hashSize = 40
- head = "HEAD"
- noHead = "capabilities^{}"
-)
-
-var (
- sp = []byte(" ")
- null = []byte("\x00")
- eol = []byte("\n")
- peeled = []byte("^{}")
- shallow = []byte("shallow ")
- noHeadMark = []byte(" capabilities^{}\x00")
-)
-
-// AdvRefs values represent the information transmitted in an
-// advertised-refs message. Values of this type are not zero-value
-// safe; use the New function instead.
-//
-// When using these messages over (smart) HTTP, you have to add a pktline
-// before the whole thing with the following payload:
-//
-// '# service=$servicename' LF
-//
-// Moreover, some (if not all) git HTTP smart servers will send a flush-pkt
-// just after the first pkt-line.
-//
-// To accommodate both situations, the Prefix field allows you to store
-// any data you want to send before the actual pktlines. It will also
-// be filled with whatever is found there when decoding.
-type AdvRefs struct {
- Prefix [][]byte // payloads of the prefix
- Head *core.Hash
- Capabilities *packp.Capabilities
- References map[string]core.Hash
- Peeled map[string]core.Hash
- Shallows []core.Hash
-}
-
-// New returns a pointer to a new AdvRefs value, ready to be used.
-func New() *AdvRefs {
- return &AdvRefs{
- Prefix: [][]byte{},
- Capabilities: packp.NewCapabilities(),
- References: make(map[string]core.Hash),
- Peeled: make(map[string]core.Hash),
- Shallows: []core.Hash{},
- }
-}
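For the smart HTTP case described in the type's comment, the Prefix field
would be filled like this (a sketch; the service name is an assumption):

    ar := New()
    ar.Prefix = [][]byte{
        []byte("# service=git-upload-pack"),
        pktline.Flush, // the flush-pkt most servers send after the first pkt-line
    }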
diff --git a/formats/packp/advrefs/advrefs_test.go b/formats/packp/advrefs/advrefs_test.go
deleted file mode 100644
index 6950ba5..0000000
--- a/formats/packp/advrefs/advrefs_test.go
+++ /dev/null
@@ -1,315 +0,0 @@
-package advrefs_test
-
-import (
- "bytes"
- "fmt"
- "io"
- "strings"
- "testing"
-
- "gopkg.in/src-d/go-git.v4/core"
- "gopkg.in/src-d/go-git.v4/formats/packp/advrefs"
- "gopkg.in/src-d/go-git.v4/formats/packp/pktline"
-
- . "gopkg.in/check.v1"
-)
-
-func Test(t *testing.T) { TestingT(t) }
-
-type SuiteDecodeEncode struct{}
-
-var _ = Suite(&SuiteDecodeEncode{})
-
-func (s *SuiteDecodeEncode) test(c *C, in []string, exp []string) {
- var err error
- var input io.Reader
- {
- var buf bytes.Buffer
- p := pktline.NewEncoder(&buf)
- err = p.EncodeString(in...)
- c.Assert(err, IsNil)
- input = &buf
- }
-
- var expected []byte
- {
- var buf bytes.Buffer
- p := pktline.NewEncoder(&buf)
- err = p.EncodeString(exp...)
- c.Assert(err, IsNil)
-
- expected = buf.Bytes()
- }
-
- var obtained []byte
- {
- ar := advrefs.New()
- d := advrefs.NewDecoder(input)
- err = d.Decode(ar)
- c.Assert(err, IsNil)
-
- var buf bytes.Buffer
- e := advrefs.NewEncoder(&buf)
- err := e.Encode(ar)
- c.Assert(err, IsNil)
-
- obtained = buf.Bytes()
- }
-
- c.Assert(obtained, DeepEquals, expected,
- Commentf("input = %v\nobtained = %q\nexpected = %q\n",
- in, string(obtained), string(expected)))
-}
-
-func (s *SuiteDecodeEncode) TestNoHead(c *C) {
- input := []string{
- "0000000000000000000000000000000000000000 capabilities^{}\x00",
- pktline.FlushString,
- }
-
- expected := []string{
- "0000000000000000000000000000000000000000 capabilities^{}\x00\n",
- pktline.FlushString,
- }
-
- s.test(c, input, expected)
-}
-
-func (s *SuiteDecodeEncode) TestNoHeadSmart(c *C) {
- input := []string{
- "# service=git-upload-pack\n",
- "0000000000000000000000000000000000000000 capabilities^{}\x00",
- pktline.FlushString,
- }
-
- expected := []string{
- "# service=git-upload-pack\n",
- "0000000000000000000000000000000000000000 capabilities^{}\x00\n",
- pktline.FlushString,
- }
-
- s.test(c, input, expected)
-}
-
-func (s *SuiteDecodeEncode) TestNoHeadSmartBug(c *C) {
- input := []string{
- "# service=git-upload-pack\n",
- pktline.FlushString,
- "0000000000000000000000000000000000000000 capabilities^{}\x00\n",
- pktline.FlushString,
- }
-
- expected := []string{
- "# service=git-upload-pack\n",
- pktline.FlushString,
- "0000000000000000000000000000000000000000 capabilities^{}\x00\n",
- pktline.FlushString,
- }
-
- s.test(c, input, expected)
-}
-
-func (s *SuiteDecodeEncode) TestRefs(c *C) {
- input := []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack",
- "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master",
- "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n",
- "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree",
- pktline.FlushString,
- }
-
- expected := []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00multi_ack ofs-delta symref=HEAD:/refs/heads/master\n",
- "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n",
- "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n",
- "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n",
- pktline.FlushString,
- }
-
- s.test(c, input, expected)
-}
-
-func (s *SuiteDecodeEncode) TestPeeled(c *C) {
- input := []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack",
- "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n",
- "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}",
- "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n",
- "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree",
- "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n",
- pktline.FlushString,
- }
-
- expected := []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00multi_ack ofs-delta symref=HEAD:/refs/heads/master\n",
- "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n",
- "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n",
- "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n",
- "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n",
- "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}\n",
- pktline.FlushString,
- }
-
- s.test(c, input, expected)
-}
-
-func (s *SuiteDecodeEncode) TestAll(c *C) {
- input := []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack\n",
- "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n",
- "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree",
- "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n",
- "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n",
- "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}",
- "shallow 1111111111111111111111111111111111111111",
- "shallow 2222222222222222222222222222222222222222\n",
- pktline.FlushString,
- }
-
- expected := []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00multi_ack ofs-delta symref=HEAD:/refs/heads/master\n",
- "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n",
- "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n",
- "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n",
- "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n",
- "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}\n",
- "shallow 1111111111111111111111111111111111111111\n",
- "shallow 2222222222222222222222222222222222222222\n",
- pktline.FlushString,
- }
-
- s.test(c, input, expected)
-}
-
-func (s *SuiteDecodeEncode) TestAllSmart(c *C) {
- input := []string{
- "# service=git-upload-pack\n",
- pktline.FlushString,
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack\n",
- "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n",
- "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n",
- "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n",
- "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n",
- "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}\n",
- "shallow 1111111111111111111111111111111111111111\n",
- "shallow 2222222222222222222222222222222222222222\n",
- pktline.FlushString,
- }
-
- expected := []string{
- "# service=git-upload-pack\n",
- pktline.FlushString,
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00multi_ack ofs-delta symref=HEAD:/refs/heads/master\n",
- "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n",
- "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n",
- "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n",
- "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n",
- "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}\n",
- "shallow 1111111111111111111111111111111111111111\n",
- "shallow 2222222222222222222222222222222222222222\n",
- pktline.FlushString,
- }
-
- s.test(c, input, expected)
-}
-
-func (s *SuiteDecodeEncode) TestAllSmartBug(c *C) {
- input := []string{
- "# service=git-upload-pack\n",
- pktline.FlushString,
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack\n",
- "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n",
- "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n",
- "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n",
- "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n",
- "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}\n",
- "shallow 1111111111111111111111111111111111111111\n",
- "shallow 2222222222222222222222222222222222222222\n",
- pktline.FlushString,
- }
-
- expected := []string{
- "# service=git-upload-pack\n",
- pktline.FlushString,
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00multi_ack ofs-delta symref=HEAD:/refs/heads/master\n",
- "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n",
- "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n",
- "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n",
- "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n",
- "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}\n",
- "shallow 1111111111111111111111111111111111111111\n",
- "shallow 2222222222222222222222222222222222222222\n",
- pktline.FlushString,
- }
-
- s.test(c, input, expected)
-}
-
-func ExampleDecoder_Decode() {
- // Here is a raw advertised-ref message.
- raw := "" +
- "0065a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 HEAD\x00multi_ack ofs-delta symref=HEAD:/refs/heads/master\n" +
- "003fa6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n" +
- "00441111111111111111111111111111111111111111 refs/tags/v2.6.11-tree\n" +
- "00475555555555555555555555555555555555555555 refs/tags/v2.6.11-tree^{}\n" +
- "0035shallow 5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c\n" +
- "0000"
-
- // Use the raw message as our input.
- input := strings.NewReader(raw)
-
-	// Create an advrefs.Decoder reading from our input.
- d := advrefs.NewDecoder(input)
-
- // Decode the input into a newly allocated AdvRefs value.
- ar := advrefs.New()
- _ = d.Decode(ar) // error check ignored for brevity
-
- // Do something interesting with the AdvRefs, e.g. print its contents.
- fmt.Println("head =", ar.Head)
- fmt.Println("capabilities =", ar.Capabilities.String())
- fmt.Println("...")
- fmt.Println("shallows =", ar.Shallows)
- // Output: head = a6930aaee06755d1bdcfd943fbf614e4d92bb0c7
- // capabilities = multi_ack ofs-delta symref=HEAD:/refs/heads/master
- // ...
- // shallows = [5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c]
-}
-
-func ExampleEncoder_Encode() {
- // Create an AdvRefs with the contents you want...
- ar := advrefs.New()
-
- // ...add a hash for the HEAD...
- head := core.NewHash("1111111111111111111111111111111111111111")
- ar.Head = &head
-
- // ...add some server capabilities...
- ar.Capabilities.Add("symref", "HEAD:/refs/heads/master")
- ar.Capabilities.Add("ofs-delta")
- ar.Capabilities.Add("multi_ack")
-
- // ...add a couple of references...
- ar.References["refs/heads/master"] = core.NewHash("2222222222222222222222222222222222222222")
- ar.References["refs/tags/v1"] = core.NewHash("3333333333333333333333333333333333333333")
-
- // ...including a peeled ref...
- ar.Peeled["refs/tags/v1"] = core.NewHash("4444444444444444444444444444444444444444")
-
- // ...and finally add a shallow
- ar.Shallows = append(ar.Shallows, core.NewHash("5555555555555555555555555555555555555555"))
-
-	// Encode the AdvRefs to a bytes.Buffer.
-	// You can encode into stdout too, but you will not be able
-	// to see the '\x00' after "HEAD".
- var buf bytes.Buffer
- e := advrefs.NewEncoder(&buf)
- _ = e.Encode(ar) // error checks ignored for brevity
-
- // Print the contents of the buffer as a quoted string.
-	// Printing it as a non-quoted string would be prettier, but you
-	// would miss the '\x00' after "HEAD".
- fmt.Printf("%q", buf.String())
- // Output:
- // "00651111111111111111111111111111111111111111 HEAD\x00multi_ack ofs-delta symref=HEAD:/refs/heads/master\n003f2222222222222222222222222222222222222222 refs/heads/master\n003a3333333333333333333333333333333333333333 refs/tags/v1\n003d4444444444444444444444444444444444444444 refs/tags/v1^{}\n0035shallow 5555555555555555555555555555555555555555\n0000"
-}
diff --git a/formats/packp/advrefs/decoder.go b/formats/packp/advrefs/decoder.go
deleted file mode 100644
index a0cf5e6..0000000
--- a/formats/packp/advrefs/decoder.go
+++ /dev/null
@@ -1,288 +0,0 @@
-package advrefs
-
-import (
- "bytes"
- "encoding/hex"
- "errors"
- "fmt"
- "io"
-
- "gopkg.in/src-d/go-git.v4/core"
- "gopkg.in/src-d/go-git.v4/formats/packp/pktline"
-)
-
-// A Decoder reads and decodes AdvRef values from an input stream.
-type Decoder struct {
- s *pktline.Scanner // a pkt-line scanner from the input stream
-	line  []byte           // current pkt-line contents, use the nextLine() method to advance
- nLine int // current pkt-line number for debugging, begins at 1
- hash core.Hash // last hash read
-	err   error            // sticky error, use the error() method to fill this out
- data *AdvRefs // parsed data is stored here
-}
-
-// ErrEmpty is returned by Decode when there was no advertised-refs message at all
-var ErrEmpty = errors.New("empty advertised-ref message")
-
-// NewDecoder returns a new decoder that reads from r.
-//
-// Will not read more data from r than necessary.
-func NewDecoder(r io.Reader) *Decoder {
- return &Decoder{
- s: pktline.NewScanner(r),
- }
-}
-
-// Decode reads the next advertised-refs message from its input and
-// stores it in the value pointed to by v.
-func (d *Decoder) Decode(v *AdvRefs) error {
- d.data = v
-
- for state := decodePrefix; state != nil; {
- state = state(d)
- }
-
- return d.err
-}
-
-type decoderStateFn func(*Decoder) decoderStateFn
-
-// error fills out the decoder's sticky error
-func (d *Decoder) error(format string, a ...interface{}) {
- d.err = fmt.Errorf("pkt-line %d: %s", d.nLine,
- fmt.Sprintf(format, a...))
-}
-
-// nextLine reads a new pkt-line from the scanner, makes its payload
-// available as d.line and increments d.nLine. A successful invocation
-// returns true; otherwise, false is returned and the sticky error is
-// filled out accordingly. Trims EOLs at the end of the payloads.
-func (d *Decoder) nextLine() bool {
- d.nLine++
-
- if !d.s.Scan() {
- if d.err = d.s.Err(); d.err != nil {
- return false
- }
-
- if d.nLine == 1 {
- d.err = ErrEmpty
- return false
- }
-
- d.error("EOF")
- return false
- }
-
- d.line = d.s.Bytes()
- d.line = bytes.TrimSuffix(d.line, eol)
-
- return true
-}
-
-// The HTTP smart prefix is often followed by a flush-pkt.
-func decodePrefix(d *Decoder) decoderStateFn {
- if ok := d.nextLine(); !ok {
- return nil
- }
-
- if isPrefix(d.line) {
- tmp := make([]byte, len(d.line))
- copy(tmp, d.line)
- d.data.Prefix = append(d.data.Prefix, tmp)
- if ok := d.nextLine(); !ok {
- return nil
- }
- }
-
- if isFlush(d.line) {
- d.data.Prefix = append(d.data.Prefix, pktline.Flush)
- if ok := d.nextLine(); !ok {
- return nil
- }
- }
-
- return decodeFirstHash
-}
-
-func isPrefix(payload []byte) bool {
- return payload[0] == '#'
-}
-
-func isFlush(payload []byte) bool {
- return len(payload) == 0
-}
-
-// If the first hash is zero, then a no-refs message is coming. Otherwise, a
-// list-of-refs is coming, and the hash will be followed by the first
-// advertised ref.
-func decodeFirstHash(p *Decoder) decoderStateFn {
- if len(p.line) < hashSize {
- p.error("cannot read hash, pkt-line too short")
- return nil
- }
-
- if _, err := hex.Decode(p.hash[:], p.line[:hashSize]); err != nil {
- p.error("invalid hash text: %s", err)
- return nil
- }
-
- p.line = p.line[hashSize:]
-
- if p.hash.IsZero() {
- return decodeSkipNoRefs
- }
-
- return decodeFirstRef
-}
-
-// Skips SP "capabilities^{}" NUL
-func decodeSkipNoRefs(p *Decoder) decoderStateFn {
- if len(p.line) < len(noHeadMark) {
- p.error("too short zero-id ref")
- return nil
- }
-
- if !bytes.HasPrefix(p.line, noHeadMark) {
- p.error("malformed zero-id ref")
- return nil
- }
-
- p.line = p.line[len(noHeadMark):]
-
- return decodeCaps
-}
-
-// decodeFirstRef decodes the refname; expects SP refname NULL
-func decodeFirstRef(l *Decoder) decoderStateFn {
- if len(l.line) < 3 {
- l.error("line too short after hash")
- return nil
- }
-
- if !bytes.HasPrefix(l.line, sp) {
- l.error("no space after hash")
- return nil
- }
- l.line = l.line[1:]
-
- chunks := bytes.SplitN(l.line, null, 2)
- if len(chunks) < 2 {
- l.error("NULL not found")
- return nil
- }
- ref := chunks[0]
- l.line = chunks[1]
-
- if bytes.Equal(ref, []byte(head)) {
- l.data.Head = &l.hash
- } else {
- l.data.References[string(ref)] = l.hash
- }
-
- return decodeCaps
-}
-
-func decodeCaps(p *Decoder) decoderStateFn {
- if len(p.line) == 0 {
- return decodeOtherRefs
- }
-
- for _, c := range bytes.Split(p.line, sp) {
- name, values := readCapability(c)
- p.data.Capabilities.Add(name, values...)
- }
-
- return decodeOtherRefs
-}
-
-// Capabilities are a single string or a name=value.
-// Even though we are only going to read at most one value, we return
-// a slice of values, as Capability.Add receives that.
-func readCapability(data []byte) (name string, values []string) {
- pair := bytes.SplitN(data, []byte{'='}, 2)
- if len(pair) == 2 {
- values = append(values, string(pair[1]))
- }
-
- return string(pair[0]), values
-}
-
-// The refs are either tips (obj-id SP refname) or peeled (obj-id SP refname^{}).
-// If there are no refs, then there might be a shallow or a flush-pkt.
-func decodeOtherRefs(p *Decoder) decoderStateFn {
- if ok := p.nextLine(); !ok {
- return nil
- }
-
- if bytes.HasPrefix(p.line, shallow) {
- return decodeShallow
- }
-
- if len(p.line) == 0 {
- return nil
- }
-
- saveTo := p.data.References
- if bytes.HasSuffix(p.line, peeled) {
- p.line = bytes.TrimSuffix(p.line, peeled)
- saveTo = p.data.Peeled
- }
-
- ref, hash, err := readRef(p.line)
- if err != nil {
- p.error("%s", err)
- return nil
- }
- saveTo[ref] = hash
-
- return decodeOtherRefs
-}
-
-// Reads a ref-name
-func readRef(data []byte) (string, core.Hash, error) {
- chunks := bytes.Split(data, sp)
- switch {
- case len(chunks) == 1:
- return "", core.ZeroHash, fmt.Errorf("malformed ref data: no space was found")
- case len(chunks) > 2:
- return "", core.ZeroHash, fmt.Errorf("malformed ref data: more than one space found")
- default:
- return string(chunks[1]), core.NewHash(string(chunks[0])), nil
- }
-}
-
-// Keeps reading shallows until a flush-pkt is found
-func decodeShallow(p *Decoder) decoderStateFn {
- if !bytes.HasPrefix(p.line, shallow) {
- p.error("malformed shallow prefix, found %q... instead", p.line[:len(shallow)])
- return nil
- }
- p.line = bytes.TrimPrefix(p.line, shallow)
-
- if len(p.line) != hashSize {
-		p.error(
-			"malformed shallow hash: wrong length, expected 40 bytes, read %d bytes",
-			len(p.line))
- return nil
- }
-
- text := p.line[:hashSize]
- var h core.Hash
- if _, err := hex.Decode(h[:], text); err != nil {
- p.error("invalid hash text: %s", err)
- return nil
- }
-
- p.data.Shallows = append(p.data.Shallows, h)
-
- if ok := p.nextLine(); !ok {
- return nil
- }
-
- if len(p.line) == 0 {
-		return nil // successful parse of the advertised-refs message
- }
-
- return decodeShallow
-}
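The decoder above uses the state-function pattern: each state consumes some
input and returns the next state, and a nil state ends the loop. Stripped of
the advrefs specifics, the skeleton is (a sketch, not library code):

    type stateFn func(*machine) stateFn

    type machine struct {
        err error // sticky error, set by whichever state fails
    }

    // run drives the machine from start until some state returns nil.
    func (m *machine) run(start stateFn) error {
        for state := start; state != nil; {
            state = state(m)
        }
        return m.err
    }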
diff --git a/formats/packp/advrefs/decoder_test.go b/formats/packp/advrefs/decoder_test.go
deleted file mode 100644
index ee2f5ae..0000000
--- a/formats/packp/advrefs/decoder_test.go
+++ /dev/null
@@ -1,500 +0,0 @@
-package advrefs_test
-
-import (
- "bytes"
- "io"
- "strings"
-
- "gopkg.in/src-d/go-git.v4/core"
- "gopkg.in/src-d/go-git.v4/formats/packp"
- "gopkg.in/src-d/go-git.v4/formats/packp/advrefs"
- "gopkg.in/src-d/go-git.v4/formats/packp/pktline"
-
- . "gopkg.in/check.v1"
-)
-
-type SuiteDecoder struct{}
-
-var _ = Suite(&SuiteDecoder{})
-
-func (s *SuiteDecoder) TestEmpty(c *C) {
- ar := advrefs.New()
- var buf bytes.Buffer
- d := advrefs.NewDecoder(&buf)
-
- err := d.Decode(ar)
- c.Assert(err, Equals, advrefs.ErrEmpty)
-}
-
-func (s *SuiteDecoder) TestShortForHash(c *C) {
- payloads := []string{
- "6ecf0ef2c2dffb796",
- pktline.FlushString,
- }
- r := toPktLines(c, payloads)
- testDecoderErrorMatches(c, r, ".*too short")
-}
-
-func toPktLines(c *C, payloads []string) io.Reader {
- var buf bytes.Buffer
- e := pktline.NewEncoder(&buf)
- err := e.EncodeString(payloads...)
- c.Assert(err, IsNil)
-
- return &buf
-}
-
-func testDecoderErrorMatches(c *C, input io.Reader, pattern string) {
- ar := advrefs.New()
- d := advrefs.NewDecoder(input)
-
- err := d.Decode(ar)
- c.Assert(err, ErrorMatches, pattern)
-}
-
-func (s *SuiteDecoder) TestInvalidFirstHash(c *C) {
- payloads := []string{
- "6ecf0ef2c2dffb796alberto2219af86ec6584e5 HEAD\x00multi_ack thin-pack\n",
- pktline.FlushString,
- }
- r := toPktLines(c, payloads)
- testDecoderErrorMatches(c, r, ".*invalid hash.*")
-}
-
-func (s *SuiteDecoder) TestZeroId(c *C) {
- payloads := []string{
- "0000000000000000000000000000000000000000 capabilities^{}\x00multi_ack thin-pack\n",
- pktline.FlushString,
- }
- ar := testDecodeOK(c, payloads)
- c.Assert(ar.Head, IsNil)
-}
-
-func testDecodeOK(c *C, payloads []string) *advrefs.AdvRefs {
- var buf bytes.Buffer
- e := pktline.NewEncoder(&buf)
- err := e.EncodeString(payloads...)
- c.Assert(err, IsNil)
-
- ar := advrefs.New()
- d := advrefs.NewDecoder(&buf)
-
- err = d.Decode(ar)
- c.Assert(err, IsNil)
-
- return ar
-}
-
-func (s *SuiteDecoder) TestMalformedZeroId(c *C) {
- payloads := []string{
- "0000000000000000000000000000000000000000 wrong\x00multi_ack thin-pack\n",
- pktline.FlushString,
- }
- r := toPktLines(c, payloads)
- testDecoderErrorMatches(c, r, ".*malformed zero-id.*")
-}
-
-func (s *SuiteDecoder) TestShortZeroId(c *C) {
- payloads := []string{
- "0000000000000000000000000000000000000000 capabi",
- pktline.FlushString,
- }
- r := toPktLines(c, payloads)
- testDecoderErrorMatches(c, r, ".*too short zero-id.*")
-}
-
-func (s *SuiteDecoder) TestHead(c *C) {
- payloads := []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00",
- pktline.FlushString,
- }
- ar := testDecodeOK(c, payloads)
- c.Assert(*ar.Head, Equals,
- core.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"))
-}
-
-func (s *SuiteDecoder) TestFirstIsNotHead(c *C) {
- payloads := []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 refs/heads/master\x00",
- pktline.FlushString,
- }
- ar := testDecodeOK(c, payloads)
- c.Assert(ar.Head, IsNil)
- c.Assert(ar.References["refs/heads/master"], Equals,
- core.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"))
-}
-
-func (s *SuiteDecoder) TestShortRef(c *C) {
- payloads := []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 H",
- pktline.FlushString,
- }
- r := toPktLines(c, payloads)
- testDecoderErrorMatches(c, r, ".*too short.*")
-}
-
-func (s *SuiteDecoder) TestNoNULL(c *C) {
- payloads := []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEADofs-delta multi_ack",
- pktline.FlushString,
- }
- r := toPktLines(c, payloads)
- testDecoderErrorMatches(c, r, ".*NULL not found.*")
-}
-
-func (s *SuiteDecoder) TestNoSpaceAfterHash(c *C) {
- payloads := []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5-HEAD\x00",
- pktline.FlushString,
- }
- r := toPktLines(c, payloads)
- testDecoderErrorMatches(c, r, ".*no space after hash.*")
-}
-
-func (s *SuiteDecoder) TestNoCaps(c *C) {
- payloads := []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00",
- pktline.FlushString,
- }
- ar := testDecodeOK(c, payloads)
- c.Assert(ar.Capabilities.IsEmpty(), Equals, true)
-}
-
-func (s *SuiteDecoder) TestCaps(c *C) {
- for _, test := range [...]struct {
- input []string
- capabilities []packp.Capability
- }{
- {
- input: []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00",
- pktline.FlushString,
- },
- capabilities: []packp.Capability{},
- },
- {
- input: []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00\n",
- pktline.FlushString,
- },
- capabilities: []packp.Capability{},
- },
- {
- input: []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta",
- pktline.FlushString,
- },
- capabilities: []packp.Capability{
- {
- Name: "ofs-delta",
- Values: []string(nil),
- },
- },
- },
- {
- input: []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta multi_ack",
- pktline.FlushString,
- },
- capabilities: []packp.Capability{
- {Name: "ofs-delta", Values: []string(nil)},
- {Name: "multi_ack", Values: []string(nil)},
- },
- },
- {
- input: []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta multi_ack\n",
- pktline.FlushString,
- },
- capabilities: []packp.Capability{
- {Name: "ofs-delta", Values: []string(nil)},
- {Name: "multi_ack", Values: []string(nil)},
- },
- },
- {
- input: []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:refs/heads/master agent=foo=bar\n",
- pktline.FlushString,
- },
- capabilities: []packp.Capability{
- {Name: "symref", Values: []string{"HEAD:refs/heads/master"}},
- {Name: "agent", Values: []string{"foo=bar"}},
- },
- },
- {
- input: []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:refs/heads/master agent=foo=bar agent=new-agent\n",
- pktline.FlushString,
- },
- capabilities: []packp.Capability{
- {Name: "symref", Values: []string{"HEAD:refs/heads/master"}},
- {Name: "agent", Values: []string{"foo=bar", "new-agent"}},
- },
- },
- } {
- ar := testDecodeOK(c, test.input)
- for _, fixCap := range test.capabilities {
- c.Assert(ar.Capabilities.Supports(fixCap.Name), Equals, true,
- Commentf("input = %q, capability = %q", test.input, fixCap.Name))
- c.Assert(ar.Capabilities.Get(fixCap.Name).Values, DeepEquals, fixCap.Values,
- Commentf("input = %q, capability = %q", test.input, fixCap.Name))
- }
- }
-}
-
-func (s *SuiteDecoder) TestWithPrefix(c *C) {
- payloads := []string{
- "# this is a prefix\n",
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00foo\n",
- pktline.FlushString,
- }
- ar := testDecodeOK(c, payloads)
- c.Assert(len(ar.Prefix), Equals, 1)
- c.Assert(ar.Prefix[0], DeepEquals, []byte("# this is a prefix"))
-}
-
-func (s *SuiteDecoder) TestWithPrefixAndFlush(c *C) {
- payloads := []string{
- "# this is a prefix\n",
- pktline.FlushString,
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00foo\n",
- pktline.FlushString,
- }
- ar := testDecodeOK(c, payloads)
- c.Assert(len(ar.Prefix), Equals, 2)
- c.Assert(ar.Prefix[0], DeepEquals, []byte("# this is a prefix"))
- c.Assert(ar.Prefix[1], DeepEquals, []byte(pktline.FlushString))
-}
-
-func (s *SuiteDecoder) TestOtherRefs(c *C) {
- for _, test := range [...]struct {
- input []string
- references map[string]core.Hash
- peeled map[string]core.Hash
- }{
- {
- input: []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n",
- pktline.FlushString,
- },
- references: make(map[string]core.Hash),
- peeled: make(map[string]core.Hash),
- }, {
- input: []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n",
- "1111111111111111111111111111111111111111 ref/foo",
- pktline.FlushString,
- },
- references: map[string]core.Hash{
- "ref/foo": core.NewHash("1111111111111111111111111111111111111111"),
- },
- peeled: make(map[string]core.Hash),
- }, {
- input: []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n",
- "1111111111111111111111111111111111111111 ref/foo\n",
- pktline.FlushString,
- },
- references: map[string]core.Hash{
- "ref/foo": core.NewHash("1111111111111111111111111111111111111111"),
- },
- peeled: make(map[string]core.Hash),
- }, {
- input: []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n",
- "1111111111111111111111111111111111111111 ref/foo\n",
- "2222222222222222222222222222222222222222 ref/bar",
- pktline.FlushString,
- },
- references: map[string]core.Hash{
- "ref/foo": core.NewHash("1111111111111111111111111111111111111111"),
- "ref/bar": core.NewHash("2222222222222222222222222222222222222222"),
- },
- peeled: make(map[string]core.Hash),
- }, {
- input: []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n",
- "1111111111111111111111111111111111111111 ref/foo^{}\n",
- pktline.FlushString,
- },
- references: make(map[string]core.Hash),
- peeled: map[string]core.Hash{
- "ref/foo": core.NewHash("1111111111111111111111111111111111111111"),
- },
- }, {
- input: []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n",
- "1111111111111111111111111111111111111111 ref/foo\n",
- "2222222222222222222222222222222222222222 ref/bar^{}",
- pktline.FlushString,
- },
- references: map[string]core.Hash{
- "ref/foo": core.NewHash("1111111111111111111111111111111111111111"),
- },
- peeled: map[string]core.Hash{
- "ref/bar": core.NewHash("2222222222222222222222222222222222222222"),
- },
- }, {
- input: []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n",
- "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n",
- "51b8b4fb32271d39fbdd760397406177b2b0fd36 refs/pull/10/head\n",
- "02b5a6031ba7a8cbfde5d65ff9e13ecdbc4a92ca refs/pull/100/head\n",
- "c284c212704c43659bf5913656b8b28e32da1621 refs/pull/100/merge\n",
- "3d6537dce68c8b7874333a1720958bd8db3ae8ca refs/pull/101/merge\n",
- "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11\n",
- "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11^{}\n",
- "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n",
- "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n",
- pktline.FlushString,
- },
- references: map[string]core.Hash{
- "refs/heads/master": core.NewHash("a6930aaee06755d1bdcfd943fbf614e4d92bb0c7"),
- "refs/pull/10/head": core.NewHash("51b8b4fb32271d39fbdd760397406177b2b0fd36"),
- "refs/pull/100/head": core.NewHash("02b5a6031ba7a8cbfde5d65ff9e13ecdbc4a92ca"),
- "refs/pull/100/merge": core.NewHash("c284c212704c43659bf5913656b8b28e32da1621"),
- "refs/pull/101/merge": core.NewHash("3d6537dce68c8b7874333a1720958bd8db3ae8ca"),
- "refs/tags/v2.6.11": core.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c"),
- "refs/tags/v2.6.11-tree": core.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c"),
- },
- peeled: map[string]core.Hash{
- "refs/tags/v2.6.11": core.NewHash("c39ae07f393806ccf406ef966e9a15afc43cc36a"),
- "refs/tags/v2.6.11-tree": core.NewHash("c39ae07f393806ccf406ef966e9a15afc43cc36a"),
- },
- },
- } {
- ar := testDecodeOK(c, test.input)
- comment := Commentf("input = %v\n", test.input)
- c.Assert(ar.References, DeepEquals, test.references, comment)
- c.Assert(ar.Peeled, DeepEquals, test.peeled, comment)
- }
-}
-
-func (s *SuiteDecoder) TestMalformedOtherRefsNoSpace(c *C) {
- payloads := []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00multi_ack thin-pack\n",
- "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8crefs/tags/v2.6.11\n",
- pktline.FlushString,
- }
- r := toPktLines(c, payloads)
- testDecoderErrorMatches(c, r, ".*malformed ref data.*")
-}
-
-func (s *SuiteDecoder) TestMalformedOtherRefsMultipleSpaces(c *C) {
- payloads := []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00multi_ack thin-pack\n",
- "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags v2.6.11\n",
- pktline.FlushString,
- }
- r := toPktLines(c, payloads)
- testDecoderErrorMatches(c, r, ".*malformed ref data.*")
-}
-
-func (s *SuiteDecoder) TestShallow(c *C) {
- for _, test := range [...]struct {
- input []string
- shallows []core.Hash
- }{
- {
- input: []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n",
- "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n",
- "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n",
- "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n",
- pktline.FlushString,
- },
- shallows: []core.Hash{},
- }, {
- input: []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n",
- "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n",
- "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n",
- "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n",
- "shallow 1111111111111111111111111111111111111111\n",
- pktline.FlushString,
- },
- shallows: []core.Hash{core.NewHash("1111111111111111111111111111111111111111")},
- }, {
- input: []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n",
- "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n",
- "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n",
- "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n",
- "shallow 1111111111111111111111111111111111111111\n",
- "shallow 2222222222222222222222222222222222222222\n",
- pktline.FlushString,
- },
- shallows: []core.Hash{
- core.NewHash("1111111111111111111111111111111111111111"),
- core.NewHash("2222222222222222222222222222222222222222"),
- },
- },
- } {
- ar := testDecodeOK(c, test.input)
- comment := Commentf("input = %v\n", test.input)
- c.Assert(ar.Shallows, DeepEquals, test.shallows, comment)
- }
-}
-
-func (s *SuiteDecoder) TestInvalidShallowHash(c *C) {
- payloads := []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n",
- "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n",
- "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n",
- "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n",
- "shallow 11111111alcortes111111111111111111111111\n",
- "shallow 2222222222222222222222222222222222222222\n",
- pktline.FlushString,
- }
- r := toPktLines(c, payloads)
- testDecoderErrorMatches(c, r, ".*invalid hash text.*")
-}
-
-func (s *SuiteDecoder) TestGarbageAfterShallow(c *C) {
- payloads := []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n",
- "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n",
- "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n",
- "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n",
- "shallow 1111111111111111111111111111111111111111\n",
- "shallow 2222222222222222222222222222222222222222\n",
- "b5be40b90dbaa6bd337f3b77de361bfc0723468b refs/tags/v4.4",
- pktline.FlushString,
- }
- r := toPktLines(c, payloads)
- testDecoderErrorMatches(c, r, ".*malformed shallow prefix.*")
-}
-
-func (s *SuiteDecoder) TestMalformedShallowHash(c *C) {
- payloads := []string{
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n",
- "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n",
- "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n",
- "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n",
- "shallow 1111111111111111111111111111111111111111\n",
- "shallow 2222222222222222222222222222222222222222 malformed\n",
- pktline.FlushString,
- }
- r := toPktLines(c, payloads)
- testDecoderErrorMatches(c, r, ".*malformed shallow hash.*")
-}
-
-func (s *SuiteDecoder) TestEOFRefs(c *C) {
- input := strings.NewReader("" +
- "005b6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n" +
- "003fa6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n" +
- "00355dc01c595e6c6ec9ccda4f6ffbf614e4d92bb0c7 refs/foo\n",
- )
- testDecoderErrorMatches(c, input, ".*invalid pkt-len.*")
-}
-
-func (s *SuiteDecoder) TestEOFShallows(c *C) {
- input := strings.NewReader("" +
- "005b6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n" +
- "003fa6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n" +
- "00445dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n" +
- "0047c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n" +
- "0035shallow 1111111111111111111111111111111111111111\n" +
- "0034shallow 222222222222222222222222")
- testDecoderErrorMatches(c, input, ".*unexpected EOF.*")
-}
diff --git a/formats/packp/advrefs/encoder.go b/formats/packp/advrefs/encoder.go
deleted file mode 100644
index 9874884..0000000
--- a/formats/packp/advrefs/encoder.go
+++ /dev/null
@@ -1,155 +0,0 @@
-package advrefs
-
-import (
- "bytes"
- "io"
- "sort"
-
- "gopkg.in/src-d/go-git.v4/core"
- "gopkg.in/src-d/go-git.v4/formats/packp"
- "gopkg.in/src-d/go-git.v4/formats/packp/pktline"
-)
-
-// An Encoder writes AdvRefs values to an output stream.
-type Encoder struct {
- data *AdvRefs // data to encode
- pe *pktline.Encoder // where to write the encoded data
- err error // sticky error
-}
-
-// NewEncoder returns a new encoder that writes to w.
-func NewEncoder(w io.Writer) *Encoder {
- return &Encoder{
- pe: pktline.NewEncoder(w),
- }
-}
-
-// Encode writes the AdvRefs encoding of v to the stream.
-//
-// All the payloads will end with a newline character. Capabilities,
-// references and shallows are written in alphabetical order, except for
-// peeled references, which always follow their corresponding references.
-func (e *Encoder) Encode(v *AdvRefs) error {
- e.data = v
-
- for state := encodePrefix; state != nil; {
- state = state(e)
- }
-
- return e.err
-}
-
-type encoderStateFn func(*Encoder) encoderStateFn
-
-func encodePrefix(e *Encoder) encoderStateFn {
- for _, p := range e.data.Prefix {
- if bytes.Equal(p, pktline.Flush) {
- if e.err = e.pe.Flush(); e.err != nil {
- return nil
- }
- continue
- }
- if e.err = e.pe.Encodef("%s\n", string(p)); e.err != nil {
- return nil
- }
- }
-
- return encodeFirstLine
-}
-
-// Adds the first pkt-line payload: head hash, head ref and capabilities.
-// Also handles the special case when no HEAD ref is found.
-func encodeFirstLine(e *Encoder) encoderStateFn {
- head := formatHead(e.data.Head)
- separator := formatSeparator(e.data.Head)
- capabilities := formatCaps(e.data.Capabilities)
-
- if e.err = e.pe.Encodef("%s %s\x00%s\n", head, separator, capabilities); e.err != nil {
- return nil
- }
-
- return encodeRefs
-}
-
-func formatHead(h *core.Hash) string {
- if h == nil {
- return core.ZeroHash.String()
- }
-
- return h.String()
-}
-
-func formatSeparator(h *core.Hash) string {
- if h == nil {
- return noHead
- }
-
- return head
-}
-
-func formatCaps(c *packp.Capabilities) string {
- if c == nil {
- return ""
- }
-
- c.Sort()
-
- return c.String()
-}
-
-// Adds the (sorted) refs: hash SP refname EOL
-// and their peeled refs if any.
-func encodeRefs(e *Encoder) encoderStateFn {
- refs := sortRefs(e.data.References)
- for _, r := range refs {
- hash := e.data.References[r]
- if e.err = e.pe.Encodef("%s %s\n", hash.String(), r); e.err != nil {
- return nil
- }
-
- if hash, ok := e.data.Peeled[r]; ok {
- if e.err = e.pe.Encodef("%s %s^{}\n", hash.String(), r); e.err != nil {
- return nil
- }
- }
- }
-
- return encodeShallow
-}
-
-func sortRefs(m map[string]core.Hash) []string {
- ret := make([]string, 0, len(m))
- for k := range m {
- ret = append(ret, k)
- }
- sort.Strings(ret)
-
- return ret
-}
-
-// Adds the (sorted) shallows: "shallow" SP hash EOL
-func encodeShallow(e *Encoder) encoderStateFn {
- sorted := sortShallows(e.data.Shallows)
- for _, hash := range sorted {
- if e.err = e.pe.Encodef("shallow %s\n", hash); e.err != nil {
- return nil
- }
- }
-
- return encodeFlush
-}
-
-func sortShallows(c []core.Hash) []string {
- ret := []string{}
- for _, h := range c {
- ret = append(ret, h.String())
- }
- sort.Strings(ret)
-
- return ret
-}
-
-func encodeFlush(e *Encoder) encoderStateFn {
- e.err = e.pe.Flush()
- return nil
-}
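
A minimal sketch of driving this encoder end to end; the hash values are
placeholders and only illustrate the shape of the API:

    package main

    import (
        "bytes"
        "fmt"

        "gopkg.in/src-d/go-git.v4/core"
        "gopkg.in/src-d/go-git.v4/formats/packp"
        "gopkg.in/src-d/go-git.v4/formats/packp/advrefs"
    )

    func main() {
        head := core.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5") // placeholder hash

        caps := packp.NewCapabilities()
        caps.Add("ofs-delta")

        ar := &advrefs.AdvRefs{
            Head:         &head,
            Capabilities: caps,
            References: map[string]core.Hash{
                "refs/heads/master": core.NewHash("a6930aaee06755d1bdcfd943fbf614e4d92bb0c7"),
            },
        }

        var buf bytes.Buffer
        if err := advrefs.NewEncoder(&buf).Encode(ar); err != nil {
            fmt.Println(err)
            return
        }
        fmt.Printf("%q\n", buf.String()) // pkt-lines ending with a flush-pkt
    }
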
diff --git a/formats/packp/advrefs/encoder_test.go b/formats/packp/advrefs/encoder_test.go
deleted file mode 100644
index 8fb475b..0000000
--- a/formats/packp/advrefs/encoder_test.go
+++ /dev/null
@@ -1,249 +0,0 @@
-package advrefs_test
-
-import (
- "bytes"
- "strings"
-
- "gopkg.in/src-d/go-git.v4/core"
- "gopkg.in/src-d/go-git.v4/formats/packp"
- "gopkg.in/src-d/go-git.v4/formats/packp/advrefs"
- "gopkg.in/src-d/go-git.v4/formats/packp/pktline"
-
- . "gopkg.in/check.v1"
-)
-
-type SuiteEncoder struct{}
-
-var _ = Suite(&SuiteEncoder{})
-
-// returns a byte slice with the pkt-lines for the given payloads.
-func pktlines(c *C, payloads ...[]byte) []byte {
- var buf bytes.Buffer
- e := pktline.NewEncoder(&buf)
- err := e.Encode(payloads...)
- c.Assert(err, IsNil, Commentf("building pktlines for %v\n", payloads))
-
- return buf.Bytes()
-}
-
-func testEncode(c *C, input *advrefs.AdvRefs, expected []byte) {
- var buf bytes.Buffer
- e := advrefs.NewEncoder(&buf)
- err := e.Encode(input)
- c.Assert(err, IsNil)
- obtained := buf.Bytes()
-
- comment := Commentf("\nobtained = %s\nexpected = %s\n", string(obtained), string(expected))
-
- c.Assert(obtained, DeepEquals, expected, comment)
-}
-
-func (s *SuiteEncoder) TestZeroValue(c *C) {
- ar := &advrefs.AdvRefs{}
-
- expected := pktlines(c,
- []byte("0000000000000000000000000000000000000000 capabilities^{}\x00\n"),
- pktline.Flush,
- )
-
- testEncode(c, ar, expected)
-}
-
-func (s *SuiteEncoder) TestHead(c *C) {
- hash := core.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
- ar := &advrefs.AdvRefs{
- Head: &hash,
- }
-
- expected := pktlines(c,
- []byte("6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00\n"),
- pktline.Flush,
- )
-
- testEncode(c, ar, expected)
-}
-
-func (s *SuiteEncoder) TestCapsNoHead(c *C) {
- capabilities := packp.NewCapabilities()
- capabilities.Add("symref", "HEAD:/refs/heads/master")
- capabilities.Add("ofs-delta")
- capabilities.Add("multi_ack")
- ar := &advrefs.AdvRefs{
- Capabilities: capabilities,
- }
-
- expected := pktlines(c,
- []byte("0000000000000000000000000000000000000000 capabilities^{}\x00multi_ack ofs-delta symref=HEAD:/refs/heads/master\n"),
- pktline.Flush,
- )
-
- testEncode(c, ar, expected)
-}
-
-func (s *SuiteEncoder) TestCapsWithHead(c *C) {
- hash := core.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
- capabilities := packp.NewCapabilities()
- capabilities.Add("symref", "HEAD:/refs/heads/master")
- capabilities.Add("ofs-delta")
- capabilities.Add("multi_ack")
- ar := &advrefs.AdvRefs{
- Head: &hash,
- Capabilities: capabilities,
- }
-
- expected := pktlines(c,
- []byte("6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00multi_ack ofs-delta symref=HEAD:/refs/heads/master\n"),
- pktline.Flush,
- )
-
- testEncode(c, ar, expected)
-}
-
-func (s *SuiteEncoder) TestRefs(c *C) {
- references := map[string]core.Hash{
- "refs/heads/master": core.NewHash("a6930aaee06755d1bdcfd943fbf614e4d92bb0c7"),
- "refs/tags/v2.6.12-tree": core.NewHash("1111111111111111111111111111111111111111"),
- "refs/tags/v2.7.13-tree": core.NewHash("3333333333333333333333333333333333333333"),
- "refs/tags/v2.6.13-tree": core.NewHash("2222222222222222222222222222222222222222"),
- "refs/tags/v2.6.11-tree": core.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c"),
- }
- ar := &advrefs.AdvRefs{
- References: references,
- }
-
- expected := pktlines(c,
- []byte("0000000000000000000000000000000000000000 capabilities^{}\x00\n"),
- []byte("a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n"),
- []byte("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n"),
- []byte("1111111111111111111111111111111111111111 refs/tags/v2.6.12-tree\n"),
- []byte("2222222222222222222222222222222222222222 refs/tags/v2.6.13-tree\n"),
- []byte("3333333333333333333333333333333333333333 refs/tags/v2.7.13-tree\n"),
- pktline.Flush,
- )
-
- testEncode(c, ar, expected)
-}
-
-func (s *SuiteEncoder) TestPeeled(c *C) {
- references := map[string]core.Hash{
- "refs/heads/master": core.NewHash("a6930aaee06755d1bdcfd943fbf614e4d92bb0c7"),
- "refs/tags/v2.6.12-tree": core.NewHash("1111111111111111111111111111111111111111"),
- "refs/tags/v2.7.13-tree": core.NewHash("3333333333333333333333333333333333333333"),
- "refs/tags/v2.6.13-tree": core.NewHash("2222222222222222222222222222222222222222"),
- "refs/tags/v2.6.11-tree": core.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c"),
- }
- peeled := map[string]core.Hash{
- "refs/tags/v2.7.13-tree": core.NewHash("4444444444444444444444444444444444444444"),
- "refs/tags/v2.6.12-tree": core.NewHash("5555555555555555555555555555555555555555"),
- }
- ar := &advrefs.AdvRefs{
- References: references,
- Peeled: peeled,
- }
-
- expected := pktlines(c,
- []byte("0000000000000000000000000000000000000000 capabilities^{}\x00\n"),
- []byte("a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n"),
- []byte("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n"),
- []byte("1111111111111111111111111111111111111111 refs/tags/v2.6.12-tree\n"),
- []byte("5555555555555555555555555555555555555555 refs/tags/v2.6.12-tree^{}\n"),
- []byte("2222222222222222222222222222222222222222 refs/tags/v2.6.13-tree\n"),
- []byte("3333333333333333333333333333333333333333 refs/tags/v2.7.13-tree\n"),
- []byte("4444444444444444444444444444444444444444 refs/tags/v2.7.13-tree^{}\n"),
- pktline.Flush,
- )
-
- testEncode(c, ar, expected)
-}
-
-func (s *SuiteEncoder) TestShallow(c *C) {
- shallows := []core.Hash{
- core.NewHash("1111111111111111111111111111111111111111"),
- core.NewHash("4444444444444444444444444444444444444444"),
- core.NewHash("3333333333333333333333333333333333333333"),
- core.NewHash("2222222222222222222222222222222222222222"),
- }
- ar := &advrefs.AdvRefs{
- Shallows: shallows,
- }
-
- expected := pktlines(c,
- []byte("0000000000000000000000000000000000000000 capabilities^{}\x00\n"),
- []byte("shallow 1111111111111111111111111111111111111111\n"),
- []byte("shallow 2222222222222222222222222222222222222222\n"),
- []byte("shallow 3333333333333333333333333333333333333333\n"),
- []byte("shallow 4444444444444444444444444444444444444444\n"),
- pktline.Flush,
- )
-
- testEncode(c, ar, expected)
-}
-
-func (s *SuiteEncoder) TestAll(c *C) {
- hash := core.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
-
- capabilities := packp.NewCapabilities()
- capabilities.Add("symref", "HEAD:/refs/heads/master")
- capabilities.Add("ofs-delta")
- capabilities.Add("multi_ack")
-
- references := map[string]core.Hash{
- "refs/heads/master": core.NewHash("a6930aaee06755d1bdcfd943fbf614e4d92bb0c7"),
- "refs/tags/v2.6.12-tree": core.NewHash("1111111111111111111111111111111111111111"),
- "refs/tags/v2.7.13-tree": core.NewHash("3333333333333333333333333333333333333333"),
- "refs/tags/v2.6.13-tree": core.NewHash("2222222222222222222222222222222222222222"),
- "refs/tags/v2.6.11-tree": core.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c"),
- }
-
- peeled := map[string]core.Hash{
- "refs/tags/v2.7.13-tree": core.NewHash("4444444444444444444444444444444444444444"),
- "refs/tags/v2.6.12-tree": core.NewHash("5555555555555555555555555555555555555555"),
- }
-
- shallows := []core.Hash{
- core.NewHash("1111111111111111111111111111111111111111"),
- core.NewHash("4444444444444444444444444444444444444444"),
- core.NewHash("3333333333333333333333333333333333333333"),
- core.NewHash("2222222222222222222222222222222222222222"),
- }
-
- ar := &advrefs.AdvRefs{
- Head: &hash,
- Capabilities: capabilities,
- References: references,
- Peeled: peeled,
- Shallows: shallows,
- }
-
- expected := pktlines(c,
- []byte("6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00multi_ack ofs-delta symref=HEAD:/refs/heads/master\n"),
- []byte("a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n"),
- []byte("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n"),
- []byte("1111111111111111111111111111111111111111 refs/tags/v2.6.12-tree\n"),
- []byte("5555555555555555555555555555555555555555 refs/tags/v2.6.12-tree^{}\n"),
- []byte("2222222222222222222222222222222222222222 refs/tags/v2.6.13-tree\n"),
- []byte("3333333333333333333333333333333333333333 refs/tags/v2.7.13-tree\n"),
- []byte("4444444444444444444444444444444444444444 refs/tags/v2.7.13-tree^{}\n"),
- []byte("shallow 1111111111111111111111111111111111111111\n"),
- []byte("shallow 2222222222222222222222222222222222222222\n"),
- []byte("shallow 3333333333333333333333333333333333333333\n"),
- []byte("shallow 4444444444444444444444444444444444444444\n"),
- pktline.Flush,
- )
-
- testEncode(c, ar, expected)
-}
-
-func (s *SuiteEncoder) TestErrorTooLong(c *C) {
- references := map[string]core.Hash{
- strings.Repeat("a", pktline.MaxPayloadSize): core.NewHash("a6930aaee06755d1bdcfd943fbf614e4d92bb0c7"),
- }
- ar := &advrefs.AdvRefs{
- References: references,
- }
-
- var buf bytes.Buffer
- e := advrefs.NewEncoder(&buf)
- err := e.Encode(ar)
- c.Assert(err, ErrorMatches, ".*payload is too long.*")
-}
diff --git a/formats/packp/capabilities.go b/formats/packp/capabilities.go
deleted file mode 100644
index d77c2fa..0000000
--- a/formats/packp/capabilities.go
+++ /dev/null
@@ -1,136 +0,0 @@
-package packp
-
-import (
- "fmt"
- "sort"
- "strings"
-)
-
-// Capabilities contains all the server capabilities
-// https://github.com/git/git/blob/master/Documentation/technical/protocol-capabilities.txt
-type Capabilities struct {
- m map[string]*Capability
- o []string
-}
-
-// Capability represents a server capability
-type Capability struct {
- Name string
- Values []string
-}
-
-// NewCapabilities returns a new Capabilities struct
-func NewCapabilities() *Capabilities {
- return &Capabilities{
- m: make(map[string]*Capability),
- }
-}
-
-func (c *Capabilities) IsEmpty() bool {
- return len(c.o) == 0
-}
-
-// Decode decodes a string
-func (c *Capabilities) Decode(raw string) {
- params := strings.Split(raw, " ")
- for _, p := range params {
- s := strings.SplitN(p, "=", 2)
-
- var value string
- if len(s) == 2 {
- value = s[1]
- }
-
- c.Add(s[0], value)
- }
-}
-
-// Get returns the values for a capability
-func (c *Capabilities) Get(capability string) *Capability {
- return c.m[capability]
-}
-
-// Set sets a capability removing the values
-func (c *Capabilities) Set(capability string, values ...string) {
- if _, ok := c.m[capability]; ok {
- delete(c.m, capability)
- }
-
- c.Add(capability, values...)
-}
-
-// Add adds a capability, values are optional
-func (c *Capabilities) Add(capability string, values ...string) {
- if !c.Supports(capability) {
- c.m[capability] = &Capability{Name: capability}
- c.o = append(c.o, capability)
- }
-
- if len(values) == 0 {
- return
- }
-
- c.m[capability].Values = append(c.m[capability].Values, values...)
-}
-
-// Supports returns true if capability is present
-func (c *Capabilities) Supports(capability string) bool {
- _, ok := c.m[capability]
- return ok
-}
-
-// SymbolicReference returns the reference for a given symbolic reference
-func (c *Capabilities) SymbolicReference(sym string) string {
- if !c.Supports("symref") {
- return ""
- }
-
- for _, symref := range c.Get("symref").Values {
- parts := strings.Split(symref, ":")
- if len(parts) != 2 {
- continue
- }
-
- if parts[0] == sym {
- return parts[1]
- }
- }
-
- return ""
-}
-
-// Sorts capabilities in increasing order of their name
-func (c *Capabilities) Sort() {
- sort.Strings(c.o)
-}
-
-func (c *Capabilities) String() string {
- if len(c.o) == 0 {
- return ""
- }
-
- var o string
- for _, key := range c.o {
- cap := c.m[key]
-
- added := false
- for _, value := range cap.Values {
- if value == "" {
- continue
- }
-
- added = true
- o += fmt.Sprintf("%s=%s ", key, value)
- }
-
- if len(cap.Values) == 0 || !added {
- o += key + " "
- }
- }
-
- if len(o) == 0 {
- return o
- }
-
- return o[:len(o)-1]
-}
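
A short usage sketch of the API above, fed with a typical capability list as
it appears after the NUL byte of the first advertised ref:

    package main

    import (
        "fmt"

        "gopkg.in/src-d/go-git.v4/formats/packp"
    )

    func main() {
        caps := packp.NewCapabilities()
        // Parse the capability list from the wire.
        caps.Decode("multi_ack ofs-delta symref=HEAD:refs/heads/master")

        fmt.Println(caps.Supports("ofs-delta"))     // true
        fmt.Println(caps.SymbolicReference("HEAD")) // refs/heads/master
        fmt.Println(caps.String())                  // multi_ack ofs-delta symref=HEAD:refs/heads/master
    }
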
diff --git a/formats/packp/capabilities_test.go b/formats/packp/capabilities_test.go
deleted file mode 100644
index e42a0c7..0000000
--- a/formats/packp/capabilities_test.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package packp
-
-import (
- "testing"
-
- . "gopkg.in/check.v1"
-)
-
-func Test(t *testing.T) { TestingT(t) }
-
-type SuiteCapabilities struct{}
-
-var _ = Suite(&SuiteCapabilities{})
-
-func (s *SuiteCapabilities) TestDecode(c *C) {
- cap := NewCapabilities()
- cap.Decode("symref=foo symref=qux thin-pack")
-
- c.Assert(cap.m, HasLen, 2)
- c.Assert(cap.Get("symref").Values, DeepEquals, []string{"foo", "qux"})
- c.Assert(cap.Get("thin-pack").Values, DeepEquals, []string{""})
-}
-
-func (s *SuiteCapabilities) TestSet(c *C) {
- cap := NewCapabilities()
- cap.Add("symref", "foo", "qux")
- cap.Set("symref", "bar")
-
- c.Assert(cap.m, HasLen, 1)
- c.Assert(cap.Get("symref").Values, DeepEquals, []string{"bar"})
-}
-
-func (s *SuiteCapabilities) TestSetEmpty(c *C) {
- cap := NewCapabilities()
- cap.Set("foo", "bar")
-
- c.Assert(cap.Get("foo").Values, HasLen, 1)
-}
-
-func (s *SuiteCapabilities) TestAdd(c *C) {
- cap := NewCapabilities()
- cap.Add("symref", "foo", "qux")
- cap.Add("thin-pack")
-
- c.Assert(cap.String(), Equals, "symref=foo symref=qux thin-pack")
-}
diff --git a/formats/packp/doc.go b/formats/packp/doc.go
deleted file mode 100644
index 4950d1d..0000000
--- a/formats/packp/doc.go
+++ /dev/null
@@ -1,724 +0,0 @@
-package packp
-
-/*
-
-A nice way to trace the real data transmitted and received by git is to use:
-
-GIT_TRACE_PACKET=true git ls-remote http://github.com/src-d/go-git
-GIT_TRACE_PACKET=true git clone http://github.com/src-d/go-git
-
-Here follows a copy of the current protocol specification at the time of
-this writing.
-
-(Please notice that most HTTP git servers will add a flush-pkt after the
-first pkt-line when using smart HTTP.)
-
-
-Documentation Common to Pack and Http Protocols
-===============================================
-
-ABNF Notation
--------------
-
-ABNF notation as described by RFC 5234 is used within the protocol documents,
-except the following replacement core rules are used:
-----
- HEXDIG = DIGIT / "a" / "b" / "c" / "d" / "e" / "f"
-----
-
-We also define the following common rules:
-----
- NUL = %x00
- zero-id = 40*"0"
- obj-id = 40*(HEXDIG)
-
- refname = "HEAD"
- refname /= "refs/" <see discussion below>
-----
-
-A refname is a hierarchical octet string beginning with "refs/" and
-not violating the 'git-check-ref-format' command's validation rules.
-More specifically:
-
-. They can include slash `/` for hierarchical (directory)
- grouping, but no slash-separated component can begin with a
- dot `.`.
-
-. They must contain at least one `/`. This enforces the presence of a
- category like `heads/`, `tags/` etc. but the actual names are not
- restricted.
-
-. They cannot have two consecutive dots `..` anywhere.
-
-. They cannot have ASCII control characters (i.e. bytes whose
- values are lower than \040, or \177 `DEL`), space, tilde `~`,
- caret `^`, colon `:`, question-mark `?`, asterisk `*`,
- or open bracket `[` anywhere.
-
-. They cannot end with a slash `/` or a dot `.`.
-
-. They cannot end with the sequence `.lock`.
-
-. They cannot contain a sequence `@{`.
-
-. They cannot contain a `\\`.
-
-
-pkt-line Format
----------------
-
-Much (but not all) of the payload is described around pkt-lines.
-
-A pkt-line is a variable length binary string. The first four bytes
-of the line, the pkt-len, indicates the total length of the line,
-in hexadecimal. The pkt-len includes the 4 bytes used to contain
-the length's hexadecimal representation.
-
-A pkt-line MAY contain binary data, so implementors MUST ensure
-pkt-line parsing/formatting routines are 8-bit clean.
-
-A non-binary line SHOULD BE terminated by an LF, which if present
-MUST be included in the total length. Receivers MUST treat pkt-lines
-with non-binary data the same whether or not they contain the trailing
-LF (stripping the LF if present, and not complaining when it is
-missing).
-
-The maximum length of a pkt-line's data component is 65516 bytes.
-Implementations MUST NOT send pkt-line whose length exceeds 65520
-(65516 bytes of payload + 4 bytes of length data).
-
-Implementations SHOULD NOT send an empty pkt-line ("0004").
-
-A pkt-line with a length field of 0 ("0000"), called a flush-pkt,
-is a special case and MUST be handled differently than an empty
-pkt-line ("0004").
-
-----
- pkt-line = data-pkt / flush-pkt
-
- data-pkt = pkt-len pkt-payload
- pkt-len = 4*(HEXDIG)
- pkt-payload = (pkt-len - 4)*(OCTET)
-
- flush-pkt = "0000"
-----
-
-Examples (as C-style strings):
-
-----
- pkt-line actual value
- ---------------------------------
- "0006a\n" "a\n"
- "0005a" "a"
- "000bfoobar\n" "foobar\n"
- "0004" ""
-----
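
A worked example of this framing in Go; the pktline package later in this
change implements the same arithmetic without allocating (a minimal sketch):

    import "fmt"

    // pktLine frames a payload as a pkt-line: four ASCII hex digits
    // holding len(payload)+4, followed by the payload itself.
    func pktLine(payload string) string {
        return fmt.Sprintf("%04x%s", len(payload)+4, payload)
    }

    // pktLine("a\n")      == "0006a\n"
    // pktLine("foobar\n") == "000bfoobar\n"
    // A flush-pkt is written literally as "0000", never through this helper.
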
-
-Packfile transfer protocols
-===========================
-
-Git supports transferring data in packfiles over the ssh://, git://, http:// and
-file:// transports. There exist two sets of protocols, one for pushing
-data from a client to a server and another for fetching data from a
-server to a client. The three transports (ssh, git, file) use the same
-protocol to transfer data. http is documented in http-protocol.txt.
-
-The processes invoked in the canonical Git implementation are 'upload-pack'
-on the server side and 'fetch-pack' on the client side for fetching data;
-then 'receive-pack' on the server and 'send-pack' on the client for pushing
-data. The protocol functions to have a server tell a client what is
-currently on the server, then for the two to negotiate the smallest amount
-of data to send in order to fully update one or the other.
-
-pkt-line Format
----------------
-
-The descriptions below build on the pkt-line format described in
-protocol-common.txt. When the grammar indicate `PKT-LINE(...)`, unless
-otherwise noted the usual pkt-line LF rules apply: the sender SHOULD
-include a LF, but the receiver MUST NOT complain if it is not present.
-
-Transports
-----------
-There are three transports over which the packfile protocol is
-initiated. The Git transport is a simple, unauthenticated server that
-takes the command (almost always 'upload-pack', though Git
-servers can be configured to be globally writable, in which case 'receive-
-pack' initiation is also allowed) with which the client wishes to
-communicate and executes it and connects it to the requesting
-process.
-
-In the SSH transport, the client just runs the 'upload-pack'
-or 'receive-pack' process on the server over the SSH protocol and then
-communicates with that invoked process over the SSH connection.
-
-The file:// transport runs the 'upload-pack' or 'receive-pack'
-process locally and communicates with it over a pipe.
-
-Git Transport
--------------
-
-The Git transport starts off by sending the command and repository
-on the wire using the pkt-line format, followed by a NUL byte and a
-hostname parameter, terminated by a NUL byte.
-
- 0032git-upload-pack /project.git\0host=myserver.com\0
-
---
- git-proto-request = request-command SP pathname NUL [ host-parameter NUL ]
- request-command = "git-upload-pack" / "git-receive-pack" /
- "git-upload-archive" ; case sensitive
- pathname = *( %x01-ff ) ; exclude NUL
- host-parameter = "host=" hostname [ ":" port ]
---
-
-Only host-parameter is allowed in the git-proto-request. Clients
-MUST NOT attempt to send additional parameters. It is used for the
-git-daemon name based virtual hosting. See --interpolated-path
-option to git daemon, with the %H/%CH format characters.
-
-Basically what the Git client is doing to connect to an 'upload-pack'
-process on the server side over the Git protocol is this:
-
- $ echo -e -n \
- "0039git-upload-pack /schacon/gitbook.git\0host=example.com\0" |
- nc -v example.com 9418
-
-If the server refuses the request for some reason, it could abort
-gracefully with an error message.
-
-----
- error-line = PKT-LINE("ERR" SP explanation-text)
-----
-
-
-SSH Transport
--------------
-
-Initiating the upload-pack or receive-pack processes over SSH is
-executing the binary on the server via SSH remote execution.
-It is basically equivalent to running this:
-
- $ ssh git.example.com "git-upload-pack '/project.git'"
-
-For a server to support Git pushing and pulling for a given user over
-SSH, that user needs to be able to execute one or both of those
-commands via the SSH shell that they are provided on login. On some
-systems, that shell access is limited to only being able to run those
-two commands, or even just one of them.
-
-In an ssh:// format URI, it's absolute in the URI, so the '/' after
-the host name (or port number) is sent as an argument, which is then
-read by the remote git-upload-pack exactly as is, so it's effectively
-an absolute path in the remote filesystem.
-
- git clone ssh://user@example.com/project.git
- |
- v
- ssh user@example.com "git-upload-pack '/project.git'"
-
-In a "user@host:path" format URI, its relative to the user's home
-directory, because the Git client will run:
-
- git clone user@example.com:project.git
- |
- v
- ssh user@example.com "git-upload-pack 'project.git'"
-
-The exception is if a '~' is used, in which case
-we execute it without the leading '/'.
-
- ssh://user@example.com/~alice/project.git
- |
- v
- ssh user@example.com "git-upload-pack '~alice/project.git'"
-
-A few things to remember here:
-
-- The "command name" is spelled with dash (e.g. git-upload-pack), but
- this can be overridden by the client;
-
-- The repository path is always quoted with single quotes.
-
-Fetching Data From a Server
----------------------------
-
-When one Git repository wants to get data that a second repository
-has, the first can 'fetch' from the second. This operation determines
-what data the server has that the client does not, and then streams that
-data down to the client in packfile format.
-
-
-Reference Discovery
--------------------
-
-When the client initially connects the server will immediately respond
-with a listing of each reference it has (all branches and tags) along
-with the object name that each reference currently points to.
-
- $ echo -e -n "0039git-upload-pack /schacon/gitbook.git\0host=example.com\0" |
- nc -v example.com 9418
- 00887217a7c7e582c46cec22a130adf4b9d7d950fba0 HEAD\0multi_ack thin-pack
- side-band side-band-64k ofs-delta shallow no-progress include-tag
- 00441d3fcd5ced445d1abc402225c0b8a1299641f497 refs/heads/integration
- 003f7217a7c7e582c46cec22a130adf4b9d7d950fba0 refs/heads/master
- 003cb88d2441cac0977faf98efc80305012112238d9d refs/tags/v0.9
- 003c525128480b96c89e6418b1e40909bf6c5b2d580f refs/tags/v1.0
- 003fe92df48743b7bc7d26bcaabfddde0a1e20cae47c refs/tags/v1.0^{}
- 0000
-
-The returned response is a pkt-line stream describing each ref and
-its current value. The stream MUST be sorted by name according to
-the C locale ordering.
-
-If HEAD is a valid ref, HEAD MUST appear as the first advertised
-ref. If HEAD is not a valid ref, HEAD MUST NOT appear in the
-advertisement list at all, but other refs may still appear.
-
-The stream MUST include capability declarations behind a NUL on the
-first ref. The peeled value of a ref (that is "ref^{}") MUST be
-immediately after the ref itself, if present. A conforming server
-MUST peel the ref if it's an annotated tag.
-
-----
- advertised-refs = (no-refs / list-of-refs)
- *shallow
- flush-pkt
-
- no-refs = PKT-LINE(zero-id SP "capabilities^{}"
- NUL capability-list)
-
- list-of-refs = first-ref *other-ref
- first-ref = PKT-LINE(obj-id SP refname
- NUL capability-list)
-
- other-ref = PKT-LINE(other-tip / other-peeled)
- other-tip = obj-id SP refname
- other-peeled = obj-id SP refname "^{}"
-
- shallow = PKT-LINE("shallow" SP obj-id)
-
- capability-list = capability *(SP capability)
- capability = 1*(LC_ALPHA / DIGIT / "-" / "_")
- LC_ALPHA = %x61-7A
-----
-
-Server and client MUST use lowercase for obj-id, both MUST treat obj-id
-as case-insensitive.
-
-See protocol-capabilities.txt for a list of allowed server capabilities
-and descriptions.
-
-Packfile Negotiation
---------------------
-After reference and capabilities discovery, the client can decide to
-terminate the connection by sending a flush-pkt, telling the server it can
-now gracefully terminate, and disconnect, when it does not need any pack
-data. This can happen with the ls-remote command, and also can happen when
-the client already is up-to-date.
-
-Otherwise, it enters the negotiation phase, where the client and
-server determine what the minimal packfile necessary for transport is,
-by telling the server what objects it wants, its shallow objects
-(if any), and the maximum commit depth it wants (if any). The client
-will also send a list of the capabilities it wants to be in effect,
-out of what the server said it could do with the first 'want' line.
-
-----
- upload-request = want-list
- *shallow-line
- *1depth-request
- flush-pkt
-
- want-list = first-want
- *additional-want
-
- shallow-line = PKT-LINE("shallow" SP obj-id)
-
- depth-request = PKT-LINE("deepen" SP depth) /
- PKT-LINE("deepen-since" SP timestamp) /
- PKT-LINE("deepen-not" SP ref)
-
- first-want = PKT-LINE("want" SP obj-id SP capability-list)
- additional-want = PKT-LINE("want" SP obj-id)
-
- depth = 1*DIGIT
-----
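
As a sketch, a minimal upload-request built with the pktline encoder from
this change; the object ids are placeholders and error checks are trimmed:

    import (
        "bytes"

        "gopkg.in/src-d/go-git.v4/formats/packp/pktline"
    )

    func uploadRequest() []byte {
        var buf bytes.Buffer
        e := pktline.NewEncoder(&buf)

        // Only the first-want carries the capability list.
        _ = e.Encodef("want %s %s\n", "74730d410fcb6603ace96f1dc55ea6196122532d", "ofs-delta")
        _ = e.Encodef("want %s\n", "7d1665144a3a975c05f1f43902ddaf084e784dbe")
        _ = e.Encodef("deepen %d\n", 1)
        _ = e.Flush() // the flush-pkt ends the request

        return buf.Bytes()
    }
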
-
-Clients MUST send all the obj-ids it wants from the reference
-discovery phase as 'want' lines. Clients MUST send at least one
-'want' command in the request body. Clients MUST NOT mention an
-obj-id in a 'want' command which did not appear in the response
-obtained through ref discovery.
-
-The client MUST write all obj-ids which it only has shallow copies
-of (meaning that it does not have the parents of a commit) as
-'shallow' lines so that the server is aware of the limitations of
-the client's history.
-
-The client now sends the maximum commit history depth it wants for
-this transaction, which is the number of commits it wants from the
-tip of the history, if any, as a 'deepen' line. A depth of 0 is the
-same as not making a depth request. The client does not want to receive
-any commits beyond this depth, nor does it want objects needed only to
-complete those commits. Commits whose parents are not received as a
-result are defined as shallow and marked as such in the server. This
-information is sent back to the client in the next step.
-
-Once all the 'want's and 'shallow's (and optional 'deepen') are
-transferred, clients MUST send a flush-pkt, to tell the server side
-that it is done sending the list.
-
-Otherwise, if the client sent a positive depth request, the server
-will determine which commits will and will not be shallow and
-send this information to the client. If the client did not request
-a positive depth, this step is skipped.
-
-----
- shallow-update = *shallow-line
- *unshallow-line
- flush-pkt
-
- shallow-line = PKT-LINE("shallow" SP obj-id)
-
- unshallow-line = PKT-LINE("unshallow" SP obj-id)
-----
-
-If the client has requested a positive depth, the server will compute
-the set of commits which are no deeper than the desired depth. The set
-of commits starts at the client's wants.
-
-The server writes 'shallow' lines for each
-commit whose parents will not be sent as a result. The server writes
-an 'unshallow' line for each commit which the client has indicated is
-shallow, but is no longer shallow at the currently requested depth
-(that is, its parents will now be sent). The server MUST NOT mark
-as unshallow anything which the client has not indicated was shallow.
-
-Now the client will send a list of the obj-ids it has using 'have'
-lines, so the server can make a packfile that only contains the objects
-that the client needs. In multi_ack mode, the canonical implementation
-will send up to 32 of these at a time, then will send a flush-pkt. The
-canonical implementation will skip ahead and send the next 32 immediately,
-so that there is always a block of 32 "in-flight on the wire" at a time.
-
-----
- upload-haves = have-list
- compute-end
-
- have-list = *have-line
- have-line = PKT-LINE("have" SP obj-id)
- compute-end = flush-pkt / PKT-LINE("done")
-----
-
-If the server reads 'have' lines, it then will respond by ACKing any
-of the obj-ids the client said it had that the server also has. The
-server will ACK obj-ids differently depending on which ack mode is
-chosen by the client.
-
-In multi_ack mode:
-
- * the server will respond with 'ACK obj-id continue' for any common
- commits.
-
- * once the server has found an acceptable common base commit and is
- ready to make a packfile, it will blindly ACK all 'have' obj-ids
- back to the client.
-
- * the server will then send a 'NAK' and then wait for another response
- from the client - either a 'done' or another list of 'have' lines.
-
-In multi_ack_detailed mode:
-
- * the server will differentiate the ACKs where it is signaling
- that it is ready to send data with 'ACK obj-id ready' lines, and
- signals the identified common commits with 'ACK obj-id common' lines.
-
-Without either multi_ack or multi_ack_detailed:
-
- * upload-pack sends "ACK obj-id" on the first common object it finds.
- After that it says nothing until the client gives it a "done".
-
- * upload-pack sends "NAK" on a flush-pkt if no common object
- has been found yet. If one has been found, and thus an ACK
- was already sent, it's silent on the flush-pkt.
-
-After the client has gotten enough ACK responses that it can determine
-that the server has enough information to send an efficient packfile
-(in the canonical implementation, this is determined when it has received
-enough ACKs that it can color everything left in the --date-order queue
-as common with the server, or the --date-order queue is empty), or the
-client determines that it wants to give up (in the canonical implementation,
-this is determined when the client sends 256 'have' lines without getting
-any of them ACKed by the server - meaning there is nothing in common and
-the server should just send all of its objects), then the client will send
-a 'done' command. The 'done' command signals to the server that the client
-is ready to receive its packfile data.
-
-However, the 256 limit *only* turns on in the canonical client
-implementation if we have received at least one "ACK %s continue"
-during a prior round. This helps to ensure that at least one common
-ancestor is found before we give up entirely.
-
-Once the 'done' line is read from the client, the server will either
-send a final 'ACK obj-id' or it will send a 'NAK'. 'obj-id' is the object
-name of the last commit determined to be common. The server only sends
-ACK after 'done' if there is at least one common base and multi_ack or
-multi_ack_detailed is enabled. The server always sends NAK after 'done'
-if there is no common base found.
-
-Then the server will start sending its packfile data.
-
-----
- server-response = *ack_multi ack / nak
- ack_multi = PKT-LINE("ACK" SP obj-id ack_status)
- ack_status = "continue" / "common" / "ready"
- ack = PKT-LINE("ACK" SP obj-id)
- nak = PKT-LINE("NAK")
-----
-
-A simple clone may look like this (with no 'have' lines):
-
-----
- C: 0054want 74730d410fcb6603ace96f1dc55ea6196122532d multi_ack \
- side-band-64k ofs-delta\n
- C: 0032want 7d1665144a3a975c05f1f43902ddaf084e784dbe\n
- C: 0032want 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a\n
- C: 0032want 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01\n
- C: 0032want 74730d410fcb6603ace96f1dc55ea6196122532d\n
- C: 0000
- C: 0009done\n
-
- S: 0008NAK\n
- S: [PACKFILE]
-----
-
-An incremental update (fetch) response might look like this:
-
-----
- C: 0054want 74730d410fcb6603ace96f1dc55ea6196122532d multi_ack \
- side-band-64k ofs-delta\n
- C: 0032want 7d1665144a3a975c05f1f43902ddaf084e784dbe\n
- C: 0032want 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a\n
- C: 0000
- C: 0032have 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01\n
- C: [30 more have lines]
- C: 0032have 74730d410fcb6603ace96f1dc55ea6196122532d\n
- C: 0000
-
- S: 003aACK 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01 continue\n
- S: 003aACK 74730d410fcb6603ace96f1dc55ea6196122532d continue\n
- S: 0008NAK\n
-
- C: 0009done\n
-
- S: 0031ACK 74730d410fcb6603ace96f1dc55ea6196122532d\n
- S: [PACKFILE]
-----
-
-
-Packfile Data
--------------
-
-Now that the client and server have finished negotiating the minimal
-amount of data that needs to be sent to the client, the server will
-construct and send the required data in packfile format.
-
-See pack-format.txt for what the packfile itself actually looks like.
-
-If 'side-band' or 'side-band-64k' capabilities have been specified by
-the client, the server will send the packfile data multiplexed.
-
-Each packet starts with the packet-line length of the amount of data
-that follows, and is followed by a single byte specifying the sideband
-the following data is coming in on.
-
-In 'side-band' mode, it will send up to 999 data bytes plus 1 control
-code, for a total of up to 1000 bytes in a pkt-line. In 'side-band-64k'
-mode it will send up to 65519 data bytes plus 1 control code, for a
-total of up to 65520 bytes in a pkt-line.
-
-The sideband byte will be a '1', '2' or a '3'. Sideband '1' will contain
-packfile data, sideband '2' will be used for progress information that the
-client will generally print to stderr and sideband '3' is used for error
-information.
-
-If no 'side-band' capability was specified, the server will stream the
-entire packfile without multiplexing.
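
A sketch of demultiplexing such a side-band stream with the pktline scanner
from this change; write errors are ignored for brevity:

    import (
        "fmt"
        "io"

        "gopkg.in/src-d/go-git.v4/formats/packp/pktline"
    )

    func demux(r io.Reader, pack, progress io.Writer) error {
        s := pktline.NewScanner(r)
        for s.Scan() {
            payload := s.Bytes()
            if len(payload) == 0 {
                continue // flush-pkt
            }
            data := payload[1:]
            switch payload[0] {
            case 1:
                pack.Write(data) // packfile data
            case 2:
                progress.Write(data) // progress, usually relayed to stderr
            case 3:
                return fmt.Errorf("remote error: %s", data)
            }
        }
        return s.Err()
    }
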
-
-
-Pushing Data To a Server
-------------------------
-
-Pushing data to a server will invoke the 'receive-pack' process on the
-server, which will allow the client to tell it which references it should
-update and then send all the data the server will need for those new
-references to be complete. Once all the data is received and validated,
-the server will then update its references to what the client specified.
-
-Authentication
---------------
-
-The protocol itself contains no authentication mechanisms. That is to be
-handled by the transport, such as SSH, before the 'receive-pack' process is
-invoked. If 'receive-pack' is configured over the Git transport, those
-repositories will be writable by anyone who can access that port (9418) as
-that transport is unauthenticated.
-
-Reference Discovery
--------------------
-
-The reference discovery phase is done nearly the same way as it is in the
-fetching protocol. Each reference obj-id and name on the server is sent
-in packet-line format to the client, followed by a flush-pkt. The only
-real difference is that the capability listing is different - the only
-possible values are 'report-status', 'delete-refs', 'ofs-delta' and
-'push-options'.
-
-Reference Update Request and Packfile Transfer
-----------------------------------------------
-
-Once the client knows what references the server is at, it can send a
-list of reference update requests. For each reference on the server
-that it wants to update, it sends a line listing the obj-id currently on
-the server, the obj-id the client would like to update it to and the name
-of the reference.
-
-This list is followed by a flush-pkt. Then the push options are transmitted
-one per packet followed by another flush-pkt. After that the packfile that
-should contain all the objects that the server will need to complete the new
-references will be sent.
-
-----
- update-request = *shallow ( command-list | push-cert ) [packfile]
-
- shallow = PKT-LINE("shallow" SP obj-id)
-
- command-list = PKT-LINE(command NUL capability-list)
- *PKT-LINE(command)
- flush-pkt
-
- command = create / delete / update
- create = zero-id SP new-id SP name
- delete = old-id SP zero-id SP name
- update = old-id SP new-id SP name
-
- old-id = obj-id
- new-id = obj-id
-
- push-cert = PKT-LINE("push-cert" NUL capability-list LF)
- PKT-LINE("certificate version 0.1" LF)
- PKT-LINE("pusher" SP ident LF)
- PKT-LINE("pushee" SP url LF)
- PKT-LINE("nonce" SP nonce LF)
- PKT-LINE(LF)
- *PKT-LINE(command LF)
- *PKT-LINE(gpg-signature-lines LF)
- PKT-LINE("push-cert-end" LF)
-
- packfile = "PACK" 28*(OCTET)
-----
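
A sketch of encoding a single update command with the pktline encoder from
this change; oldID, newID and the refname are placeholders, and the packfile
bytes would be written to the stream after the flush-pkt:

    import (
        "io"

        "gopkg.in/src-d/go-git.v4/formats/packp/pktline"
    )

    func updateRequest(w io.Writer, oldID, newID string) error {
        e := pktline.NewEncoder(w)

        // The first (and here only) command carries the capability list
        // after a NUL byte.
        if err := e.Encodef("%s %s %s\x00report-status", oldID, newID, "refs/heads/master"); err != nil {
            return err
        }

        return e.Flush() // the packfile, if any, follows this flush-pkt
    }
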
-
-If the receiving end does not support delete-refs, the sending end MUST
-NOT ask for delete command.
-
-If the receiving end does not support push-cert, the sending end
-MUST NOT send a push-cert command. When a push-cert command is
-sent, command-list MUST NOT be sent; the commands recorded in the
-push certificate are used instead.
-
-The packfile MUST NOT be sent if the only command used is 'delete'.
-
-A packfile MUST be sent if either create or update command is used,
-even if the server already has all the necessary objects. In this
-case the client MUST send an empty packfile. The only time this
-is likely to happen is if the client is creating
-a new branch or a tag that points to an existing obj-id.
-
-The server will receive the packfile, unpack it, then validate each
-reference that is being updated that it hasn't changed while the request
-was being processed (the obj-id is still the same as the old-id), and
-it will run any update hooks to make sure that the update is acceptable.
-If all of that is fine, the server will then update the references.
-
-Push Certificate
-----------------
-
-A push certificate begins with a set of header lines. After the
-header and an empty line, the protocol commands follow, one per
-line. Note that the trailing LF in push-cert PKT-LINEs is _not_
-optional; it must be present.
-
-Currently, the following header fields are defined:
-
-`pusher` ident::
- Identify the GPG key in "Human Readable Name <email@address>"
- format.
-
-`pushee` url::
- The repository URL (anonymized, if the URL contains
- authentication material) the user who ran `git push`
- intended to push into.
-
-`nonce` nonce::
- The 'nonce' string the receiving repository asked the
- pushing user to include in the certificate, to prevent
- replay attacks.
-
-The GPG signature lines are a detached signature for the contents
-recorded in the push certificate before the signature block begins.
-The detached signature is used to certify that the commands were
-given by the pusher, who must be the signer.
-
-Report Status
--------------
-
-After receiving the pack data from the sender, the receiver sends a
-report if 'report-status' capability is in effect.
-It is a short listing of what happened in that update. It will first
-list the status of the packfile unpacking as either 'unpack ok' or
-'unpack [error]'. Then it will list the status for each of the references
-that it tried to update. Each line is either 'ok [refname]' if the
-update was successful, or 'ng [refname] [error]' if the update was not.
-
-----
- report-status = unpack-status
- 1*(command-status)
- flush-pkt
-
- unpack-status = PKT-LINE("unpack" SP unpack-result)
- unpack-result = "ok" / error-msg
-
- command-status = command-ok / command-fail
- command-ok = PKT-LINE("ok" SP refname)
- command-fail = PKT-LINE("ng" SP refname SP error-msg)
-
- error-msg = 1*(OCTET) ; where not "ok"
-----
-
-Updates can be unsuccessful for a number of reasons. The reference can have
-changed since the reference discovery phase was originally sent, meaning
-someone pushed in the meantime. The reference being pushed could be a
-non-fast-forward reference and the update hooks or configuration could be
-set to not allow that, etc. Also, some references can be updated while others
-can be rejected.
-
-An example client/server communication might look like this:
-
-----
- S: 007c74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/local\0report-status delete-refs ofs-delta\n
- S: 003e7d1665144a3a975c05f1f43902ddaf084e784dbe refs/heads/debug\n
- S: 003f74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/master\n
- S: 003f74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/team\n
- S: 0000
-
- C: 003e7d1665144a3a975c05f1f43902ddaf084e784dbe 74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/debug\n
- C: 003e74730d410fcb6603ace96f1dc55ea6196122532d 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a refs/heads/master\n
- C: 0000
- C: [PACKDATA]
-
- S: 000eunpack ok\n
- S: 0018ok refs/heads/debug\n
- S: 002ang refs/heads/master non-fast-forward\n
-----
-*/
diff --git a/formats/packp/pktline/encoder.go b/formats/packp/pktline/encoder.go
deleted file mode 100644
index 0a88a9b..0000000
--- a/formats/packp/pktline/encoder.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Package pktline implements reading payloads from pkt-lines and encoding pkt-lines from payloads.
-package pktline
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
-)
-
-// An Encoder writes pkt-lines to an output stream.
-type Encoder struct {
- w io.Writer
-}
-
-const (
- // MaxPayloadSize is the maximum payload size of a pkt-line in bytes.
- MaxPayloadSize = 65516
-)
-
-var (
- // FlushPkt are the contents of a flush-pkt pkt-line.
- FlushPkt = []byte{'0', '0', '0', '0'}
- // Flush is the payload to use with the Encode method to encode a flush-pkt.
- Flush = []byte{}
- // FlushString is the payload to use with the EncodeString method to encode a flush-pkt.
- FlushString = ""
- // ErrPayloadTooLong is returned by the Encode methods when any of the
- // provided payloads is bigger than MaxPayloadSize.
- ErrPayloadTooLong = errors.New("payload is too long")
-)
-
-// NewEncoder returns a new encoder that writes to w.
-func NewEncoder(w io.Writer) *Encoder {
- return &Encoder{
- w: w,
- }
-}
-
-// Flush encodes a flush-pkt to the output stream.
-func (e *Encoder) Flush() error {
- _, err := e.w.Write(FlushPkt)
- return err
-}
-
-// Encode encodes a pkt-line with the payload specified and write it to
-// the output stream. If several payloads are specified, each of them
-// will get streamed in their own pkt-lines.
-func (e *Encoder) Encode(payloads ...[]byte) error {
- for _, p := range payloads {
- if err := e.encodeLine(p); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (e *Encoder) encodeLine(p []byte) error {
- if len(p) > MaxPayloadSize {
- return ErrPayloadTooLong
- }
-
- if bytes.Equal(p, Flush) {
- if err := e.Flush(); err != nil {
- return err
- }
- return nil
- }
-
- n := len(p) + 4
- if _, err := e.w.Write(asciiHex16(n)); err != nil {
- return err
- }
- if _, err := e.w.Write(p); err != nil {
- return err
- }
-
- return nil
-}
-
-// Returns the hexadecimal ascii representation of the 16 least
-// significant bits of n. The length of the returned slice will always
-// be 4. Example: if n is 1234 (0x4d2), the return value will be
-// []byte{'0', '4', 'd', '2'}.
-func asciiHex16(n int) []byte {
- var ret [4]byte
- ret[0] = byteToASCIIHex(byte(n & 0xf000 >> 12))
- ret[1] = byteToASCIIHex(byte(n & 0x0f00 >> 8))
- ret[2] = byteToASCIIHex(byte(n & 0x00f0 >> 4))
- ret[3] = byteToASCIIHex(byte(n & 0x000f))
-
- return ret[:]
-}
-
-// turns a byte into its hexadecimal ascii representation. Example:
-// from 11 (0xb) to 'b'.
-func byteToASCIIHex(n byte) byte {
- if n < 10 {
- return '0' + n
- }
-
- return 'a' - 10 + n
-}
-
-// EncodeString works similarly as Encode but payloads are specified as strings.
-func (e *Encoder) EncodeString(payloads ...string) error {
- for _, p := range payloads {
- if err := e.Encode([]byte(p)); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Encodef encodes a single pkt-line with the payload formatted as
-// the format specifier and the rest of the arguments suggest.
-func (e *Encoder) Encodef(format string, a ...interface{}) error {
- return e.EncodeString(
- fmt.Sprintf(format, a...),
- )
-}
diff --git a/formats/packp/pktline/encoder_test.go b/formats/packp/pktline/encoder_test.go
deleted file mode 100644
index 618002d..0000000
--- a/formats/packp/pktline/encoder_test.go
+++ /dev/null
@@ -1,249 +0,0 @@
-package pktline_test
-
-import (
- "bytes"
- "os"
- "strings"
- "testing"
-
- "gopkg.in/src-d/go-git.v4/formats/packp/pktline"
-
- . "gopkg.in/check.v1"
-)
-
-func Test(t *testing.T) { TestingT(t) }
-
-type SuiteEncoder struct{}
-
-var _ = Suite(&SuiteEncoder{})
-
-func (s *SuiteEncoder) TestFlush(c *C) {
- var buf bytes.Buffer
- e := pktline.NewEncoder(&buf)
-
- err := e.Flush()
- c.Assert(err, IsNil)
-
- obtained := buf.Bytes()
- c.Assert(obtained, DeepEquals, pktline.FlushPkt)
-}
-
-func (s *SuiteEncoder) TestEncode(c *C) {
- for i, test := range [...]struct {
- input [][]byte
- expected []byte
- }{
- {
- input: [][]byte{
- []byte("hello\n"),
- },
- expected: []byte("000ahello\n"),
- }, {
- input: [][]byte{
- []byte("hello\n"),
- pktline.Flush,
- },
- expected: []byte("000ahello\n0000"),
- }, {
- input: [][]byte{
- []byte("hello\n"),
- []byte("world!\n"),
- []byte("foo"),
- },
- expected: []byte("000ahello\n000bworld!\n0007foo"),
- }, {
- input: [][]byte{
- []byte("hello\n"),
- pktline.Flush,
- []byte("world!\n"),
- []byte("foo"),
- pktline.Flush,
- },
- expected: []byte("000ahello\n0000000bworld!\n0007foo0000"),
- }, {
- input: [][]byte{
- []byte(strings.Repeat("a", pktline.MaxPayloadSize)),
- },
- expected: []byte(
- "fff0" + strings.Repeat("a", pktline.MaxPayloadSize)),
- }, {
- input: [][]byte{
- []byte(strings.Repeat("a", pktline.MaxPayloadSize)),
- []byte(strings.Repeat("b", pktline.MaxPayloadSize)),
- },
- expected: []byte(
- "fff0" + strings.Repeat("a", pktline.MaxPayloadSize) +
- "fff0" + strings.Repeat("b", pktline.MaxPayloadSize)),
- },
- } {
- comment := Commentf("input %d = %v\n", i, test.input)
-
- var buf bytes.Buffer
- e := pktline.NewEncoder(&buf)
-
- err := e.Encode(test.input...)
- c.Assert(err, IsNil, comment)
-
- c.Assert(buf.Bytes(), DeepEquals, test.expected, comment)
- }
-}
-
-func (s *SuiteEncoder) TestEncodeErrPayloadTooLong(c *C) {
- for i, input := range [...][][]byte{
- {
- []byte(strings.Repeat("a", pktline.MaxPayloadSize+1)),
- },
- {
- []byte("hello world!"),
- []byte(strings.Repeat("a", pktline.MaxPayloadSize+1)),
- },
- {
- []byte("hello world!"),
- []byte(strings.Repeat("a", pktline.MaxPayloadSize+1)),
- []byte("foo"),
- },
- } {
- comment := Commentf("input %d = %v\n", i, input)
-
- var buf bytes.Buffer
- e := pktline.NewEncoder(&buf)
-
- err := e.Encode(input...)
- c.Assert(err, Equals, pktline.ErrPayloadTooLong, comment)
- }
-}
-
-func (s *SuiteEncoder) TestEncodeStrings(c *C) {
- for i, test := range [...]struct {
- input []string
- expected []byte
- }{
- {
- input: []string{
- "hello\n",
- },
- expected: []byte("000ahello\n"),
- }, {
- input: []string{
- "hello\n",
- pktline.FlushString,
- },
- expected: []byte("000ahello\n0000"),
- }, {
- input: []string{
- "hello\n",
- "world!\n",
- "foo",
- },
- expected: []byte("000ahello\n000bworld!\n0007foo"),
- }, {
- input: []string{
- "hello\n",
- pktline.FlushString,
- "world!\n",
- "foo",
- pktline.FlushString,
- },
- expected: []byte("000ahello\n0000000bworld!\n0007foo0000"),
- }, {
- input: []string{
- strings.Repeat("a", pktline.MaxPayloadSize),
- },
- expected: []byte(
- "fff0" + strings.Repeat("a", pktline.MaxPayloadSize)),
- }, {
- input: []string{
- strings.Repeat("a", pktline.MaxPayloadSize),
- strings.Repeat("b", pktline.MaxPayloadSize),
- },
- expected: []byte(
- "fff0" + strings.Repeat("a", pktline.MaxPayloadSize) +
- "fff0" + strings.Repeat("b", pktline.MaxPayloadSize)),
- },
- } {
- comment := Commentf("input %d = %v\n", i, test.input)
-
- var buf bytes.Buffer
- e := pktline.NewEncoder(&buf)
-
- err := e.EncodeString(test.input...)
- c.Assert(err, IsNil, comment)
- c.Assert(buf.Bytes(), DeepEquals, test.expected, comment)
- }
-}
-
-func (s *SuiteEncoder) TestEncodeStringErrPayloadTooLong(c *C) {
- for i, input := range [...][]string{
- {
- strings.Repeat("a", pktline.MaxPayloadSize+1),
- },
- {
- "hello world!",
- strings.Repeat("a", pktline.MaxPayloadSize+1),
- },
- {
- "hello world!",
- strings.Repeat("a", pktline.MaxPayloadSize+1),
- "foo",
- },
- } {
- comment := Commentf("input %d = %v\n", i, input)
-
- var buf bytes.Buffer
- e := pktline.NewEncoder(&buf)
-
- err := e.EncodeString(input...)
- c.Assert(err, Equals, pktline.ErrPayloadTooLong, comment)
- }
-}
-
-func (s *SuiteEncoder) TestEncodef(c *C) {
- format := " %s %d\n"
- str := "foo"
- d := 42
-
- var buf bytes.Buffer
- e := pktline.NewEncoder(&buf)
-
- err := e.Encodef(format, str, d)
- c.Assert(err, IsNil)
-
- expected := []byte("000c foo 42\n")
- c.Assert(buf.Bytes(), DeepEquals, expected)
-}
-
-func ExampleEncoder() {
- // Create an encoder that writes pktlines to stdout.
- e := pktline.NewEncoder(os.Stdout)
-
- // Encode some data as a new pkt-line.
- _ = e.Encode([]byte("data\n")) // error checks removed for brevity
-
- // Encode a flush-pkt.
- _ = e.Flush()
-
- // Encode a couple of byte slices and a flush in one go. Each of
- // them will end up as payloads of their own pktlines.
- _ = e.Encode(
- []byte("hello\n"),
- []byte("world!\n"),
- pktline.Flush,
- )
-
- // You can also encode strings:
- _ = e.EncodeString(
- "foo\n",
- "bar\n",
- pktline.FlushString,
- )
-
- // You can also format and encode a payload:
- _ = e.Encodef(" %s %d\n", "foo", 42)
- // Output:
- // 0009data
- // 0000000ahello
- // 000bworld!
- // 00000008foo
- // 0008bar
- // 0000000c foo 42
-}
diff --git a/formats/packp/pktline/scanner.go b/formats/packp/pktline/scanner.go
deleted file mode 100644
index 3ce2adf..0000000
--- a/formats/packp/pktline/scanner.go
+++ /dev/null
@@ -1,133 +0,0 @@
-package pktline
-
-import (
- "errors"
- "io"
-)
-
-const (
- lenSize = 4
-)
-
-// ErrInvalidPktLen is returned by Err() when an invalid pkt-len is found.
-var ErrInvalidPktLen = errors.New("invalid pkt-len found")
-
-// Scanner provides a convenient interface for reading the payloads of a
-// series of pkt-lines. It takes an io.Reader providing the source,
-// which then can be tokenized through repeated calls to the Scan
-// method.
-//
-// After each Scan call, the Bytes method will return the payload of the
-// corresponding pkt-line on a shared buffer, which will be 65516 bytes
-// or smaller. Flush pkt-lines are represented by empty byte slices.
-//
-// Scanning stops at EOF or the first I/O error.
-type Scanner struct {
- r io.Reader // The reader provided by the client
- err error // Sticky error
- payload []byte // Last pkt-payload
- len [lenSize]byte // Last pkt-len
-}
-
-// NewScanner returns a new Scanner to read from r.
-func NewScanner(r io.Reader) *Scanner {
- return &Scanner{
- r: r,
- }
-}
-
-// Err returns the first error encountered by the Scanner.
-func (s *Scanner) Err() error {
- return s.err
-}
-
-// Scan advances the Scanner to the next pkt-line, whose payload will
-// then be available through the Bytes method. Scanning stops at EOF
-// or the first I/O error. After Scan returns false, the Err method
-// will return any error that occurred during scanning, except that if
-// it was io.EOF, Err will return nil.
-func (s *Scanner) Scan() bool {
- var l int
- l, s.err = s.readPayloadLen()
- if s.err == io.EOF {
- s.err = nil
- return false
- }
- if s.err != nil {
- return false
- }
-
- if cap(s.payload) < l {
- s.payload = make([]byte, 0, l)
- }
-
- if _, s.err = io.ReadFull(s.r, s.payload[:l]); s.err != nil {
- return false
- }
- s.payload = s.payload[:l]
-
- return true
-}
-
-// Bytes returns the most recent payload generated by a call to Scan.
-// The underlying array may point to data that will be overwritten by a
-// subsequent call to Scan. It does no allocation.
-func (s *Scanner) Bytes() []byte {
- return s.payload
-}
-
-// readPayloadLen returns the payload length by reading the
-// pkt-len and subtracting the pkt-len size.
-func (s *Scanner) readPayloadLen() (int, error) {
- if _, err := io.ReadFull(s.r, s.len[:]); err != nil {
- if err == io.EOF {
- return 0, err
- }
- return 0, ErrInvalidPktLen
- }
-
- n, err := hexDecode(s.len)
- if err != nil {
- return 0, err
- }
-
- switch {
- case n == 0:
- return 0, nil
- case n <= lenSize:
- return 0, ErrInvalidPktLen
- case n > MaxPayloadSize+lenSize:
- return 0, ErrInvalidPktLen
- default:
- return n - lenSize, nil
- }
-}
-
-// Turns the hexadecimal representation of a number in a byte slice into
-// a number. This function substitutes for strconv.ParseUint(string(buf), 16,
-// 16) and/or hex.Decode, to avoid generating new strings, thus helping the
-// GC.
-func hexDecode(buf [lenSize]byte) (int, error) {
- var ret int
- for i := 0; i < lenSize; i++ {
- n, err := asciiHexToByte(buf[i])
- if err != nil {
- return 0, ErrInvalidPktLen
- }
- ret = 16*ret + int(n)
- }
- return ret, nil
-}
-
-// turns the hexadecimal ascii representation of a byte into its
-// numerical value. Example: from 'b' to 11 (0xb).
-func asciiHexToByte(b byte) (byte, error) {
- switch {
- case b >= '0' && b <= '9':
- return b - '0', nil
- case b >= 'a' && b <= 'f':
- return b - 'a' + 10, nil
- default:
- return 0, ErrInvalidPktLen
- }
-}
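
Because Bytes returns a view into the scanner's shared buffer, callers that
keep payloads across Scan calls must copy them first; a minimal sketch:

    import (
        "io"

        "gopkg.in/src-d/go-git.v4/formats/packp/pktline"
    )

    func collect(r io.Reader) ([][]byte, error) {
        var payloads [][]byte
        s := pktline.NewScanner(r)
        for s.Scan() {
            // Copy the payload: the scanner reuses its buffer on the next Scan.
            payloads = append(payloads, append([]byte(nil), s.Bytes()...))
        }
        return payloads, s.Err()
    }
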
diff --git a/formats/packp/pktline/scanner_test.go b/formats/packp/pktline/scanner_test.go
deleted file mode 100644
index b5a3c7d..0000000
--- a/formats/packp/pktline/scanner_test.go
+++ /dev/null
@@ -1,225 +0,0 @@
-package pktline_test
-
-import (
- "bytes"
- "fmt"
- "io"
- "strings"
-
- "gopkg.in/src-d/go-git.v4/formats/packp/pktline"
-
- . "gopkg.in/check.v1"
-)
-
-type SuiteScanner struct{}
-
-var _ = Suite(&SuiteScanner{})
-
-func (s *SuiteScanner) TestInvalid(c *C) {
- for _, test := range [...]string{
- "0001", "0002", "0003", "0004",
- "0001asdfsadf", "0004foo",
- "fff1", "fff2",
- "gorka",
- "0", "003",
- " 5a", "5 a", "5 \n",
- "-001", "-000",
- } {
- r := strings.NewReader(test)
- sc := pktline.NewScanner(r)
- _ = sc.Scan()
- c.Assert(sc.Err(), ErrorMatches, pktline.ErrInvalidPktLen.Error(),
- Commentf("data = %q", test))
- }
-}
-
-func (s *SuiteScanner) TestEmptyReader(c *C) {
- r := strings.NewReader("")
- sc := pktline.NewScanner(r)
- hasPayload := sc.Scan()
- c.Assert(hasPayload, Equals, false)
- c.Assert(sc.Err(), Equals, nil)
-}
-
-func (s *SuiteScanner) TestFlush(c *C) {
- var buf bytes.Buffer
- e := pktline.NewEncoder(&buf)
- err := e.Flush()
- c.Assert(err, IsNil)
-
- sc := pktline.NewScanner(&buf)
- c.Assert(sc.Scan(), Equals, true)
-
- payload := sc.Bytes()
- c.Assert(len(payload), Equals, 0)
-}
-
-func (s *SuiteScanner) TestPktLineTooShort(c *C) {
- r := strings.NewReader("010cfoobar")
-
- sc := pktline.NewScanner(r)
-
- c.Assert(sc.Scan(), Equals, false)
- c.Assert(sc.Err(), ErrorMatches, "unexpected EOF")
-}
-
-func (s *SuiteScanner) TestScanAndPayload(c *C) {
- for _, test := range [...]string{
- "a",
- "a\n",
- strings.Repeat("a", 100),
- strings.Repeat("a", 100) + "\n",
- strings.Repeat("\x00", 100),
- strings.Repeat("\x00", 100) + "\n",
- strings.Repeat("a", pktline.MaxPayloadSize),
- strings.Repeat("a", pktline.MaxPayloadSize-1) + "\n",
- } {
- var buf bytes.Buffer
- e := pktline.NewEncoder(&buf)
- err := e.EncodeString(test)
- c.Assert(err, IsNil,
- Commentf("input len=%x, contents=%.10q\n", len(test), test))
-
- sc := pktline.NewScanner(&buf)
- c.Assert(sc.Scan(), Equals, true,
- Commentf("test = %.20q...", test))
-
- obtained := sc.Bytes()
- c.Assert(obtained, DeepEquals, []byte(test),
- Commentf("in = %.20q out = %.20q", test, string(obtained)))
- }
-}
-
-func (s *SuiteScanner) TestSkip(c *C) {
- for _, test := range [...]struct {
- input []string
- n int
- expected []byte
- }{
- {
- input: []string{
- "first",
- "second",
- "third"},
- n: 1,
- expected: []byte("second"),
- },
- {
- input: []string{
- "first",
- "second",
- "third"},
- n: 2,
- expected: []byte("third"),
- },
- } {
- var buf bytes.Buffer
- e := pktline.NewEncoder(&buf)
- err := e.EncodeString(test.input...)
- c.Assert(err, IsNil)
-
- sc := pktline.NewScanner(&buf)
- for i := 0; i < test.n; i++ {
- c.Assert(sc.Scan(), Equals, true,
- Commentf("scan error = %s", sc.Err()))
- }
- c.Assert(sc.Scan(), Equals, true,
- Commentf("scan error = %s", sc.Err()))
-
- obtained := sc.Bytes()
- c.Assert(obtained, DeepEquals, test.expected,
- Commentf("\nin = %.20q\nout = %.20q\nexp = %.20q",
- test.input, obtained, test.expected))
- }
-}
-
-func (s *SuiteScanner) TestEOF(c *C) {
- var buf bytes.Buffer
- e := pktline.NewEncoder(&buf)
- err := e.EncodeString("first", "second")
- c.Assert(err, IsNil)
-
- sc := pktline.NewScanner(&buf)
- for sc.Scan() {
- }
- c.Assert(sc.Err(), IsNil)
-}
-
-// A section is several non-flush-pkt lines followed by a flush-pkt, which
-// is how the git protocol sends long messages. Note that the expected
-// line count is (1+nLines)*nSections: each section contributes its
-// nLines payloads plus one flush-pkt.
-func (s *SuiteScanner) TestReadSomeSections(c *C) {
- nSections := 2
- nLines := 4
- data := sectionsExample(c, nSections, nLines)
- sc := pktline.NewScanner(data)
-
- sectionCounter := 0
- lineCounter := 0
- for sc.Scan() {
- if len(sc.Bytes()) == 0 {
- sectionCounter++
- }
- lineCounter++
- }
- c.Assert(sc.Err(), IsNil)
- c.Assert(sectionCounter, Equals, nSections)
- c.Assert(lineCounter, Equals, (1+nLines)*nSections)
-}
-
-// returns nSections sections, each of them with nLines pkt-lines (not
-// counting the flush-pkt):
-//
-// 0009 0.0\n
-// 0009 0.1\n
-// ...
-// 0000
-// and so on
-func sectionsExample(c *C, nSections, nLines int) io.Reader {
- var buf bytes.Buffer
- e := pktline.NewEncoder(&buf)
-
- for section := 0; section < nSections; section++ {
- ss := []string{}
- for line := 0; line < nLines; line++ {
- line := fmt.Sprintf(" %d.%d\n", section, line)
- ss = append(ss, line)
- }
- err := e.EncodeString(ss...)
- c.Assert(err, IsNil)
- err = e.Flush()
- c.Assert(err, IsNil)
- }
-
- return &buf
-}
-
-func ExampleScanner() {
- // A reader is needed as input.
- input := strings.NewReader("000ahello\n" +
- "000bworld!\n" +
- "0000",
- )
-
- // Create the scanner...
- s := pktline.NewScanner(input)
-
- // and scan every pkt-line found in the input.
- for s.Scan() {
- payload := s.Bytes()
- if len(payload) == 0 { // zero sized payloads correspond to flush-pkts.
- fmt.Println("FLUSH-PKT DETECTED\n")
- } else { // otherwise, you will be able to access the full payload.
- fmt.Printf("PAYLOAD = %q\n", string(payload))
- }
- }
-
- // catch any error that occurred while reading from the input.
- if s.Err() != nil {
- fmt.Println(s.Err())
- }
-
- // Output:
- // PAYLOAD = "hello\n"
- // PAYLOAD = "world!\n"
- // FLUSH-PKT DETECTED
-}
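
The pkt-line framing used throughout these tests is simple arithmetic: the 4-digit hex prefix counts itself plus the payload. A quick standalone sketch (illustrative only, not tree code) reproducing the framing seen above, e.g. "000ahello\n" in ExampleScanner and the "0009 0.0\n" lines of sectionsExample:

package main

import "fmt"

// pktLine frames a payload with its 4-digit hex length prefix; the
// length covers the prefix itself plus the payload.
func pktLine(payload string) string {
	return fmt.Sprintf("%04x%s", 4+len(payload), payload)
}

func main() {
	fmt.Printf("%q\n", pktLine("hello\n")) // "000ahello\n": 4 + 6 = 10 = 0x000a
	fmt.Printf("%q\n", pktLine(" 0.0\n"))  // "0009 0.0\n": 4 + 5 = 9
}
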
diff --git a/formats/packp/ulreq/decoder.go b/formats/packp/ulreq/decoder.go
deleted file mode 100644
index 63ccd4d..0000000
--- a/formats/packp/ulreq/decoder.go
+++ /dev/null
@@ -1,287 +0,0 @@
-package ulreq
-
-import (
- "bytes"
- "encoding/hex"
- "fmt"
- "io"
- "strconv"
- "time"
-
- "gopkg.in/src-d/go-git.v4/core"
- "gopkg.in/src-d/go-git.v4/formats/packp/pktline"
-)
-
-const (
- hashSize = 40
-)
-
-var (
- eol = []byte("\n")
- sp = []byte(" ")
- want = []byte("want ")
- shallow = []byte("shallow ")
- deepen = []byte("deepen")
- deepenCommits = []byte("deepen ")
- deepenSince = []byte("deepen-since ")
- deepenReference = []byte("deepen-not ")
-)
-
-// A Decoder reads and decodes UlReq values from an input stream.
-type Decoder struct {
- s *pktline.Scanner // a pkt-line scanner from the input stream
- line []byte // current pkt-line contents, use decoder.nextLine() to advance
- nLine int // current pkt-line number for debugging, begins at 1
- err error // sticky error, use the decoder.error() method to fill this out
- data *UlReq // parsed data is stored here
-}
-
-// NewDecoder returns a new decoder that reads from r.
-//
-// Will not read more data from r than necessary.
-func NewDecoder(r io.Reader) *Decoder {
- return &Decoder{
- s: pktline.NewScanner(r),
- }
-}
-
-// Decode reads the next upload-request from its input and
-// stores it in the value pointed to by v.
-func (d *Decoder) Decode(v *UlReq) error {
- d.data = v
-
- for state := decodeFirstWant; state != nil; {
- state = state(d)
- }
-
- return d.err
-}
-
-type decoderStateFn func(*Decoder) decoderStateFn
-
-// fills out the decoder's sticky error
-func (d *Decoder) error(format string, a ...interface{}) {
- d.err = fmt.Errorf("pkt-line %d: %s", d.nLine,
- fmt.Sprintf(format, a...))
-}
-
-// Reads a new pkt-line from the scanner, makes its payload available as
-// d.line and increments d.nLine. A successful invocation returns true;
-// otherwise, false is returned and the sticky error is filled out
-// accordingly. Trims the EOL at the end of the payload.
-func (d *Decoder) nextLine() bool {
- d.nLine++
-
- if !d.s.Scan() {
- if d.err = d.s.Err(); d.err != nil {
- return false
- }
-
- d.error("EOF")
- return false
- }
-
- d.line = d.s.Bytes()
- d.line = bytes.TrimSuffix(d.line, eol)
-
- return true
-}
-
-// Expected format: want <hash>[ capabilities]
-func decodeFirstWant(d *Decoder) decoderStateFn {
- if ok := d.nextLine(); !ok {
- return nil
- }
-
- if !bytes.HasPrefix(d.line, want) {
- d.error("missing 'want ' prefix")
- return nil
- }
- d.line = bytes.TrimPrefix(d.line, want)
-
- hash, ok := d.readHash()
- if !ok {
- return nil
- }
- d.data.Wants = append(d.data.Wants, hash)
-
- return decodeCaps
-}
-
-func (d *Decoder) readHash() (core.Hash, bool) {
- if len(d.line) < hashSize {
- d.err = fmt.Errorf("malformed hash: %v", d.line)
- return core.ZeroHash, false
- }
-
- var hash core.Hash
- if _, err := hex.Decode(hash[:], d.line[:hashSize]); err != nil {
- d.error("invalid hash text: %s", err)
- return core.ZeroHash, false
- }
- d.line = d.line[hashSize:]
-
- return hash, true
-}
-
-// Expected format: sp cap1 sp cap2 sp cap3...
-func decodeCaps(d *Decoder) decoderStateFn {
- if len(d.line) == 0 {
- return decodeOtherWants
- }
-
- d.line = bytes.TrimPrefix(d.line, sp)
-
- for _, c := range bytes.Split(d.line, sp) {
- name, values := readCapability(c)
- d.data.Capabilities.Add(name, values...)
- }
-
- return decodeOtherWants
-}
-
-// A capability is either a single token or a name=value pair.
-// Even though we are only going to read at most one value, we return
-// a slice of values, as Capabilities.Add receives one.
-func readCapability(data []byte) (name string, values []string) {
- pair := bytes.SplitN(data, []byte{'='}, 2)
- if len(pair) == 2 {
- values = append(values, string(pair[1]))
- }
-
- return string(pair[0]), values
-}
-
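
A standalone sketch of the two shapes a capability can take (illustrative only, duplicating readCapability above so it can run on its own; the input values are made up):

package main

import (
	"bytes"
	"fmt"
)

// Standalone copy of readCapability above, for illustration.
func readCapability(data []byte) (name string, values []string) {
	pair := bytes.SplitN(data, []byte{'='}, 2)
	if len(pair) == 2 {
		values = append(values, string(pair[1]))
	}
	return string(pair[0]), values
}

func main() {
	name, values := readCapability([]byte("symref=HEAD:refs/heads/master"))
	fmt.Println(name, values) // symref [HEAD:refs/heads/master]

	name, values = readCapability([]byte("ofs-delta"))
	fmt.Println(name, values) // ofs-delta []
}
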
-// Expected format: want <hash>
-func decodeOtherWants(d *Decoder) decoderStateFn {
- if ok := d.nextLine(); !ok {
- return nil
- }
-
- if bytes.HasPrefix(d.line, shallow) {
- return decodeShallow
- }
-
- if bytes.HasPrefix(d.line, deepen) {
- return decodeDeepen
- }
-
- if len(d.line) == 0 {
- return nil
- }
-
- if !bytes.HasPrefix(d.line, want) {
- d.error("unexpected payload while expecting a want: %q", d.line)
- return nil
- }
- d.line = bytes.TrimPrefix(d.line, want)
-
- hash, ok := d.readHash()
- if !ok {
- return nil
- }
- d.data.Wants = append(d.data.Wants, hash)
-
- return decodeOtherWants
-}
-
-// Expected format: shallow <hash>
-func decodeShallow(d *Decoder) decoderStateFn {
- if bytes.HasPrefix(d.line, deepen) {
- return decodeDeepen
- }
-
- if len(d.line) == 0 {
- return nil
- }
-
- if !bytes.HasPrefix(d.line, shallow) {
- d.error("unexpected payload while expecting a shallow: %q", d.line)
- return nil
- }
- d.line = bytes.TrimPrefix(d.line, shallow)
-
- hash, ok := d.readHash()
- if !ok {
- return nil
- }
- d.data.Shallows = append(d.data.Shallows, hash)
-
- if ok := d.nextLine(); !ok {
- return nil
- }
-
- return decodeShallow
-}
-
-// Expected format: deepen <n> / deepen-since <unix-timestamp> / deepen-not <ref>
-func decodeDeepen(d *Decoder) decoderStateFn {
- if bytes.HasPrefix(d.line, deepenCommits) {
- return decodeDeepenCommits
- }
-
- if bytes.HasPrefix(d.line, deepenSince) {
- return decodeDeepenSince
- }
-
- if bytes.HasPrefix(d.line, deepenReference) {
- return decodeDeepenReference
- }
-
- if len(d.line) == 0 {
- return nil
- }
-
- d.error("unexpected deepen specification: %q", d.line)
- return nil
-}
-
-func decodeDeepenCommits(d *Decoder) decoderStateFn {
- d.line = bytes.TrimPrefix(d.line, deepenCommits)
-
- var n int
- if n, d.err = strconv.Atoi(string(d.line)); d.err != nil {
- return nil
- }
- if n < 0 {
- d.err = fmt.Errorf("negative depth")
- return nil
- }
- d.data.Depth = DepthCommits(n)
-
- return decodeFlush
-}
-
-func decodeDeepenSince(d *Decoder) decoderStateFn {
- d.line = bytes.TrimPrefix(d.line, deepenSince)
-
- var secs int64
- secs, d.err = strconv.ParseInt(string(d.line), 10, 64)
- if d.err != nil {
- return nil
- }
- t := time.Unix(secs, 0).UTC()
- d.data.Depth = DepthSince(t)
-
- return decodeFlush
-}
-
-func decodeDeepenReference(d *Decoder) decoderStateFn {
- d.line = bytes.TrimPrefix(d.line, deepenReference)
-
- d.data.Depth = DepthReference(string(d.line))
-
- return decodeFlush
-}
-
-func decodeFlush(d *Decoder) decoderStateFn {
- if ok := d.nextLine(); !ok {
- return nil
- }
-
- if len(d.line) != 0 {
- d.err = fmt.Errorf("unexpected payload while expecting a flush-pkt: %q", d.line)
- }
-
- return nil
-}
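
The Decode loop above uses the self-referential state-function pattern: each state does one piece of work and returns the next state, or nil to stop, with errors parked in a sticky field for the caller to inspect after the loop. A minimal standalone sketch of just the pattern (hypothetical state names, not tree code):

package main

import "fmt"

// stateFn mirrors decoderStateFn above: a state returns the next state,
// or nil to stop the machine.
type stateFn func(*machine) stateFn

type machine struct {
	trace []string
	err   error // sticky error, checked by the caller after the loop
}

func start(m *machine) stateFn {
	m.trace = append(m.trace, "start")
	return middle
}

func middle(m *machine) stateFn {
	m.trace = append(m.trace, "middle")
	return finish
}

func finish(m *machine) stateFn {
	m.trace = append(m.trace, "finish")
	return nil
}

func main() {
	m := &machine{}
	for state := start; state != nil; {
		state = state(m)
	}
	fmt.Println(m.trace, m.err) // [start middle finish] <nil>
}
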
diff --git a/formats/packp/ulreq/decoder_test.go b/formats/packp/ulreq/decoder_test.go
deleted file mode 100644
index b313395..0000000
--- a/formats/packp/ulreq/decoder_test.go
+++ /dev/null
@@ -1,541 +0,0 @@
-package ulreq
-
-import (
- "bytes"
- "io"
- "sort"
- "time"
-
- "gopkg.in/src-d/go-git.v4/core"
- "gopkg.in/src-d/go-git.v4/formats/packp/pktline"
-
- . "gopkg.in/check.v1"
-)
-
-type SuiteDecoder struct{}
-
-var _ = Suite(&SuiteDecoder{})
-
-func (s *SuiteDecoder) TestEmpty(c *C) {
- ur := New()
- var buf bytes.Buffer
- d := NewDecoder(&buf)
-
- err := d.Decode(ur)
- c.Assert(err, ErrorMatches, "pkt-line 1: EOF")
-}
-
-func (s *SuiteDecoder) TestNoWant(c *C) {
- payloads := []string{
- "foobar",
- pktline.FlushString,
- }
- r := toPktLines(c, payloads)
- testDecoderErrorMatches(c, r, ".*missing 'want '.*")
-}
-
-func toPktLines(c *C, payloads []string) io.Reader {
- var buf bytes.Buffer
- e := pktline.NewEncoder(&buf)
- err := e.EncodeString(payloads...)
- c.Assert(err, IsNil)
-
- return &buf
-}
-
-func testDecoderErrorMatches(c *C, input io.Reader, pattern string) {
- ur := New()
- d := NewDecoder(input)
-
- err := d.Decode(ur)
- c.Assert(err, ErrorMatches, pattern)
-}
-
-func (s *SuiteDecoder) TestInvalidFirstHash(c *C) {
- payloads := []string{
- "want 6ecf0ef2c2dffb796alberto2219af86ec6584e5\n",
- pktline.FlushString,
- }
- r := toPktLines(c, payloads)
- testDecoderErrorMatches(c, r, ".*invalid hash.*")
-}
-
-func (s *SuiteDecoder) TestWantOK(c *C) {
- payloads := []string{
- "want 1111111111111111111111111111111111111111",
- pktline.FlushString,
- }
- ur := testDecodeOK(c, payloads)
-
- c.Assert(ur.Wants, DeepEquals, []core.Hash{
- core.NewHash("1111111111111111111111111111111111111111"),
- })
-}
-
-func testDecodeOK(c *C, payloads []string) *UlReq {
- var buf bytes.Buffer
- e := pktline.NewEncoder(&buf)
- err := e.EncodeString(payloads...)
- c.Assert(err, IsNil)
-
- ur := New()
- d := NewDecoder(&buf)
-
- err = d.Decode(ur)
- c.Assert(err, IsNil)
-
- return ur
-}
-
-func (s *SuiteDecoder) TestWantWithCapabilities(c *C) {
- payloads := []string{
- "want 1111111111111111111111111111111111111111 ofs-delta multi_ack",
- pktline.FlushString,
- }
- ur := testDecodeOK(c, payloads)
- c.Assert(ur.Wants, DeepEquals, []core.Hash{
- core.NewHash("1111111111111111111111111111111111111111")})
-
- c.Assert(ur.Capabilities.Supports("ofs-delta"), Equals, true)
- c.Assert(ur.Capabilities.Supports("multi_ack"), Equals, true)
-}
-
-func (s *SuiteDecoder) TestManyWantsNoCapabilities(c *C) {
- payloads := []string{
- "want 3333333333333333333333333333333333333333",
- "want 4444444444444444444444444444444444444444",
- "want 1111111111111111111111111111111111111111",
- "want 2222222222222222222222222222222222222222",
- pktline.FlushString,
- }
- ur := testDecodeOK(c, payloads)
-
- expected := []core.Hash{
- core.NewHash("1111111111111111111111111111111111111111"),
- core.NewHash("2222222222222222222222222222222222222222"),
- core.NewHash("3333333333333333333333333333333333333333"),
- core.NewHash("4444444444444444444444444444444444444444"),
- }
-
- sort.Sort(byHash(ur.Wants))
- sort.Sort(byHash(expected))
- c.Assert(ur.Wants, DeepEquals, expected)
-}
-
-type byHash []core.Hash
-
-func (a byHash) Len() int { return len(a) }
-func (a byHash) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a byHash) Less(i, j int) bool {
- ii := [20]byte(a[i])
- jj := [20]byte(a[j])
- return bytes.Compare(ii[:], jj[:]) < 0
-}
-
-func (s *SuiteDecoder) TestManyWantsBadWant(c *C) {
- payloads := []string{
- "want 3333333333333333333333333333333333333333",
- "want 4444444444444444444444444444444444444444",
- "foo",
- "want 2222222222222222222222222222222222222222",
- pktline.FlushString,
- }
- r := toPktLines(c, payloads)
- testDecoderErrorMatches(c, r, ".*unexpected payload.*")
-}
-
-func (s *SuiteDecoder) TestManyWantsInvalidHash(c *C) {
- payloads := []string{
- "want 3333333333333333333333333333333333333333",
- "want 4444444444444444444444444444444444444444",
- "want 1234567890abcdef",
- "want 2222222222222222222222222222222222222222",
- pktline.FlushString,
- }
- r := toPktLines(c, payloads)
- testDecoderErrorMatches(c, r, ".*malformed hash.*")
-}
-
-func (s *SuiteDecoder) TestManyWantsWithCapabilities(c *C) {
- payloads := []string{
- "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
- "want 4444444444444444444444444444444444444444",
- "want 1111111111111111111111111111111111111111",
- "want 2222222222222222222222222222222222222222",
- pktline.FlushString,
- }
- ur := testDecodeOK(c, payloads)
-
- expected := []core.Hash{
- core.NewHash("1111111111111111111111111111111111111111"),
- core.NewHash("2222222222222222222222222222222222222222"),
- core.NewHash("3333333333333333333333333333333333333333"),
- core.NewHash("4444444444444444444444444444444444444444"),
- }
-
- sort.Sort(byHash(ur.Wants))
- sort.Sort(byHash(expected))
- c.Assert(ur.Wants, DeepEquals, expected)
-
- c.Assert(ur.Capabilities.Supports("ofs-delta"), Equals, true)
- c.Assert(ur.Capabilities.Supports("multi_ack"), Equals, true)
-}
-
-func (s *SuiteDecoder) TestSingleShallowSingleWant(c *C) {
- payloads := []string{
- "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
- "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
- pktline.FlushString,
- }
- ur := testDecodeOK(c, payloads)
-
- expectedWants := []core.Hash{
- core.NewHash("3333333333333333333333333333333333333333"),
- }
-
- expectedShallows := []core.Hash{
- core.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
- }
-
- c.Assert(ur.Wants, DeepEquals, expectedWants)
- c.Assert(ur.Capabilities.Supports("ofs-delta"), Equals, true)
- c.Assert(ur.Capabilities.Supports("multi_ack"), Equals, true)
-
- c.Assert(ur.Shallows, DeepEquals, expectedShallows)
-}
-
-func (s *SuiteDecoder) TestSingleShallowManyWants(c *C) {
- payloads := []string{
- "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
- "want 4444444444444444444444444444444444444444",
- "want 1111111111111111111111111111111111111111",
- "want 2222222222222222222222222222222222222222",
- "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
- pktline.FlushString,
- }
- ur := testDecodeOK(c, payloads)
-
- expectedWants := []core.Hash{
- core.NewHash("1111111111111111111111111111111111111111"),
- core.NewHash("2222222222222222222222222222222222222222"),
- core.NewHash("3333333333333333333333333333333333333333"),
- core.NewHash("4444444444444444444444444444444444444444"),
- }
- sort.Sort(byHash(expectedWants))
-
- expectedShallows := []core.Hash{
- core.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
- }
-
- sort.Sort(byHash(ur.Wants))
- c.Assert(ur.Wants, DeepEquals, expectedWants)
- c.Assert(ur.Capabilities.Supports("ofs-delta"), Equals, true)
- c.Assert(ur.Capabilities.Supports("multi_ack"), Equals, true)
-
- c.Assert(ur.Shallows, DeepEquals, expectedShallows)
-}
-
-func (s *SuiteDecoder) TestManyShallowSingleWant(c *C) {
- payloads := []string{
- "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
- "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
- "shallow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
- "shallow cccccccccccccccccccccccccccccccccccccccc",
- "shallow dddddddddddddddddddddddddddddddddddddddd",
- pktline.FlushString,
- }
- ur := testDecodeOK(c, payloads)
-
- expectedWants := []core.Hash{
- core.NewHash("3333333333333333333333333333333333333333"),
- }
-
- expectedShallows := []core.Hash{
- core.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
- core.NewHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"),
- core.NewHash("cccccccccccccccccccccccccccccccccccccccc"),
- core.NewHash("dddddddddddddddddddddddddddddddddddddddd"),
- }
- sort.Sort(byHash(expectedShallows))
-
- c.Assert(ur.Wants, DeepEquals, expectedWants)
- c.Assert(ur.Capabilities.Supports("ofs-delta"), Equals, true)
- c.Assert(ur.Capabilities.Supports("multi_ack"), Equals, true)
-
- sort.Sort(byHash(ur.Shallows))
- c.Assert(ur.Shallows, DeepEquals, expectedShallows)
-}
-
-func (s *SuiteDecoder) TestManyShallowManyWants(c *C) {
- payloads := []string{
- "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
- "want 4444444444444444444444444444444444444444",
- "want 1111111111111111111111111111111111111111",
- "want 2222222222222222222222222222222222222222",
- "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
- "shallow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
- "shallow cccccccccccccccccccccccccccccccccccccccc",
- "shallow dddddddddddddddddddddddddddddddddddddddd",
- pktline.FlushString,
- }
- ur := testDecodeOK(c, payloads)
-
- expectedWants := []core.Hash{
- core.NewHash("1111111111111111111111111111111111111111"),
- core.NewHash("2222222222222222222222222222222222222222"),
- core.NewHash("3333333333333333333333333333333333333333"),
- core.NewHash("4444444444444444444444444444444444444444"),
- }
- sort.Sort(byHash(expectedWants))
-
- expectedShallows := []core.Hash{
- core.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
- core.NewHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"),
- core.NewHash("cccccccccccccccccccccccccccccccccccccccc"),
- core.NewHash("dddddddddddddddddddddddddddddddddddddddd"),
- }
- sort.Sort(byHash(expectedShallows))
-
- sort.Sort(byHash(ur.Wants))
- c.Assert(ur.Wants, DeepEquals, expectedWants)
- c.Assert(ur.Capabilities.Supports("ofs-delta"), Equals, true)
- c.Assert(ur.Capabilities.Supports("multi_ack"), Equals, true)
-
- sort.Sort(byHash(ur.Shallows))
- c.Assert(ur.Shallows, DeepEquals, expectedShallows)
-}
-
-func (s *SuiteDecoder) TestMalformedShallow(c *C) {
- payloads := []string{
- "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
- "shalow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
- pktline.FlushString,
- }
- r := toPktLines(c, payloads)
- testDecoderErrorMatches(c, r, ".*unexpected payload.*")
-}
-
-func (s *SuiteDecoder) TestMalformedShallowHash(c *C) {
- payloads := []string{
- "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
- "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
- pktline.FlushString,
- }
- r := toPktLines(c, payloads)
- testDecoderErrorMatches(c, r, ".*malformed hash.*")
-}
-
-func (s *SuiteDecoder) TestMalformedShallowManyShallows(c *C) {
- payloads := []string{
- "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
- "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
- "shalow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
- "shallow cccccccccccccccccccccccccccccccccccccccc",
- pktline.FlushString,
- }
- r := toPktLines(c, payloads)
- testDecoderErrorMatches(c, r, ".*unexpected payload.*")
-}
-
-func (s *SuiteDecoder) TestMalformedDeepenSpec(c *C) {
- payloads := []string{
- "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
- "deepen-foo 34",
- pktline.FlushString,
- }
- r := toPktLines(c, payloads)
- testDecoderErrorMatches(c, r, ".*unexpected deepen.*")
-}
-
-func (s *SuiteDecoder) TestMalformedDeepenSingleWant(c *C) {
- payloads := []string{
- "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
- "depth 32",
- pktline.FlushString,
- }
- r := toPktLines(c, payloads)
- testDecoderErrorMatches(c, r, ".*unexpected payload.*")
-}
-
-func (s *SuiteDecoder) TestMalformedDeepenMultiWant(c *C) {
- payloads := []string{
- "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
- "want 2222222222222222222222222222222222222222",
- "depth 32",
- pktline.FlushString,
- }
- r := toPktLines(c, payloads)
- testDecoderErrorMatches(c, r, ".*unexpected payload.*")
-}
-
-func (s *SuiteDecoder) TestMalformedDeepenWithSingleShallow(c *C) {
- payloads := []string{
- "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
- "shallow 2222222222222222222222222222222222222222",
- "depth 32",
- pktline.FlushString,
- }
- r := toPktLines(c, payloads)
- testDecoderErrorMatches(c, r, ".*unexpected payload.*")
-}
-
-func (s *SuiteDecoder) TestMalformedDeepenWithMultiShallow(c *C) {
- payloads := []string{
- "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
- "shallow 2222222222222222222222222222222222222222",
- "shallow 5555555555555555555555555555555555555555",
- "depth 32",
- pktline.FlushString,
- }
- r := toPktLines(c, payloads)
- testDecoderErrorMatches(c, r, ".*unexpected payload.*")
-}
-
-func (s *SuiteDecoder) TestDeepenCommits(c *C) {
- payloads := []string{
- "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
- "deepen 1234",
- pktline.FlushString,
- }
- ur := testDecodeOK(c, payloads)
-
- c.Assert(ur.Depth, FitsTypeOf, DepthCommits(0))
- commits, ok := ur.Depth.(DepthCommits)
- c.Assert(ok, Equals, true)
- c.Assert(int(commits), Equals, 1234)
-}
-
-func (s *SuiteDecoder) TestDeepenCommitsInfiniteExplicit(c *C) {
- payloads := []string{
- "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
- "deepen 0",
- pktline.FlushString,
- }
- ur := testDecodeOK(c, payloads)
-
- c.Assert(ur.Depth, FitsTypeOf, DepthCommits(0))
- commits, ok := ur.Depth.(DepthCommits)
- c.Assert(ok, Equals, true)
- c.Assert(int(commits), Equals, 0)
-}
-
-func (s *SuiteDecoder) TestDeepenCommitsInfiniteImplicit(c *C) {
- payloads := []string{
- "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
- pktline.FlushString,
- }
- ur := testDecodeOK(c, payloads)
-
- c.Assert(ur.Depth, FitsTypeOf, DepthCommits(0))
- commits, ok := ur.Depth.(DepthCommits)
- c.Assert(ok, Equals, true)
- c.Assert(int(commits), Equals, 0)
-}
-
-func (s *SuiteDecoder) TestMalformedDeepenCommits(c *C) {
- payloads := []string{
- "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
- "deepen -32",
- pktline.FlushString,
- }
- r := toPktLines(c, payloads)
- testDecoderErrorMatches(c, r, ".*negative depth.*")
-}
-
-func (s *SuiteDecoder) TestDeepenCommitsEmpty(c *C) {
- payloads := []string{
- "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
- "deepen ",
- pktline.FlushString,
- }
- r := toPktLines(c, payloads)
- testDecoderErrorMatches(c, r, ".*invalid syntax.*")
-}
-
-func (s *SuiteDecoder) TestDeepenSince(c *C) {
- payloads := []string{
- "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
- "deepen-since 1420167845", // 2015-01-02T03:04:05+00:00
- pktline.FlushString,
- }
- ur := testDecodeOK(c, payloads)
-
- expected := time.Date(2015, time.January, 2, 3, 4, 5, 0, time.UTC)
-
- c.Assert(ur.Depth, FitsTypeOf, DepthSince(time.Now()))
- since, ok := ur.Depth.(DepthSince)
- c.Assert(ok, Equals, true)
- c.Assert(time.Time(since).Equal(expected), Equals, true,
- Commentf("obtained=%s\nexpected=%s", time.Time(since), expected))
-}
-
-func (s *SuiteDecoder) TestDeepenReference(c *C) {
- payloads := []string{
- "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
- "deepen-not refs/heads/master",
- pktline.FlushString,
- }
- ur := testDecodeOK(c, payloads)
-
- expected := "refs/heads/master"
-
- c.Assert(ur.Depth, FitsTypeOf, DepthReference(""))
- reference, ok := ur.Depth.(DepthReference)
- c.Assert(ok, Equals, true)
- c.Assert(string(reference), Equals, expected)
-}
-
-func (s *SuiteDecoder) TestAll(c *C) {
- payloads := []string{
- "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
- "want 4444444444444444444444444444444444444444",
- "want 1111111111111111111111111111111111111111",
- "want 2222222222222222222222222222222222222222",
- "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
- "shallow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
- "shallow cccccccccccccccccccccccccccccccccccccccc",
- "shallow dddddddddddddddddddddddddddddddddddddddd",
- "deepen 1234",
- pktline.FlushString,
- }
- ur := testDecodeOK(c, payloads)
-
- expectedWants := []core.Hash{
- core.NewHash("1111111111111111111111111111111111111111"),
- core.NewHash("2222222222222222222222222222222222222222"),
- core.NewHash("3333333333333333333333333333333333333333"),
- core.NewHash("4444444444444444444444444444444444444444"),
- }
- sort.Sort(byHash(expectedWants))
- sort.Sort(byHash(ur.Wants))
- c.Assert(ur.Wants, DeepEquals, expectedWants)
-
- c.Assert(ur.Capabilities.Supports("ofs-delta"), Equals, true)
- c.Assert(ur.Capabilities.Supports("multi_ack"), Equals, true)
-
- expectedShallows := []core.Hash{
- core.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
- core.NewHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"),
- core.NewHash("cccccccccccccccccccccccccccccccccccccccc"),
- core.NewHash("dddddddddddddddddddddddddddddddddddddddd"),
- }
- sort.Sort(byHash(expectedShallows))
- sort.Sort(byHash(ur.Shallows))
- c.Assert(ur.Shallows, DeepEquals, expectedShallows)
-
- c.Assert(ur.Depth, FitsTypeOf, DepthCommits(0))
- commits, ok := ur.Depth.(DepthCommits)
- c.Assert(ok, Equals, true)
- c.Assert(int(commits), Equals, 1234)
-}
-
-func (s *SuiteDecoder) TestExtraData(c *C) {
- payloads := []string{
- "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
- "deepen 32",
- "foo",
- pktline.FlushString,
- }
- r := toPktLines(c, payloads)
- testDecoderErrorMatches(c, r, ".*unexpected payload.*")
-}
diff --git a/formats/packp/ulreq/encoder.go b/formats/packp/ulreq/encoder.go
deleted file mode 100644
index 1e40b63..0000000
--- a/formats/packp/ulreq/encoder.go
+++ /dev/null
@@ -1,140 +0,0 @@
-package ulreq
-
-import (
- "fmt"
- "io"
- "sort"
- "time"
-
- "gopkg.in/src-d/go-git.v4/core"
- "gopkg.in/src-d/go-git.v4/formats/packp/pktline"
-)
-
-// An Encoder writes UlReq values to an output stream.
-type Encoder struct {
- pe *pktline.Encoder // where to write the encoded data
- data *UlReq // the data to encode
- sortedWants []string
- err error // sticky error
-}
-
-// NewEncoder returns a new encoder that writes to w.
-func NewEncoder(w io.Writer) *Encoder {
- return &Encoder{
- pe: pktline.NewEncoder(w),
- }
-}
-
-// Encode writes the UlReq encoding of v to the stream.
-//
-// All the payloads will end with a newline character. Wants and
-// shallows are sorted alphabetically. A depth of 0 means no depth
-// request is sent.
-func (e *Encoder) Encode(v *UlReq) error {
- if len(v.Wants) == 0 {
- return fmt.Errorf("empty wants provided")
- }
-
- e.data = v
- e.sortedWants = sortHashes(v.Wants)
-
- for state := encodeFirstWant; state != nil; {
- state = state(e)
- }
-
- return e.err
-}
-
-type encoderStateFn func(*Encoder) encoderStateFn
-
-func sortHashes(list []core.Hash) []string {
- sorted := make([]string, len(list))
- for i, hash := range list {
- sorted[i] = hash.String()
- }
- sort.Strings(sorted)
-
- return sorted
-}
-
-func encodeFirstWant(e *Encoder) encoderStateFn {
- var err error
- if e.data.Capabilities.IsEmpty() {
- err = e.pe.Encodef("want %s\n", e.sortedWants[0])
- } else {
- e.data.Capabilities.Sort()
- err = e.pe.Encodef(
- "want %s %s\n",
- e.sortedWants[0],
- e.data.Capabilities.String(),
- )
- }
- if err != nil {
- e.err = fmt.Errorf("encoding first want line: %s", err)
- return nil
- }
-
- return encodeAdditionalWants
-}
-
-func encodeAdditionalWants(e *Encoder) encoderStateFn {
- for _, w := range e.sortedWants[1:] {
- if err := e.pe.Encodef("want %s\n", w); err != nil {
- e.err = fmt.Errorf("encoding want %q: %s", w, err)
- return nil
- }
- }
-
- return encodeShallows
-}
-
-func encodeShallows(e *Encoder) encoderStateFn {
- sorted := sortHashes(e.data.Shallows)
- for _, s := range sorted {
- if err := e.pe.Encodef("shallow %s\n", s); err != nil {
- e.err = fmt.Errorf("encoding shallow %q: %s", s, err)
- return nil
- }
- }
-
- return encodeDepth
-}
-
-func encodeDepth(e *Encoder) encoderStateFn {
- switch depth := e.data.Depth.(type) {
- case DepthCommits:
- if depth != 0 {
- commits := int(depth)
- if err := e.pe.Encodef("deepen %d\n", commits); err != nil {
- e.err = fmt.Errorf("encoding depth %d: %s", depth, err)
- return nil
- }
- }
- case DepthSince:
- when := time.Time(depth).UTC()
- if err := e.pe.Encodef("deepen-since %d\n", when.Unix()); err != nil {
- e.err = fmt.Errorf("encoding depth %s: %s", when, err)
- return nil
- }
- case DepthReference:
- reference := string(depth)
- if err := e.pe.Encodef("deepen-not %s\n", reference); err != nil {
- e.err = fmt.Errorf("encoding depth %s: %s", reference, err)
- return nil
- }
- default:
- e.err = fmt.Errorf("unsupported depth type")
- return nil
- }
-
- return encodeFlush
-}
-
-func encodeFlush(e *Encoder) encoderStateFn {
- if err := e.pe.Flush(); err != nil {
- e.err = fmt.Errorf("encoding flush-pkt: %s", err)
- return nil
- }
-
- return nil
-}
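
Note the design choice in sortHashes: wants and shallows are encoded in lexicographic order of their hex strings, which makes the output deterministic and lets the tests below compare encoded bytes directly. A quick standalone check of that ordering (illustrative only):

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Hex-encoded hashes sort lexicographically as plain strings.
	wants := []string{
		"4444444444444444444444444444444444444444",
		"1111111111111111111111111111111111111111",
		"3333333333333333333333333333333333333333",
		"2222222222222222222222222222222222222222",
	}
	sort.Strings(wants)
	for _, w := range wants {
		fmt.Println("want", w)
	}
	// Prints the wants in ascending order: 1111..., 2222..., 3333..., 4444...
}
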
diff --git a/formats/packp/ulreq/encoder_test.go b/formats/packp/ulreq/encoder_test.go
deleted file mode 100644
index 56a8c2a..0000000
--- a/formats/packp/ulreq/encoder_test.go
+++ /dev/null
@@ -1,268 +0,0 @@
-package ulreq
-
-import (
- "bytes"
- "time"
-
- "gopkg.in/src-d/go-git.v4/core"
- "gopkg.in/src-d/go-git.v4/formats/packp/pktline"
-
- . "gopkg.in/check.v1"
-)
-
-type SuiteEncoder struct{}
-
-var _ = Suite(&SuiteEncoder{})
-
-// returns a byte slice with the pkt-lines for the given payloads.
-func pktlines(c *C, payloads ...string) []byte {
- var buf bytes.Buffer
- e := pktline.NewEncoder(&buf)
-
- err := e.EncodeString(payloads...)
- c.Assert(err, IsNil, Commentf("building pktlines for %v\n", payloads))
-
- return buf.Bytes()
-}
-
-func testEncode(c *C, ur *UlReq, expectedPayloads []string) {
- var buf bytes.Buffer
- e := NewEncoder(&buf)
-
- err := e.Encode(ur)
- c.Assert(err, IsNil)
- obtained := buf.Bytes()
-
- expected := pktlines(c, expectedPayloads...)
-
- comment := Commentf("\nobtained = %s\nexpected = %s\n", string(obtained), string(expected))
-
- c.Assert(obtained, DeepEquals, expected, comment)
-}
-
-func testEncodeError(c *C, ur *UlReq, expectedErrorRegEx string) {
- var buf bytes.Buffer
- e := NewEncoder(&buf)
-
- err := e.Encode(ur)
- c.Assert(err, ErrorMatches, expectedErrorRegEx)
-}
-
-func (s *SuiteEncoder) TestZeroValue(c *C) {
- ur := New()
- expectedErrorRegEx := ".*empty wants.*"
-
- testEncodeError(c, ur, expectedErrorRegEx)
-}
-
-func (s *SuiteEncoder) TestOneWant(c *C) {
- ur := New()
- ur.Wants = append(ur.Wants, core.NewHash("1111111111111111111111111111111111111111"))
-
- expected := []string{
- "want 1111111111111111111111111111111111111111\n",
- pktline.FlushString,
- }
-
- testEncode(c, ur, expected)
-}
-
-func (s *SuiteEncoder) TestOneWantWithCapabilities(c *C) {
- ur := New()
- ur.Wants = append(ur.Wants, core.NewHash("1111111111111111111111111111111111111111"))
- ur.Capabilities.Add("sysref", "HEAD:/refs/heads/master")
- ur.Capabilities.Add("multi_ack")
- ur.Capabilities.Add("thin-pack")
- ur.Capabilities.Add("side-band")
- ur.Capabilities.Add("ofs-delta")
-
- expected := []string{
- "want 1111111111111111111111111111111111111111 multi_ack ofs-delta side-band sysref=HEAD:/refs/heads/master thin-pack\n",
- pktline.FlushString,
- }
-
- testEncode(c, ur, expected)
-}
-
-func (s *SuiteEncoder) TestWants(c *C) {
- ur := New()
- ur.Wants = append(ur.Wants, core.NewHash("4444444444444444444444444444444444444444"))
- ur.Wants = append(ur.Wants, core.NewHash("1111111111111111111111111111111111111111"))
- ur.Wants = append(ur.Wants, core.NewHash("3333333333333333333333333333333333333333"))
- ur.Wants = append(ur.Wants, core.NewHash("2222222222222222222222222222222222222222"))
- ur.Wants = append(ur.Wants, core.NewHash("5555555555555555555555555555555555555555"))
-
- expected := []string{
- "want 1111111111111111111111111111111111111111\n",
- "want 2222222222222222222222222222222222222222\n",
- "want 3333333333333333333333333333333333333333\n",
- "want 4444444444444444444444444444444444444444\n",
- "want 5555555555555555555555555555555555555555\n",
- pktline.FlushString,
- }
-
- testEncode(c, ur, expected)
-}
-
-func (s *SuiteEncoder) TestWantsWithCapabilities(c *C) {
- ur := New()
- ur.Wants = append(ur.Wants, core.NewHash("4444444444444444444444444444444444444444"))
- ur.Wants = append(ur.Wants, core.NewHash("1111111111111111111111111111111111111111"))
- ur.Wants = append(ur.Wants, core.NewHash("3333333333333333333333333333333333333333"))
- ur.Wants = append(ur.Wants, core.NewHash("2222222222222222222222222222222222222222"))
- ur.Wants = append(ur.Wants, core.NewHash("5555555555555555555555555555555555555555"))
-
- ur.Capabilities.Add("sysref", "HEAD:/refs/heads/master")
- ur.Capabilities.Add("multi_ack")
- ur.Capabilities.Add("thin-pack")
- ur.Capabilities.Add("side-band")
- ur.Capabilities.Add("ofs-delta")
-
- expected := []string{
- "want 1111111111111111111111111111111111111111 multi_ack ofs-delta side-band sysref=HEAD:/refs/heads/master thin-pack\n",
- "want 2222222222222222222222222222222222222222\n",
- "want 3333333333333333333333333333333333333333\n",
- "want 4444444444444444444444444444444444444444\n",
- "want 5555555555555555555555555555555555555555\n",
- pktline.FlushString,
- }
-
- testEncode(c, ur, expected)
-}
-
-func (s *SuiteEncoder) TestShallow(c *C) {
- ur := New()
- ur.Wants = append(ur.Wants, core.NewHash("1111111111111111111111111111111111111111"))
- ur.Capabilities.Add("multi_ack")
- ur.Shallows = append(ur.Shallows, core.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"))
-
- expected := []string{
- "want 1111111111111111111111111111111111111111 multi_ack\n",
- "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n",
- pktline.FlushString,
- }
-
- testEncode(c, ur, expected)
-}
-
-func (s *SuiteEncoder) TestManyShallows(c *C) {
- ur := New()
- ur.Wants = append(ur.Wants, core.NewHash("1111111111111111111111111111111111111111"))
- ur.Capabilities.Add("multi_ack")
- ur.Shallows = append(ur.Shallows, core.NewHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"))
- ur.Shallows = append(ur.Shallows, core.NewHash("dddddddddddddddddddddddddddddddddddddddd"))
- ur.Shallows = append(ur.Shallows, core.NewHash("cccccccccccccccccccccccccccccccccccccccc"))
- ur.Shallows = append(ur.Shallows, core.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"))
-
- expected := []string{
- "want 1111111111111111111111111111111111111111 multi_ack\n",
- "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n",
- "shallow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n",
- "shallow cccccccccccccccccccccccccccccccccccccccc\n",
- "shallow dddddddddddddddddddddddddddddddddddddddd\n",
- pktline.FlushString,
- }
-
- testEncode(c, ur, expected)
-}
-
-func (s *SuiteEncoder) TestDepthCommits(c *C) {
- ur := New()
- ur.Wants = append(ur.Wants, core.NewHash("1111111111111111111111111111111111111111"))
- ur.Depth = DepthCommits(1234)
-
- expected := []string{
- "want 1111111111111111111111111111111111111111\n",
- "deepen 1234\n",
- pktline.FlushString,
- }
-
- testEncode(c, ur, expected)
-}
-
-func (s *SuiteEncoder) TestDepthSinceUTC(c *C) {
- ur := New()
- ur.Wants = append(ur.Wants, core.NewHash("1111111111111111111111111111111111111111"))
- since := time.Date(2015, time.January, 2, 3, 4, 5, 0, time.UTC)
- ur.Depth = DepthSince(since)
-
- expected := []string{
- "want 1111111111111111111111111111111111111111\n",
- "deepen-since 1420167845\n",
- pktline.FlushString,
- }
-
- testEncode(c, ur, expected)
-}
-
-func (s *SuiteEncoder) TestDepthSinceNonUTC(c *C) {
- ur := New()
- ur.Wants = append(ur.Wants, core.NewHash("1111111111111111111111111111111111111111"))
- berlin, err := time.LoadLocation("Europe/Berlin")
- c.Assert(err, IsNil)
- since := time.Date(2015, time.January, 2, 3, 4, 5, 0, berlin)
- // since is 2015-01-02 03:04:05 +0100 (Europe/Berlin), which is
- // 2015-01-02 02:04:05 +0000 UTC, i.e. 1420164245 Unix seconds.
- ur.Depth = DepthSince(since)
-
- expected := []string{
- "want 1111111111111111111111111111111111111111\n",
- "deepen-since 1420164245\n",
- pktline.FlushString,
- }
-
- testEncode(c, ur, expected)
-}
-
-func (s *SuiteEncoder) TestDepthReference(c *C) {
- ur := New()
- ur.Wants = append(ur.Wants, core.NewHash("1111111111111111111111111111111111111111"))
- ur.Depth = DepthReference("refs/heads/feature-foo")
-
- expected := []string{
- "want 1111111111111111111111111111111111111111\n",
- "deepen-not refs/heads/feature-foo\n",
- pktline.FlushString,
- }
-
- testEncode(c, ur, expected)
-}
-
-func (s *SuiteEncoder) TestAll(c *C) {
- ur := New()
- ur.Wants = append(ur.Wants, core.NewHash("4444444444444444444444444444444444444444"))
- ur.Wants = append(ur.Wants, core.NewHash("1111111111111111111111111111111111111111"))
- ur.Wants = append(ur.Wants, core.NewHash("3333333333333333333333333333333333333333"))
- ur.Wants = append(ur.Wants, core.NewHash("2222222222222222222222222222222222222222"))
- ur.Wants = append(ur.Wants, core.NewHash("5555555555555555555555555555555555555555"))
-
- ur.Capabilities.Add("sysref", "HEAD:/refs/heads/master")
- ur.Capabilities.Add("multi_ack")
- ur.Capabilities.Add("thin-pack")
- ur.Capabilities.Add("side-band")
- ur.Capabilities.Add("ofs-delta")
-
- ur.Shallows = append(ur.Shallows, core.NewHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"))
- ur.Shallows = append(ur.Shallows, core.NewHash("dddddddddddddddddddddddddddddddddddddddd"))
- ur.Shallows = append(ur.Shallows, core.NewHash("cccccccccccccccccccccccccccccccccccccccc"))
- ur.Shallows = append(ur.Shallows, core.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"))
-
- since := time.Date(2015, time.January, 2, 3, 4, 5, 0, time.UTC)
- ur.Depth = DepthSince(since)
-
- expected := []string{
- "want 1111111111111111111111111111111111111111 multi_ack ofs-delta side-band sysref=HEAD:/refs/heads/master thin-pack\n",
- "want 2222222222222222222222222222222222222222\n",
- "want 3333333333333333333333333333333333333333\n",
- "want 4444444444444444444444444444444444444444\n",
- "want 5555555555555555555555555555555555555555\n",
- "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n",
- "shallow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n",
- "shallow cccccccccccccccccccccccccccccccccccccccc\n",
- "shallow dddddddddddddddddddddddddddddddddddddddd\n",
- "deepen-since 1420167845\n",
- pktline.FlushString,
- }
-
- testEncode(c, ur, expected)
-}
diff --git a/formats/packp/ulreq/ulreq.go b/formats/packp/ulreq/ulreq.go
deleted file mode 100644
index e47450a..0000000
--- a/formats/packp/ulreq/ulreq.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Package ulreq implements encoding and decoding of the upload-request
-// messages sent to a git-upload-pack command.
-package ulreq
-
-import (
- "time"
-
- "gopkg.in/src-d/go-git.v4/core"
- "gopkg.in/src-d/go-git.v4/formats/packp"
-)
-
-// UlReq values represent the information transmitted in an
-// upload-request message. Values of this type are not zero-value
-// safe; use the New function instead.
-type UlReq struct {
- Capabilities *packp.Capabilities
- Wants []core.Hash
- Shallows []core.Hash
- Depth Depth
-}
-
-// Depth values store the desired depth of the requested packfile: see
-// DepthCommits, DepthSince and DepthReference.
-type Depth interface {
- isDepth()
-}
-
-// DepthCommits values store the maximum number of requested commits in
-// the packfile. Zero means infinite. A negative value will have
-// undefined consequences.
-type DepthCommits int
-
-func (d DepthCommits) isDepth() {}
-
-// DepthSince values request only commits newer than the specified time.
-type DepthSince time.Time
-
-func (d DepthSince) isDepth() {}
-
-// DepthReference values request only commits not found in the specified reference.
-type DepthReference string
-
-func (d DepthReference) isDepth() {}
-
-// New returns a pointer to a new UlReq value, ready to be used. It has
-// no capabilities, wants or shallows, and an infinite depth. Note that,
-// to be encoded, an upload-request must contain at least one wanted
-// hash.
-func New() *UlReq {
- return &UlReq{
- Capabilities: packp.NewCapabilities(),
- Wants: []core.Hash{},
- Shallows: []core.Hash{},
- Depth: DepthCommits(0),
- }
-}
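
Because isDepth is unexported, the set of Depth implementations is closed to the three types above, so a type switch over a Depth can be exhaustive. A standalone sketch (with local copies of the types, for illustration) of how an encoder-like consumer might dispatch on them:

package main

import (
	"fmt"
	"time"
)

// Local copies of the Depth variants above, so the sketch runs on its own.
type Depth interface{ isDepth() }

type DepthCommits int

func (DepthCommits) isDepth() {}

type DepthSince time.Time

func (DepthSince) isDepth() {}

type DepthReference string

func (DepthReference) isDepth() {}

// describe maps each variant to the deepen line it would produce;
// DepthCommits(0) is the "infinite" default and produces none.
func describe(d Depth) string {
	switch v := d.(type) {
	case DepthCommits:
		if v == 0 {
			return "(no deepen line: infinite depth)"
		}
		return fmt.Sprintf("deepen %d", int(v))
	case DepthSince:
		return fmt.Sprintf("deepen-since %d", time.Time(v).Unix())
	case DepthReference:
		return fmt.Sprintf("deepen-not %s", string(v))
	}
	return "unknown depth type"
}

func main() {
	fmt.Println(describe(DepthCommits(0)))
	fmt.Println(describe(DepthCommits(7)))
	fmt.Println(describe(DepthReference("refs/heads/master")))
}
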
diff --git a/formats/packp/ulreq/ulreq_test.go b/formats/packp/ulreq/ulreq_test.go
deleted file mode 100644
index 2c5e85a..0000000
--- a/formats/packp/ulreq/ulreq_test.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package ulreq
-
-import (
- "fmt"
- "os"
- "strings"
- "testing"
- "time"
-
- "gopkg.in/src-d/go-git.v4/core"
- "gopkg.in/src-d/go-git.v4/formats/packp/pktline"
-
- . "gopkg.in/check.v1"
-)
-
-func Test(t *testing.T) { TestingT(t) }
-
-func ExampleEncoder_Encode() {
- // Create an empty UlReq and fill it with the contents you want...
- ur := New()
-
- // Add a couple of wants
- ur.Wants = append(ur.Wants, core.NewHash("3333333333333333333333333333333333333333"))
- ur.Wants = append(ur.Wants, core.NewHash("1111111111111111111111111111111111111111"))
- ur.Wants = append(ur.Wants, core.NewHash("2222222222222222222222222222222222222222"))
-
- // And some capabilities you would like the server to use
- ur.Capabilities.Add("sysref", "HEAD:/refs/heads/master")
- ur.Capabilities.Add("ofs-delta")
-
- // Add a couple of shallows
- ur.Shallows = append(ur.Shallows, core.NewHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"))
- ur.Shallows = append(ur.Shallows, core.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"))
-
- // And restrict the server's answer to commits newer than "2015-01-02 03:04:05 UTC"
- since := time.Date(2015, time.January, 2, 3, 4, 5, 0, time.UTC)
- ur.Depth = DepthSince(since)
-
- // Create a new Encoder writing to stdout...
- e := NewEncoder(os.Stdout)
- // ...and encode the upload-request to it.
- _ = e.Encode(ur) // ignoring errors for brevity
- // Output:
- // 005bwant 1111111111111111111111111111111111111111 ofs-delta sysref=HEAD:/refs/heads/master
- // 0032want 2222222222222222222222222222222222222222
- // 0032want 3333333333333333333333333333333333333333
- // 0035shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
- // 0035shallow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
- // 001cdeepen-since 1420167845
- // 0000
-}
-
-func ExampleDecoder_Decode() {
- // Here is a raw upload-request message.
- raw := "" +
- "005bwant 1111111111111111111111111111111111111111 ofs-delta sysref=HEAD:/refs/heads/master\n" +
- "0032want 2222222222222222222222222222222222222222\n" +
- "0032want 3333333333333333333333333333333333333333\n" +
- "0035shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n" +
- "0035shallow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n" +
- "001cdeepen-since 1420167845\n" + // 2015-01-02 03:04:05 +0000 UTC
- pktline.FlushString
-
- // Use the raw message as our input.
- input := strings.NewReader(raw)
-
- // Create the Decoder reading from our input.
- d := NewDecoder(input)
-
- // Decode the input into a newly allocated UlReq value.
- ur := New()
- _ = d.Decode(ur) // error check ignored for brevity
-
- // Do something interesting with the UlReq, e.g. print its contents.
- fmt.Println("capabilities =", ur.Capabilities.String())
- fmt.Println("wants =", ur.Wants)
- fmt.Println("shallows =", ur.Shallows)
- switch depth := ur.Depth.(type) {
- case DepthCommits:
- fmt.Println("depth =", int(depth))
- case DepthSince:
- fmt.Println("depth =", time.Time(depth))
- case DepthReference:
- fmt.Println("depth =", string(depth))
- }
- // Output:
- // capabilities = ofs-delta sysref=HEAD:/refs/heads/master
- // wants = [1111111111111111111111111111111111111111 2222222222222222222222222222222222222222 3333333333333333333333333333333333333333]
- // shallows = [aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb]
- // depth = 2015-01-02 03:04:05 +0000 UTC
-}