Diffstat (limited to 'vendor')
-rw-r--r--  vendor/github.com/cpuguy83/go-md2man/LICENSE.md | 21
-rw-r--r--  vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go | 20
-rw-r--r--  vendor/github.com/cpuguy83/go-md2man/md2man/roff.go | 285
-rw-r--r--  vendor/github.com/russross/blackfriday/.gitignore | 8
-rw-r--r--  vendor/github.com/russross/blackfriday/.travis.yml | 30
-rw-r--r--  vendor/github.com/russross/blackfriday/LICENSE.txt | 29
-rw-r--r--  vendor/github.com/russross/blackfriday/README.md | 363
-rw-r--r--  vendor/github.com/russross/blackfriday/block.go | 1450
-rw-r--r--  vendor/github.com/russross/blackfriday/doc.go | 32
-rw-r--r--  vendor/github.com/russross/blackfriday/html.go | 950
-rw-r--r--  vendor/github.com/russross/blackfriday/inline.go | 1154
-rw-r--r--  vendor/github.com/russross/blackfriday/latex.go | 332
-rw-r--r--  vendor/github.com/russross/blackfriday/markdown.go | 931
-rw-r--r--  vendor/github.com/russross/blackfriday/smartypants.go | 430
-rw-r--r--  vendor/github.com/spf13/cobra/doc/man_docs.go | 236
-rw-r--r--  vendor/github.com/spf13/cobra/doc/man_docs.md | 31
-rw-r--r--  vendor/github.com/spf13/cobra/doc/md_docs.go | 159
-rw-r--r--  vendor/github.com/spf13/cobra/doc/md_docs.md | 115
-rw-r--r--  vendor/github.com/spf13/cobra/doc/rest_docs.go | 185
-rw-r--r--  vendor/github.com/spf13/cobra/doc/rest_docs.md | 114
-rw-r--r--  vendor/github.com/spf13/cobra/doc/util.go | 51
-rw-r--r--  vendor/github.com/spf13/cobra/doc/yaml_docs.go | 169
-rw-r--r--  vendor/github.com/spf13/cobra/doc/yaml_docs.md | 112
-rw-r--r--  vendor/gopkg.in/yaml.v2/.travis.yml | 12
-rw-r--r--  vendor/gopkg.in/yaml.v2/LICENSE | 201
-rw-r--r--  vendor/gopkg.in/yaml.v2/LICENSE.libyaml | 31
-rw-r--r--  vendor/gopkg.in/yaml.v2/NOTICE | 13
-rw-r--r--  vendor/gopkg.in/yaml.v2/README.md | 133
-rw-r--r--  vendor/gopkg.in/yaml.v2/apic.go | 739
-rw-r--r--  vendor/gopkg.in/yaml.v2/decode.go | 775
-rw-r--r--  vendor/gopkg.in/yaml.v2/emitterc.go | 1685
-rw-r--r--  vendor/gopkg.in/yaml.v2/encode.go | 362
-rw-r--r--  vendor/gopkg.in/yaml.v2/go.mod | 5
-rw-r--r--  vendor/gopkg.in/yaml.v2/parserc.go | 1095
-rw-r--r--  vendor/gopkg.in/yaml.v2/readerc.go | 412
-rw-r--r--  vendor/gopkg.in/yaml.v2/resolve.go | 258
-rw-r--r--  vendor/gopkg.in/yaml.v2/scannerc.go | 2696
-rw-r--r--  vendor/gopkg.in/yaml.v2/sorter.go | 113
-rw-r--r--  vendor/gopkg.in/yaml.v2/writerc.go | 26
-rw-r--r--  vendor/gopkg.in/yaml.v2/yaml.go | 466
-rw-r--r--  vendor/gopkg.in/yaml.v2/yamlh.go | 738
-rw-r--r--  vendor/gopkg.in/yaml.v2/yamlprivateh.go | 173
42 files changed, 17140 insertions(+), 0 deletions(-)
diff --git a/vendor/github.com/cpuguy83/go-md2man/LICENSE.md b/vendor/github.com/cpuguy83/go-md2man/LICENSE.md
new file mode 100644
index 00000000..1cade6ce
--- /dev/null
+++ b/vendor/github.com/cpuguy83/go-md2man/LICENSE.md
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Brian Goff
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go
new file mode 100644
index 00000000..af62279a
--- /dev/null
+++ b/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go
@@ -0,0 +1,20 @@
+package md2man
+
+import (
+ "github.com/russross/blackfriday"
+)
+
+// Render converts a markdown document into a roff formatted document.
+func Render(doc []byte) []byte {
+ renderer := RoffRenderer(0)
+ extensions := 0
+ extensions |= blackfriday.EXTENSION_NO_INTRA_EMPHASIS
+ extensions |= blackfriday.EXTENSION_TABLES
+ extensions |= blackfriday.EXTENSION_FENCED_CODE
+ extensions |= blackfriday.EXTENSION_AUTOLINK
+ extensions |= blackfriday.EXTENSION_SPACE_HEADERS
+ extensions |= blackfriday.EXTENSION_FOOTNOTES
+ extensions |= blackfriday.EXTENSION_TITLEBLOCK
+
+ return blackfriday.Markdown(doc, renderer, extensions)
+}
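
The `Render` function above is the package's entire public surface: it fixes the extension set and pairs it with the roff renderer defined in `roff.go` below. A minimal usage sketch, assuming the vendored import path; the `main` wrapper and the sample document are illustrative, not part of the vendored code:

```go
package main

import (
	"fmt"

	"github.com/cpuguy83/go-md2man/md2man"
)

func main() {
	// The leading "% NAME(SECTION) ..." title block becomes the .TH
	// macro, and "# NAME" becomes an .SH section header (see roff.go).
	doc := []byte("% EXAMPLE(1) Example Manual\n\n# NAME\n\nexample - do one thing\n")
	fmt.Println(string(md2man.Render(doc)))
}
```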
diff --git a/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go
new file mode 100644
index 00000000..8c29ec68
--- /dev/null
+++ b/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go
@@ -0,0 +1,285 @@
+package md2man
+
+import (
+ "bytes"
+ "fmt"
+ "html"
+ "strings"
+
+ "github.com/russross/blackfriday"
+)
+
+type roffRenderer struct {
+ ListCounters []int
+}
+
+// RoffRenderer creates a new blackfriday Renderer for generating roff documents
+// from markdown
+func RoffRenderer(flags int) blackfriday.Renderer {
+ return &roffRenderer{}
+}
+
+func (r *roffRenderer) GetFlags() int {
+ return 0
+}
+
+func (r *roffRenderer) TitleBlock(out *bytes.Buffer, text []byte) {
+ out.WriteString(".TH ")
+
+ splitText := bytes.Split(text, []byte("\n"))
+ for i, line := range splitText {
+ line = bytes.TrimPrefix(line, []byte("% "))
+ if i == 0 {
+ line = bytes.Replace(line, []byte("("), []byte("\" \""), 1)
+ line = bytes.Replace(line, []byte(")"), []byte("\" \""), 1)
+ }
+ line = append([]byte("\""), line...)
+ line = append(line, []byte("\" ")...)
+ out.Write(line)
+ }
+ out.WriteString("\n")
+
+ // disable hyphenation
+ out.WriteString(".nh\n")
+ // disable justification (adjust text to left margin only)
+ out.WriteString(".ad l\n")
+}
+
+func (r *roffRenderer) BlockCode(out *bytes.Buffer, text []byte, lang string) {
+ out.WriteString("\n.PP\n.RS\n\n.nf\n")
+ escapeSpecialChars(out, text)
+ out.WriteString("\n.fi\n.RE\n")
+}
+
+func (r *roffRenderer) BlockQuote(out *bytes.Buffer, text []byte) {
+ out.WriteString("\n.PP\n.RS\n")
+ out.Write(text)
+ out.WriteString("\n.RE\n")
+}
+
+func (r *roffRenderer) BlockHtml(out *bytes.Buffer, text []byte) { // nolint: golint
+ out.Write(text)
+}
+
+func (r *roffRenderer) Header(out *bytes.Buffer, text func() bool, level int, id string) {
+ marker := out.Len()
+
+ switch {
+ case marker == 0:
+ // This is the doc header
+ out.WriteString(".TH ")
+ case level == 1:
+ out.WriteString("\n\n.SH ")
+ case level == 2:
+ out.WriteString("\n.SH ")
+ default:
+ out.WriteString("\n.SS ")
+ }
+
+ if !text() {
+ out.Truncate(marker)
+ return
+ }
+}
+
+func (r *roffRenderer) HRule(out *bytes.Buffer) {
+ out.WriteString("\n.ti 0\n\\l'\\n(.lu'\n")
+}
+
+func (r *roffRenderer) List(out *bytes.Buffer, text func() bool, flags int) {
+ marker := out.Len()
+ r.ListCounters = append(r.ListCounters, 1)
+ out.WriteString("\n.RS\n")
+ if !text() {
+ out.Truncate(marker)
+ return
+ }
+ r.ListCounters = r.ListCounters[:len(r.ListCounters)-1]
+ out.WriteString("\n.RE\n")
+}
+
+func (r *roffRenderer) ListItem(out *bytes.Buffer, text []byte, flags int) {
+ if flags&blackfriday.LIST_TYPE_ORDERED != 0 {
+ out.WriteString(fmt.Sprintf(".IP \"%3d.\" 5\n", r.ListCounters[len(r.ListCounters)-1]))
+ r.ListCounters[len(r.ListCounters)-1]++
+ } else {
+ out.WriteString(".IP \\(bu 2\n")
+ }
+ out.Write(text)
+ out.WriteString("\n")
+}
+
+func (r *roffRenderer) Paragraph(out *bytes.Buffer, text func() bool) {
+ marker := out.Len()
+ out.WriteString("\n.PP\n")
+ if !text() {
+ out.Truncate(marker)
+ return
+ }
+ if marker != 0 {
+ out.WriteString("\n")
+ }
+}
+
+func (r *roffRenderer) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) {
+ out.WriteString("\n.TS\nallbox;\n")
+
+ maxDelims := 0
+ lines := strings.Split(strings.TrimRight(string(header), "\n")+"\n"+strings.TrimRight(string(body), "\n"), "\n")
+ for _, w := range lines {
+ curDelims := strings.Count(w, "\t")
+ if curDelims > maxDelims {
+ maxDelims = curDelims
+ }
+ }
+ out.Write([]byte(strings.Repeat("l ", maxDelims+1) + "\n"))
+ out.Write([]byte(strings.Repeat("l ", maxDelims+1) + ".\n"))
+ out.Write(header)
+ if len(header) > 0 {
+ out.Write([]byte("\n"))
+ }
+
+ out.Write(body)
+ out.WriteString("\n.TE\n")
+}
+
+func (r *roffRenderer) TableRow(out *bytes.Buffer, text []byte) {
+ if out.Len() > 0 {
+ out.WriteString("\n")
+ }
+ out.Write(text)
+}
+
+func (r *roffRenderer) TableHeaderCell(out *bytes.Buffer, text []byte, align int) {
+ if out.Len() > 0 {
+ out.WriteString("\t")
+ }
+ if len(text) == 0 {
+ text = []byte{' '}
+ }
+ out.Write([]byte("\\fB\\fC" + string(text) + "\\fR"))
+}
+
+func (r *roffRenderer) TableCell(out *bytes.Buffer, text []byte, align int) {
+ if out.Len() > 0 {
+ out.WriteString("\t")
+ }
+ if len(text) > 30 {
+ text = append([]byte("T{\n"), text...)
+ text = append(text, []byte("\nT}")...)
+ }
+ if len(text) == 0 {
+ text = []byte{' '}
+ }
+ out.Write(text)
+}
+
+func (r *roffRenderer) Footnotes(out *bytes.Buffer, text func() bool) {
+
+}
+
+func (r *roffRenderer) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) {
+
+}
+
+func (r *roffRenderer) AutoLink(out *bytes.Buffer, link []byte, kind int) {
+ out.WriteString("\n\\[la]")
+ out.Write(link)
+ out.WriteString("\\[ra]")
+}
+
+func (r *roffRenderer) CodeSpan(out *bytes.Buffer, text []byte) {
+ out.WriteString("\\fB\\fC")
+ escapeSpecialChars(out, text)
+ out.WriteString("\\fR")
+}
+
+func (r *roffRenderer) DoubleEmphasis(out *bytes.Buffer, text []byte) {
+ out.WriteString("\\fB")
+ out.Write(text)
+ out.WriteString("\\fP")
+}
+
+func (r *roffRenderer) Emphasis(out *bytes.Buffer, text []byte) {
+ out.WriteString("\\fI")
+ out.Write(text)
+ out.WriteString("\\fP")
+}
+
+func (r *roffRenderer) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) {
+}
+
+func (r *roffRenderer) LineBreak(out *bytes.Buffer) {
+ out.WriteString("\n.br\n")
+}
+
+func (r *roffRenderer) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) {
+ out.Write(content)
+ r.AutoLink(out, link, 0)
+}
+
+func (r *roffRenderer) RawHtmlTag(out *bytes.Buffer, tag []byte) { // nolint: golint
+ out.Write(tag)
+}
+
+func (r *roffRenderer) TripleEmphasis(out *bytes.Buffer, text []byte) {
+ out.WriteString("\\s+2")
+ out.Write(text)
+ out.WriteString("\\s-2")
+}
+
+func (r *roffRenderer) StrikeThrough(out *bytes.Buffer, text []byte) {
+}
+
+func (r *roffRenderer) FootnoteRef(out *bytes.Buffer, ref []byte, id int) {
+
+}
+
+func (r *roffRenderer) Entity(out *bytes.Buffer, entity []byte) {
+ out.WriteString(html.UnescapeString(string(entity)))
+}
+
+func (r *roffRenderer) NormalText(out *bytes.Buffer, text []byte) {
+ escapeSpecialChars(out, text)
+}
+
+func (r *roffRenderer) DocumentHeader(out *bytes.Buffer) {
+}
+
+func (r *roffRenderer) DocumentFooter(out *bytes.Buffer) {
+}
+
+func needsBackslash(c byte) bool {
+ for _, r := range []byte("-_&\\~") {
+ if c == r {
+ return true
+ }
+ }
+ return false
+}
+
+func escapeSpecialChars(out *bytes.Buffer, text []byte) {
+ for i := 0; i < len(text); i++ {
+ // escape an initial apostrophe or period so roff does not treat
+ // the line as a control request; guard on i == 0 so the zero-width
+ // escape is emitted only once, before the first byte
+ if i == 0 && (text[0] == '\'' || text[0] == '.') {
+ out.WriteString("\\&")
+ }
+
+ // directly copy normal characters
+ org := i
+
+ for i < len(text) && !needsBackslash(text[i]) {
+ i++
+ }
+ if i > org {
+ out.Write(text[org:i])
+ }
+
+ // escape a character
+ if i >= len(text) {
+ break
+ }
+ out.WriteByte('\\')
+ out.WriteByte(text[i])
+ }
+}
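
Since `escapeSpecialChars` and `needsBackslash` above are unexported, here is a self-contained sketch of the escaping rule they implement: roff assigns special meaning to `-`, `_`, `&`, `\`, and `~`, and a leading apostrophe or period would start a roff control line, so both are guarded with backslash escapes. The helper name is illustrative:

```go
package main

import (
	"bytes"
	"fmt"
)

// escapeRoff mirrors the unexported escapeSpecialChars above: prefix a
// zero-width \& before a leading apostrophe or period, and backslash
// any of roff's special characters.
func escapeRoff(text []byte) []byte {
	var out bytes.Buffer
	if len(text) > 0 && (text[0] == '\'' || text[0] == '.') {
		out.WriteString("\\&")
	}
	for _, c := range text {
		if bytes.IndexByte([]byte("-_&\\~"), c) >= 0 {
			out.WriteByte('\\')
		}
		out.WriteByte(c)
	}
	return out.Bytes()
}

func main() {
	fmt.Println(string(escapeRoff([]byte(".some-flag_name"))))
	// prints: \&.some\-flag\_name
}
```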
diff --git a/vendor/github.com/russross/blackfriday/.gitignore b/vendor/github.com/russross/blackfriday/.gitignore
new file mode 100644
index 00000000..75623dcc
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/.gitignore
@@ -0,0 +1,8 @@
+*.out
+*.swp
+*.8
+*.6
+_obj
+_test*
+markdown
+tags
diff --git a/vendor/github.com/russross/blackfriday/.travis.yml b/vendor/github.com/russross/blackfriday/.travis.yml
new file mode 100644
index 00000000..a1687f17
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/.travis.yml
@@ -0,0 +1,30 @@
+sudo: false
+language: go
+go:
+ - 1.5.4
+ - 1.6.2
+ - tip
+matrix:
+ include:
+ - go: 1.2.2
+ script:
+ - go get -t -v ./...
+ - go test -v -race ./...
+ - go: 1.3.3
+ script:
+ - go get -t -v ./...
+ - go test -v -race ./...
+ - go: 1.4.3
+ script:
+ - go get -t -v ./...
+ - go test -v -race ./...
+ allow_failures:
+ - go: tip
+ fast_finish: true
+install:
+ - # Do nothing. This is needed to prevent the default install action "go get -t -v ./..." from happening here (we want it to happen inside the script step).
+script:
+ - go get -t -v ./...
+ - diff -u <(echo -n) <(gofmt -d -s .)
+ - go tool vet .
+ - go test -v -race ./...
diff --git a/vendor/github.com/russross/blackfriday/LICENSE.txt b/vendor/github.com/russross/blackfriday/LICENSE.txt
new file mode 100644
index 00000000..2885af36
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/LICENSE.txt
@@ -0,0 +1,29 @@
+Blackfriday is distributed under the Simplified BSD License:
+
+> Copyright © 2011 Russ Ross
+> All rights reserved.
+>
+> Redistribution and use in source and binary forms, with or without
+> modification, are permitted provided that the following conditions
+> are met:
+>
+> 1. Redistributions of source code must retain the above copyright
+> notice, this list of conditions and the following disclaimer.
+>
+> 2. Redistributions in binary form must reproduce the above
+> copyright notice, this list of conditions and the following
+> disclaimer in the documentation and/or other materials provided with
+> the distribution.
+>
+> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+> POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/russross/blackfriday/README.md b/vendor/github.com/russross/blackfriday/README.md
new file mode 100644
index 00000000..a6c94b79
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/README.md
@@ -0,0 +1,363 @@
+Blackfriday
+[![Build Status][BuildSVG]][BuildURL]
+[![Godoc][GodocV2SVG]][GodocV2URL]
+===========
+
+Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It
+is paranoid about its input (so you can safely feed it user-supplied
+data), it is fast, it supports common extensions (tables, smart
+punctuation substitutions, etc.), and it is safe for all utf-8
+(unicode) input.
+
+HTML output is currently supported, along with Smartypants
+extensions.
+
+It started as a translation from C of [Sundown][3].
+
+
+Installation
+------------
+
+Blackfriday is compatible with any modern Go release. With Go and git installed:
+
+ go get -u gopkg.in/russross/blackfriday.v2
+
+will download, compile, and install the package into your `$GOPATH` directory
+hierarchy.
+
+
+Versions
+--------
+
+The currently maintained and recommended version of Blackfriday is `v2`. It is
+being developed on its own branch at https://github.com/russross/blackfriday/tree/v2,
+and the documentation is available at
+https://godoc.org/gopkg.in/russross/blackfriday.v2.
+
+It is `go get`-able via [gopkg.in][6] at `gopkg.in/russross/blackfriday.v2`,
+but we highly recommend using a package management tool like [dep][7] or
+[Glide][8] and making use of semantic versioning. With package management you
+should import `github.com/russross/blackfriday` and specify that you're using
+version 2.0.0.
+
+Version 2 offers a number of improvements over v1:
+
+* Cleaned up API
+* A separate call to [`Parse`][4], which produces an abstract syntax tree for
+ the document
+* Latest bug fixes
+* Flexibility to easily add your own rendering extensions
+
+Potential drawbacks:
+
+* Our benchmarks show v2 to be slightly slower than v1, currently in the
+  ballpark of 15%.
+* API breakage. If you can't afford to modify your code to adhere to the new API
+  and don't care too much about the new features, v2 is probably not for you.
+* Several bug fixes are trailing behind and still need to be forward-ported to
+ v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for
+ tracking.
+
+If you are still interested in the legacy `v1`, you can import it from
+`github.com/russross/blackfriday`. Documentation for the legacy v1 can be found
+here: https://godoc.org/github.com/russross/blackfriday
+
+### Known issue with `dep`
+
+There is a known problem with using Blackfriday v1 _transitively_ with `dep`.
+Currently `dep` prioritizes semver versions over anything else and picks the
+latest one; it also does not apply a `[[constraint]]` specifier to transitively
+pulled-in packages. So if you depend on something that uses Blackfriday v1, but
+that something does not use `dep` yet, you will get Blackfriday v2 pulled in and
+your first dependency will fail to build.
+
+There are a couple of fixes for it, documented here:
+https://github.com/golang/dep/blob/master/docs/FAQ.md#how-do-i-constrain-a-transitive-dependencys-version
+
+Meanwhile, the `dep` team is working on a more general solution to the problem of
+constraining transitive dependencies: https://github.com/golang/dep/issues/1124.
+
+
+Usage
+-----
+
+### v1
+
+For basic usage, it is as simple as getting your input into a byte
+slice and calling:
+
+ output := blackfriday.MarkdownBasic(input)
+
+This renders it with no extensions enabled. To get a more useful
+feature set, use this instead:
+
+ output := blackfriday.MarkdownCommon(input)
+
+### v2
+
+For the most sensible markdown processing, it is as simple as getting your input
+into a byte slice and calling:
+
+```go
+output := blackfriday.Run(input)
+```
+
+Your input will be parsed and the output rendered with a set of the most popular
+extensions enabled. If you want the most basic feature set, corresponding to
+the bare Markdown specification, use:
+
+```go
+output := blackfriday.Run(input, blackfriday.WithNoExtensions())
+```
+
+### Sanitize untrusted content
+
+Blackfriday itself does nothing to protect against malicious content. If you are
+dealing with user-supplied markdown, we recommend running Blackfriday's output
+through an HTML sanitizer such as [Bluemonday][5].
+
+Here's an example of simple usage of Blackfriday together with Bluemonday:
+
+```go
+import (
+ "github.com/microcosm-cc/bluemonday"
+ "gopkg.in/russross/blackfriday.v2"
+)
+
+// ...
+unsafe := blackfriday.Run(input)
+html := bluemonday.UGCPolicy().SanitizeBytes(unsafe)
+```
+
+### Custom options, v1
+
+If you want to customize the set of options, first get a renderer
+(currently only the HTML output engine), then use it to
+call the more general `Markdown` function. For examples, see the
+implementations of `MarkdownBasic` and `MarkdownCommon` in
+`markdown.go`.
+
+### Custom options, v2
+
+If you want to customize the set of options, use `blackfriday.WithExtensions`,
+`blackfriday.WithRenderer` and `blackfriday.WithRefOverride`.
+
+### `blackfriday-tool`
+
+You can also check out `blackfriday-tool` for a more complete example
+of how to use it. Download and install it using:
+
+ go get github.com/russross/blackfriday-tool
+
+This is a simple command-line tool that allows you to process a
+markdown file using a standalone program. You can also browse the
+source directly on github if you are just looking for some example
+code:
+
+* <http://github.com/russross/blackfriday-tool>
+
+Note that if you have not already done so, installing
+`blackfriday-tool` will be sufficient to download and install
+blackfriday in addition to the tool itself. The tool binary will be
+installed in `$GOPATH/bin`. This is a statically-linked binary that
+can be copied to wherever you need it without worrying about
+dependencies and library versions.
+
+### Sanitized anchor names
+
+Blackfriday includes an algorithm for creating sanitized anchor names
+corresponding to a given input text. This algorithm is used to create
+anchors for headings when `EXTENSION_AUTO_HEADER_IDS` is enabled. The
+algorithm has a specification, so that other packages can create
+compatible anchor names and links to those anchors.
+
+The specification is located at https://godoc.org/github.com/russross/blackfriday#hdr-Sanitized_Anchor_Names.
+
+[`SanitizedAnchorName`](https://godoc.org/github.com/russross/blackfriday#SanitizedAnchorName) exposes this functionality, and can be used to
+create compatible links to the anchor names generated by blackfriday.
+This algorithm is also implemented in a small standalone package at
+[`github.com/shurcooL/sanitized_anchor_name`](https://godoc.org/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients
+that want a small package and don't need full functionality of blackfriday.
+
+
+Features
+--------
+
+All features of Sundown are supported, including:
+
+* **Compatibility**. The Markdown v1.0.3 test suite passes with
+ the `--tidy` option. Without `--tidy`, the differences are
+ mostly in whitespace and entity escaping, where blackfriday is
+ more consistent and cleaner.
+
+* **Common extensions**, including table support, fenced code
+ blocks, autolinks, strikethroughs, non-strict emphasis, etc.
+
+* **Safety**. Blackfriday is paranoid when parsing, making it safe
+ to feed untrusted user input without fear of bad things
+ happening. The test suite stress tests this and there are no
+ known inputs that make it crash. If you find one, please let me
+ know and send me the input that does it.
+
+ NOTE: "safety" in this context means *runtime safety only*. In order to
+ protect yourself against JavaScript injection in untrusted content, see
+ [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content).
+
+* **Fast processing**. It is fast enough to render on-demand in
+ most web applications without having to cache the output.
+
+* **Thread safety**. You can run multiple parsers in different
+ goroutines without ill effect. There is no dependence on global
+ shared state.
+
+* **Minimal dependencies**. Blackfriday only depends on standard
+ library packages in Go. The source code is pretty
+ self-contained, so it is easy to add to any project, including
+ Google App Engine projects.
+
+* **Standards compliant**. Output successfully validates using the
+ W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional.
+
+
+Extensions
+----------
+
+In addition to the standard markdown syntax, this package
+implements the following extensions:
+
+* **Intra-word emphasis suppression**. The `_` character is
+ commonly used inside words when discussing code, so having
+ markdown interpret it as an emphasis command is usually the
+ wrong thing. Blackfriday lets you treat all emphasis markers as
+ normal characters when they occur inside a word.
+
+* **Tables**. Tables can be created by drawing them in the input
+ using a simple syntax:
+
+ ```
+ Name | Age
+ --------|------
+ Bob | 27
+ Alice | 23
+ ```
+
+* **Fenced code blocks**. In addition to the normal 4-space
+ indentation to mark code blocks, you can explicitly mark them
+ and supply a language (to make syntax highlighting simple). Just
+ mark it like this:
+
+ ``` go
+ func getTrue() bool {
+ return true
+ }
+ ```
+
+ You can use 3 or more backticks to mark the beginning of the
+ block, and the same number to mark the end of the block.
+
+ To preserve classes of fenced code blocks while using the bluemonday
+ HTML sanitizer, use the following policy:
+
+ ``` go
+ p := bluemonday.UGCPolicy()
+ p.AllowAttrs("class").Matching(regexp.MustCompile("^language-[a-zA-Z0-9]+$")).OnElements("code")
+ html := p.SanitizeBytes(unsafe)
+ ```
+
+* **Definition lists**. A simple definition list is made of a single-line
+ term followed by a colon and the definition for that term.
+
+ Cat
+ : Fluffy animal everyone likes
+
+ Internet
+ : Vector of transmission for pictures of cats
+
+ Terms must be separated from the previous definition by a blank line.
+
+* **Footnotes**. A marker in the text that will become a superscript number;
+ a footnote definition that will be placed in a list of footnotes at the
+ end of the document. A footnote looks like this:
+
+ This is a footnote.[^1]
+
+ [^1]: the footnote text.
+
+* **Autolinking**. Blackfriday can find URLs that have not been
+ explicitly marked as links and turn them into links.
+
+* **Strikethrough**. Use two tildes (`~~`) to mark text that
+ should be crossed out.
+
+* **Hard line breaks**. With this extension enabled (it is off by
+ default in the `MarkdownBasic` and `MarkdownCommon` convenience
+ functions), newlines in the input translate into line breaks in
+ the output.
+
+* **Smart quotes**. Smartypants-style punctuation substitution is
+ supported, turning normal double- and single-quote marks into
+ curly quotes, etc.
+
+* **LaTeX-style dash parsing** is an additional option, where `--`
+ is translated into `&ndash;`, and `---` is translated into
+ `&mdash;`. This differs from most smartypants processors, which
+ turn a single hyphen into an ndash and a double hyphen into an
+ mdash.
+
+* **Smart fractions**, where anything that looks like a fraction
+ is translated into suitable HTML (instead of just a few special
+ cases like most smartypants processors). For example, `4/5`
+ becomes `<sup>4</sup>&frasl;<sub>5</sub>`, which renders as
+ <sup>4</sup>&frasl;<sub>5</sub>.
+
+
+Other renderers
+---------------
+
+Blackfriday is structured to allow alternative rendering engines. Here
+are a few of note:
+
+* [github_flavored_markdown](https://godoc.org/github.com/shurcooL/github_flavored_markdown):
+ provides a GitHub Flavored Markdown renderer with fenced code block
+ highlighting and clickable heading anchor links.
+
+ It's not customizable, and its goal is to produce HTML output
+ equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode),
+ except the rendering is performed locally.
+
+* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt,
+ but for markdown.
+
+* [LaTeX output](https://bitbucket.org/ambrevar/blackfriday-latex):
+ renders output as LaTeX.
+
+
+TODO
+----
+
+* More unit testing
+* Improve Unicode support. It does not understand all Unicode
+ rules (about what constitutes a letter, a punctuation symbol,
+ etc.), so it may fail to detect word boundaries correctly in
+ some instances. It is safe on all UTF-8 input.
+
+
+License
+-------
+
+[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt)
+
+
+ [1]: https://daringfireball.net/projects/markdown/ "Markdown"
+ [2]: https://golang.org/ "Go Language"
+ [3]: https://github.com/vmg/sundown "Sundown"
+ [4]: https://godoc.org/gopkg.in/russross/blackfriday.v2#Parse "Parse func"
+ [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday"
+ [6]: https://labix.org/gopkg.in "gopkg.in"
+ [7]: https://github.com/golang/dep/ "dep"
+ [8]: https://github.com/Masterminds/glide "Glide"
+
+ [BuildSVG]: https://travis-ci.org/russross/blackfriday.svg?branch=master
+ [BuildURL]: https://travis-ci.org/russross/blackfriday
+ [GodocV2SVG]: https://godoc.org/gopkg.in/russross/blackfriday.v2?status.svg
+ [GodocV2URL]: https://godoc.org/gopkg.in/russross/blackfriday.v2
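
For completeness, a minimal self-contained sketch of the v1 convenience entry points described in the Usage section of the README above; the sample input is illustrative:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday"
)

func main() {
	input := []byte("# Title\n\nSome *markdown* with a `code span`.\n")

	// MarkdownBasic renders with no extensions enabled; MarkdownCommon
	// enables the common set (tables, fenced code, autolinks,
	// strikethrough, smart punctuation, ...).
	fmt.Println(string(blackfriday.MarkdownCommon(input)))
}
```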
diff --git a/vendor/github.com/russross/blackfriday/block.go b/vendor/github.com/russross/blackfriday/block.go
new file mode 100644
index 00000000..7fc731d5
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/block.go
@@ -0,0 +1,1450 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross <russ@russross.com>.
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+// Functions to parse block-level elements.
+//
+
+package blackfriday
+
+import (
+ "bytes"
+ "unicode"
+)
+
+// Parse block-level data.
+// Note: this function and many that it calls assume that
+// the input buffer ends with a newline.
+func (p *parser) block(out *bytes.Buffer, data []byte) {
+ if len(data) == 0 || data[len(data)-1] != '\n' {
+ panic("block input is missing terminating newline")
+ }
+
+ // this is called recursively: enforce a maximum depth
+ if p.nesting >= p.maxNesting {
+ return
+ }
+ p.nesting++
+
+ // parse out one block-level construct at a time
+ for len(data) > 0 {
+ // prefixed header:
+ //
+ // # Header 1
+ // ## Header 2
+ // ...
+ // ###### Header 6
+ if p.isPrefixHeader(data) {
+ data = data[p.prefixHeader(out, data):]
+ continue
+ }
+
+ // block of preformatted HTML:
+ //
+ // <div>
+ // ...
+ // </div>
+ if data[0] == '<' {
+ if i := p.html(out, data, true); i > 0 {
+ data = data[i:]
+ continue
+ }
+ }
+
+ // title block
+ //
+ // % stuff
+ // % more stuff
+ // % even more stuff
+ if p.flags&EXTENSION_TITLEBLOCK != 0 {
+ if data[0] == '%' {
+ if i := p.titleBlock(out, data, true); i > 0 {
+ data = data[i:]
+ continue
+ }
+ }
+ }
+
+ // blank lines. note: returns the # of bytes to skip
+ if i := p.isEmpty(data); i > 0 {
+ data = data[i:]
+ continue
+ }
+
+ // indented code block:
+ //
+ // func max(a, b int) int {
+ // if a > b {
+ // return a
+ // }
+ // return b
+ // }
+ if p.codePrefix(data) > 0 {
+ data = data[p.code(out, data):]
+ continue
+ }
+
+ // fenced code block:
+ //
+ // ``` go
+ // func fact(n int) int {
+ // if n <= 1 {
+ // return n
+ // }
+ // return n * fact(n-1)
+ // }
+ // ```
+ if p.flags&EXTENSION_FENCED_CODE != 0 {
+ if i := p.fencedCodeBlock(out, data, true); i > 0 {
+ data = data[i:]
+ continue
+ }
+ }
+
+ // horizontal rule:
+ //
+ // ------
+ // or
+ // ******
+ // or
+ // ______
+ if p.isHRule(data) {
+ p.r.HRule(out)
+ var i int
+ for i = 0; data[i] != '\n'; i++ {
+ }
+ data = data[i:]
+ continue
+ }
+
+ // block quote:
+ //
+ // > A big quote I found somewhere
+ // > on the web
+ if p.quotePrefix(data) > 0 {
+ data = data[p.quote(out, data):]
+ continue
+ }
+
+ // table:
+ //
+ // Name | Age | Phone
+ // ------|-----|---------
+ // Bob | 31 | 555-1234
+ // Alice | 27 | 555-4321
+ if p.flags&EXTENSION_TABLES != 0 {
+ if i := p.table(out, data); i > 0 {
+ data = data[i:]
+ continue
+ }
+ }
+
+ // an itemized/unordered list:
+ //
+ // * Item 1
+ // * Item 2
+ //
+ // also works with + or -
+ if p.uliPrefix(data) > 0 {
+ data = data[p.list(out, data, 0):]
+ continue
+ }
+
+ // a numbered/ordered list:
+ //
+ // 1. Item 1
+ // 2. Item 2
+ if p.oliPrefix(data) > 0 {
+ data = data[p.list(out, data, LIST_TYPE_ORDERED):]
+ continue
+ }
+
+ // definition lists:
+ //
+ // Term 1
+ // : Definition a
+ // : Definition b
+ //
+ // Term 2
+ // : Definition c
+ if p.flags&EXTENSION_DEFINITION_LISTS != 0 {
+ if p.dliPrefix(data) > 0 {
+ data = data[p.list(out, data, LIST_TYPE_DEFINITION):]
+ continue
+ }
+ }
+
+ // anything else must look like a normal paragraph
+ // note: this finds underlined headers, too
+ data = data[p.paragraph(out, data):]
+ }
+
+ p.nesting--
+}
+
+func (p *parser) isPrefixHeader(data []byte) bool {
+ if data[0] != '#' {
+ return false
+ }
+
+ if p.flags&EXTENSION_SPACE_HEADERS != 0 {
+ level := 0
+ for level < 6 && data[level] == '#' {
+ level++
+ }
+ if data[level] != ' ' {
+ return false
+ }
+ }
+ return true
+}
+
+func (p *parser) prefixHeader(out *bytes.Buffer, data []byte) int {
+ level := 0
+ for level < 6 && data[level] == '#' {
+ level++
+ }
+ i := skipChar(data, level, ' ')
+ end := skipUntilChar(data, i, '\n')
+ skip := end
+ id := ""
+ if p.flags&EXTENSION_HEADER_IDS != 0 {
+ j, k := 0, 0
+ // find start/end of header id
+ for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ {
+ }
+ for k = j + 1; k < end && data[k] != '}'; k++ {
+ }
+ // extract header id iff found
+ if j < end && k < end {
+ id = string(data[j+2 : k])
+ end = j
+ skip = k + 1
+ for end > 0 && data[end-1] == ' ' {
+ end--
+ }
+ }
+ }
+ for end > 0 && data[end-1] == '#' {
+ if isBackslashEscaped(data, end-1) {
+ break
+ }
+ end--
+ }
+ for end > 0 && data[end-1] == ' ' {
+ end--
+ }
+ if end > i {
+ if id == "" && p.flags&EXTENSION_AUTO_HEADER_IDS != 0 {
+ id = SanitizedAnchorName(string(data[i:end]))
+ }
+ work := func() bool {
+ p.inline(out, data[i:end])
+ return true
+ }
+ p.r.Header(out, work, level, id)
+ }
+ return skip
+}
+
+func (p *parser) isUnderlinedHeader(data []byte) int {
+ // test for a level 1 header
+ if data[0] == '=' {
+ i := skipChar(data, 1, '=')
+ i = skipChar(data, i, ' ')
+ if data[i] == '\n' {
+ return 1
+ } else {
+ return 0
+ }
+ }
+
+ // test for a level 2 header
+ if data[0] == '-' {
+ i := skipChar(data, 1, '-')
+ i = skipChar(data, i, ' ')
+ if data[i] == '\n' {
+ return 2
+ } else {
+ return 0
+ }
+ }
+
+ return 0
+}
+
+func (p *parser) titleBlock(out *bytes.Buffer, data []byte, doRender bool) int {
+ if data[0] != '%' {
+ return 0
+ }
+ splitData := bytes.Split(data, []byte("\n"))
+ var i int
+ for idx, b := range splitData {
+ if !bytes.HasPrefix(b, []byte("%")) {
+ i = idx // - 1
+ break
+ }
+ }
+
+ data = bytes.Join(splitData[0:i], []byte("\n"))
+ p.r.TitleBlock(out, data)
+
+ return len(data)
+}
+
+func (p *parser) html(out *bytes.Buffer, data []byte, doRender bool) int {
+ var i, j int
+
+ // identify the opening tag
+ if data[0] != '<' {
+ return 0
+ }
+ curtag, tagfound := p.htmlFindTag(data[1:])
+
+ // handle special cases
+ if !tagfound {
+ // check for an HTML comment
+ if size := p.htmlComment(out, data, doRender); size > 0 {
+ return size
+ }
+
+ // check for an <hr> tag
+ if size := p.htmlHr(out, data, doRender); size > 0 {
+ return size
+ }
+
+ // check for HTML CDATA
+ if size := p.htmlCDATA(out, data, doRender); size > 0 {
+ return size
+ }
+
+ // no special case recognized
+ return 0
+ }
+
+ // look for an unindented matching closing tag
+ // followed by a blank line
+ found := false
+ /*
+ closetag := []byte("\n</" + curtag + ">")
+ j = len(curtag) + 1
+ for !found {
+ // scan for a closing tag at the beginning of a line
+ if skip := bytes.Index(data[j:], closetag); skip >= 0 {
+ j += skip + len(closetag)
+ } else {
+ break
+ }
+
+ // see if it is the only thing on the line
+ if skip := p.isEmpty(data[j:]); skip > 0 {
+ // see if it is followed by a blank line/eof
+ j += skip
+ if j >= len(data) {
+ found = true
+ i = j
+ } else {
+ if skip := p.isEmpty(data[j:]); skip > 0 {
+ j += skip
+ found = true
+ i = j
+ }
+ }
+ }
+ }
+ */
+
+ // if not found, try a second pass looking for indented match
+ // but not if tag is "ins" or "del" (following original Markdown.pl)
+ if !found && curtag != "ins" && curtag != "del" {
+ i = 1
+ for i < len(data) {
+ i++
+ for i < len(data) && !(data[i-1] == '<' && data[i] == '/') {
+ i++
+ }
+
+ if i+2+len(curtag) >= len(data) {
+ break
+ }
+
+ j = p.htmlFindEnd(curtag, data[i-1:])
+
+ if j > 0 {
+ i += j - 1
+ found = true
+ break
+ }
+ }
+ }
+
+ if !found {
+ return 0
+ }
+
+ // the end of the block has been found
+ if doRender {
+ // trim newlines
+ end := i
+ for end > 0 && data[end-1] == '\n' {
+ end--
+ }
+ p.r.BlockHtml(out, data[:end])
+ }
+
+ return i
+}
+
+func (p *parser) renderHTMLBlock(out *bytes.Buffer, data []byte, start int, doRender bool) int {
+ // html block needs to end with a blank line
+ if i := p.isEmpty(data[start:]); i > 0 {
+ size := start + i
+ if doRender {
+ // trim trailing newlines
+ end := size
+ for end > 0 && data[end-1] == '\n' {
+ end--
+ }
+ p.r.BlockHtml(out, data[:end])
+ }
+ return size
+ }
+ return 0
+}
+
+// HTML comment, lax form
+func (p *parser) htmlComment(out *bytes.Buffer, data []byte, doRender bool) int {
+ i := p.inlineHTMLComment(out, data)
+ return p.renderHTMLBlock(out, data, i, doRender)
+}
+
+// HTML CDATA section
+func (p *parser) htmlCDATA(out *bytes.Buffer, data []byte, doRender bool) int {
+ const cdataTag = "<![cdata["
+ const cdataTagLen = len(cdataTag)
+ if len(data) < cdataTagLen+1 {
+ return 0
+ }
+ if !bytes.Equal(bytes.ToLower(data[:cdataTagLen]), []byte(cdataTag)) {
+ return 0
+ }
+ i := cdataTagLen
+ // scan for an end-of-comment marker, across lines if necessary
+ for i < len(data) && !(data[i-2] == ']' && data[i-1] == ']' && data[i] == '>') {
+ i++
+ }
+ i++
+ // no end-of-comment marker
+ if i >= len(data) {
+ return 0
+ }
+ return p.renderHTMLBlock(out, data, i, doRender)
+}
+
+// HR, which is the only self-closing block tag considered
+func (p *parser) htmlHr(out *bytes.Buffer, data []byte, doRender bool) int {
+ if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') {
+ return 0
+ }
+ if data[3] != ' ' && data[3] != '/' && data[3] != '>' {
+ // not an <hr> tag after all; at least not a valid one
+ return 0
+ }
+
+ i := 3
+ for data[i] != '>' && data[i] != '\n' {
+ i++
+ }
+
+ if data[i] == '>' {
+ return p.renderHTMLBlock(out, data, i+1, doRender)
+ }
+
+ return 0
+}
+
+func (p *parser) htmlFindTag(data []byte) (string, bool) {
+ i := 0
+ for isalnum(data[i]) {
+ i++
+ }
+ key := string(data[:i])
+ if _, ok := blockTags[key]; ok {
+ return key, true
+ }
+ return "", false
+}
+
+func (p *parser) htmlFindEnd(tag string, data []byte) int {
+ // assume data[0] == '<' && data[1] == '/' already tested
+
+ // check if tag is a match
+ closetag := []byte("</" + tag + ">")
+ if !bytes.HasPrefix(data, closetag) {
+ return 0
+ }
+ i := len(closetag)
+
+ // check that the rest of the line is blank
+ skip := 0
+ if skip = p.isEmpty(data[i:]); skip == 0 {
+ return 0
+ }
+ i += skip
+ skip = 0
+
+ if i >= len(data) {
+ return i
+ }
+
+ if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 {
+ return i
+ }
+ if skip = p.isEmpty(data[i:]); skip == 0 {
+ // following line must be blank
+ return 0
+ }
+
+ return i + skip
+}
+
+func (*parser) isEmpty(data []byte) int {
+ // it is okay to call isEmpty on an empty buffer
+ if len(data) == 0 {
+ return 0
+ }
+
+ var i int
+ for i = 0; i < len(data) && data[i] != '\n'; i++ {
+ if data[i] != ' ' && data[i] != '\t' {
+ return 0
+ }
+ }
+ return i + 1
+}
+
+func (*parser) isHRule(data []byte) bool {
+ i := 0
+
+ // skip up to three spaces
+ for i < 3 && data[i] == ' ' {
+ i++
+ }
+
+ // look at the hrule char
+ if data[i] != '*' && data[i] != '-' && data[i] != '_' {
+ return false
+ }
+ c := data[i]
+
+ // the whole line must be the char or whitespace
+ n := 0
+ for data[i] != '\n' {
+ switch {
+ case data[i] == c:
+ n++
+ case data[i] != ' ':
+ return false
+ }
+ i++
+ }
+
+ return n >= 3
+}
+
+// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data,
+// and returns the end index if so, or 0 otherwise. It also returns the marker found.
+// If syntax is not nil, it gets set to the syntax specified in the fence line.
+// A final newline is mandatory to recognize the fence line, unless newlineOptional is true.
+func isFenceLine(data []byte, syntax *string, oldmarker string, newlineOptional bool) (end int, marker string) {
+ i, size := 0, 0
+
+ // skip up to three spaces
+ for i < len(data) && i < 3 && data[i] == ' ' {
+ i++
+ }
+
+ // check for the marker characters: ~ or `
+ if i >= len(data) {
+ return 0, ""
+ }
+ if data[i] != '~' && data[i] != '`' {
+ return 0, ""
+ }
+
+ c := data[i]
+
+ // the whole line must be the same char or whitespace
+ for i < len(data) && data[i] == c {
+ size++
+ i++
+ }
+
+ // the marker char must occur at least 3 times
+ if size < 3 {
+ return 0, ""
+ }
+ marker = string(data[i-size : i])
+
+ // if this is the end marker, it must match the beginning marker
+ if oldmarker != "" && marker != oldmarker {
+ return 0, ""
+ }
+
+ // TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here
+ // into one, always get the syntax, and discard it if the caller doesn't care.
+ if syntax != nil {
+ syn := 0
+ i = skipChar(data, i, ' ')
+
+ if i >= len(data) {
+ if newlineOptional && i == len(data) {
+ return i, marker
+ }
+ return 0, ""
+ }
+
+ syntaxStart := i
+
+ if data[i] == '{' {
+ i++
+ syntaxStart++
+
+ for i < len(data) && data[i] != '}' && data[i] != '\n' {
+ syn++
+ i++
+ }
+
+ if i >= len(data) || data[i] != '}' {
+ return 0, ""
+ }
+
+ // strip all whitespace at the beginning and the end
+ // of the {} block
+ for syn > 0 && isspace(data[syntaxStart]) {
+ syntaxStart++
+ syn--
+ }
+
+ for syn > 0 && isspace(data[syntaxStart+syn-1]) {
+ syn--
+ }
+
+ i++
+ } else {
+ for i < len(data) && !isspace(data[i]) {
+ syn++
+ i++
+ }
+ }
+
+ *syntax = string(data[syntaxStart : syntaxStart+syn])
+ }
+
+ i = skipChar(data, i, ' ')
+ if i >= len(data) || data[i] != '\n' {
+ if newlineOptional && i == len(data) {
+ return i, marker
+ }
+ return 0, ""
+ }
+
+ return i + 1, marker // Take newline into account.
+}
+
+// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning,
+// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects.
+// If doRender is true, a final newline is mandatory to recognize the fenced code block.
+func (p *parser) fencedCodeBlock(out *bytes.Buffer, data []byte, doRender bool) int {
+ var syntax string
+ beg, marker := isFenceLine(data, &syntax, "", false)
+ if beg == 0 || beg >= len(data) {
+ return 0
+ }
+
+ var work bytes.Buffer
+
+ for {
+ // safe to assume beg < len(data)
+
+ // check for the end of the code block
+ newlineOptional := !doRender
+ fenceEnd, _ := isFenceLine(data[beg:], nil, marker, newlineOptional)
+ if fenceEnd != 0 {
+ beg += fenceEnd
+ break
+ }
+
+ // copy the current line
+ end := skipUntilChar(data, beg, '\n') + 1
+
+ // did we reach the end of the buffer without a closing marker?
+ if end >= len(data) {
+ return 0
+ }
+
+ // verbatim copy to the working buffer
+ if doRender {
+ work.Write(data[beg:end])
+ }
+ beg = end
+ }
+
+ if doRender {
+ p.r.BlockCode(out, work.Bytes(), syntax)
+ }
+
+ return beg
+}
+
+func (p *parser) table(out *bytes.Buffer, data []byte) int {
+ var header bytes.Buffer
+ i, columns := p.tableHeader(&header, data)
+ if i == 0 {
+ return 0
+ }
+
+ var body bytes.Buffer
+
+ for i < len(data) {
+ pipes, rowStart := 0, i
+ for ; data[i] != '\n'; i++ {
+ if data[i] == '|' {
+ pipes++
+ }
+ }
+
+ if pipes == 0 {
+ i = rowStart
+ break
+ }
+
+ // include the newline in data sent to tableRow
+ i++
+ p.tableRow(&body, data[rowStart:i], columns, false)
+ }
+
+ p.r.Table(out, header.Bytes(), body.Bytes(), columns)
+
+ return i
+}
+
+// check if the specified position is preceded by an odd number of backslashes
+func isBackslashEscaped(data []byte, i int) bool {
+ backslashes := 0
+ for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' {
+ backslashes++
+ }
+ return backslashes&1 == 1
+}
+
+func (p *parser) tableHeader(out *bytes.Buffer, data []byte) (size int, columns []int) {
+ i := 0
+ colCount := 1
+ for i = 0; data[i] != '\n'; i++ {
+ if data[i] == '|' && !isBackslashEscaped(data, i) {
+ colCount++
+ }
+ }
+
+ // doesn't look like a table header
+ if colCount == 1 {
+ return
+ }
+
+ // include the newline in the data sent to tableRow
+ header := data[:i+1]
+
+ // column count ignores pipes at beginning or end of line
+ if data[0] == '|' {
+ colCount--
+ }
+ if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) {
+ colCount--
+ }
+
+ columns = make([]int, colCount)
+
+ // move on to the header underline
+ i++
+ if i >= len(data) {
+ return
+ }
+
+ if data[i] == '|' && !isBackslashEscaped(data, i) {
+ i++
+ }
+ i = skipChar(data, i, ' ')
+
+ // each column header is of form: / *:?-+:? *|/ with # dashes + # colons >= 3
+ // and trailing | optional on last column
+ col := 0
+ for data[i] != '\n' {
+ dashes := 0
+
+ if data[i] == ':' {
+ i++
+ columns[col] |= TABLE_ALIGNMENT_LEFT
+ dashes++
+ }
+ for data[i] == '-' {
+ i++
+ dashes++
+ }
+ if data[i] == ':' {
+ i++
+ columns[col] |= TABLE_ALIGNMENT_RIGHT
+ dashes++
+ }
+ for data[i] == ' ' {
+ i++
+ }
+
+ // end of column test is messy
+ switch {
+ case dashes < 3:
+ // not a valid column
+ return
+
+ case data[i] == '|' && !isBackslashEscaped(data, i):
+ // marker found, now skip past trailing whitespace
+ col++
+ i++
+ for data[i] == ' ' {
+ i++
+ }
+
+ // trailing junk found after last column
+ if col >= colCount && data[i] != '\n' {
+ return
+ }
+
+ case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount:
+ // something else found where marker was required
+ return
+
+ case data[i] == '\n':
+ // marker is optional for the last column
+ col++
+
+ default:
+ // trailing junk found after last column
+ return
+ }
+ }
+ if col != colCount {
+ return
+ }
+
+ p.tableRow(out, header, columns, true)
+ size = i + 1
+ return
+}
+
+func (p *parser) tableRow(out *bytes.Buffer, data []byte, columns []int, header bool) {
+ i, col := 0, 0
+ var rowWork bytes.Buffer
+
+ if data[i] == '|' && !isBackslashEscaped(data, i) {
+ i++
+ }
+
+ for col = 0; col < len(columns) && i < len(data); col++ {
+ for data[i] == ' ' {
+ i++
+ }
+
+ cellStart := i
+
+ for (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' {
+ i++
+ }
+
+ cellEnd := i
+
+ // skip the end-of-cell marker, possibly taking us past end of buffer
+ i++
+
+ for cellEnd > cellStart && data[cellEnd-1] == ' ' {
+ cellEnd--
+ }
+
+ var cellWork bytes.Buffer
+ p.inline(&cellWork, data[cellStart:cellEnd])
+
+ if header {
+ p.r.TableHeaderCell(&rowWork, cellWork.Bytes(), columns[col])
+ } else {
+ p.r.TableCell(&rowWork, cellWork.Bytes(), columns[col])
+ }
+ }
+
+ // pad it out with empty columns to get the right number
+ for ; col < len(columns); col++ {
+ if header {
+ p.r.TableHeaderCell(&rowWork, nil, columns[col])
+ } else {
+ p.r.TableCell(&rowWork, nil, columns[col])
+ }
+ }
+
+ // silently ignore rows with too many cells
+
+ p.r.TableRow(out, rowWork.Bytes())
+}
+
+// returns blockquote prefix length
+func (p *parser) quotePrefix(data []byte) int {
+ i := 0
+ for i < 3 && data[i] == ' ' {
+ i++
+ }
+ if data[i] == '>' {
+ if data[i+1] == ' ' {
+ return i + 2
+ }
+ return i + 1
+ }
+ return 0
+}
+
+// blockquote ends with at least one blank line
+// followed by something without a blockquote prefix
+func (p *parser) terminateBlockquote(data []byte, beg, end int) bool {
+ if p.isEmpty(data[beg:]) <= 0 {
+ return false
+ }
+ if end >= len(data) {
+ return true
+ }
+ return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0
+}
+
+// parse a blockquote fragment
+func (p *parser) quote(out *bytes.Buffer, data []byte) int {
+ var raw bytes.Buffer
+ beg, end := 0, 0
+ for beg < len(data) {
+ end = beg
+ // Step over whole lines, collecting them. While doing that, check for
+ // fenced code and if one's found, incorporate it altogether,
+ // regardless of any contents inside it
+ for data[end] != '\n' {
+ if p.flags&EXTENSION_FENCED_CODE != 0 {
+ if i := p.fencedCodeBlock(out, data[end:], false); i > 0 {
+ // -1 to compensate for the extra end++ after the loop:
+ end += i - 1
+ break
+ }
+ }
+ end++
+ }
+ end++
+
+ if pre := p.quotePrefix(data[beg:]); pre > 0 {
+ // skip the prefix
+ beg += pre
+ } else if p.terminateBlockquote(data, beg, end) {
+ break
+ }
+
+ // this line is part of the blockquote
+ raw.Write(data[beg:end])
+ beg = end
+ }
+
+ var cooked bytes.Buffer
+ p.block(&cooked, raw.Bytes())
+ p.r.BlockQuote(out, cooked.Bytes())
+ return end
+}
+
+// returns prefix length for block code
+func (p *parser) codePrefix(data []byte) int {
+ if data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' {
+ return 4
+ }
+ return 0
+}
+
+func (p *parser) code(out *bytes.Buffer, data []byte) int {
+ var work bytes.Buffer
+
+ i := 0
+ for i < len(data) {
+ beg := i
+ for data[i] != '\n' {
+ i++
+ }
+ i++
+
+ blankline := p.isEmpty(data[beg:i]) > 0
+ if pre := p.codePrefix(data[beg:i]); pre > 0 {
+ beg += pre
+ } else if !blankline {
+ // non-empty, non-prefixed line breaks the pre
+ i = beg
+ break
+ }
+
+ // verbatim copy to the working buffer
+ if blankline {
+ work.WriteByte('\n')
+ } else {
+ work.Write(data[beg:i])
+ }
+ }
+
+ // trim all the \n off the end of work
+ workbytes := work.Bytes()
+ eol := len(workbytes)
+ for eol > 0 && workbytes[eol-1] == '\n' {
+ eol--
+ }
+ if eol != len(workbytes) {
+ work.Truncate(eol)
+ }
+
+ work.WriteByte('\n')
+
+ p.r.BlockCode(out, work.Bytes(), "")
+
+ return i
+}
+
+// returns unordered list item prefix
+func (p *parser) uliPrefix(data []byte) int {
+ i := 0
+
+ // start with up to 3 spaces
+ for i < 3 && data[i] == ' ' {
+ i++
+ }
+
+ // need a *, +, or - followed by a space
+ if (data[i] != '*' && data[i] != '+' && data[i] != '-') ||
+ data[i+1] != ' ' {
+ return 0
+ }
+ return i + 2
+}
+
+// returns ordered list item prefix
+func (p *parser) oliPrefix(data []byte) int {
+ i := 0
+
+ // start with up to 3 spaces
+ for i < 3 && data[i] == ' ' {
+ i++
+ }
+
+ // count the digits
+ start := i
+ for data[i] >= '0' && data[i] <= '9' {
+ i++
+ }
+
+ // we need >= 1 digits followed by a dot and a space
+ if start == i || data[i] != '.' || data[i+1] != ' ' {
+ return 0
+ }
+ return i + 2
+}
+
+// returns definition list item prefix
+func (p *parser) dliPrefix(data []byte) int {
+ i := 0
+
+ // need a ':' followed by a space
+ if data[i] != ':' || data[i+1] != ' ' {
+ return 0
+ }
+ for data[i] == ' ' {
+ i++
+ }
+ return i + 2
+}
+
+// parse ordered or unordered list block
+func (p *parser) list(out *bytes.Buffer, data []byte, flags int) int {
+ i := 0
+ flags |= LIST_ITEM_BEGINNING_OF_LIST
+ work := func() bool {
+ for i < len(data) {
+ skip := p.listItem(out, data[i:], &flags)
+ i += skip
+
+ if skip == 0 || flags&LIST_ITEM_END_OF_LIST != 0 {
+ break
+ }
+ flags &= ^LIST_ITEM_BEGINNING_OF_LIST
+ }
+ return true
+ }
+
+ p.r.List(out, work, flags)
+ return i
+}
+
+// Parse a single list item.
+// Assumes initial prefix is already removed if this is a sublist.
+func (p *parser) listItem(out *bytes.Buffer, data []byte, flags *int) int {
+ // keep track of the indentation of the first line
+ itemIndent := 0
+ for itemIndent < 3 && data[itemIndent] == ' ' {
+ itemIndent++
+ }
+
+ i := p.uliPrefix(data)
+ if i == 0 {
+ i = p.oliPrefix(data)
+ }
+ if i == 0 {
+ i = p.dliPrefix(data)
+ // reset definition term flag
+ if i > 0 {
+ *flags &= ^LIST_TYPE_TERM
+ }
+ }
+ if i == 0 {
+ // if in definition list, set term flag and continue
+ if *flags&LIST_TYPE_DEFINITION != 0 {
+ *flags |= LIST_TYPE_TERM
+ } else {
+ return 0
+ }
+ }
+
+ // skip leading whitespace on first line
+ for data[i] == ' ' {
+ i++
+ }
+
+ // find the end of the line
+ line := i
+ for i > 0 && data[i-1] != '\n' {
+ i++
+ }
+
+ // get working buffer
+ var raw bytes.Buffer
+
+ // put the first line into the working buffer
+ raw.Write(data[line:i])
+ line = i
+
+ // process the following lines
+ containsBlankLine := false
+ sublist := 0
+
+gatherlines:
+ for line < len(data) {
+ i++
+
+ // find the end of this line
+ for data[i-1] != '\n' {
+ i++
+ }
+
+ // if it is an empty line, guess that it is part of this item
+ // and move on to the next line
+ if p.isEmpty(data[line:i]) > 0 {
+ containsBlankLine = true
+ raw.Write(data[line:i])
+ line = i
+ continue
+ }
+
+ // calculate the indentation
+ indent := 0
+ for indent < 4 && line+indent < i && data[line+indent] == ' ' {
+ indent++
+ }
+
+ chunk := data[line+indent : i]
+
+ // evaluate how this line fits in
+ switch {
+ // is this a nested list item?
+ case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) ||
+ p.oliPrefix(chunk) > 0 ||
+ p.dliPrefix(chunk) > 0:
+
+ if containsBlankLine {
+ // end the list if the type changed after a blank line
+ if indent <= itemIndent &&
+ ((*flags&LIST_TYPE_ORDERED != 0 && p.uliPrefix(chunk) > 0) ||
+ (*flags&LIST_TYPE_ORDERED == 0 && p.oliPrefix(chunk) > 0)) {
+
+ *flags |= LIST_ITEM_END_OF_LIST
+ break gatherlines
+ }
+ *flags |= LIST_ITEM_CONTAINS_BLOCK
+ }
+
+ // to be a nested list, it must be indented more
+ // if not, it is the next item in the same list
+ if indent <= itemIndent {
+ break gatherlines
+ }
+
+ // is this the first item in the nested list?
+ if sublist == 0 {
+ sublist = raw.Len()
+ }
+
+ // is this a nested prefix header?
+ case p.isPrefixHeader(chunk):
+ // if the header is not indented, it is not nested in the list
+ // and thus ends the list
+ if containsBlankLine && indent < 4 {
+ *flags |= LIST_ITEM_END_OF_LIST
+ break gatherlines
+ }
+ *flags |= LIST_ITEM_CONTAINS_BLOCK
+
+ // anything following an empty line is only part
+ // of this item if it is indented 4 spaces
+ // (regardless of the indentation of the beginning of the item)
+ case containsBlankLine && indent < 4:
+ if *flags&LIST_TYPE_DEFINITION != 0 && i < len(data)-1 {
+ // is the next item still a part of this list?
+ next := i
+ for data[next] != '\n' {
+ next++
+ }
+ for next < len(data)-1 && data[next] == '\n' {
+ next++
+ }
+ if i < len(data)-1 && data[i] != ':' && data[next] != ':' {
+ *flags |= LIST_ITEM_END_OF_LIST
+ }
+ } else {
+ *flags |= LIST_ITEM_END_OF_LIST
+ }
+ break gatherlines
+
+ // a blank line means this should be parsed as a block
+ case containsBlankLine:
+ *flags |= LIST_ITEM_CONTAINS_BLOCK
+ }
+
+ containsBlankLine = false
+
+ // add the line into the working buffer without prefix
+ raw.Write(data[line+indent : i])
+
+ line = i
+ }
+
+ // If reached end of data, the Renderer.ListItem call we're going to make below
+ // is definitely the last in the list.
+ if line >= len(data) {
+ *flags |= LIST_ITEM_END_OF_LIST
+ }
+
+ rawBytes := raw.Bytes()
+
+ // render the contents of the list item
+ var cooked bytes.Buffer
+ if *flags&LIST_ITEM_CONTAINS_BLOCK != 0 && *flags&LIST_TYPE_TERM == 0 {
+ // intermediate render of block item, except for definition term
+ if sublist > 0 {
+ p.block(&cooked, rawBytes[:sublist])
+ p.block(&cooked, rawBytes[sublist:])
+ } else {
+ p.block(&cooked, rawBytes)
+ }
+ } else {
+ // intermediate render of inline item
+ if sublist > 0 {
+ p.inline(&cooked, rawBytes[:sublist])
+ p.block(&cooked, rawBytes[sublist:])
+ } else {
+ p.inline(&cooked, rawBytes)
+ }
+ }
+
+ // render the actual list item
+ cookedBytes := cooked.Bytes()
+ parsedEnd := len(cookedBytes)
+
+ // strip trailing newlines
+ for parsedEnd > 0 && cookedBytes[parsedEnd-1] == '\n' {
+ parsedEnd--
+ }
+ p.r.ListItem(out, cookedBytes[:parsedEnd], *flags)
+
+ return line
+}
+
+// render a single paragraph that has already been parsed out
+func (p *parser) renderParagraph(out *bytes.Buffer, data []byte) {
+ if len(data) == 0 {
+ return
+ }
+
+ // trim leading spaces
+ beg := 0
+ for data[beg] == ' ' {
+ beg++
+ }
+
+ // trim trailing newline
+ end := len(data) - 1
+
+ // trim trailing spaces
+ for end > beg && data[end-1] == ' ' {
+ end--
+ }
+
+ work := func() bool {
+ p.inline(out, data[beg:end])
+ return true
+ }
+ p.r.Paragraph(out, work)
+}
+
+func (p *parser) paragraph(out *bytes.Buffer, data []byte) int {
+ // prev: index of 1st char of previous line
+ // line: index of 1st char of current line
+ // i: index of cursor/end of current line
+ var prev, line, i int
+
+ // keep going until we find something to mark the end of the paragraph
+ for i < len(data) {
+ // mark the beginning of the current line
+ prev = line
+ current := data[i:]
+ line = i
+
+ // did we find a blank line marking the end of the paragraph?
+ if n := p.isEmpty(current); n > 0 {
+ // is this blank line followed by a definition list item?
+ if p.flags&EXTENSION_DEFINITION_LISTS != 0 {
+ if i < len(data)-1 && data[i+1] == ':' {
+ return p.list(out, data[prev:], LIST_TYPE_DEFINITION)
+ }
+ }
+
+ p.renderParagraph(out, data[:i])
+ return i + n
+ }
+
+ // an underline under some text marks a header, so our paragraph ended on the previous line
+ if i > 0 {
+ if level := p.isUnderlinedHeader(current); level > 0 {
+ // render the paragraph
+ p.renderParagraph(out, data[:prev])
+
+ // ignore leading and trailing whitespace
+ eol := i - 1
+ for prev < eol && data[prev] == ' ' {
+ prev++
+ }
+ for eol > prev && data[eol-1] == ' ' {
+ eol--
+ }
+
+ // render the header
+ // this ugly double closure avoids forcing variables onto the heap
+ work := func(o *bytes.Buffer, pp *parser, d []byte) func() bool {
+ return func() bool {
+ pp.inline(o, d)
+ return true
+ }
+ }(out, p, data[prev:eol])
+
+ id := ""
+ if p.flags&EXTENSION_AUTO_HEADER_IDS != 0 {
+ id = SanitizedAnchorName(string(data[prev:eol]))
+ }
+
+ p.r.Header(out, work, level, id)
+
+ // find the end of the underline
+ for data[i] != '\n' {
+ i++
+ }
+ return i
+ }
+ }
+
+ // if the next line starts a block of HTML, then the paragraph ends here
+ if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 {
+ if data[i] == '<' && p.html(out, current, false) > 0 {
+ // rewind to before the HTML block
+ p.renderParagraph(out, data[:i])
+ return i
+ }
+ }
+
+ // if there's a prefixed header or a horizontal rule after this, paragraph is over
+ if p.isPrefixHeader(current) || p.isHRule(current) {
+ p.renderParagraph(out, data[:i])
+ return i
+ }
+
+ // if there's a fenced code block, paragraph is over
+ if p.flags&EXTENSION_FENCED_CODE != 0 {
+ if p.fencedCodeBlock(out, current, false) > 0 {
+ p.renderParagraph(out, data[:i])
+ return i
+ }
+ }
+
+ // if there's a definition list item, prev line is a definition term
+ if p.flags&EXTENSION_DEFINITION_LISTS != 0 {
+ if p.dliPrefix(current) != 0 {
+ return p.list(out, data[prev:], LIST_TYPE_DEFINITION)
+ }
+ }
+
+ // if there's a list after this, paragraph is over
+ if p.flags&EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK != 0 {
+ if p.uliPrefix(current) != 0 ||
+ p.oliPrefix(current) != 0 ||
+ p.quotePrefix(current) != 0 ||
+ p.codePrefix(current) != 0 {
+ p.renderParagraph(out, data[:i])
+ return i
+ }
+ }
+
+ // otherwise, scan to the beginning of the next line
+ for data[i] != '\n' {
+ i++
+ }
+ i++
+ }
+
+ p.renderParagraph(out, data[:i])
+ return i
+}
+
+// SanitizedAnchorName returns a sanitized anchor name for the given text.
+//
+// It implements the algorithm specified in the package comment.
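+//
+// For example (an illustrative trace, not an upstream test case),
+// SanitizedAnchorName("This is a Header! (v2)") returns
+// "this-is-a-header-v2": letters are lowercased, digits are kept as-is,
+// and each inner run of other characters collapses to a single '-'.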
+func SanitizedAnchorName(text string) string {
+ var anchorName []rune
+ futureDash := false
+ for _, r := range text {
+ switch {
+ case unicode.IsLetter(r) || unicode.IsNumber(r):
+ if futureDash && len(anchorName) > 0 {
+ anchorName = append(anchorName, '-')
+ }
+ futureDash = false
+ anchorName = append(anchorName, unicode.ToLower(r))
+ default:
+ futureDash = true
+ }
+ }
+ return string(anchorName)
+}
diff --git a/vendor/github.com/russross/blackfriday/doc.go b/vendor/github.com/russross/blackfriday/doc.go
new file mode 100644
index 00000000..9656c42a
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/doc.go
@@ -0,0 +1,32 @@
+// Package blackfriday is a Markdown processor.
+//
+// It translates plain text with simple formatting rules into HTML or LaTeX.
+//
+// Sanitized Anchor Names
+//
+// Blackfriday includes an algorithm for creating sanitized anchor names
+// corresponding to a given input text. This algorithm is used to create
+// anchors for headings when EXTENSION_AUTO_HEADER_IDS is enabled. The
+// algorithm is specified below, so that other packages can create
+// compatible anchor names and links to those anchors.
+//
+// The algorithm iterates over the input text, interpreted as UTF-8,
+// one Unicode code point (rune) at a time. All runes that are letters (category L)
+// or numbers (category N) are considered valid characters. They are mapped to
+// lower case, and included in the output. All other runes are considered
+// invalid characters. Invalid characters that precede the first valid character,
+// as well as invalid characters that follow the last valid character,
+// are dropped completely. All other sequences of invalid characters
+// between two valid characters are replaced with a single dash character '-'.
+//
+// SanitizedAnchorName exposes this functionality, and can be used to
+// create compatible links to the anchor names generated by blackfriday.
+// This algorithm is also implemented in a small standalone package at
+// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients
+// that want a small package and don't need full functionality of blackfriday.
+package blackfriday
+
+// NOTE: Keep Sanitized Anchor Name algorithm in sync with package
+// github.com/shurcooL/sanitized_anchor_name.
+// Otherwise, users of sanitized_anchor_name will get anchor names
+// that are incompatible with those generated by blackfriday.
diff --git a/vendor/github.com/russross/blackfriday/html.go b/vendor/github.com/russross/blackfriday/html.go
new file mode 100644
index 00000000..c917c7d3
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/html.go
@@ -0,0 +1,950 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross <russ@russross.com>.
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+//
+// HTML rendering backend
+//
+//
+
+package blackfriday
+
+import (
+ "bytes"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// Html renderer configuration options.
+const (
+ HTML_SKIP_HTML = 1 << iota // skip preformatted HTML blocks
+ HTML_SKIP_STYLE // skip embedded <style> elements
+ HTML_SKIP_IMAGES // skip embedded images
+ HTML_SKIP_LINKS // skip all links
+ HTML_SAFELINK // only link to trusted protocols
+ HTML_NOFOLLOW_LINKS // only link with rel="nofollow"
+ HTML_NOREFERRER_LINKS // only link with rel="noreferrer"
+ HTML_HREF_TARGET_BLANK // add a blank target
+ HTML_TOC // generate a table of contents
+ HTML_OMIT_CONTENTS // skip the main contents (for a standalone table of contents)
+ HTML_COMPLETE_PAGE // generate a complete HTML page
+ HTML_USE_XHTML // generate XHTML output instead of HTML
+ HTML_USE_SMARTYPANTS // enable smart punctuation substitutions
+ HTML_SMARTYPANTS_FRACTIONS // enable smart fractions (with HTML_USE_SMARTYPANTS)
+ HTML_SMARTYPANTS_DASHES // enable smart dashes (with HTML_USE_SMARTYPANTS)
+ HTML_SMARTYPANTS_LATEX_DASHES // enable LaTeX-style dashes (with HTML_USE_SMARTYPANTS and HTML_SMARTYPANTS_DASHES)
+	HTML_SMARTYPANTS_ANGLED_QUOTES // enable angled double quotes (with HTML_USE_SMARTYPANTS) for rendering double quotes
+ HTML_SMARTYPANTS_QUOTES_NBSP // enable "French guillemets" (with HTML_USE_SMARTYPANTS)
+ HTML_FOOTNOTE_RETURN_LINKS // generate a link at the end of a footnote to return to the source
+)
+
+var (
+ alignments = []string{
+ "left",
+ "right",
+ "center",
+ }
+
+ // TODO: improve this regexp to catch all possible entities:
+ htmlEntity = regexp.MustCompile(`&[a-z]{2,5};`)
+)
+
+type HtmlRendererParameters struct {
+ // Prepend this text to each relative URL.
+ AbsolutePrefix string
+ // Add this text to each footnote anchor, to ensure uniqueness.
+ FootnoteAnchorPrefix string
+ // Show this text inside the <a> tag for a footnote return link, if the
+ // HTML_FOOTNOTE_RETURN_LINKS flag is enabled. If blank, the string
+ // <sup>[return]</sup> is used.
+ FootnoteReturnLinkContents string
+ // If set, add this text to the front of each Header ID, to ensure
+ // uniqueness.
+ HeaderIDPrefix string
+ // If set, add this text to the back of each Header ID, to ensure uniqueness.
+ HeaderIDSuffix string
+}
+
+// Html is a type that implements the Renderer interface for HTML output.
+//
+// Do not create this directly; instead use the HtmlRenderer function.
+type Html struct {
+ flags int // HTML_* options
+ closeTag string // how to end singleton tags: either " />" or ">"
+ title string // document title
+ css string // optional css file url (used with HTML_COMPLETE_PAGE)
+
+ parameters HtmlRendererParameters
+
+ // table of contents data
+ tocMarker int
+ headerCount int
+ currentLevel int
+ toc *bytes.Buffer
+
+ // Track header IDs to prevent ID collision in a single generation.
+ headerIDs map[string]int
+
+ smartypants *smartypantsRenderer
+}
+
+const (
+ xhtmlClose = " />"
+ htmlClose = ">"
+)
+
+// HtmlRenderer creates and configures an Html object, which
+// satisfies the Renderer interface.
+//
+// flags is a set of HTML_* options ORed together.
+// title is the title of the document, and css is a URL for the document's
+// stylesheet.
+// title and css are only used when HTML_COMPLETE_PAGE is selected.
+func HtmlRenderer(flags int, title string, css string) Renderer {
+ return HtmlRendererWithParameters(flags, title, css, HtmlRendererParameters{})
+}
+
+func HtmlRendererWithParameters(flags int, title string,
+ css string, renderParameters HtmlRendererParameters) Renderer {
+ // configure the rendering engine
+ closeTag := htmlClose
+ if flags&HTML_USE_XHTML != 0 {
+ closeTag = xhtmlClose
+ }
+
+ if renderParameters.FootnoteReturnLinkContents == "" {
+ renderParameters.FootnoteReturnLinkContents = `<sup>[return]</sup>`
+ }
+
+ return &Html{
+ flags: flags,
+ closeTag: closeTag,
+ title: title,
+ css: css,
+ parameters: renderParameters,
+
+ headerCount: 0,
+ currentLevel: 0,
+ toc: new(bytes.Buffer),
+
+ headerIDs: make(map[string]int),
+
+ smartypants: smartypants(flags),
+ }
+}
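+
+// A minimal usage sketch (illustrative, not part of the upstream file;
+// Markdown and the EXTENSION_* flags are defined elsewhere in this package):
+//
+//	renderer := HtmlRenderer(HTML_USE_SMARTYPANTS|HTML_COMPLETE_PAGE, "Title", "style.css")
+//	output := Markdown([]byte("# Hello\n"), renderer, EXTENSION_AUTO_HEADER_IDS)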
+
+// Using if statements is a bit faster than a switch statement. As the compiler
+// improves, this should become unnecessary; it is only worthwhile because
+// attrEscape is the single largest CPU user in normal use.
+// We also tried using a map, but that gave a ~3x slowdown.
+func escapeSingleChar(char byte) (string, bool) {
+ if char == '"' {
+ return "&quot;", true
+ }
+ if char == '&' {
+ return "&amp;", true
+ }
+ if char == '<' {
+ return "&lt;", true
+ }
+ if char == '>' {
+ return "&gt;", true
+ }
+ return "", false
+}
+
+func attrEscape(out *bytes.Buffer, src []byte) {
+ org := 0
+ for i, ch := range src {
+ if entity, ok := escapeSingleChar(ch); ok {
+ if i > org {
+ // copy all the normal characters since the last escape
+ out.Write(src[org:i])
+ }
+ org = i + 1
+ out.WriteString(entity)
+ }
+ }
+ if org < len(src) {
+ out.Write(src[org:])
+ }
+}
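+
+// An illustrative trace: attrEscape(out, []byte(`a < "b" & c`)) writes
+// `a &lt; &quot;b&quot; &amp; c`; unescaped runs are copied in bulk
+// between entity substitutions.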
+
+func entityEscapeWithSkip(out *bytes.Buffer, src []byte, skipRanges [][]int) {
+ end := 0
+ for _, rang := range skipRanges {
+ attrEscape(out, src[end:rang[0]])
+ out.Write(src[rang[0]:rang[1]])
+ end = rang[1]
+ }
+ attrEscape(out, src[end:])
+}
+
+func (options *Html) GetFlags() int {
+ return options.flags
+}
+
+func (options *Html) TitleBlock(out *bytes.Buffer, text []byte) {
+ text = bytes.TrimPrefix(text, []byte("% "))
+ text = bytes.Replace(text, []byte("\n% "), []byte("\n"), -1)
+ out.WriteString("<h1 class=\"title\">")
+ out.Write(text)
+ out.WriteString("\n</h1>")
+}
+
+func (options *Html) Header(out *bytes.Buffer, text func() bool, level int, id string) {
+ marker := out.Len()
+ doubleSpace(out)
+
+ if id == "" && options.flags&HTML_TOC != 0 {
+ id = fmt.Sprintf("toc_%d", options.headerCount)
+ }
+
+ if id != "" {
+ id = options.ensureUniqueHeaderID(id)
+
+ if options.parameters.HeaderIDPrefix != "" {
+ id = options.parameters.HeaderIDPrefix + id
+ }
+
+ if options.parameters.HeaderIDSuffix != "" {
+ id = id + options.parameters.HeaderIDSuffix
+ }
+
+ out.WriteString(fmt.Sprintf("<h%d id=\"%s\">", level, id))
+ } else {
+ out.WriteString(fmt.Sprintf("<h%d>", level))
+ }
+
+ tocMarker := out.Len()
+ if !text() {
+ out.Truncate(marker)
+ return
+ }
+
+ // are we building a table of contents?
+ if options.flags&HTML_TOC != 0 {
+ options.TocHeaderWithAnchor(out.Bytes()[tocMarker:], level, id)
+ }
+
+ out.WriteString(fmt.Sprintf("</h%d>\n", level))
+}
+
+func (options *Html) BlockHtml(out *bytes.Buffer, text []byte) {
+ if options.flags&HTML_SKIP_HTML != 0 {
+ return
+ }
+
+ doubleSpace(out)
+ out.Write(text)
+ out.WriteByte('\n')
+}
+
+func (options *Html) HRule(out *bytes.Buffer) {
+ doubleSpace(out)
+ out.WriteString("<hr")
+ out.WriteString(options.closeTag)
+ out.WriteByte('\n')
+}
+
+func (options *Html) BlockCode(out *bytes.Buffer, text []byte, lang string) {
+ doubleSpace(out)
+
+ // parse out the language names/classes
+ count := 0
+ for _, elt := range strings.Fields(lang) {
+ if elt[0] == '.' {
+ elt = elt[1:]
+ }
+ if len(elt) == 0 {
+ continue
+ }
+ if count == 0 {
+ out.WriteString("<pre><code class=\"language-")
+ } else {
+ out.WriteByte(' ')
+ }
+ attrEscape(out, []byte(elt))
+ count++
+ }
+
+ if count == 0 {
+ out.WriteString("<pre><code>")
+ } else {
+ out.WriteString("\">")
+ }
+
+ attrEscape(out, text)
+ out.WriteString("</code></pre>\n")
+}
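+
+// For example (illustrative), BlockCode(out, src, "go") wraps the escaped
+// source as <pre><code class="language-go">...</code></pre>, while an
+// empty lang yields a plain <pre><code> block.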
+
+func (options *Html) BlockQuote(out *bytes.Buffer, text []byte) {
+ doubleSpace(out)
+ out.WriteString("<blockquote>\n")
+ out.Write(text)
+ out.WriteString("</blockquote>\n")
+}
+
+func (options *Html) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) {
+ doubleSpace(out)
+ out.WriteString("<table>\n<thead>\n")
+ out.Write(header)
+ out.WriteString("</thead>\n\n<tbody>\n")
+ out.Write(body)
+ out.WriteString("</tbody>\n</table>\n")
+}
+
+func (options *Html) TableRow(out *bytes.Buffer, text []byte) {
+ doubleSpace(out)
+ out.WriteString("<tr>\n")
+ out.Write(text)
+ out.WriteString("\n</tr>\n")
+}
+
+func (options *Html) TableHeaderCell(out *bytes.Buffer, text []byte, align int) {
+ doubleSpace(out)
+ switch align {
+ case TABLE_ALIGNMENT_LEFT:
+ out.WriteString("<th align=\"left\">")
+ case TABLE_ALIGNMENT_RIGHT:
+ out.WriteString("<th align=\"right\">")
+ case TABLE_ALIGNMENT_CENTER:
+ out.WriteString("<th align=\"center\">")
+ default:
+ out.WriteString("<th>")
+ }
+
+ out.Write(text)
+ out.WriteString("</th>")
+}
+
+func (options *Html) TableCell(out *bytes.Buffer, text []byte, align int) {
+ doubleSpace(out)
+ switch align {
+ case TABLE_ALIGNMENT_LEFT:
+ out.WriteString("<td align=\"left\">")
+ case TABLE_ALIGNMENT_RIGHT:
+ out.WriteString("<td align=\"right\">")
+ case TABLE_ALIGNMENT_CENTER:
+ out.WriteString("<td align=\"center\">")
+ default:
+ out.WriteString("<td>")
+ }
+
+ out.Write(text)
+ out.WriteString("</td>")
+}
+
+func (options *Html) Footnotes(out *bytes.Buffer, text func() bool) {
+ out.WriteString("<div class=\"footnotes\">\n")
+ options.HRule(out)
+ options.List(out, text, LIST_TYPE_ORDERED)
+ out.WriteString("</div>\n")
+}
+
+func (options *Html) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) {
+ if flags&LIST_ITEM_CONTAINS_BLOCK != 0 || flags&LIST_ITEM_BEGINNING_OF_LIST != 0 {
+ doubleSpace(out)
+ }
+ slug := slugify(name)
+ out.WriteString(`<li id="`)
+ out.WriteString(`fn:`)
+ out.WriteString(options.parameters.FootnoteAnchorPrefix)
+ out.Write(slug)
+ out.WriteString(`">`)
+ out.Write(text)
+ if options.flags&HTML_FOOTNOTE_RETURN_LINKS != 0 {
+ out.WriteString(` <a class="footnote-return" href="#`)
+ out.WriteString(`fnref:`)
+ out.WriteString(options.parameters.FootnoteAnchorPrefix)
+ out.Write(slug)
+ out.WriteString(`">`)
+ out.WriteString(options.parameters.FootnoteReturnLinkContents)
+ out.WriteString(`</a>`)
+ }
+ out.WriteString("</li>\n")
+}
+
+func (options *Html) List(out *bytes.Buffer, text func() bool, flags int) {
+ marker := out.Len()
+ doubleSpace(out)
+
+ if flags&LIST_TYPE_DEFINITION != 0 {
+ out.WriteString("<dl>")
+ } else if flags&LIST_TYPE_ORDERED != 0 {
+ out.WriteString("<ol>")
+ } else {
+ out.WriteString("<ul>")
+ }
+ if !text() {
+ out.Truncate(marker)
+ return
+ }
+ if flags&LIST_TYPE_DEFINITION != 0 {
+ out.WriteString("</dl>\n")
+ } else if flags&LIST_TYPE_ORDERED != 0 {
+ out.WriteString("</ol>\n")
+ } else {
+ out.WriteString("</ul>\n")
+ }
+}
+
+func (options *Html) ListItem(out *bytes.Buffer, text []byte, flags int) {
+ if (flags&LIST_ITEM_CONTAINS_BLOCK != 0 && flags&LIST_TYPE_DEFINITION == 0) ||
+ flags&LIST_ITEM_BEGINNING_OF_LIST != 0 {
+ doubleSpace(out)
+ }
+ if flags&LIST_TYPE_TERM != 0 {
+ out.WriteString("<dt>")
+ } else if flags&LIST_TYPE_DEFINITION != 0 {
+ out.WriteString("<dd>")
+ } else {
+ out.WriteString("<li>")
+ }
+ out.Write(text)
+ if flags&LIST_TYPE_TERM != 0 {
+ out.WriteString("</dt>\n")
+ } else if flags&LIST_TYPE_DEFINITION != 0 {
+ out.WriteString("</dd>\n")
+ } else {
+ out.WriteString("</li>\n")
+ }
+}
+
+func (options *Html) Paragraph(out *bytes.Buffer, text func() bool) {
+ marker := out.Len()
+ doubleSpace(out)
+
+ out.WriteString("<p>")
+ if !text() {
+ out.Truncate(marker)
+ return
+ }
+ out.WriteString("</p>\n")
+}
+
+func (options *Html) AutoLink(out *bytes.Buffer, link []byte, kind int) {
+ skipRanges := htmlEntity.FindAllIndex(link, -1)
+ if options.flags&HTML_SAFELINK != 0 && !isSafeLink(link) && kind != LINK_TYPE_EMAIL {
+ // mark it but don't link it if it is not a safe link: no smartypants
+ out.WriteString("<tt>")
+ entityEscapeWithSkip(out, link, skipRanges)
+ out.WriteString("</tt>")
+ return
+ }
+
+ out.WriteString("<a href=\"")
+ if kind == LINK_TYPE_EMAIL {
+ out.WriteString("mailto:")
+ } else {
+ options.maybeWriteAbsolutePrefix(out, link)
+ }
+
+ entityEscapeWithSkip(out, link, skipRanges)
+
+ var relAttrs []string
+ if options.flags&HTML_NOFOLLOW_LINKS != 0 && !isRelativeLink(link) {
+ relAttrs = append(relAttrs, "nofollow")
+ }
+ if options.flags&HTML_NOREFERRER_LINKS != 0 && !isRelativeLink(link) {
+ relAttrs = append(relAttrs, "noreferrer")
+ }
+ if len(relAttrs) > 0 {
+ out.WriteString(fmt.Sprintf("\" rel=\"%s", strings.Join(relAttrs, " ")))
+ }
+
+	// a blank target is only added to external links
+ if options.flags&HTML_HREF_TARGET_BLANK != 0 && !isRelativeLink(link) {
+ out.WriteString("\" target=\"_blank")
+ }
+
+ out.WriteString("\">")
+
+ // Pretty print: if we get an email address as
+ // an actual URI, e.g. `mailto:foo@bar.com`, we don't
+ // want to print the `mailto:` prefix
+ switch {
+ case bytes.HasPrefix(link, []byte("mailto://")):
+ attrEscape(out, link[len("mailto://"):])
+ case bytes.HasPrefix(link, []byte("mailto:")):
+ attrEscape(out, link[len("mailto:"):])
+ default:
+ entityEscapeWithSkip(out, link, skipRanges)
+ }
+
+ out.WriteString("</a>")
+}
+
+func (options *Html) CodeSpan(out *bytes.Buffer, text []byte) {
+ out.WriteString("<code>")
+ attrEscape(out, text)
+ out.WriteString("</code>")
+}
+
+func (options *Html) DoubleEmphasis(out *bytes.Buffer, text []byte) {
+ out.WriteString("<strong>")
+ out.Write(text)
+ out.WriteString("</strong>")
+}
+
+func (options *Html) Emphasis(out *bytes.Buffer, text []byte) {
+ if len(text) == 0 {
+ return
+ }
+ out.WriteString("<em>")
+ out.Write(text)
+ out.WriteString("</em>")
+}
+
+func (options *Html) maybeWriteAbsolutePrefix(out *bytes.Buffer, link []byte) {
+ if options.parameters.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' {
+ out.WriteString(options.parameters.AbsolutePrefix)
+ if link[0] != '/' {
+ out.WriteByte('/')
+ }
+ }
+}
+
+func (options *Html) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) {
+ if options.flags&HTML_SKIP_IMAGES != 0 {
+ return
+ }
+
+ out.WriteString("<img src=\"")
+ options.maybeWriteAbsolutePrefix(out, link)
+ attrEscape(out, link)
+ out.WriteString("\" alt=\"")
+ if len(alt) > 0 {
+ attrEscape(out, alt)
+ }
+ if len(title) > 0 {
+ out.WriteString("\" title=\"")
+ attrEscape(out, title)
+ }
+
+ out.WriteByte('"')
+ out.WriteString(options.closeTag)
+}
+
+func (options *Html) LineBreak(out *bytes.Buffer) {
+ out.WriteString("<br")
+ out.WriteString(options.closeTag)
+ out.WriteByte('\n')
+}
+
+func (options *Html) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) {
+ if options.flags&HTML_SKIP_LINKS != 0 {
+ // write the link text out but don't link it, just mark it with typewriter font
+ out.WriteString("<tt>")
+ attrEscape(out, content)
+ out.WriteString("</tt>")
+ return
+ }
+
+ if options.flags&HTML_SAFELINK != 0 && !isSafeLink(link) {
+ // write the link text out but don't link it, just mark it with typewriter font
+ out.WriteString("<tt>")
+ attrEscape(out, content)
+ out.WriteString("</tt>")
+ return
+ }
+
+ out.WriteString("<a href=\"")
+ options.maybeWriteAbsolutePrefix(out, link)
+ attrEscape(out, link)
+ if len(title) > 0 {
+ out.WriteString("\" title=\"")
+ attrEscape(out, title)
+ }
+ var relAttrs []string
+ if options.flags&HTML_NOFOLLOW_LINKS != 0 && !isRelativeLink(link) {
+ relAttrs = append(relAttrs, "nofollow")
+ }
+ if options.flags&HTML_NOREFERRER_LINKS != 0 && !isRelativeLink(link) {
+ relAttrs = append(relAttrs, "noreferrer")
+ }
+ if len(relAttrs) > 0 {
+ out.WriteString(fmt.Sprintf("\" rel=\"%s", strings.Join(relAttrs, " ")))
+ }
+
+	// a blank target is only added to external links
+ if options.flags&HTML_HREF_TARGET_BLANK != 0 && !isRelativeLink(link) {
+ out.WriteString("\" target=\"_blank")
+ }
+
+ out.WriteString("\">")
+ out.Write(content)
+ out.WriteString("</a>")
+ return
+}
+
+func (options *Html) RawHtmlTag(out *bytes.Buffer, text []byte) {
+ if options.flags&HTML_SKIP_HTML != 0 {
+ return
+ }
+ if options.flags&HTML_SKIP_STYLE != 0 && isHtmlTag(text, "style") {
+ return
+ }
+ if options.flags&HTML_SKIP_LINKS != 0 && isHtmlTag(text, "a") {
+ return
+ }
+ if options.flags&HTML_SKIP_IMAGES != 0 && isHtmlTag(text, "img") {
+ return
+ }
+ out.Write(text)
+}
+
+func (options *Html) TripleEmphasis(out *bytes.Buffer, text []byte) {
+ out.WriteString("<strong><em>")
+ out.Write(text)
+ out.WriteString("</em></strong>")
+}
+
+func (options *Html) StrikeThrough(out *bytes.Buffer, text []byte) {
+ out.WriteString("<del>")
+ out.Write(text)
+ out.WriteString("</del>")
+}
+
+func (options *Html) FootnoteRef(out *bytes.Buffer, ref []byte, id int) {
+ slug := slugify(ref)
+ out.WriteString(`<sup class="footnote-ref" id="`)
+ out.WriteString(`fnref:`)
+ out.WriteString(options.parameters.FootnoteAnchorPrefix)
+ out.Write(slug)
+ out.WriteString(`"><a href="#`)
+ out.WriteString(`fn:`)
+ out.WriteString(options.parameters.FootnoteAnchorPrefix)
+ out.Write(slug)
+ out.WriteString(`">`)
+ out.WriteString(strconv.Itoa(id))
+ out.WriteString(`</a></sup>`)
+}
+
+func (options *Html) Entity(out *bytes.Buffer, entity []byte) {
+ out.Write(entity)
+}
+
+func (options *Html) NormalText(out *bytes.Buffer, text []byte) {
+ if options.flags&HTML_USE_SMARTYPANTS != 0 {
+ options.Smartypants(out, text)
+ } else {
+ attrEscape(out, text)
+ }
+}
+
+func (options *Html) Smartypants(out *bytes.Buffer, text []byte) {
+ smrt := smartypantsData{false, false}
+
+ // first do normal entity escaping
+ var escaped bytes.Buffer
+ attrEscape(&escaped, text)
+ text = escaped.Bytes()
+
+ mark := 0
+ for i := 0; i < len(text); i++ {
+ if action := options.smartypants[text[i]]; action != nil {
+ if i > mark {
+ out.Write(text[mark:i])
+ }
+
+ previousChar := byte(0)
+ if i > 0 {
+ previousChar = text[i-1]
+ }
+ i += action(out, &smrt, previousChar, text[i:])
+ mark = i + 1
+ }
+ }
+
+ if mark < len(text) {
+ out.Write(text[mark:])
+ }
+}
+
+func (options *Html) DocumentHeader(out *bytes.Buffer) {
+ if options.flags&HTML_COMPLETE_PAGE == 0 {
+ return
+ }
+
+ ending := ""
+ if options.flags&HTML_USE_XHTML != 0 {
+ out.WriteString("<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" ")
+ out.WriteString("\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n")
+ out.WriteString("<html xmlns=\"http://www.w3.org/1999/xhtml\">\n")
+ ending = " /"
+ } else {
+ out.WriteString("<!DOCTYPE html>\n")
+ out.WriteString("<html>\n")
+ }
+ out.WriteString("<head>\n")
+ out.WriteString(" <title>")
+ options.NormalText(out, []byte(options.title))
+ out.WriteString("</title>\n")
+ out.WriteString(" <meta name=\"GENERATOR\" content=\"Blackfriday Markdown Processor v")
+ out.WriteString(VERSION)
+ out.WriteString("\"")
+ out.WriteString(ending)
+ out.WriteString(">\n")
+ out.WriteString(" <meta charset=\"utf-8\"")
+ out.WriteString(ending)
+ out.WriteString(">\n")
+ if options.css != "" {
+ out.WriteString(" <link rel=\"stylesheet\" type=\"text/css\" href=\"")
+ attrEscape(out, []byte(options.css))
+ out.WriteString("\"")
+ out.WriteString(ending)
+ out.WriteString(">\n")
+ }
+ out.WriteString("</head>\n")
+ out.WriteString("<body>\n")
+
+ options.tocMarker = out.Len()
+}
+
+func (options *Html) DocumentFooter(out *bytes.Buffer) {
+ // finalize and insert the table of contents
+ if options.flags&HTML_TOC != 0 {
+ options.TocFinalize()
+
+ // now we have to insert the table of contents into the document
+ var temp bytes.Buffer
+
+ // start by making a copy of everything after the document header
+ temp.Write(out.Bytes()[options.tocMarker:])
+
+ // now clear the copied material from the main output buffer
+ out.Truncate(options.tocMarker)
+
+ // corner case spacing issue
+ if options.flags&HTML_COMPLETE_PAGE != 0 {
+ out.WriteByte('\n')
+ }
+
+ // insert the table of contents
+ out.WriteString("<nav>\n")
+ out.Write(options.toc.Bytes())
+ out.WriteString("</nav>\n")
+
+ // corner case spacing issue
+ if options.flags&HTML_COMPLETE_PAGE == 0 && options.flags&HTML_OMIT_CONTENTS == 0 {
+ out.WriteByte('\n')
+ }
+
+ // write out everything that came after it
+ if options.flags&HTML_OMIT_CONTENTS == 0 {
+ out.Write(temp.Bytes())
+ }
+ }
+
+ if options.flags&HTML_COMPLETE_PAGE != 0 {
+ out.WriteString("\n</body>\n")
+ out.WriteString("</html>\n")
+ }
+}
+
+func (options *Html) TocHeaderWithAnchor(text []byte, level int, anchor string) {
+ for level > options.currentLevel {
+ switch {
+ case bytes.HasSuffix(options.toc.Bytes(), []byte("</li>\n")):
+ // this sublist can nest underneath a header
+ size := options.toc.Len()
+ options.toc.Truncate(size - len("</li>\n"))
+
+ case options.currentLevel > 0:
+ options.toc.WriteString("<li>")
+ }
+ if options.toc.Len() > 0 {
+ options.toc.WriteByte('\n')
+ }
+ options.toc.WriteString("<ul>\n")
+ options.currentLevel++
+ }
+
+ for level < options.currentLevel {
+ options.toc.WriteString("</ul>")
+ if options.currentLevel > 1 {
+ options.toc.WriteString("</li>\n")
+ }
+ options.currentLevel--
+ }
+
+ options.toc.WriteString("<li><a href=\"#")
+ if anchor != "" {
+ options.toc.WriteString(anchor)
+ } else {
+ options.toc.WriteString("toc_")
+ options.toc.WriteString(strconv.Itoa(options.headerCount))
+ }
+ options.toc.WriteString("\">")
+ options.headerCount++
+
+ options.toc.Write(text)
+
+ options.toc.WriteString("</a></li>\n")
+}
+
+func (options *Html) TocHeader(text []byte, level int) {
+ options.TocHeaderWithAnchor(text, level, "")
+}
+
+func (options *Html) TocFinalize() {
+ for options.currentLevel > 1 {
+ options.toc.WriteString("</ul></li>\n")
+ options.currentLevel--
+ }
+
+ if options.currentLevel > 0 {
+ options.toc.WriteString("</ul>\n")
+ }
+}
+
+func isHtmlTag(tag []byte, tagname string) bool {
+ found, _ := findHtmlTagPos(tag, tagname)
+ return found
+}
+
+// Look for a character, but ignore it when it's inside any kind of quotes,
+// since it might be JavaScript
+func skipUntilCharIgnoreQuotes(html []byte, start int, char byte) int {
+ inSingleQuote := false
+ inDoubleQuote := false
+ inGraveQuote := false
+ i := start
+ for i < len(html) {
+ switch {
+ case html[i] == char && !inSingleQuote && !inDoubleQuote && !inGraveQuote:
+ return i
+ case html[i] == '\'':
+ inSingleQuote = !inSingleQuote
+ case html[i] == '"':
+ inDoubleQuote = !inDoubleQuote
+ case html[i] == '`':
+ inGraveQuote = !inGraveQuote
+ }
+ i++
+ }
+ return start
+}
+
+func findHtmlTagPos(tag []byte, tagname string) (bool, int) {
+ i := 0
+ if i < len(tag) && tag[0] != '<' {
+ return false, -1
+ }
+ i++
+ i = skipSpace(tag, i)
+
+ if i < len(tag) && tag[i] == '/' {
+ i++
+ }
+
+ i = skipSpace(tag, i)
+ j := 0
+ for ; i < len(tag); i, j = i+1, j+1 {
+ if j >= len(tagname) {
+ break
+ }
+
+ if strings.ToLower(string(tag[i]))[0] != tagname[j] {
+ return false, -1
+ }
+ }
+
+ if i == len(tag) {
+ return false, -1
+ }
+
+ rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>')
+ if rightAngle > i {
+ return true, rightAngle
+ }
+
+ return false, -1
+}
+
+func skipUntilChar(text []byte, start int, char byte) int {
+ i := start
+ for i < len(text) && text[i] != char {
+ i++
+ }
+ return i
+}
+
+func skipSpace(tag []byte, i int) int {
+ for i < len(tag) && isspace(tag[i]) {
+ i++
+ }
+ return i
+}
+
+func skipChar(data []byte, start int, char byte) int {
+ i := start
+ for i < len(data) && data[i] == char {
+ i++
+ }
+ return i
+}
+
+func doubleSpace(out *bytes.Buffer) {
+ if out.Len() > 0 {
+ out.WriteByte('\n')
+ }
+}
+
+func isRelativeLink(link []byte) (yes bool) {
+	// an anchor link: begins with '#'
+ if link[0] == '#' {
+ return true
+ }
+
+	// a link beginning with '/' but not '//'; the latter may be a protocol-relative link
+ if len(link) >= 2 && link[0] == '/' && link[1] != '/' {
+ return true
+ }
+
+ // only the root '/'
+ if len(link) == 1 && link[0] == '/' {
+ return true
+ }
+
+	// current directory: begins with "./"
+ if bytes.HasPrefix(link, []byte("./")) {
+ return true
+ }
+
+	// parent directory: begins with "../"
+ if bytes.HasPrefix(link, []byte("../")) {
+ return true
+ }
+
+ return false
+}
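+
+// Illustrative cases: isRelativeLink returns true for "#section",
+// "/about", "./a.md", and "../a.md", and false for "//example.com/a"
+// (protocol-relative) and "https://example.com/a".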
+
+func (options *Html) ensureUniqueHeaderID(id string) string {
+ for count, found := options.headerIDs[id]; found; count, found = options.headerIDs[id] {
+ tmp := fmt.Sprintf("%s-%d", id, count+1)
+
+ if _, tmpFound := options.headerIDs[tmp]; !tmpFound {
+ options.headerIDs[id] = count + 1
+ id = tmp
+ } else {
+ id = id + "-1"
+ }
+ }
+
+ if _, found := options.headerIDs[id]; !found {
+ options.headerIDs[id] = 0
+ }
+
+ return id
+}
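+
+// An illustrative trace: three headers that all produce the ID "intro"
+// come out as "intro", "intro-1", and "intro-2"; each issued ID is
+// recorded in headerIDs so later collisions keep probing for a free suffix.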
diff --git a/vendor/github.com/russross/blackfriday/inline.go b/vendor/github.com/russross/blackfriday/inline.go
new file mode 100644
index 00000000..4483b8f1
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/inline.go
@@ -0,0 +1,1154 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross <russ@russross.com>.
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+// Functions to parse inline elements.
+//
+
+package blackfriday
+
+import (
+ "bytes"
+ "regexp"
+ "strconv"
+)
+
+var (
+ urlRe = `((https?|ftp):\/\/|\/)[-A-Za-z0-9+&@#\/%?=~_|!:,.;\(\)]+`
+ anchorRe = regexp.MustCompile(`^(<a\shref="` + urlRe + `"(\stitle="[^"<>]+")?\s?>` + urlRe + `<\/a>)`)
+)
+
+// Functions to parse text within a block.
+// Each function returns the number of characters it consumed.
+// data is the complete block being rendered;
+// offset is the number of valid characters before the current cursor.
+
+func (p *parser) inline(out *bytes.Buffer, data []byte) {
+ // this is called recursively: enforce a maximum depth
+ if p.nesting >= p.maxNesting {
+ return
+ }
+ p.nesting++
+
+ i, end := 0, 0
+ for i < len(data) {
+ // copy inactive chars into the output
+ for end < len(data) && p.inlineCallback[data[end]] == nil {
+ end++
+ }
+
+ p.r.NormalText(out, data[i:end])
+
+ if end >= len(data) {
+ break
+ }
+ i = end
+
+ // call the trigger
+ handler := p.inlineCallback[data[end]]
+ if consumed := handler(p, out, data, i); consumed == 0 {
+ // no action from the callback; buffer the byte for later
+ end = i + 1
+ } else {
+ // skip past whatever the callback used
+ i += consumed
+ end = i
+ }
+ }
+
+ p.nesting--
+}
+
+// single and double emphasis parsing
+func emphasis(p *parser, out *bytes.Buffer, data []byte, offset int) int {
+ data = data[offset:]
+ c := data[0]
+ ret := 0
+
+ if len(data) > 2 && data[1] != c {
+ // whitespace cannot follow an opening emphasis;
+ // strikethrough only takes two characters '~~'
+ if c == '~' || isspace(data[1]) {
+ return 0
+ }
+ if ret = helperEmphasis(p, out, data[1:], c); ret == 0 {
+ return 0
+ }
+
+ return ret + 1
+ }
+
+ if len(data) > 3 && data[1] == c && data[2] != c {
+ if isspace(data[2]) {
+ return 0
+ }
+ if ret = helperDoubleEmphasis(p, out, data[2:], c); ret == 0 {
+ return 0
+ }
+
+ return ret + 2
+ }
+
+ if len(data) > 4 && data[1] == c && data[2] == c && data[3] != c {
+ if c == '~' || isspace(data[3]) {
+ return 0
+ }
+ if ret = helperTripleEmphasis(p, out, data, 3, c); ret == 0 {
+ return 0
+ }
+
+ return ret + 3
+ }
+
+ return 0
+}
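+
+// Illustrative dispatch: "*em*" takes the single-delimiter branch
+// (helperEmphasis), "**strong**" the double branch (helperDoubleEmphasis),
+// and "***both***" the triple branch (helperTripleEmphasis); '~' only
+// participates in the double form "~~".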
+
+func codeSpan(p *parser, out *bytes.Buffer, data []byte, offset int) int {
+ data = data[offset:]
+
+ nb := 0
+
+ // count the number of backticks in the delimiter
+ for nb < len(data) && data[nb] == '`' {
+ nb++
+ }
+
+ // find the next delimiter
+ i, end := 0, 0
+ for end = nb; end < len(data) && i < nb; end++ {
+ if data[end] == '`' {
+ i++
+ } else {
+ i = 0
+ }
+ }
+
+ // no matching delimiter?
+ if i < nb && end >= len(data) {
+ return 0
+ }
+
+ // trim outside whitespace
+ fBegin := nb
+ for fBegin < end && data[fBegin] == ' ' {
+ fBegin++
+ }
+
+ fEnd := end - nb
+ for fEnd > fBegin && data[fEnd-1] == ' ' {
+ fEnd--
+ }
+
+ // render the code span
+ if fBegin != fEnd {
+ p.r.CodeSpan(out, data[fBegin:fEnd])
+ }
+
+ return end
+}
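+
+// An illustrative trace: for the input "`` a ` b ``" the delimiter is two
+// backticks, so the span ends at the next run of two backticks; the single
+// backtick inside stays literal, the surrounding spaces are trimmed, and
+// CodeSpan is called with the bytes a ` b.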
+
+// newline preceded by two spaces becomes <br>
+// newline without two spaces works when EXTENSION_HARD_LINE_BREAK is enabled
+func lineBreak(p *parser, out *bytes.Buffer, data []byte, offset int) int {
+ // remove trailing spaces from out
+ outBytes := out.Bytes()
+ end := len(outBytes)
+ eol := end
+ for eol > 0 && outBytes[eol-1] == ' ' {
+ eol--
+ }
+ out.Truncate(eol)
+
+ precededByTwoSpaces := offset >= 2 && data[offset-2] == ' ' && data[offset-1] == ' '
+ precededByBackslash := offset >= 1 && data[offset-1] == '\\' // see http://spec.commonmark.org/0.18/#example-527
+ precededByBackslash = precededByBackslash && p.flags&EXTENSION_BACKSLASH_LINE_BREAK != 0
+
+ if p.flags&EXTENSION_JOIN_LINES != 0 {
+ return 1
+ }
+
+ // should there be a hard line break here?
+ if p.flags&EXTENSION_HARD_LINE_BREAK == 0 && !precededByTwoSpaces && !precededByBackslash {
+ return 0
+ }
+
+ if precededByBackslash && eol > 0 {
+ out.Truncate(eol - 1)
+ }
+ p.r.LineBreak(out)
+ return 1
+}
+
+type linkType int
+
+const (
+ linkNormal linkType = iota
+ linkImg
+ linkDeferredFootnote
+ linkInlineFootnote
+)
+
+func isReferenceStyleLink(data []byte, pos int, t linkType) bool {
+ if t == linkDeferredFootnote {
+ return false
+ }
+ return pos < len(data)-1 && data[pos] == '[' && data[pos+1] != '^'
+}
+
+// '[': parse a link or an image or a footnote
+func link(p *parser, out *bytes.Buffer, data []byte, offset int) int {
+	// no links allowed inside regular links, footnotes, and deferred footnotes
+ if p.insideLink && (offset > 0 && data[offset-1] == '[' || len(data)-1 > offset && data[offset+1] == '^') {
+ return 0
+ }
+
+ var t linkType
+ switch {
+ // special case: ![^text] == deferred footnote (that follows something with
+ // an exclamation point)
+ case p.flags&EXTENSION_FOOTNOTES != 0 && len(data)-1 > offset && data[offset+1] == '^':
+ t = linkDeferredFootnote
+ // ![alt] == image
+ case offset > 0 && data[offset-1] == '!':
+ t = linkImg
+ // ^[text] == inline footnote
+ // [^refId] == deferred footnote
+ case p.flags&EXTENSION_FOOTNOTES != 0:
+ if offset > 0 && data[offset-1] == '^' {
+ t = linkInlineFootnote
+ } else if len(data)-1 > offset && data[offset+1] == '^' {
+ t = linkDeferredFootnote
+ }
+ // [text] == regular link
+ default:
+ t = linkNormal
+ }
+
+ data = data[offset:]
+
+ var (
+ i = 1
+ noteId int
+ title, link, altContent []byte
+ textHasNl = false
+ )
+
+ if t == linkDeferredFootnote {
+ i++
+ }
+
+ brace := 0
+
+ // look for the matching closing bracket
+ for level := 1; level > 0 && i < len(data); i++ {
+ switch {
+ case data[i] == '\n':
+ textHasNl = true
+
+ case data[i-1] == '\\':
+ continue
+
+ case data[i] == '[':
+ level++
+
+ case data[i] == ']':
+ level--
+ if level <= 0 {
+ i-- // compensate for extra i++ in for loop
+ }
+ }
+ }
+
+ if i >= len(data) {
+ return 0
+ }
+
+ txtE := i
+ i++
+
+ // skip any amount of whitespace or newline
+ // (this is much more lax than original markdown syntax)
+ for i < len(data) && isspace(data[i]) {
+ i++
+ }
+
+ switch {
+ // inline style link
+ case i < len(data) && data[i] == '(':
+ // skip initial whitespace
+ i++
+
+ for i < len(data) && isspace(data[i]) {
+ i++
+ }
+
+ linkB := i
+
+		// look for the link end: ' " ). Check for new opening braces and take them
+		// into account; this may lead to overshooting and will probably require
+		// some fine-tuning.
+ findlinkend:
+ for i < len(data) {
+ switch {
+ case data[i] == '\\':
+ i += 2
+
+ case data[i] == '(':
+ brace++
+ i++
+
+ case data[i] == ')':
+ if brace <= 0 {
+ break findlinkend
+ }
+ brace--
+ i++
+
+ case data[i] == '\'' || data[i] == '"':
+ break findlinkend
+
+ default:
+ i++
+ }
+ }
+
+ if i >= len(data) {
+ return 0
+ }
+ linkE := i
+
+ // look for title end if present
+ titleB, titleE := 0, 0
+ if data[i] == '\'' || data[i] == '"' {
+ i++
+ titleB = i
+
+ findtitleend:
+ for i < len(data) {
+ switch {
+ case data[i] == '\\':
+ i += 2
+
+ case data[i] == ')':
+ break findtitleend
+
+ default:
+ i++
+ }
+ }
+
+ if i >= len(data) {
+ return 0
+ }
+
+ // skip whitespace after title
+ titleE = i - 1
+ for titleE > titleB && isspace(data[titleE]) {
+ titleE--
+ }
+
+ // check for closing quote presence
+ if data[titleE] != '\'' && data[titleE] != '"' {
+ titleB, titleE = 0, 0
+ linkE = i
+ }
+ }
+
+ // remove whitespace at the end of the link
+ for linkE > linkB && isspace(data[linkE-1]) {
+ linkE--
+ }
+
+ // remove optional angle brackets around the link
+ if data[linkB] == '<' {
+ linkB++
+ }
+ if data[linkE-1] == '>' {
+ linkE--
+ }
+
+ // build escaped link and title
+ if linkE > linkB {
+ link = data[linkB:linkE]
+ }
+
+ if titleE > titleB {
+ title = data[titleB:titleE]
+ }
+
+ i++
+
+ // reference style link
+ case isReferenceStyleLink(data, i, t):
+ var id []byte
+ altContentConsidered := false
+
+ // look for the id
+ i++
+ linkB := i
+ for i < len(data) && data[i] != ']' {
+ i++
+ }
+ if i >= len(data) {
+ return 0
+ }
+ linkE := i
+
+ // find the reference
+ if linkB == linkE {
+ if textHasNl {
+ var b bytes.Buffer
+
+ for j := 1; j < txtE; j++ {
+ switch {
+ case data[j] != '\n':
+ b.WriteByte(data[j])
+ case data[j-1] != ' ':
+ b.WriteByte(' ')
+ }
+ }
+
+ id = b.Bytes()
+ } else {
+ id = data[1:txtE]
+ altContentConsidered = true
+ }
+ } else {
+ id = data[linkB:linkE]
+ }
+
+ // find the reference with matching id
+ lr, ok := p.getRef(string(id))
+ if !ok {
+ return 0
+ }
+
+ // keep link and title from reference
+ link = lr.link
+ title = lr.title
+ if altContentConsidered {
+ altContent = lr.text
+ }
+ i++
+
+	// shortcut reference style link, or a deferred or inline footnote
+ default:
+ var id []byte
+
+ // craft the id
+ if textHasNl {
+ var b bytes.Buffer
+
+ for j := 1; j < txtE; j++ {
+ switch {
+ case data[j] != '\n':
+ b.WriteByte(data[j])
+ case data[j-1] != ' ':
+ b.WriteByte(' ')
+ }
+ }
+
+ id = b.Bytes()
+ } else {
+ if t == linkDeferredFootnote {
+ id = data[2:txtE] // get rid of the ^
+ } else {
+ id = data[1:txtE]
+ }
+ }
+
+ if t == linkInlineFootnote {
+ // create a new reference
+ noteId = len(p.notes) + 1
+
+ var fragment []byte
+ if len(id) > 0 {
+ if len(id) < 16 {
+ fragment = make([]byte, len(id))
+ } else {
+ fragment = make([]byte, 16)
+ }
+ copy(fragment, slugify(id))
+ } else {
+ fragment = append([]byte("footnote-"), []byte(strconv.Itoa(noteId))...)
+ }
+
+ ref := &reference{
+ noteId: noteId,
+ hasBlock: false,
+ link: fragment,
+ title: id,
+ }
+
+ p.notes = append(p.notes, ref)
+ p.notesRecord[string(ref.link)] = struct{}{}
+
+ link = ref.link
+ title = ref.title
+ } else {
+ // find the reference with matching id
+ lr, ok := p.getRef(string(id))
+ if !ok {
+ return 0
+ }
+
+ if t == linkDeferredFootnote && !p.isFootnote(lr) {
+ lr.noteId = len(p.notes) + 1
+ p.notes = append(p.notes, lr)
+ p.notesRecord[string(lr.link)] = struct{}{}
+ }
+
+ // keep link and title from reference
+ link = lr.link
+ // if inline footnote, title == footnote contents
+ title = lr.title
+ noteId = lr.noteId
+ }
+
+ // rewind the whitespace
+ i = txtE + 1
+ }
+
+ // build content: img alt is escaped, link content is parsed
+ var content bytes.Buffer
+ if txtE > 1 {
+ if t == linkImg {
+ content.Write(data[1:txtE])
+ } else {
+ // links cannot contain other links, so turn off link parsing temporarily
+ insideLink := p.insideLink
+ p.insideLink = true
+ p.inline(&content, data[1:txtE])
+ p.insideLink = insideLink
+ }
+ }
+
+ var uLink []byte
+ if t == linkNormal || t == linkImg {
+ if len(link) > 0 {
+ var uLinkBuf bytes.Buffer
+ unescapeText(&uLinkBuf, link)
+ uLink = uLinkBuf.Bytes()
+ }
+
+ // links need something to click on and somewhere to go
+ if len(uLink) == 0 || (t == linkNormal && content.Len() == 0) {
+ return 0
+ }
+ }
+
+ // call the relevant rendering function
+ switch t {
+ case linkNormal:
+ if len(altContent) > 0 {
+ p.r.Link(out, uLink, title, altContent)
+ } else {
+ p.r.Link(out, uLink, title, content.Bytes())
+ }
+
+ case linkImg:
+ outSize := out.Len()
+ outBytes := out.Bytes()
+ if outSize > 0 && outBytes[outSize-1] == '!' {
+ out.Truncate(outSize - 1)
+ }
+
+ p.r.Image(out, uLink, title, content.Bytes())
+
+ case linkInlineFootnote:
+ outSize := out.Len()
+ outBytes := out.Bytes()
+ if outSize > 0 && outBytes[outSize-1] == '^' {
+ out.Truncate(outSize - 1)
+ }
+
+ p.r.FootnoteRef(out, link, noteId)
+
+ case linkDeferredFootnote:
+ p.r.FootnoteRef(out, link, noteId)
+
+ default:
+ return 0
+ }
+
+ return i
+}
+
+func (p *parser) inlineHTMLComment(out *bytes.Buffer, data []byte) int {
+ if len(data) < 5 {
+ return 0
+ }
+ if data[0] != '<' || data[1] != '!' || data[2] != '-' || data[3] != '-' {
+ return 0
+ }
+ i := 5
+ // scan for an end-of-comment marker, across lines if necessary
+ for i < len(data) && !(data[i-2] == '-' && data[i-1] == '-' && data[i] == '>') {
+ i++
+ }
+ // no end-of-comment marker
+ if i >= len(data) {
+ return 0
+ }
+ return i + 1
+}
+
+// '<' when tags or autolinks are allowed
+func leftAngle(p *parser, out *bytes.Buffer, data []byte, offset int) int {
+ data = data[offset:]
+ altype := LINK_TYPE_NOT_AUTOLINK
+ end := tagLength(data, &altype)
+ if size := p.inlineHTMLComment(out, data); size > 0 {
+ end = size
+ }
+ if end > 2 {
+ if altype != LINK_TYPE_NOT_AUTOLINK {
+ var uLink bytes.Buffer
+ unescapeText(&uLink, data[1:end+1-2])
+ if uLink.Len() > 0 {
+ p.r.AutoLink(out, uLink.Bytes(), altype)
+ }
+ } else {
+ p.r.RawHtmlTag(out, data[:end])
+ }
+ }
+
+ return end
+}
+
+// '\\' backslash escape
+var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~")
+
+func escape(p *parser, out *bytes.Buffer, data []byte, offset int) int {
+ data = data[offset:]
+
+ if len(data) > 1 {
+ if bytes.IndexByte(escapeChars, data[1]) < 0 {
+ return 0
+ }
+
+ p.r.NormalText(out, data[1:2])
+ }
+
+ return 2
+}
+
+func unescapeText(ob *bytes.Buffer, src []byte) {
+ i := 0
+ for i < len(src) {
+ org := i
+ for i < len(src) && src[i] != '\\' {
+ i++
+ }
+
+ if i > org {
+ ob.Write(src[org:i])
+ }
+
+ if i+1 >= len(src) {
+ break
+ }
+
+ ob.WriteByte(src[i+1])
+ i += 2
+ }
+}
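+
+// For example (illustrative), unescapeText copies a\*b through as a*b:
+// each backslash is dropped and the byte after it is copied verbatim.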
+
+// '&' escaped when it doesn't belong to an entity
+// valid entities are assumed to be anything matching &#?[A-Za-z0-9]+;
+func entity(p *parser, out *bytes.Buffer, data []byte, offset int) int {
+ data = data[offset:]
+
+ end := 1
+
+ if end < len(data) && data[end] == '#' {
+ end++
+ }
+
+ for end < len(data) && isalnum(data[end]) {
+ end++
+ }
+
+ if end < len(data) && data[end] == ';' {
+ end++ // real entity
+ } else {
+ return 0 // lone '&'
+ }
+
+ p.r.Entity(out, data[:end])
+
+ return end
+}
+
+func linkEndsWithEntity(data []byte, linkEnd int) bool {
+ entityRanges := htmlEntity.FindAllIndex(data[:linkEnd], -1)
+ return entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd
+}
+
+func autoLink(p *parser, out *bytes.Buffer, data []byte, offset int) int {
+ // quick check to rule out most false hits on ':'
+ if p.insideLink || len(data) < offset+3 || data[offset+1] != '/' || data[offset+2] != '/' {
+ return 0
+ }
+
+	// Now a more expensive check to make sure we're not inside an anchor element
+ anchorStart := offset
+ offsetFromAnchor := 0
+ for anchorStart > 0 && data[anchorStart] != '<' {
+ anchorStart--
+ offsetFromAnchor++
+ }
+
+ anchorStr := anchorRe.Find(data[anchorStart:])
+ if anchorStr != nil {
+ out.Write(anchorStr[offsetFromAnchor:])
+ return len(anchorStr) - offsetFromAnchor
+ }
+
+ // scan backward for a word boundary
+ rewind := 0
+ for offset-rewind > 0 && rewind <= 7 && isletter(data[offset-rewind-1]) {
+ rewind++
+ }
+ if rewind > 6 { // longest supported protocol is "mailto" which has 6 letters
+ return 0
+ }
+
+ origData := data
+ data = data[offset-rewind:]
+
+ if !isSafeLink(data) {
+ return 0
+ }
+
+ linkEnd := 0
+ for linkEnd < len(data) && !isEndOfLink(data[linkEnd]) {
+ linkEnd++
+ }
+
+ // Skip punctuation at the end of the link
+ if (data[linkEnd-1] == '.' || data[linkEnd-1] == ',') && data[linkEnd-2] != '\\' {
+ linkEnd--
+ }
+
+	// But don't skip a semicolon if it's part of an escaped entity:
+ if data[linkEnd-1] == ';' && data[linkEnd-2] != '\\' && !linkEndsWithEntity(data, linkEnd) {
+ linkEnd--
+ }
+
+ // See if the link finishes with a punctuation sign that can be closed.
+ var copen byte
+ switch data[linkEnd-1] {
+ case '"':
+ copen = '"'
+ case '\'':
+ copen = '\''
+ case ')':
+ copen = '('
+ case ']':
+ copen = '['
+ case '}':
+ copen = '{'
+ default:
+ copen = 0
+ }
+
+ if copen != 0 {
+ bufEnd := offset - rewind + linkEnd - 2
+
+ openDelim := 1
+
+ /* Try to close the final punctuation sign in this same line;
+ * if we managed to close it outside of the URL, that means that it's
+ * not part of the URL. If it closes inside the URL, that means it
+ * is part of the URL.
+ *
+ * Examples:
+ *
+ * foo http://www.pokemon.com/Pikachu_(Electric) bar
+ * => http://www.pokemon.com/Pikachu_(Electric)
+ *
+ * foo (http://www.pokemon.com/Pikachu_(Electric)) bar
+ * => http://www.pokemon.com/Pikachu_(Electric)
+ *
+ * foo http://www.pokemon.com/Pikachu_(Electric)) bar
+ * => http://www.pokemon.com/Pikachu_(Electric))
+ *
+ * (foo http://www.pokemon.com/Pikachu_(Electric)) bar
+ * => foo http://www.pokemon.com/Pikachu_(Electric)
+ */
+
+ for bufEnd >= 0 && origData[bufEnd] != '\n' && openDelim != 0 {
+ if origData[bufEnd] == data[linkEnd-1] {
+ openDelim++
+ }
+
+ if origData[bufEnd] == copen {
+ openDelim--
+ }
+
+ bufEnd--
+ }
+
+ if openDelim == 0 {
+ linkEnd--
+ }
+ }
+
+ // we were triggered on the ':', so we need to rewind the output a bit
+ if out.Len() >= rewind {
+ out.Truncate(len(out.Bytes()) - rewind)
+ }
+
+ var uLink bytes.Buffer
+ unescapeText(&uLink, data[:linkEnd])
+
+ if uLink.Len() > 0 {
+ p.r.AutoLink(out, uLink.Bytes(), LINK_TYPE_NORMAL)
+ }
+
+ return linkEnd - rewind
+}
+
+func isEndOfLink(char byte) bool {
+ return isspace(char) || char == '<'
+}
+
+var validUris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto://")}
+var validPaths = [][]byte{[]byte("/"), []byte("./"), []byte("../")}
+
+func isSafeLink(link []byte) bool {
+ for _, path := range validPaths {
+ if len(link) >= len(path) && bytes.Equal(link[:len(path)], path) {
+ if len(link) == len(path) {
+ return true
+ } else if isalnum(link[len(path)]) {
+ return true
+ }
+ }
+ }
+
+ for _, prefix := range validUris {
+ // TODO: handle unicode here
+ // case-insensitive prefix test
+ if len(link) > len(prefix) && bytes.Equal(bytes.ToLower(link[:len(prefix)]), prefix) && isalnum(link[len(prefix)]) {
+ return true
+ }
+ }
+
+ return false
+}
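+
+// Illustrative cases: isSafeLink accepts "https://example.com" and
+// "./notes.md" but rejects "javascript:alert(1)", since only the listed
+// URI schemes and relative path prefixes are trusted.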
+
+// return the length of the given tag, or 0 if it's not valid
+func tagLength(data []byte, autolink *int) int {
+ var i, j int
+
+ // a valid tag can't be shorter than 3 chars
+ if len(data) < 3 {
+ return 0
+ }
+
+ // begins with a '<' optionally followed by '/', followed by letter or number
+ if data[0] != '<' {
+ return 0
+ }
+ if data[1] == '/' {
+ i = 2
+ } else {
+ i = 1
+ }
+
+ if !isalnum(data[i]) {
+ return 0
+ }
+
+ // scheme test
+ *autolink = LINK_TYPE_NOT_AUTOLINK
+
+	// try to find the beginning of a URI
+ for i < len(data) && (isalnum(data[i]) || data[i] == '.' || data[i] == '+' || data[i] == '-') {
+ i++
+ }
+
+ if i > 1 && i < len(data) && data[i] == '@' {
+ if j = isMailtoAutoLink(data[i:]); j != 0 {
+ *autolink = LINK_TYPE_EMAIL
+ return i + j
+ }
+ }
+
+ if i > 2 && i < len(data) && data[i] == ':' {
+ *autolink = LINK_TYPE_NORMAL
+ i++
+ }
+
+ // complete autolink test: no whitespace or ' or "
+ switch {
+ case i >= len(data):
+ *autolink = LINK_TYPE_NOT_AUTOLINK
+ case *autolink != 0:
+ j = i
+
+ for i < len(data) {
+ if data[i] == '\\' {
+ i += 2
+ } else if data[i] == '>' || data[i] == '\'' || data[i] == '"' || isspace(data[i]) {
+ break
+ } else {
+ i++
+ }
+
+ }
+
+ if i >= len(data) {
+ return 0
+ }
+ if i > j && data[i] == '>' {
+ return i + 1
+ }
+
+ // one of the forbidden chars has been found
+ *autolink = LINK_TYPE_NOT_AUTOLINK
+ }
+
+ // look for something looking like a tag end
+ for i < len(data) && data[i] != '>' {
+ i++
+ }
+ if i >= len(data) {
+ return 0
+ }
+ return i + 1
+}
+
+// look for the address part of a mail autolink and '>'
+// this is less strict than the original markdown e-mail address matching
+func isMailtoAutoLink(data []byte) int {
+ nb := 0
+
+ // address is assumed to be: [-@._a-zA-Z0-9]+ with exactly one '@'
+ for i := 0; i < len(data); i++ {
+ if isalnum(data[i]) {
+ continue
+ }
+
+ switch data[i] {
+ case '@':
+ nb++
+
+ case '-', '.', '_':
+ // Do nothing.
+
+ case '>':
+ if nb == 1 {
+ return i + 1
+ } else {
+ return 0
+ }
+ default:
+ return 0
+ }
+ }
+
+ return 0
+}
+
+// look for the next emph char, skipping other constructs
+func helperFindEmphChar(data []byte, c byte) int {
+ i := 0
+
+ for i < len(data) {
+ for i < len(data) && data[i] != c && data[i] != '`' && data[i] != '[' {
+ i++
+ }
+ if i >= len(data) {
+ return 0
+ }
+ // do not count escaped chars
+ if i != 0 && data[i-1] == '\\' {
+ i++
+ continue
+ }
+ if data[i] == c {
+ return i
+ }
+
+ if data[i] == '`' {
+ // skip a code span
+ tmpI := 0
+ i++
+ for i < len(data) && data[i] != '`' {
+ if tmpI == 0 && data[i] == c {
+ tmpI = i
+ }
+ i++
+ }
+ if i >= len(data) {
+ return tmpI
+ }
+ i++
+ } else if data[i] == '[' {
+ // skip a link
+ tmpI := 0
+ i++
+ for i < len(data) && data[i] != ']' {
+ if tmpI == 0 && data[i] == c {
+ tmpI = i
+ }
+ i++
+ }
+ i++
+ for i < len(data) && (data[i] == ' ' || data[i] == '\n') {
+ i++
+ }
+ if i >= len(data) {
+ return tmpI
+ }
+ if data[i] != '[' && data[i] != '(' { // not a link
+ if tmpI > 0 {
+ return tmpI
+ } else {
+ continue
+ }
+ }
+ cc := data[i]
+ i++
+ for i < len(data) && data[i] != cc {
+ if tmpI == 0 && data[i] == c {
+ return i
+ }
+ i++
+ }
+ if i >= len(data) {
+ return tmpI
+ }
+ i++
+ }
+ }
+ return 0
+}
+
+func helperEmphasis(p *parser, out *bytes.Buffer, data []byte, c byte) int {
+ i := 0
+
+ // skip one symbol if coming from emph3
+ if len(data) > 1 && data[0] == c && data[1] == c {
+ i = 1
+ }
+
+ for i < len(data) {
+ length := helperFindEmphChar(data[i:], c)
+ if length == 0 {
+ return 0
+ }
+ i += length
+ if i >= len(data) {
+ return 0
+ }
+
+ if i+1 < len(data) && data[i+1] == c {
+ i++
+ continue
+ }
+
+ if data[i] == c && !isspace(data[i-1]) {
+
+ if p.flags&EXTENSION_NO_INTRA_EMPHASIS != 0 {
+ if !(i+1 == len(data) || isspace(data[i+1]) || ispunct(data[i+1])) {
+ continue
+ }
+ }
+
+ var work bytes.Buffer
+ p.inline(&work, data[:i])
+ p.r.Emphasis(out, work.Bytes())
+ return i + 1
+ }
+ }
+
+ return 0
+}
+
+func helperDoubleEmphasis(p *parser, out *bytes.Buffer, data []byte, c byte) int {
+ i := 0
+
+ for i < len(data) {
+ length := helperFindEmphChar(data[i:], c)
+ if length == 0 {
+ return 0
+ }
+ i += length
+
+ if i+1 < len(data) && data[i] == c && data[i+1] == c && i > 0 && !isspace(data[i-1]) {
+ var work bytes.Buffer
+ p.inline(&work, data[:i])
+
+ if work.Len() > 0 {
+ // pick the right renderer
+ if c == '~' {
+ p.r.StrikeThrough(out, work.Bytes())
+ } else {
+ p.r.DoubleEmphasis(out, work.Bytes())
+ }
+ }
+ return i + 2
+ }
+ i++
+ }
+ return 0
+}
+
+func helperTripleEmphasis(p *parser, out *bytes.Buffer, data []byte, offset int, c byte) int {
+ i := 0
+ origData := data
+ data = data[offset:]
+
+ for i < len(data) {
+ length := helperFindEmphChar(data[i:], c)
+ if length == 0 {
+ return 0
+ }
+ i += length
+
+		// skip delimiters preceded by whitespace
+ if data[i] != c || isspace(data[i-1]) {
+ continue
+ }
+
+ switch {
+ case i+2 < len(data) && data[i+1] == c && data[i+2] == c:
+ // triple symbol found
+ var work bytes.Buffer
+
+ p.inline(&work, data[:i])
+ if work.Len() > 0 {
+ p.r.TripleEmphasis(out, work.Bytes())
+ }
+ return i + 3
+ case (i+1 < len(data) && data[i+1] == c):
+ // double symbol found, hand over to emph1
+ length = helperEmphasis(p, out, origData[offset-2:], c)
+ if length == 0 {
+ return 0
+ } else {
+ return length - 2
+ }
+ default:
+ // single symbol found, hand over to emph2
+ length = helperDoubleEmphasis(p, out, origData[offset-1:], c)
+ if length == 0 {
+ return 0
+ } else {
+ return length - 1
+ }
+ }
+ }
+ return 0
+}
diff --git a/vendor/github.com/russross/blackfriday/latex.go b/vendor/github.com/russross/blackfriday/latex.go
new file mode 100644
index 00000000..70705aa9
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/latex.go
@@ -0,0 +1,332 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross <russ@russross.com>.
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+//
+// LaTeX rendering backend
+//
+//
+
+package blackfriday
+
+import (
+ "bytes"
+)
+
+// Latex is a type that implements the Renderer interface for LaTeX output.
+//
+// Do not create this directly; instead use the LatexRenderer function.
+type Latex struct {
+}
+
+// LatexRenderer creates and configures a Latex object, which
+// satisfies the Renderer interface.
+//
+// flags is a set of LATEX_* options ORed together (currently no such options
+// are defined).
+func LatexRenderer(flags int) Renderer {
+ return &Latex{}
+}
+
+func (options *Latex) GetFlags() int {
+ return 0
+}
+
+// render code chunks using verbatim, or listings if we have a language
+func (options *Latex) BlockCode(out *bytes.Buffer, text []byte, lang string) {
+ if lang == "" {
+ out.WriteString("\n\\begin{verbatim}\n")
+ } else {
+ out.WriteString("\n\\begin{lstlisting}[language=")
+ out.WriteString(lang)
+ out.WriteString("]\n")
+ }
+ out.Write(text)
+ if lang == "" {
+ out.WriteString("\n\\end{verbatim}\n")
+ } else {
+ out.WriteString("\n\\end{lstlisting}\n")
+ }
+}
+
+func (options *Latex) TitleBlock(out *bytes.Buffer, text []byte) {
+
+}
+
+func (options *Latex) BlockQuote(out *bytes.Buffer, text []byte) {
+ out.WriteString("\n\\begin{quotation}\n")
+ out.Write(text)
+ out.WriteString("\n\\end{quotation}\n")
+}
+
+func (options *Latex) BlockHtml(out *bytes.Buffer, text []byte) {
+ // a pretty lame thing to do...
+ out.WriteString("\n\\begin{verbatim}\n")
+ out.Write(text)
+ out.WriteString("\n\\end{verbatim}\n")
+}
+
+func (options *Latex) Header(out *bytes.Buffer, text func() bool, level int, id string) {
+ marker := out.Len()
+
+ switch level {
+ case 1:
+ out.WriteString("\n\\section{")
+ case 2:
+ out.WriteString("\n\\subsection{")
+ case 3:
+ out.WriteString("\n\\subsubsection{")
+ case 4:
+ out.WriteString("\n\\paragraph{")
+ case 5:
+ out.WriteString("\n\\subparagraph{")
+ case 6:
+ out.WriteString("\n\\textbf{")
+ }
+ if !text() {
+ out.Truncate(marker)
+ return
+ }
+ out.WriteString("}\n")
+}
+
+func (options *Latex) HRule(out *bytes.Buffer) {
+ out.WriteString("\n\\HRule\n")
+}
+
+func (options *Latex) List(out *bytes.Buffer, text func() bool, flags int) {
+ marker := out.Len()
+ if flags&LIST_TYPE_ORDERED != 0 {
+ out.WriteString("\n\\begin{enumerate}\n")
+ } else {
+ out.WriteString("\n\\begin{itemize}\n")
+ }
+ if !text() {
+ out.Truncate(marker)
+ return
+ }
+ if flags&LIST_TYPE_ORDERED != 0 {
+ out.WriteString("\n\\end{enumerate}\n")
+ } else {
+ out.WriteString("\n\\end{itemize}\n")
+ }
+}
+
+func (options *Latex) ListItem(out *bytes.Buffer, text []byte, flags int) {
+ out.WriteString("\n\\item ")
+ out.Write(text)
+}
+
+func (options *Latex) Paragraph(out *bytes.Buffer, text func() bool) {
+ marker := out.Len()
+ out.WriteString("\n")
+ if !text() {
+ out.Truncate(marker)
+ return
+ }
+ out.WriteString("\n")
+}
+
+func (options *Latex) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) {
+ out.WriteString("\n\\begin{tabular}{")
+ for _, elt := range columnData {
+ switch elt {
+ case TABLE_ALIGNMENT_LEFT:
+ out.WriteByte('l')
+ case TABLE_ALIGNMENT_RIGHT:
+ out.WriteByte('r')
+ default:
+ out.WriteByte('c')
+ }
+ }
+ out.WriteString("}\n")
+ out.Write(header)
+ out.WriteString(" \\\\\n\\hline\n")
+ out.Write(body)
+ out.WriteString("\n\\end{tabular}\n")
+}
+
+func (options *Latex) TableRow(out *bytes.Buffer, text []byte) {
+ if out.Len() > 0 {
+ out.WriteString(" \\\\\n")
+ }
+ out.Write(text)
+}
+
+func (options *Latex) TableHeaderCell(out *bytes.Buffer, text []byte, align int) {
+ if out.Len() > 0 {
+ out.WriteString(" & ")
+ }
+ out.Write(text)
+}
+
+func (options *Latex) TableCell(out *bytes.Buffer, text []byte, align int) {
+ if out.Len() > 0 {
+ out.WriteString(" & ")
+ }
+ out.Write(text)
+}
+
+// TODO: this
+func (options *Latex) Footnotes(out *bytes.Buffer, text func() bool) {
+
+}
+
+func (options *Latex) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) {
+
+}
+
+func (options *Latex) AutoLink(out *bytes.Buffer, link []byte, kind int) {
+ out.WriteString("\\href{")
+ if kind == LINK_TYPE_EMAIL {
+ out.WriteString("mailto:")
+ }
+ out.Write(link)
+ out.WriteString("}{")
+ out.Write(link)
+ out.WriteString("}")
+}
+
+func (options *Latex) CodeSpan(out *bytes.Buffer, text []byte) {
+ out.WriteString("\\texttt{")
+ escapeSpecialChars(out, text)
+ out.WriteString("}")
+}
+
+func (options *Latex) DoubleEmphasis(out *bytes.Buffer, text []byte) {
+ out.WriteString("\\textbf{")
+ out.Write(text)
+ out.WriteString("}")
+}
+
+func (options *Latex) Emphasis(out *bytes.Buffer, text []byte) {
+ out.WriteString("\\textit{")
+ out.Write(text)
+ out.WriteString("}")
+}
+
+func (options *Latex) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) {
+ if bytes.HasPrefix(link, []byte("http://")) || bytes.HasPrefix(link, []byte("https://")) {
+ // treat it like a link
+ out.WriteString("\\href{")
+ out.Write(link)
+ out.WriteString("}{")
+ out.Write(alt)
+ out.WriteString("}")
+ } else {
+ out.WriteString("\\includegraphics{")
+ out.Write(link)
+ out.WriteString("}")
+ }
+}
+
+func (options *Latex) LineBreak(out *bytes.Buffer) {
+ out.WriteString(" \\\\\n")
+}
+
+func (options *Latex) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) {
+ out.WriteString("\\href{")
+ out.Write(link)
+ out.WriteString("}{")
+ out.Write(content)
+ out.WriteString("}")
+}
+
+func (options *Latex) RawHtmlTag(out *bytes.Buffer, tag []byte) {
+}
+
+func (options *Latex) TripleEmphasis(out *bytes.Buffer, text []byte) {
+ out.WriteString("\\textbf{\\textit{")
+ out.Write(text)
+ out.WriteString("}}")
+}
+
+func (options *Latex) StrikeThrough(out *bytes.Buffer, text []byte) {
+ out.WriteString("\\sout{")
+ out.Write(text)
+ out.WriteString("}")
+}
+
+// TODO: this
+func (options *Latex) FootnoteRef(out *bytes.Buffer, ref []byte, id int) {
+
+}
+
+func needsBackslash(c byte) bool {
+ for _, r := range []byte("_{}%$&\\~#") {
+ if c == r {
+ return true
+ }
+ }
+ return false
+}
+
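+// escapeSpecialChars copies text to out, backslash-escaping the LaTeX special
+// characters matched by needsBackslash; e.g. "50% of $x" becomes "50\% of \$x".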
+func escapeSpecialChars(out *bytes.Buffer, text []byte) {
+ for i := 0; i < len(text); i++ {
+ // directly copy normal characters
+ org := i
+
+ for i < len(text) && !needsBackslash(text[i]) {
+ i++
+ }
+ if i > org {
+ out.Write(text[org:i])
+ }
+
+ // escape a character
+ if i >= len(text) {
+ break
+ }
+ out.WriteByte('\\')
+ out.WriteByte(text[i])
+ }
+}
+
+func (options *Latex) Entity(out *bytes.Buffer, entity []byte) {
+ // TODO: convert this into a unicode character or something
+ out.Write(entity)
+}
+
+func (options *Latex) NormalText(out *bytes.Buffer, text []byte) {
+ escapeSpecialChars(out, text)
+}
+
+// header and footer
+func (options *Latex) DocumentHeader(out *bytes.Buffer) {
+ out.WriteString("\\documentclass{article}\n")
+ out.WriteString("\n")
+ out.WriteString("\\usepackage{graphicx}\n")
+ out.WriteString("\\usepackage{listings}\n")
+ out.WriteString("\\usepackage[margin=1in]{geometry}\n")
+ out.WriteString("\\usepackage[utf8]{inputenc}\n")
+ out.WriteString("\\usepackage{verbatim}\n")
+ out.WriteString("\\usepackage[normalem]{ulem}\n")
+ out.WriteString("\\usepackage{hyperref}\n")
+ out.WriteString("\n")
+ out.WriteString("\\hypersetup{colorlinks,%\n")
+ out.WriteString(" citecolor=black,%\n")
+ out.WriteString(" filecolor=black,%\n")
+ out.WriteString(" linkcolor=black,%\n")
+ out.WriteString(" urlcolor=black,%\n")
+ out.WriteString(" pdfstartview=FitH,%\n")
+ out.WriteString(" breaklinks=true,%\n")
+ out.WriteString(" pdfauthor={Blackfriday Markdown Processor v")
+ out.WriteString(VERSION)
+ out.WriteString("}}\n")
+ out.WriteString("\n")
+ out.WriteString("\\newcommand{\\HRule}{\\rule{\\linewidth}{0.5mm}}\n")
+ out.WriteString("\\addtolength{\\parskip}{0.5\\baselineskip}\n")
+ out.WriteString("\\parindent=0pt\n")
+ out.WriteString("\n")
+ out.WriteString("\\begin{document}\n")
+}
+
+func (options *Latex) DocumentFooter(out *bytes.Buffer) {
+ out.WriteString("\n\\end{document}\n")
+}
diff --git a/vendor/github.com/russross/blackfriday/markdown.go b/vendor/github.com/russross/blackfriday/markdown.go
new file mode 100644
index 00000000..1722a738
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/markdown.go
@@ -0,0 +1,931 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross <russ@russross.com>.
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+//
+// Markdown parsing and processing
+//
+//
+
+package blackfriday
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+ "unicode/utf8"
+)
+
+const VERSION = "1.5"
+
+// These are the supported markdown parsing extensions.
+// OR these values together to select multiple extensions.
+const (
+ EXTENSION_NO_INTRA_EMPHASIS = 1 << iota // ignore emphasis markers inside words
+ EXTENSION_TABLES // render tables
+ EXTENSION_FENCED_CODE // render fenced code blocks
+ EXTENSION_AUTOLINK // detect embedded URLs that are not explicitly marked
+ EXTENSION_STRIKETHROUGH // strikethrough text using ~~text~~
+ EXTENSION_LAX_HTML_BLOCKS // loosen up HTML block parsing rules
+ EXTENSION_SPACE_HEADERS // be strict about prefix header rules
+ EXTENSION_HARD_LINE_BREAK // translate newlines into line breaks
+ EXTENSION_TAB_SIZE_EIGHT // expand tabs to eight spaces instead of four
+ EXTENSION_FOOTNOTES // Pandoc-style footnotes
+ EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block
+ EXTENSION_HEADER_IDS // specify header IDs with {#id}
+ EXTENSION_TITLEBLOCK // title block à la Pandoc
+ EXTENSION_AUTO_HEADER_IDS // Create the header ID from the text
+ EXTENSION_BACKSLASH_LINE_BREAK // translate trailing backslashes into line breaks
+ EXTENSION_DEFINITION_LISTS // render definition lists
+ EXTENSION_JOIN_LINES // delete newline and join lines
+
+ commonHtmlFlags = 0 |
+ HTML_USE_XHTML |
+ HTML_USE_SMARTYPANTS |
+ HTML_SMARTYPANTS_FRACTIONS |
+ HTML_SMARTYPANTS_DASHES |
+ HTML_SMARTYPANTS_LATEX_DASHES
+
+ commonExtensions = 0 |
+ EXTENSION_NO_INTRA_EMPHASIS |
+ EXTENSION_TABLES |
+ EXTENSION_FENCED_CODE |
+ EXTENSION_AUTOLINK |
+ EXTENSION_STRIKETHROUGH |
+ EXTENSION_SPACE_HEADERS |
+ EXTENSION_HEADER_IDS |
+ EXTENSION_BACKSLASH_LINE_BREAK |
+ EXTENSION_DEFINITION_LISTS
+)
+
+// These are the possible flag values for the link renderer.
+// Only a single one of these values will be used; they are not ORed together.
+// These are mostly of interest if you are writing a new output format.
+const (
+ LINK_TYPE_NOT_AUTOLINK = iota
+ LINK_TYPE_NORMAL
+ LINK_TYPE_EMAIL
+)
+
+// These are the possible flag values for the ListItem renderer.
+// Multiple flag values may be ORed together.
+// These are mostly of interest if you are writing a new output format.
+const (
+ LIST_TYPE_ORDERED = 1 << iota
+ LIST_TYPE_DEFINITION
+ LIST_TYPE_TERM
+ LIST_ITEM_CONTAINS_BLOCK
+ LIST_ITEM_BEGINNING_OF_LIST
+ LIST_ITEM_END_OF_LIST
+)
+
+// These are the possible flag values for the table cell renderer.
+// Only a single one of these values will be used; they are not ORed together.
+// These are mostly of interest if you are writing a new output format.
+const (
+ TABLE_ALIGNMENT_LEFT = 1 << iota
+ TABLE_ALIGNMENT_RIGHT
+ TABLE_ALIGNMENT_CENTER = (TABLE_ALIGNMENT_LEFT | TABLE_ALIGNMENT_RIGHT)
+)
+
+// The size of a tab stop.
+const (
+ TAB_SIZE_DEFAULT = 4
+ TAB_SIZE_EIGHT = 8
+)
+
+// blockTags is a set of tags that are recognized as HTML block tags.
+// Any of these can be included in markdown text without special escaping.
+var blockTags = map[string]struct{}{
+ "blockquote": {},
+ "del": {},
+ "div": {},
+ "dl": {},
+ "fieldset": {},
+ "form": {},
+ "h1": {},
+ "h2": {},
+ "h3": {},
+ "h4": {},
+ "h5": {},
+ "h6": {},
+ "iframe": {},
+ "ins": {},
+ "math": {},
+ "noscript": {},
+ "ol": {},
+ "pre": {},
+ "p": {},
+ "script": {},
+ "style": {},
+ "table": {},
+ "ul": {},
+
+ // HTML5
+ "address": {},
+ "article": {},
+ "aside": {},
+ "canvas": {},
+ "figcaption": {},
+ "figure": {},
+ "footer": {},
+ "header": {},
+ "hgroup": {},
+ "main": {},
+ "nav": {},
+ "output": {},
+ "progress": {},
+ "section": {},
+ "video": {},
+}
+
+// Renderer is the rendering interface.
+// This is mostly of interest if you are implementing a new rendering format.
+//
+// When a byte slice is provided, it contains the (rendered) contents of the
+// element.
+//
+// When a callback is provided instead, it will write the contents of the
+// respective element directly to the output buffer and return true on success.
+// If the callback returns false, the rendering function should reset the
+// output buffer as though it had never been called.
+//
+// Currently, Html and Latex implementations are provided.
+type Renderer interface {
+ // block-level callbacks
+ BlockCode(out *bytes.Buffer, text []byte, lang string)
+ BlockQuote(out *bytes.Buffer, text []byte)
+ BlockHtml(out *bytes.Buffer, text []byte)
+ Header(out *bytes.Buffer, text func() bool, level int, id string)
+ HRule(out *bytes.Buffer)
+ List(out *bytes.Buffer, text func() bool, flags int)
+ ListItem(out *bytes.Buffer, text []byte, flags int)
+ Paragraph(out *bytes.Buffer, text func() bool)
+ Table(out *bytes.Buffer, header []byte, body []byte, columnData []int)
+ TableRow(out *bytes.Buffer, text []byte)
+ TableHeaderCell(out *bytes.Buffer, text []byte, flags int)
+ TableCell(out *bytes.Buffer, text []byte, flags int)
+ Footnotes(out *bytes.Buffer, text func() bool)
+ FootnoteItem(out *bytes.Buffer, name, text []byte, flags int)
+ TitleBlock(out *bytes.Buffer, text []byte)
+
+ // Span-level callbacks
+ AutoLink(out *bytes.Buffer, link []byte, kind int)
+ CodeSpan(out *bytes.Buffer, text []byte)
+ DoubleEmphasis(out *bytes.Buffer, text []byte)
+ Emphasis(out *bytes.Buffer, text []byte)
+ Image(out *bytes.Buffer, link []byte, title []byte, alt []byte)
+ LineBreak(out *bytes.Buffer)
+ Link(out *bytes.Buffer, link []byte, title []byte, content []byte)
+ RawHtmlTag(out *bytes.Buffer, tag []byte)
+ TripleEmphasis(out *bytes.Buffer, text []byte)
+ StrikeThrough(out *bytes.Buffer, text []byte)
+ FootnoteRef(out *bytes.Buffer, ref []byte, id int)
+
+ // Low-level callbacks
+ Entity(out *bytes.Buffer, entity []byte)
+ NormalText(out *bytes.Buffer, text []byte)
+
+ // Header and footer
+ DocumentHeader(out *bytes.Buffer)
+ DocumentFooter(out *bytes.Buffer)
+
+ GetFlags() int
+}
+
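+// For example, a Paragraph implementation honoring that contract records a
+// marker, writes its opening markup, and truncates back to the marker when
+// the callback fails. A sketch, in a hypothetical renderer:
+//
+//	func (r *myRenderer) Paragraph(out *bytes.Buffer, text func() bool) {
+//		marker := out.Len()
+//		out.WriteString("<p>")
+//		if !text() {
+//			out.Truncate(marker)
+//			return
+//		}
+//		out.WriteString("</p>\n")
+//	}
+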
+// Callback functions for inline parsing. One such function is defined
+// for each character that triggers a response when parsing inline data.
+type inlineParser func(p *parser, out *bytes.Buffer, data []byte, offset int) int
+
+// Parser holds runtime state used by the parser.
+// This is constructed by the Markdown function.
+type parser struct {
+ r Renderer
+ refOverride ReferenceOverrideFunc
+ refs map[string]*reference
+ inlineCallback [256]inlineParser
+ flags int
+ nesting int
+ maxNesting int
+ insideLink bool
+
+ // Footnotes need to be ordered as well as available to quickly check for
+ // presence. If a ref is also a footnote, it's stored both in refs and here
+ // in notes. Slice is nil if footnotes not enabled.
+ notes []*reference
+ notesRecord map[string]struct{}
+}
+
+func (p *parser) getRef(refid string) (ref *reference, found bool) {
+ if p.refOverride != nil {
+ r, overridden := p.refOverride(refid)
+ if overridden {
+ if r == nil {
+ return nil, false
+ }
+ return &reference{
+ link: []byte(r.Link),
+ title: []byte(r.Title),
+ noteId: 0,
+ hasBlock: false,
+ text: []byte(r.Text)}, true
+ }
+ }
+ // refs are case insensitive
+ ref, found = p.refs[strings.ToLower(refid)]
+ return ref, found
+}
+
+func (p *parser) isFootnote(ref *reference) bool {
+ _, ok := p.notesRecord[string(ref.link)]
+ return ok
+}
+
+//
+//
+// Public interface
+//
+//
+
+// Reference represents the details of a link.
+// See the documentation in Options for more details on use-case.
+type Reference struct {
+ // Link is usually the URL the reference points to.
+ Link string
+ // Title is the alternate text describing the link in more detail.
+ Title string
+ // Text is the optional text to override the ref with if the syntax used was
+ // [refid][]
+ Text string
+}
+
+// ReferenceOverrideFunc is expected to be called with a reference string and
+// return either a valid Reference type that the reference string maps to or
+// nil. If overridden is false, the default reference logic will be executed.
+// See the documentation in Options for more details on use-case.
+type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool)
+
+// Options represents configurable overrides and callbacks (in addition to the
+// extension flag set) for configuring a Markdown parse.
+type Options struct {
+ // Extensions is a flag set of bit-wise ORed extension bits. See the
+ // EXTENSION_* flags defined in this package.
+ Extensions int
+
+ // ReferenceOverride is an optional function callback that is called every
+ // time a reference is resolved.
+ //
+ // In Markdown, the link reference syntax can be made to resolve a link to
+ // a reference instead of an inline URL, in one of the following ways:
+ //
+ // * [link text][refid]
+ // * [refid][]
+ //
+ // Usually, the refid is defined at the bottom of the Markdown document. If
+ // this override function is provided, the refid is passed to the override
+ // function first, before consulting the defined refids at the bottom. If
+ // the override function indicates an override did not occur, the refids at
+ // the bottom will be used to fill in the link details.
+ ReferenceOverride ReferenceOverrideFunc
+}
+
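+// For example, an Options value whose ReferenceOverride resolves a single
+// hypothetical refid could look like this (a sketch):
+//
+//	opts := Options{
+//		Extensions: commonExtensions,
+//		ReferenceOverride: func(ref string) (*Reference, bool) {
+//			if ref == "home" {
+//				return &Reference{Link: "https://example.org/", Title: "Home"}, true
+//			}
+//			return nil, false
+//		},
+//	}
+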
+// MarkdownBasic is a convenience function for simple rendering.
+// It processes markdown input with no extensions enabled.
+func MarkdownBasic(input []byte) []byte {
+ // set up the HTML renderer
+ htmlFlags := HTML_USE_XHTML
+ renderer := HtmlRenderer(htmlFlags, "", "")
+
+ // set up the parser
+ return MarkdownOptions(input, renderer, Options{Extensions: 0})
+}
+
+// MarkdownCommon is a convenience function for simple rendering.
+// It processes markdown input with common extensions enabled, including:
+// It processes markdown input with common extensions enabled, including:
+//
+// * Smartypants processing with smart fractions and LaTeX dashes
+//
+// * Intra-word emphasis suppression
+//
+// * Tables
+//
+// * Fenced code blocks
+//
+// * Autolinking
+//
+// * Strikethrough support
+//
+// * Strict header parsing
+//
+// * Custom Header IDs
+func MarkdownCommon(input []byte) []byte {
+ // set up the HTML renderer
+ renderer := HtmlRenderer(commonHtmlFlags, "", "")
+ return MarkdownOptions(input, renderer, Options{
+ Extensions: commonExtensions})
+}
+
+// Markdown is the main rendering function.
+// It parses and renders a block of markdown-encoded text.
+// The supplied Renderer is used to format the output, and extensions dictates
+// which non-standard extensions are enabled.
+//
+// To use the supplied Html or LaTeX renderers, see HtmlRenderer and
+// LatexRenderer, respectively.
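+//
+// For example (a sketch):
+//
+//	renderer := HtmlRenderer(HTML_USE_XHTML, "", "")
+//	output := Markdown(input, renderer, EXTENSION_TABLES|EXTENSION_FENCED_CODE)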
+func Markdown(input []byte, renderer Renderer, extensions int) []byte {
+ return MarkdownOptions(input, renderer, Options{
+ Extensions: extensions})
+}
+
+// MarkdownOptions is just like Markdown but takes additional options through
+// the Options struct.
+func MarkdownOptions(input []byte, renderer Renderer, opts Options) []byte {
+ // no point in parsing if we can't render
+ if renderer == nil {
+ return nil
+ }
+
+ extensions := opts.Extensions
+
+ // fill in the render structure
+ p := new(parser)
+ p.r = renderer
+ p.flags = extensions
+ p.refOverride = opts.ReferenceOverride
+ p.refs = make(map[string]*reference)
+ p.maxNesting = 16
+ p.insideLink = false
+
+ // register inline parsers
+ p.inlineCallback['*'] = emphasis
+ p.inlineCallback['_'] = emphasis
+ if extensions&EXTENSION_STRIKETHROUGH != 0 {
+ p.inlineCallback['~'] = emphasis
+ }
+ p.inlineCallback['`'] = codeSpan
+ p.inlineCallback['\n'] = lineBreak
+ p.inlineCallback['['] = link
+ p.inlineCallback['<'] = leftAngle
+ p.inlineCallback['\\'] = escape
+ p.inlineCallback['&'] = entity
+
+ if extensions&EXTENSION_AUTOLINK != 0 {
+ p.inlineCallback[':'] = autoLink
+ }
+
+ if extensions&EXTENSION_FOOTNOTES != 0 {
+ p.notes = make([]*reference, 0)
+ p.notesRecord = make(map[string]struct{})
+ }
+
+ first := firstPass(p, input)
+ second := secondPass(p, first)
+ return second
+}
+
+// first pass:
+// - normalize newlines
+// - extract references (outside of fenced code blocks)
+// - expand tabs (outside of fenced code blocks)
+// - copy everything else
+func firstPass(p *parser, input []byte) []byte {
+ var out bytes.Buffer
+ tabSize := TAB_SIZE_DEFAULT
+ if p.flags&EXTENSION_TAB_SIZE_EIGHT != 0 {
+ tabSize = TAB_SIZE_EIGHT
+ }
+ beg := 0
+ lastFencedCodeBlockEnd := 0
+ for beg < len(input) {
+ // Find end of this line, then process the line.
+ end := beg
+ for end < len(input) && input[end] != '\n' && input[end] != '\r' {
+ end++
+ }
+
+ if p.flags&EXTENSION_FENCED_CODE != 0 {
+ // track fenced code block boundaries to suppress tab expansion
+ // and reference extraction inside them:
+ if beg >= lastFencedCodeBlockEnd {
+ if i := p.fencedCodeBlock(&out, input[beg:], false); i > 0 {
+ lastFencedCodeBlockEnd = beg + i
+ }
+ }
+ }
+
+ // add the line body if present
+ if end > beg {
+ if end < lastFencedCodeBlockEnd { // Do not expand tabs while inside fenced code blocks.
+ out.Write(input[beg:end])
+ } else if refEnd := isReference(p, input[beg:], tabSize); refEnd > 0 {
+ beg += refEnd
+ continue
+ } else {
+ expandTabs(&out, input[beg:end], tabSize)
+ }
+ }
+
+ if end < len(input) && input[end] == '\r' {
+ end++
+ }
+ if end < len(input) && input[end] == '\n' {
+ end++
+ }
+ out.WriteByte('\n')
+
+ beg = end
+ }
+
+ // empty input?
+ if out.Len() == 0 {
+ out.WriteByte('\n')
+ }
+
+ return out.Bytes()
+}
+
+// second pass: actual rendering
+func secondPass(p *parser, input []byte) []byte {
+ var output bytes.Buffer
+
+ p.r.DocumentHeader(&output)
+ p.block(&output, input)
+
+ if p.flags&EXTENSION_FOOTNOTES != 0 && len(p.notes) > 0 {
+ p.r.Footnotes(&output, func() bool {
+ flags := LIST_ITEM_BEGINNING_OF_LIST
+ for i := 0; i < len(p.notes); i++ {
+ ref := p.notes[i]
+ var buf bytes.Buffer
+ if ref.hasBlock {
+ flags |= LIST_ITEM_CONTAINS_BLOCK
+ p.block(&buf, ref.title)
+ } else {
+ p.inline(&buf, ref.title)
+ }
+ p.r.FootnoteItem(&output, ref.link, buf.Bytes(), flags)
+ flags &^= LIST_ITEM_BEGINNING_OF_LIST | LIST_ITEM_CONTAINS_BLOCK
+ }
+
+ return true
+ })
+ }
+
+ p.r.DocumentFooter(&output)
+
+ if p.nesting != 0 {
+ panic("Nesting level did not end at zero")
+ }
+
+ return output.Bytes()
+}
+
+//
+// Link references
+//
+// This section implements support for references that (usually) appear
+// as footnotes in a document, and can be referenced anywhere in the document.
+// The basic format is:
+//
+// [1]: http://www.google.com/ "Google"
+// [2]: http://www.github.com/ "Github"
+//
+// Anywhere in the document, the reference can be linked by referring to its
+// label, i.e., 1 and 2 in this example, as in:
+//
+// This library is hosted on [Github][2], a git hosting site.
+//
+// Actual footnotes as specified in Pandoc and supported by some other Markdown
+// libraries such as php-markdown are also taken care of. They look like this:
+//
+// This sentence needs a bit of further explanation.[^note]
+//
+// [^note]: This is the explanation.
+//
+// Footnotes should be placed at the end of the document in an ordered list.
+// Inline footnotes such as:
+//
+// Inline footnotes^[Not supported.] also exist.
+//
+// are not yet supported.
+
+// References are parsed and stored in this struct.
+type reference struct {
+ link []byte
+ title []byte
+ noteId int // 0 if not a footnote ref
+ hasBlock bool
+ text []byte
+}
+
+func (r *reference) String() string {
+ return fmt.Sprintf("{link: %q, title: %q, text: %q, noteId: %d, hasBlock: %v}",
+ r.link, r.title, r.text, r.noteId, r.hasBlock)
+}
+
+// Check whether or not data starts with a reference link.
+// If so, it is parsed and stored in the list of references
+// (in the render struct).
+// Returns the number of bytes to skip to move past it,
+// or zero if the first line is not a reference.
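+// For example, the line
+//
+//	[foo]: http://example.com/ "Example"
+//
+// stores a reference under the id "foo" and returns the offset just past
+// the end of that line.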
+func isReference(p *parser, data []byte, tabSize int) int {
+ // up to 3 optional leading spaces
+ if len(data) < 4 {
+ return 0
+ }
+ i := 0
+ for i < 3 && data[i] == ' ' {
+ i++
+ }
+
+ noteId := 0
+
+ // id part: anything but a newline between brackets
+ if data[i] != '[' {
+ return 0
+ }
+ i++
+ if p.flags&EXTENSION_FOOTNOTES != 0 {
+ if i < len(data) && data[i] == '^' {
+ // we can set it to anything here because the proper noteIds will
+ // be assigned later during the second pass. It just has to be != 0
+ noteId = 1
+ i++
+ }
+ }
+ idOffset := i
+ for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' {
+ i++
+ }
+ if i >= len(data) || data[i] != ']' {
+ return 0
+ }
+ idEnd := i
+
+ // spacer: colon (space | tab)* newline? (space | tab)*
+ i++
+ if i >= len(data) || data[i] != ':' {
+ return 0
+ }
+ i++
+ for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
+ i++
+ }
+ if i < len(data) && (data[i] == '\n' || data[i] == '\r') {
+ i++
+ if i < len(data) && data[i] == '\n' && data[i-1] == '\r' {
+ i++
+ }
+ }
+ for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
+ i++
+ }
+ if i >= len(data) {
+ return 0
+ }
+
+ var (
+ linkOffset, linkEnd int
+ titleOffset, titleEnd int
+ lineEnd int
+ raw []byte
+ hasBlock bool
+ )
+
+ if p.flags&EXTENSION_FOOTNOTES != 0 && noteId != 0 {
+ linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize)
+ lineEnd = linkEnd
+ } else {
+ linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i)
+ }
+ if lineEnd == 0 {
+ return 0
+ }
+
+ // a valid ref has been found
+
+ ref := &reference{
+ noteId: noteId,
+ hasBlock: hasBlock,
+ }
+
+ if noteId > 0 {
+ // reusing the link field for the id since footnotes don't have links
+ ref.link = data[idOffset:idEnd]
+ // if footnote, it's not really a title, it's the contained text
+ ref.title = raw
+ } else {
+ ref.link = data[linkOffset:linkEnd]
+ ref.title = data[titleOffset:titleEnd]
+ }
+
+ // id matches are case-insensitive
+ id := string(bytes.ToLower(data[idOffset:idEnd]))
+
+ p.refs[id] = ref
+
+ return lineEnd
+}
+
+func scanLinkRef(p *parser, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) {
+ // link: whitespace-free sequence, optionally between angle brackets
+ if data[i] == '<' {
+ i++
+ }
+ linkOffset = i
+ if i == len(data) {
+ return
+ }
+ for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' {
+ i++
+ }
+ linkEnd = i
+ if data[linkOffset] == '<' && data[linkEnd-1] == '>' {
+ linkOffset++
+ linkEnd--
+ }
+
+ // optional spacer: (space | tab)* (newline | '\'' | '"' | '(' )
+ for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
+ i++
+ }
+ if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' {
+ return
+ }
+
+ // compute end-of-line
+ if i >= len(data) || data[i] == '\r' || data[i] == '\n' {
+ lineEnd = i
+ }
+ if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' {
+ lineEnd++
+ }
+
+ // optional (space|tab)* spacer after a newline
+ if lineEnd > 0 {
+ i = lineEnd + 1
+ for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
+ i++
+ }
+ }
+
+ // optional title: any non-newline sequence enclosed in single quotes, double quotes, or parens, alone on its line
+ if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') {
+ i++
+ titleOffset = i
+
+ // look for EOL
+ for i < len(data) && data[i] != '\n' && data[i] != '\r' {
+ i++
+ }
+ if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' {
+ titleEnd = i + 1
+ } else {
+ titleEnd = i
+ }
+
+ // step back
+ i--
+ for i > titleOffset && (data[i] == ' ' || data[i] == '\t') {
+ i--
+ }
+ if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') {
+ lineEnd = titleEnd
+ titleEnd = i
+ }
+ }
+
+ return
+}
+
+// The first bit of this logic is the same as (*parser).listItem, but the rest
+// is much simpler. This function simply finds the entire block and shifts it
+// over by one tab if it is indeed a block (just returns the line if it's not).
+// blockEnd is the end of the section in the input buffer, and contents is the
+// extracted text that was shifted over one tab. It will need to be rendered at
+// the end of the document.
+func scanFootnote(p *parser, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) {
+ if i == 0 || len(data) == 0 {
+ return
+ }
+
+ // skip leading whitespace on first line
+ for i < len(data) && data[i] == ' ' {
+ i++
+ }
+
+ blockStart = i
+
+ // find the end of the line
+ blockEnd = i
+ for i < len(data) && data[i-1] != '\n' {
+ i++
+ }
+
+ // get working buffer
+ var raw bytes.Buffer
+
+ // put the first line into the working buffer
+ raw.Write(data[blockEnd:i])
+ blockEnd = i
+
+ // process the following lines
+ containsBlankLine := false
+
+gatherLines:
+ for blockEnd < len(data) {
+ i++
+
+ // find the end of this line
+ for i < len(data) && data[i-1] != '\n' {
+ i++
+ }
+
+ // if it is an empty line, guess that it is part of this item
+ // and move on to the next line
+ if p.isEmpty(data[blockEnd:i]) > 0 {
+ containsBlankLine = true
+ blockEnd = i
+ continue
+ }
+
+ n := 0
+ if n = isIndented(data[blockEnd:i], indentSize); n == 0 {
+ // this is the end of the block.
+ // we don't want to include this last line in the index.
+ break gatherLines
+ }
+
+ // if there were blank lines before this one, insert a new one now
+ if containsBlankLine {
+ raw.WriteByte('\n')
+ containsBlankLine = false
+ }
+
+ // get rid of that first tab, write to buffer
+ raw.Write(data[blockEnd+n : i])
+ hasBlock = true
+
+ blockEnd = i
+ }
+
+ if data[blockEnd-1] != '\n' {
+ raw.WriteByte('\n')
+ }
+
+ contents = raw.Bytes()
+
+ return
+}
+
+//
+//
+// Miscellaneous helper functions
+//
+//
+
+// Test if a character is a punctuation symbol.
+// Taken from a private function in regexp in the stdlib.
+func ispunct(c byte) bool {
+ for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") {
+ if c == r {
+ return true
+ }
+ }
+ return false
+}
+
+// Test if a character is a whitespace character.
+func isspace(c byte) bool {
+ return c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f' || c == '\v'
+}
+
+// Test if a character is a letter.
+func isletter(c byte) bool {
+ return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
+}
+
+// Test if a character is a letter or a digit.
+// TODO: check when this is looking for ASCII alnum and when it should use unicode
+func isalnum(c byte) bool {
+ return (c >= '0' && c <= '9') || isletter(c)
+}
+
+// Replace tab characters with spaces, aligning to the next tab stop
+// (a multiple of tabSize columns). The caller appends the trailing newline.
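+// For example, with tabSize 4, "a\tb" expands to "a   b": three spaces carry
+// column 1 to the next tab stop at column 4.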
+func expandTabs(out *bytes.Buffer, line []byte, tabSize int) {
+ // first, check for common cases: no tabs, or only tabs at beginning of line
+ i, prefix := 0, 0
+ slowcase := false
+ for i = 0; i < len(line); i++ {
+ if line[i] == '\t' {
+ if prefix == i {
+ prefix++
+ } else {
+ slowcase = true
+ break
+ }
+ }
+ }
+
+ // no need to decode runes if all tabs are at the beginning of the line
+ if !slowcase {
+ for i = 0; i < prefix*tabSize; i++ {
+ out.WriteByte(' ')
+ }
+ out.Write(line[prefix:])
+ return
+ }
+
+ // the slow case: we need to count runes to figure out how
+ // many spaces to insert for each tab
+ column := 0
+ i = 0
+ for i < len(line) {
+ start := i
+ for i < len(line) && line[i] != '\t' {
+ _, size := utf8.DecodeRune(line[i:])
+ i += size
+ column++
+ }
+
+ if i > start {
+ out.Write(line[start:i])
+ }
+
+ if i >= len(line) {
+ break
+ }
+
+ for {
+ out.WriteByte(' ')
+ column++
+ if column%tabSize == 0 {
+ break
+ }
+ }
+
+ i++
+ }
+}
+
+// Find if a line counts as indented or not.
+// Returns number of characters the indent is (0 = not indented).
+func isIndented(data []byte, indentSize int) int {
+ if len(data) == 0 {
+ return 0
+ }
+ if data[0] == '\t' {
+ return 1
+ }
+ if len(data) < indentSize {
+ return 0
+ }
+ for i := 0; i < indentSize; i++ {
+ if data[i] != ' ' {
+ return 0
+ }
+ }
+ return indentSize
+}
+
+// Create a url-safe slug for fragments
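+// For example, slugify([]byte("Hello, World!")) yields "Hello-World": runs of
+// non-alphanumeric bytes collapse to a single '-', and leading and trailing
+// dashes are trimmed (case is preserved).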
+func slugify(in []byte) []byte {
+ if len(in) == 0 {
+ return in
+ }
+ out := make([]byte, 0, len(in))
+ sym := false
+
+ for _, ch := range in {
+ if isalnum(ch) {
+ sym = false
+ out = append(out, ch)
+ } else if sym {
+ continue
+ } else {
+ out = append(out, '-')
+ sym = true
+ }
+ }
+ var a, b int
+ var ch byte
+ for a, ch = range out {
+ if ch != '-' {
+ break
+ }
+ }
+ for b = len(out) - 1; b > 0; b-- {
+ if out[b] != '-' {
+ break
+ }
+ }
+ return out[a : b+1]
+}
diff --git a/vendor/github.com/russross/blackfriday/smartypants.go b/vendor/github.com/russross/blackfriday/smartypants.go
new file mode 100644
index 00000000..f25bd07d
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/smartypants.go
@@ -0,0 +1,430 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross <russ@russross.com>.
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+//
+// SmartyPants rendering
+//
+//
+
+package blackfriday
+
+import (
+ "bytes"
+)
+
+type smartypantsData struct {
+ inSingleQuote bool
+ inDoubleQuote bool
+}
+
+func wordBoundary(c byte) bool {
+ return c == 0 || isspace(c) || ispunct(c)
+}
+
+func tolower(c byte) byte {
+ if c >= 'A' && c <= 'Z' {
+ return c - 'A' + 'a'
+ }
+ return c
+}
+
+func isdigit(c byte) bool {
+ return c >= '0' && c <= '9'
+}
+
+func smartQuoteHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote byte, isOpen *bool, addNBSP bool) bool {
+ // edge of the buffer is likely to be a tag that we don't get to see,
+ // so we treat it like text sometimes
+
+ // enumerate all sixteen possibilities for (previousChar, nextChar)
+ // each can be one of {0, space, punct, other}
+ switch {
+ case previousChar == 0 && nextChar == 0:
+ // context is not any help here, so toggle
+ *isOpen = !*isOpen
+ case isspace(previousChar) && nextChar == 0:
+ // [ "] might be [ "<code>foo...]
+ *isOpen = true
+ case ispunct(previousChar) && nextChar == 0:
+ // [!"] hmm... could be [Run!"] or [("<code>...]
+ *isOpen = false
+ case /* isnormal(previousChar) && */ nextChar == 0:
+ // [a"] is probably a close
+ *isOpen = false
+ case previousChar == 0 && isspace(nextChar):
+ // [" ] might be [...foo</code>" ]
+ *isOpen = false
+ case isspace(previousChar) && isspace(nextChar):
+ // [ " ] context is not any help here, so toggle
+ *isOpen = !*isOpen
+ case ispunct(previousChar) && isspace(nextChar):
+ // [!" ] is probably a close
+ *isOpen = false
+ case /* isnormal(previousChar) && */ isspace(nextChar):
+ // [a" ] this is one of the easy cases
+ *isOpen = false
+ case previousChar == 0 && ispunct(nextChar):
+ // ["!] hmm... could be ["$1.95] or [</code>"!...]
+ *isOpen = false
+ case isspace(previousChar) && ispunct(nextChar):
+ // [ "!] looks more like [ "$1.95]
+ *isOpen = true
+ case ispunct(previousChar) && ispunct(nextChar):
+ // [!"!] context is not any help here, so toggle
+ *isOpen = !*isOpen
+ case /* isnormal(previousChar) && */ ispunct(nextChar):
+ // [a"!] is probably a close
+ *isOpen = false
+ case previousChar == 0 /* && isnormal(nextChar) */ :
+ // ["a] is probably an open
+ *isOpen = true
+ case isspace(previousChar) /* && isnormal(nextChar) */ :
+ // [ "a] this is one of the easy cases
+ *isOpen = true
+ case ispunct(previousChar) /* && isnormal(nextChar) */ :
+ // [!"a] is probably an open
+ *isOpen = true
+ default:
+ // [a'b] maybe a contraction?
+ *isOpen = false
+ }
+
+ // Note that with the limited lookahead, this non-breaking
+ // space will also be appended to single double quotes.
+ if addNBSP && !*isOpen {
+ out.WriteString("&nbsp;")
+ }
+
+ out.WriteByte('&')
+ if *isOpen {
+ out.WriteByte('l')
+ } else {
+ out.WriteByte('r')
+ }
+ out.WriteByte(quote)
+ out.WriteString("quo;")
+
+ if addNBSP && *isOpen {
+ out.WriteString("&nbsp;")
+ }
+
+ return true
+}
+
+func smartSingleQuote(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+ if len(text) >= 2 {
+ t1 := tolower(text[1])
+
+ if t1 == '\'' {
+ nextChar := byte(0)
+ if len(text) >= 3 {
+ nextChar = text[2]
+ }
+ if smartQuoteHelper(out, previousChar, nextChar, 'd', &smrt.inDoubleQuote, false) {
+ return 1
+ }
+ }
+
+ if (t1 == 's' || t1 == 't' || t1 == 'm' || t1 == 'd') && (len(text) < 3 || wordBoundary(text[2])) {
+ out.WriteString("&rsquo;")
+ return 0
+ }
+
+ if len(text) >= 3 {
+ t2 := tolower(text[2])
+
+ if ((t1 == 'r' && t2 == 'e') || (t1 == 'l' && t2 == 'l') || (t1 == 'v' && t2 == 'e')) &&
+ (len(text) < 4 || wordBoundary(text[3])) {
+ out.WriteString("&rsquo;")
+ return 0
+ }
+ }
+ }
+
+ nextChar := byte(0)
+ if len(text) > 1 {
+ nextChar = text[1]
+ }
+ if smartQuoteHelper(out, previousChar, nextChar, 's', &smrt.inSingleQuote, false) {
+ return 0
+ }
+
+ out.WriteByte(text[0])
+ return 0
+}
+
+func smartParens(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+ if len(text) >= 3 {
+ t1 := tolower(text[1])
+ t2 := tolower(text[2])
+
+ if t1 == 'c' && t2 == ')' {
+ out.WriteString("&copy;")
+ return 2
+ }
+
+ if t1 == 'r' && t2 == ')' {
+ out.WriteString("&reg;")
+ return 2
+ }
+
+ if len(text) >= 4 && t1 == 't' && t2 == 'm' && text[3] == ')' {
+ out.WriteString("&trade;")
+ return 3
+ }
+ }
+
+ out.WriteByte(text[0])
+ return 0
+}
+
+func smartDash(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+ if len(text) >= 2 {
+ if text[1] == '-' {
+ out.WriteString("&mdash;")
+ return 1
+ }
+
+ if wordBoundary(previousChar) && wordBoundary(text[1]) {
+ out.WriteString("&ndash;")
+ return 0
+ }
+ }
+
+ out.WriteByte(text[0])
+ return 0
+}
+
+func smartDashLatex(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+ if len(text) >= 3 && text[1] == '-' && text[2] == '-' {
+ out.WriteString("&mdash;")
+ return 2
+ }
+ if len(text) >= 2 && text[1] == '-' {
+ out.WriteString("&ndash;")
+ return 1
+ }
+
+ out.WriteByte(text[0])
+ return 0
+}
+
+func smartAmpVariant(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte, quote byte, addNBSP bool) int {
+ if bytes.HasPrefix(text, []byte("&quot;")) {
+ nextChar := byte(0)
+ if len(text) >= 7 {
+ nextChar = text[6]
+ }
+ if smartQuoteHelper(out, previousChar, nextChar, quote, &smrt.inDoubleQuote, addNBSP) {
+ return 5
+ }
+ }
+
+ if bytes.HasPrefix(text, []byte("&#0;")) {
+ return 3
+ }
+
+ out.WriteByte('&')
+ return 0
+}
+
+func smartAmp(angledQuotes, addNBSP bool) func(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+ var quote byte = 'd'
+ if angledQuotes {
+ quote = 'a'
+ }
+
+ return func(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+ return smartAmpVariant(out, smrt, previousChar, text, quote, addNBSP)
+ }
+}
+
+func smartPeriod(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+ if len(text) >= 3 && text[1] == '.' && text[2] == '.' {
+ out.WriteString("&hellip;")
+ return 2
+ }
+
+ if len(text) >= 5 && text[1] == ' ' && text[2] == '.' && text[3] == ' ' && text[4] == '.' {
+ out.WriteString("&hellip;")
+ return 4
+ }
+
+ out.WriteByte(text[0])
+ return 0
+}
+
+func smartBacktick(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+ if len(text) >= 2 && text[1] == '`' {
+ nextChar := byte(0)
+ if len(text) >= 3 {
+ nextChar = text[2]
+ }
+ if smartQuoteHelper(out, previousChar, nextChar, 'd', &smrt.inDoubleQuote, false) {
+ return 1
+ }
+ }
+
+ out.WriteByte(text[0])
+ return 0
+}
+
+func smartNumberGeneric(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+ if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
+ // is it of the form digits/digits(word boundary)?, i.e., \d+/\d+\b
+ // note: check for regular slash (/) or fraction slash (⁄, 0x2044, or 0xe2 81 84 in utf-8)
+ // and avoid changing dates like 1/23/2005 into fractions.
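+ // e.g. "3/4" becomes <sup>3</sup>&frasl;<sub>4</sub>, while a date such
+ // as 1/23/2005 is left untouched.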
+ numEnd := 0
+ for len(text) > numEnd && isdigit(text[numEnd]) {
+ numEnd++
+ }
+ if numEnd == 0 {
+ out.WriteByte(text[0])
+ return 0
+ }
+ denStart := numEnd + 1
+ if len(text) > numEnd+3 && text[numEnd] == 0xe2 && text[numEnd+1] == 0x81 && text[numEnd+2] == 0x84 {
+ denStart = numEnd + 3
+ } else if len(text) < numEnd+2 || text[numEnd] != '/' {
+ out.WriteByte(text[0])
+ return 0
+ }
+ denEnd := denStart
+ for len(text) > denEnd && isdigit(text[denEnd]) {
+ denEnd++
+ }
+ if denEnd == denStart {
+ out.WriteByte(text[0])
+ return 0
+ }
+ if len(text) == denEnd || wordBoundary(text[denEnd]) && text[denEnd] != '/' {
+ out.WriteString("<sup>")
+ out.Write(text[:numEnd])
+ out.WriteString("</sup>&frasl;<sub>")
+ out.Write(text[denStart:denEnd])
+ out.WriteString("</sub>")
+ return denEnd - 1
+ }
+ }
+
+ out.WriteByte(text[0])
+ return 0
+}
+
+func smartNumber(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+ if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
+ if text[0] == '1' && text[1] == '/' && text[2] == '2' {
+ if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' {
+ out.WriteString("&frac12;")
+ return 2
+ }
+ }
+
+ if text[0] == '1' && text[1] == '/' && text[2] == '4' {
+ if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 5 && tolower(text[3]) == 't' && tolower(text[4]) == 'h') {
+ out.WriteString("&frac14;")
+ return 2
+ }
+ }
+
+ if text[0] == '3' && text[1] == '/' && text[2] == '4' {
+ if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 6 && tolower(text[3]) == 't' && tolower(text[4]) == 'h' && tolower(text[5]) == 's') {
+ out.WriteString("&frac34;")
+ return 2
+ }
+ }
+ }
+
+ out.WriteByte(text[0])
+ return 0
+}
+
+func smartDoubleQuoteVariant(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte, quote byte) int {
+ nextChar := byte(0)
+ if len(text) > 1 {
+ nextChar = text[1]
+ }
+ if !smartQuoteHelper(out, previousChar, nextChar, quote, &smrt.inDoubleQuote, false) {
+ out.WriteString("&quot;")
+ }
+
+ return 0
+}
+
+func smartDoubleQuote(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+ return smartDoubleQuoteVariant(out, smrt, previousChar, text, 'd')
+}
+
+func smartAngledDoubleQuote(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+ return smartDoubleQuoteVariant(out, smrt, previousChar, text, 'a')
+}
+
+func smartLeftAngle(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+ i := 0
+
+ for i < len(text) && text[i] != '>' {
+ i++
+ }
+
+ out.Write(text[:i+1])
+ return i
+}
+
+type smartCallback func(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int
+
+type smartypantsRenderer [256]smartCallback
+
+var (
+ smartAmpAngled = smartAmp(true, false)
+ smartAmpAngledNBSP = smartAmp(true, true)
+ smartAmpRegular = smartAmp(false, false)
+ smartAmpRegularNBSP = smartAmp(false, true)
+)
+
+func smartypants(flags int) *smartypantsRenderer {
+ r := new(smartypantsRenderer)
+ addNBSP := flags&HTML_SMARTYPANTS_QUOTES_NBSP != 0
+ if flags&HTML_SMARTYPANTS_ANGLED_QUOTES == 0 {
+ r['"'] = smartDoubleQuote
+ if !addNBSP {
+ r['&'] = smartAmpRegular
+ } else {
+ r['&'] = smartAmpRegularNBSP
+ }
+ } else {
+ r['"'] = smartAngledDoubleQuote
+ if !addNBSP {
+ r['&'] = smartAmpAngled
+ } else {
+ r['&'] = smartAmpAngledNBSP
+ }
+ }
+ r['\''] = smartSingleQuote
+ r['('] = smartParens
+ if flags&HTML_SMARTYPANTS_DASHES != 0 {
+ if flags&HTML_SMARTYPANTS_LATEX_DASHES == 0 {
+ r['-'] = smartDash
+ } else {
+ r['-'] = smartDashLatex
+ }
+ }
+ r['.'] = smartPeriod
+ if flags&HTML_SMARTYPANTS_FRACTIONS == 0 {
+ r['1'] = smartNumber
+ r['3'] = smartNumber
+ } else {
+ for ch := '1'; ch <= '9'; ch++ {
+ r[ch] = smartNumberGeneric
+ }
+ }
+ r['<'] = smartLeftAngle
+ r['`'] = smartBacktick
+ return r
+}
diff --git a/vendor/github.com/spf13/cobra/doc/man_docs.go b/vendor/github.com/spf13/cobra/doc/man_docs.go
new file mode 100644
index 00000000..baa48118
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/doc/man_docs.go
@@ -0,0 +1,236 @@
+// Copyright 2015 Red Hat Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package doc
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/cpuguy83/go-md2man/md2man"
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+)
+
+// GenManTree will generate a man page for this command and all descendants
+// in the directory given. The header may be nil. This function may not work
+// correctly if your command names have `-` in them. If you have `cmd` with two
+// subcmds, `sub` and `sub-third`, and `sub` has a subcommand called `third`
+// it is undefined which help output will be in the file `cmd-sub-third.1`.
+func GenManTree(cmd *cobra.Command, header *GenManHeader, dir string) error {
+ return GenManTreeFromOpts(cmd, GenManTreeOptions{
+ Header: header,
+ Path: dir,
+ CommandSeparator: "-",
+ })
+}
+
+// GenManTreeFromOpts generates a man page for the command and all descendants.
+// The pages are written to the opts.Path directory.
+func GenManTreeFromOpts(cmd *cobra.Command, opts GenManTreeOptions) error {
+ header := opts.Header
+ if header == nil {
+ header = &GenManHeader{}
+ }
+ for _, c := range cmd.Commands() {
+ if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
+ continue
+ }
+ if err := GenManTreeFromOpts(c, opts); err != nil {
+ return err
+ }
+ }
+ section := "1"
+ if header.Section != "" {
+ section = header.Section
+ }
+
+ separator := "_"
+ if opts.CommandSeparator != "" {
+ separator = opts.CommandSeparator
+ }
+ basename := strings.Replace(cmd.CommandPath(), " ", separator, -1)
+ filename := filepath.Join(opts.Path, basename+"."+section)
+ f, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ headerCopy := *header
+ return GenMan(cmd, &headerCopy, f)
+}
+
+// GenManTreeOptions is the options for generating the man pages.
+// Used only in GenManTreeFromOpts.
+type GenManTreeOptions struct {
+ Header *GenManHeader
+ Path string
+ CommandSeparator string
+}
+
+// GenManHeader is a lot like the .TH header at the start of man pages. These
+// include the title, section, date, source, and manual. We will use the
+// current time if Date is unset and will use "Auto generated by spf13/cobra"
+// if the Source is unset.
+type GenManHeader struct {
+ Title string
+ Section string
+ Date *time.Time
+ date string
+ Source string
+ Manual string
+}
+
+// GenMan will generate a man page for the given command and write it to
+// w. The header argument may be nil, but w must not be.
+func GenMan(cmd *cobra.Command, header *GenManHeader, w io.Writer) error {
+ if header == nil {
+ header = &GenManHeader{}
+ }
+ fillHeader(header, cmd.CommandPath())
+
+ b := genMan(cmd, header)
+ _, err := w.Write(md2man.Render(b))
+ return err
+}
+
+func fillHeader(header *GenManHeader, name string) {
+ if header.Title == "" {
+ header.Title = strings.ToUpper(strings.Replace(name, " ", "\\-", -1))
+ }
+ if header.Section == "" {
+ header.Section = "1"
+ }
+ if header.Date == nil {
+ now := time.Now()
+ header.Date = &now
+ }
+ header.date = (*header.Date).Format("Jan 2006")
+ if header.Source == "" {
+ header.Source = "Auto generated by spf13/cobra"
+ }
+}
+
+func manPreamble(buf *bytes.Buffer, header *GenManHeader, cmd *cobra.Command, dashedName string) {
+ description := cmd.Long
+ if len(description) == 0 {
+ description = cmd.Short
+ }
+
+ buf.WriteString(fmt.Sprintf(`%% %s(%s)%s
+%% %s
+%% %s
+# NAME
+`, header.Title, header.Section, header.date, header.Source, header.Manual))
+ buf.WriteString(fmt.Sprintf("%s \\- %s\n\n", dashedName, cmd.Short))
+ buf.WriteString("# SYNOPSIS\n")
+ buf.WriteString(fmt.Sprintf("**%s**\n\n", cmd.UseLine()))
+ buf.WriteString("# DESCRIPTION\n")
+ buf.WriteString(description + "\n\n")
+}
+
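+// manPrintFlags writes one definition per visible flag. For a string flag
+// "config" with shorthand "c" and an empty default, the emitted markup looks
+// like (a sketch): **-c**, **--config**="" followed by an indented usage line.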
+func manPrintFlags(buf *bytes.Buffer, flags *pflag.FlagSet) {
+ flags.VisitAll(func(flag *pflag.Flag) {
+ if len(flag.Deprecated) > 0 || flag.Hidden {
+ return
+ }
+ format := ""
+ if len(flag.Shorthand) > 0 && len(flag.ShorthandDeprecated) == 0 {
+ format = fmt.Sprintf("**-%s**, **--%s**", flag.Shorthand, flag.Name)
+ } else {
+ format = fmt.Sprintf("**--%s**", flag.Name)
+ }
+ if len(flag.NoOptDefVal) > 0 {
+ format += "["
+ }
+ if flag.Value.Type() == "string" {
+ // put quotes on the value
+ format += "=%q"
+ } else {
+ format += "=%s"
+ }
+ if len(flag.NoOptDefVal) > 0 {
+ format += "]"
+ }
+ format += "\n\t%s\n\n"
+ buf.WriteString(fmt.Sprintf(format, flag.DefValue, flag.Usage))
+ })
+}
+
+func manPrintOptions(buf *bytes.Buffer, command *cobra.Command) {
+ flags := command.NonInheritedFlags()
+ if flags.HasAvailableFlags() {
+ buf.WriteString("# OPTIONS\n")
+ manPrintFlags(buf, flags)
+ buf.WriteString("\n")
+ }
+ flags = command.InheritedFlags()
+ if flags.HasAvailableFlags() {
+ buf.WriteString("# OPTIONS INHERITED FROM PARENT COMMANDS\n")
+ manPrintFlags(buf, flags)
+ buf.WriteString("\n")
+ }
+}
+
+func genMan(cmd *cobra.Command, header *GenManHeader) []byte {
+ cmd.InitDefaultHelpCmd()
+ cmd.InitDefaultHelpFlag()
+
+ // something like `rootcmd-subcmd1-subcmd2`
+ dashCommandName := strings.Replace(cmd.CommandPath(), " ", "-", -1)
+
+ buf := new(bytes.Buffer)
+
+ manPreamble(buf, header, cmd, dashCommandName)
+ manPrintOptions(buf, cmd)
+ if len(cmd.Example) > 0 {
+ buf.WriteString("# EXAMPLE\n")
+ buf.WriteString(fmt.Sprintf("```\n%s\n```\n", cmd.Example))
+ }
+ if hasSeeAlso(cmd) {
+ buf.WriteString("# SEE ALSO\n")
+ seealsos := make([]string, 0)
+ if cmd.HasParent() {
+ parentPath := cmd.Parent().CommandPath()
+ dashParentPath := strings.Replace(parentPath, " ", "-", -1)
+ seealso := fmt.Sprintf("**%s(%s)**", dashParentPath, header.Section)
+ seealsos = append(seealsos, seealso)
+ cmd.VisitParents(func(c *cobra.Command) {
+ if c.DisableAutoGenTag {
+ cmd.DisableAutoGenTag = c.DisableAutoGenTag
+ }
+ })
+ }
+ children := cmd.Commands()
+ sort.Sort(byName(children))
+ for _, c := range children {
+ if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
+ continue
+ }
+ seealso := fmt.Sprintf("**%s-%s(%s)**", dashCommandName, c.Name(), header.Section)
+ seealsos = append(seealsos, seealso)
+ }
+ buf.WriteString(strings.Join(seealsos, ", ") + "\n")
+ }
+ if !cmd.DisableAutoGenTag {
+ buf.WriteString(fmt.Sprintf("# HISTORY\n%s Auto generated by spf13/cobra\n", header.Date.Format("2-Jan-2006")))
+ }
+ return buf.Bytes()
+}
diff --git a/vendor/github.com/spf13/cobra/doc/man_docs.md b/vendor/github.com/spf13/cobra/doc/man_docs.md
new file mode 100644
index 00000000..3709160f
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/doc/man_docs.md
@@ -0,0 +1,31 @@
+# Generating Man Pages For Your Own cobra.Command
+
+Generating man pages from a cobra command is incredibly easy. An example is as follows:
+
+```go
+package main
+
+import (
+ "log"
+
+ "github.com/spf13/cobra"
+ "github.com/spf13/cobra/doc"
+)
+
+func main() {
+ cmd := &cobra.Command{
+ Use: "test",
+ Short: "my test program",
+ }
+ header := &doc.GenManHeader{
+ Title: "MINE",
+ Section: "3",
+ }
+ err := doc.GenManTree(cmd, header, "/tmp")
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+```
+
+That will get you a man page `/tmp/test.3`.
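+
+If you need control over the separator used in the generated file name,
+call `GenManTreeFromOpts` directly. A sketch, reusing `cmd` and `header`
+from the example above:
+
+```go
+err := doc.GenManTreeFromOpts(cmd, doc.GenManTreeOptions{
+	Header:           header,
+	Path:             "/tmp",
+	CommandSeparator: "-",
+})
+if err != nil {
+	log.Fatal(err)
+}
+```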
diff --git a/vendor/github.com/spf13/cobra/doc/md_docs.go b/vendor/github.com/spf13/cobra/doc/md_docs.go
new file mode 100644
index 00000000..d76f6d5e
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/doc/md_docs.go
@@ -0,0 +1,159 @@
+// Copyright 2015 Red Hat Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package doc
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/spf13/cobra"
+)
+
+func printOptions(buf *bytes.Buffer, cmd *cobra.Command, name string) error {
+ flags := cmd.NonInheritedFlags()
+ flags.SetOutput(buf)
+ if flags.HasAvailableFlags() {
+ buf.WriteString("### Options\n\n```\n")
+ flags.PrintDefaults()
+ buf.WriteString("```\n\n")
+ }
+
+ parentFlags := cmd.InheritedFlags()
+ parentFlags.SetOutput(buf)
+ if parentFlags.HasAvailableFlags() {
+ buf.WriteString("### Options inherited from parent commands\n\n```\n")
+ parentFlags.PrintDefaults()
+ buf.WriteString("```\n\n")
+ }
+ return nil
+}
+
+// GenMarkdown creates markdown output.
+func GenMarkdown(cmd *cobra.Command, w io.Writer) error {
+ return GenMarkdownCustom(cmd, w, func(s string) string { return s })
+}
+
+// GenMarkdownCustom creates custom markdown output.
+func GenMarkdownCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) string) error {
+ cmd.InitDefaultHelpCmd()
+ cmd.InitDefaultHelpFlag()
+
+ buf := new(bytes.Buffer)
+ name := cmd.CommandPath()
+
+ short := cmd.Short
+ long := cmd.Long
+ if len(long) == 0 {
+ long = short
+ }
+
+ buf.WriteString("## " + name + "\n\n")
+ buf.WriteString(short + "\n\n")
+ buf.WriteString("### Synopsis\n\n")
+ buf.WriteString(long + "\n\n")
+
+ if cmd.Runnable() {
+ buf.WriteString(fmt.Sprintf("```\n%s\n```\n\n", cmd.UseLine()))
+ }
+
+ if len(cmd.Example) > 0 {
+ buf.WriteString("### Examples\n\n")
+ buf.WriteString(fmt.Sprintf("```\n%s\n```\n\n", cmd.Example))
+ }
+
+ if err := printOptions(buf, cmd, name); err != nil {
+ return err
+ }
+ if hasSeeAlso(cmd) {
+ buf.WriteString("### SEE ALSO\n\n")
+ if cmd.HasParent() {
+ parent := cmd.Parent()
+ pname := parent.CommandPath()
+ link := pname + ".md"
+ link = strings.Replace(link, " ", "_", -1)
+ buf.WriteString(fmt.Sprintf("* [%s](%s)\t - %s\n", pname, linkHandler(link), parent.Short))
+ cmd.VisitParents(func(c *cobra.Command) {
+ if c.DisableAutoGenTag {
+ cmd.DisableAutoGenTag = c.DisableAutoGenTag
+ }
+ })
+ }
+
+ children := cmd.Commands()
+ sort.Sort(byName(children))
+
+ for _, child := range children {
+ if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() {
+ continue
+ }
+ cname := name + " " + child.Name()
+ link := cname + ".md"
+ link = strings.Replace(link, " ", "_", -1)
+ buf.WriteString(fmt.Sprintf("* [%s](%s)\t - %s\n", cname, linkHandler(link), child.Short))
+ }
+ buf.WriteString("\n")
+ }
+ if !cmd.DisableAutoGenTag {
+ buf.WriteString("###### Auto generated by spf13/cobra on " + time.Now().Format("2-Jan-2006") + "\n")
+ }
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+// GenMarkdownTree will generate a markdown page for this command and all
+// descendants in the directory given.
+// This function may not work correctly if your command names have `-` in them.
+// If you have `cmd` with two subcmds, `sub` and `sub-third`,
+// and `sub` has a subcommand called `third`, it is undefined which
+// help output will be in the file `cmd-sub-third.md`.
+func GenMarkdownTree(cmd *cobra.Command, dir string) error {
+ identity := func(s string) string { return s }
+ emptyStr := func(s string) string { return "" }
+ return GenMarkdownTreeCustom(cmd, dir, emptyStr, identity)
+}
+
+// GenMarkdownTreeCustom is the same as GenMarkdownTree, but
+// with custom filePrepender and linkHandler.
+func GenMarkdownTreeCustom(cmd *cobra.Command, dir string, filePrepender, linkHandler func(string) string) error {
+ for _, c := range cmd.Commands() {
+ if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
+ continue
+ }
+ if err := GenMarkdownTreeCustom(c, dir, filePrepender, linkHandler); err != nil {
+ return err
+ }
+ }
+
+ basename := strings.Replace(cmd.CommandPath(), " ", "_", -1) + ".md"
+ filename := filepath.Join(dir, basename)
+ f, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ if _, err := io.WriteString(f, filePrepender(filename)); err != nil {
+ return err
+ }
+ if err := GenMarkdownCustom(cmd, f, linkHandler); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/spf13/cobra/doc/md_docs.md b/vendor/github.com/spf13/cobra/doc/md_docs.md
new file mode 100644
index 00000000..56ce9fe8
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/doc/md_docs.md
@@ -0,0 +1,115 @@
+# Generating Markdown Docs For Your Own cobra.Command
+
+Generating Markdown documentation from a cobra command is incredibly easy. An example is as follows:
+
+```go
+package main
+
+import (
+ "log"
+
+ "github.com/spf13/cobra"
+ "github.com/spf13/cobra/doc"
+)
+
+func main() {
+ cmd := &cobra.Command{
+ Use: "test",
+ Short: "my test program",
+ }
+ err := doc.GenMarkdownTree(cmd, "/tmp")
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+```
+
+That will get you a Markdown document `/tmp/test.md`.
+
+## Generate markdown docs for the entire command tree
+
+This program can actually generate docs for the kubectl command in the kubernetes project:
+
+```go
+package main
+
+import (
+ "log"
+ "io/ioutil"
+ "os"
+
+ "k8s.io/kubernetes/pkg/kubectl/cmd"
+ cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
+
+ "github.com/spf13/cobra/doc"
+)
+
+func main() {
+ kubectl := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard)
+ err := doc.GenMarkdownTree(kubectl, "./")
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+```
+
+This will generate a whole series of files, one for each command in the tree, in the directory specified (in this case "./").
+
+## Generate markdown docs for a single command
+
+You may wish to have more control over the output, or to generate docs for only a single command instead of the entire tree. In that case, use `GenMarkdown` instead of `GenMarkdownTree`:
+
+```go
+ out := new(bytes.Buffer)
+ err := doc.GenMarkdown(cmd, out)
+ if err != nil {
+ log.Fatal(err)
+ }
+```
+
+This will write the Markdown doc for ONLY "cmd" into the `out` buffer.
+
+## Customize the output
+
+Both `GenMarkdown` and `GenMarkdownTree` have alternate versions with callbacks that give you some control over the output:
+
+```go
+func GenMarkdownTreeCustom(cmd *cobra.Command, dir string, filePrepender, linkHandler func(string) string) error {
+ //...
+}
+```
+
+```go
+func GenMarkdownCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) string) error {
+ //...
+}
+```
+
+The `filePrepender` callback is given the full filepath of the rendered Markdown file, and its return value is prepended to the file's contents. A common use case is to add front matter so the generated documentation can be used with [Hugo](http://gohugo.io/):
+
+```go
+const fmTemplate = `---
+date: %s
+title: "%s"
+slug: %s
+url: %s
+---
+`
+
+filePrepender := func(filename string) string {
+ now := time.Now().Format(time.RFC3339)
+ name := filepath.Base(filename)
+ base := strings.TrimSuffix(name, path.Ext(name))
+ url := "/commands/" + strings.ToLower(base) + "/"
+ return fmt.Sprintf(fmTemplate, now, strings.Replace(base, "_", " ", -1), base, url)
+}
+```
+
+The `linkHandler` can be used to customize the rendered internal links to the commands, given a filename:
+
+```go
+linkHandler := func(name string) string {
+ base := strings.TrimSuffix(name, path.Ext(name))
+ return "/commands/" + strings.ToLower(base) + "/"
+}
+```
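+
+Putting it together, a sketch that passes both callbacks from above to
+`GenMarkdownTreeCustom`:
+
+```go
+err := doc.GenMarkdownTreeCustom(cmd, "./", filePrepender, linkHandler)
+if err != nil {
+	log.Fatal(err)
+}
+```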
diff --git a/vendor/github.com/spf13/cobra/doc/rest_docs.go b/vendor/github.com/spf13/cobra/doc/rest_docs.go
new file mode 100644
index 00000000..051d8dc8
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/doc/rest_docs.go
@@ -0,0 +1,185 @@
+// Copyright 2015 Red Hat Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package doc
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/spf13/cobra"
+)
+
+func printOptionsReST(buf *bytes.Buffer, cmd *cobra.Command, name string) error {
+ flags := cmd.NonInheritedFlags()
+ flags.SetOutput(buf)
+ if flags.HasAvailableFlags() {
+ buf.WriteString("Options\n")
+ buf.WriteString("~~~~~~~\n\n::\n\n")
+ flags.PrintDefaults()
+ buf.WriteString("\n")
+ }
+
+ parentFlags := cmd.InheritedFlags()
+ parentFlags.SetOutput(buf)
+ if parentFlags.HasAvailableFlags() {
+ buf.WriteString("Options inherited from parent commands\n")
+ buf.WriteString("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n")
+ parentFlags.PrintDefaults()
+ buf.WriteString("\n")
+ }
+ return nil
+}
+
+// defaultLinkHandler renders the default ReST hyperlink markup for a command
+// name and reference.
+func defaultLinkHandler(name, ref string) string {
+ return fmt.Sprintf("`%s <%s.rst>`_", name, ref)
+}
+
+// GenReST creates reStructured Text output.
+func GenReST(cmd *cobra.Command, w io.Writer) error {
+ return GenReSTCustom(cmd, w, defaultLinkHandler)
+}
+
+// GenReSTCustom creates custom reStructured Text output.
+func GenReSTCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string, string) string) error {
+ cmd.InitDefaultHelpCmd()
+ cmd.InitDefaultHelpFlag()
+
+ buf := new(bytes.Buffer)
+ name := cmd.CommandPath()
+
+ short := cmd.Short
+ long := cmd.Long
+ if len(long) == 0 {
+ long = short
+ }
+ ref := strings.Replace(name, " ", "_", -1)
+
+ buf.WriteString(".. _" + ref + ":\n\n")
+ buf.WriteString(name + "\n")
+ buf.WriteString(strings.Repeat("-", len(name)) + "\n\n")
+ buf.WriteString(short + "\n\n")
+ buf.WriteString("Synopsis\n")
+ buf.WriteString("~~~~~~~~\n\n")
+ buf.WriteString("\n" + long + "\n\n")
+
+ if cmd.Runnable() {
+ buf.WriteString(fmt.Sprintf("::\n\n %s\n\n", cmd.UseLine()))
+ }
+
+ if len(cmd.Example) > 0 {
+ buf.WriteString("Examples\n")
+ buf.WriteString("~~~~~~~~\n\n")
+ buf.WriteString(fmt.Sprintf("::\n\n%s\n\n", indentString(cmd.Example, " ")))
+ }
+
+ if err := printOptionsReST(buf, cmd, name); err != nil {
+ return err
+ }
+ if hasSeeAlso(cmd) {
+ buf.WriteString("SEE ALSO\n")
+ buf.WriteString("~~~~~~~~\n\n")
+ if cmd.HasParent() {
+ parent := cmd.Parent()
+ pname := parent.CommandPath()
+ ref = strings.Replace(pname, " ", "_", -1)
+ buf.WriteString(fmt.Sprintf("* %s \t - %s\n", linkHandler(pname, ref), parent.Short))
+ cmd.VisitParents(func(c *cobra.Command) {
+ if c.DisableAutoGenTag {
+ cmd.DisableAutoGenTag = c.DisableAutoGenTag
+ }
+ })
+ }
+
+ children := cmd.Commands()
+ sort.Sort(byName(children))
+
+ for _, child := range children {
+ if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() {
+ continue
+ }
+ cname := name + " " + child.Name()
+ ref = strings.Replace(cname, " ", "_", -1)
+ buf.WriteString(fmt.Sprintf("* %s \t - %s\n", linkHandler(cname, ref), child.Short))
+ }
+ buf.WriteString("\n")
+ }
+ if !cmd.DisableAutoGenTag {
+ buf.WriteString("*Auto generated by spf13/cobra on " + time.Now().Format("2-Jan-2006") + "*\n")
+ }
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+// GenReSTTree will generate a ReST page for this command and all
+// descendants in the directory given.
+// This function may not work correctly if your command names have `-` in them.
+// If you have `cmd` with two subcmds, `sub` and `sub-third`,
+// and `sub` has a subcommand called `third`, it is undefined which
+// help output will be in the file `cmd-sub-third.rst`.
+func GenReSTTree(cmd *cobra.Command, dir string) error {
+ emptyStr := func(s string) string { return "" }
+ return GenReSTTreeCustom(cmd, dir, emptyStr, defaultLinkHandler)
+}
+
+// GenReSTTreeCustom is the same as GenReSTTree, but
+// with custom filePrepender and linkHandler.
+func GenReSTTreeCustom(cmd *cobra.Command, dir string, filePrepender func(string) string, linkHandler func(string, string) string) error {
+ for _, c := range cmd.Commands() {
+ if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
+ continue
+ }
+ if err := GenReSTTreeCustom(c, dir, filePrepender, linkHandler); err != nil {
+ return err
+ }
+ }
+
+ basename := strings.Replace(cmd.CommandPath(), " ", "_", -1) + ".rst"
+ filename := filepath.Join(dir, basename)
+ f, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ if _, err := io.WriteString(f, filePrepender(filename)); err != nil {
+ return err
+ }
+ if err := GenReSTCustom(cmd, f, linkHandler); err != nil {
+ return err
+ }
+ return nil
+}
+
+// adapted from: https://github.com/kr/text/blob/main/indent.go
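+// indentString prefixes every non-empty line of s with p.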
+func indentString(s, p string) string {
+ var res []byte
+ b := []byte(s)
+ prefix := []byte(p)
+ bol := true
+ for _, c := range b {
+ if bol && c != '\n' {
+ res = append(res, prefix...)
+ }
+ res = append(res, c)
+ bol = c == '\n'
+ }
+ return string(res)
+}
diff --git a/vendor/github.com/spf13/cobra/doc/rest_docs.md b/vendor/github.com/spf13/cobra/doc/rest_docs.md
new file mode 100644
index 00000000..6098430e
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/doc/rest_docs.md
@@ -0,0 +1,114 @@
+# Generating ReStructured Text Docs For Your Own cobra.Command
+
+Generating ReST pages from a cobra command is incredibly easy. An example is as follows:
+
+```go
+package main
+
+import (
+ "log"
+
+ "github.com/spf13/cobra"
+ "github.com/spf13/cobra/doc"
+)
+
+func main() {
+ cmd := &cobra.Command{
+ Use: "test",
+ Short: "my test program",
+ }
+ err := doc.GenReSTTree(cmd, "/tmp")
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+```
+
+That will get you a ReST document `/tmp/test.rst`.
+
+## Generate ReST docs for the entire command tree
+
+This program can actually generate docs for the kubectl command in the kubernetes project:
+
+```go
+package main
+
+import (
+	"io/ioutil"
+	"log"
+ "os"
+
+ "k8s.io/kubernetes/pkg/kubectl/cmd"
+ cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
+
+ "github.com/spf13/cobra/doc"
+)
+
+func main() {
+ kubectl := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard)
+ err := doc.GenReSTTree(kubectl, "./")
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+```
+
+This will generate a series of ReST files, one per command in the tree, in the directory specified (in this case `./`).
+
+## Generate ReST docs for a single command
+
+You may wish to have more control over the output, or to generate docs for only a single command rather than the entire command tree. In that case you may prefer to use `GenReST` instead of `GenReSTTree`:
+
+```go
+ out := new(bytes.Buffer)
+ err := doc.GenReST(cmd, out)
+ if err != nil {
+ log.Fatal(err)
+ }
+```
+
+This will write the ReST doc for only `cmd` into the `out` buffer.
+
+## Customize the output
+
+Both `GenReST` and `GenReSTTree` have alternate versions with callbacks that give you some control over the output:
+
+```go
+func GenReSTTreeCustom(cmd *Command, dir string, filePrepender func(string) string, linkHandler func(string, string) string) error {
+ //...
+}
+```
+
+```go
+func GenReSTCustom(cmd *Command, w io.Writer, linkHandler func(string, string) string) error {
+ //...
+}
+```
+
+The `filePrepender` callback is given the full filepath of the rendered ReST file, and its return value is prepended to that file. A common use case is to add front matter so the generated documentation can be used with [Hugo](http://gohugo.io/):
+
+```go
+const fmTemplate = `---
+date: %s
+title: "%s"
+slug: %s
+url: %s
+---
+`
+
+filePrepender := func(filename string) string {
+ now := time.Now().Format(time.RFC3339)
+ name := filepath.Base(filename)
+ base := strings.TrimSuffix(name, path.Ext(name))
+ url := "/commands/" + strings.ToLower(base) + "/"
+ return fmt.Sprintf(fmTemplate, now, strings.Replace(base, "_", " ", -1), base, url)
+}
+```
+
+The `linkHandler` can be used to customize the rendered links to the commands, given a command name and reference. This is useful when converting ReST to HTML, or when generating documentation with tools like Sphinx, where `:ref:` is used:
+
+```go
+// Sphinx cross-referencing format
+linkHandler := func(name, ref string) string {
+ return fmt.Sprintf(":ref:`%s <%s>`", name, ref)
+}
+```
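+
+With a handler like that, the tree generation call could look as follows (a sketch; `cmd` is any `*cobra.Command`, and the empty `filePrepender` mirrors what `GenReSTTree` uses internally):
+
+```go
+emptyStr := func(s string) string { return "" }
+if err := doc.GenReSTTreeCustom(cmd, "./", emptyStr, linkHandler); err != nil {
+	log.Fatal(err)
+}
+```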
diff --git a/vendor/github.com/spf13/cobra/doc/util.go b/vendor/github.com/spf13/cobra/doc/util.go
new file mode 100644
index 00000000..8d3dbece
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/doc/util.go
@@ -0,0 +1,51 @@
+// Copyright 2015 Red Hat Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package doc
+
+import (
+ "strings"
+
+ "github.com/spf13/cobra"
+)
+
+// hasSeeAlso reports whether there is a reason to print See Also information
+// in the docs: either the command has a parent, or it has a subcommand that is
+// neither deprecated nor the autogenerated help command.
+func hasSeeAlso(cmd *cobra.Command) bool {
+ if cmd.HasParent() {
+ return true
+ }
+ for _, c := range cmd.Commands() {
+ if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
+ continue
+ }
+ return true
+ }
+ return false
+}
+
+// forceMultiLine is a temporary workaround for the yaml library generating
+// incorrect YAML for long strings that do not contain a \n.
+func forceMultiLine(s string) string {
+ if len(s) > 60 && !strings.Contains(s, "\n") {
+ s = s + "\n"
+ }
+ return s
+}
+
+type byName []*cobra.Command
+
+func (s byName) Len() int { return len(s) }
+func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() }
diff --git a/vendor/github.com/spf13/cobra/doc/yaml_docs.go b/vendor/github.com/spf13/cobra/doc/yaml_docs.go
new file mode 100644
index 00000000..ea00af07
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/doc/yaml_docs.go
@@ -0,0 +1,169 @@
+// Copyright 2016 French Ben. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package doc
+
+import (
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+ "gopkg.in/yaml.v2"
+)
+
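+// cmdOption and cmdDoc mirror the layout of the YAML document emitted for
+// each command.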
+type cmdOption struct {
+ Name string
+ Shorthand string `yaml:",omitempty"`
+ DefaultValue string `yaml:"default_value,omitempty"`
+ Usage string `yaml:",omitempty"`
+}
+
+type cmdDoc struct {
+ Name string
+ Synopsis string `yaml:",omitempty"`
+ Description string `yaml:",omitempty"`
+ Options []cmdOption `yaml:",omitempty"`
+ InheritedOptions []cmdOption `yaml:"inherited_options,omitempty"`
+ Example string `yaml:",omitempty"`
+ SeeAlso []string `yaml:"see_also,omitempty"`
+}
+
+// GenYamlTree creates yaml structured ref files for this command and all descendants
+// in the directory given. This function may not work
+// correctly if your command names have `-` in them. If you have `cmd` with two
+// subcmds, `sub` and `sub-third`, and `sub` has a subcommand called `third`
+// it is undefined which help output will be in the file `cmd-sub-third.yaml`.
+func GenYamlTree(cmd *cobra.Command, dir string) error {
+ identity := func(s string) string { return s }
+ emptyStr := func(s string) string { return "" }
+ return GenYamlTreeCustom(cmd, dir, emptyStr, identity)
+}
+
+// GenYamlTreeCustom creates yaml structured ref files.
+func GenYamlTreeCustom(cmd *cobra.Command, dir string, filePrepender, linkHandler func(string) string) error {
+ for _, c := range cmd.Commands() {
+ if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
+ continue
+ }
+ if err := GenYamlTreeCustom(c, dir, filePrepender, linkHandler); err != nil {
+ return err
+ }
+ }
+
+ basename := strings.Replace(cmd.CommandPath(), " ", "_", -1) + ".yaml"
+ filename := filepath.Join(dir, basename)
+ f, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ if _, err := io.WriteString(f, filePrepender(filename)); err != nil {
+ return err
+ }
+ if err := GenYamlCustom(cmd, f, linkHandler); err != nil {
+ return err
+ }
+ return nil
+}
+
+// GenYaml creates yaml output.
+func GenYaml(cmd *cobra.Command, w io.Writer) error {
+ return GenYamlCustom(cmd, w, func(s string) string { return s })
+}
+
+// GenYamlCustom creates custom yaml output.
+func GenYamlCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) string) error {
+ cmd.InitDefaultHelpCmd()
+ cmd.InitDefaultHelpFlag()
+
+ yamlDoc := cmdDoc{}
+ yamlDoc.Name = cmd.CommandPath()
+
+ yamlDoc.Synopsis = forceMultiLine(cmd.Short)
+ yamlDoc.Description = forceMultiLine(cmd.Long)
+
+ if len(cmd.Example) > 0 {
+ yamlDoc.Example = cmd.Example
+ }
+
+ flags := cmd.NonInheritedFlags()
+ if flags.HasFlags() {
+ yamlDoc.Options = genFlagResult(flags)
+ }
+ flags = cmd.InheritedFlags()
+ if flags.HasFlags() {
+ yamlDoc.InheritedOptions = genFlagResult(flags)
+ }
+
+ if hasSeeAlso(cmd) {
+ result := []string{}
+ if cmd.HasParent() {
+ parent := cmd.Parent()
+ result = append(result, parent.CommandPath()+" - "+parent.Short)
+ }
+ children := cmd.Commands()
+ sort.Sort(byName(children))
+ for _, child := range children {
+ if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() {
+ continue
+ }
+ result = append(result, child.Name()+" - "+child.Short)
+ }
+ yamlDoc.SeeAlso = result
+ }
+
+	final, err := yaml.Marshal(&yamlDoc)
+	if err != nil {
+		return err
+	}
+
+ if _, err := w.Write(final); err != nil {
+ return err
+ }
+ return nil
+}
+
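+// genFlagResult converts the flags of a pflag.FlagSet into cmdOption entries,
+// omitting the shorthand when it is deprecated.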
+func genFlagResult(flags *pflag.FlagSet) []cmdOption {
+ var result []cmdOption
+
+ flags.VisitAll(func(flag *pflag.Flag) {
+		// TODO: when a shorthand is marked deprecated with an empty message,
+		// flag.ShorthandDeprecated is empty even though the shorthand is
+		// deprecated, so len(flag.ShorthandDeprecated) > 0 cannot detect that
+		// case; the other cases are handled correctly.
+		if len(flag.ShorthandDeprecated) == 0 && len(flag.Shorthand) > 0 {
+ opt := cmdOption{
+ flag.Name,
+ flag.Shorthand,
+ flag.DefValue,
+ forceMultiLine(flag.Usage),
+ }
+ result = append(result, opt)
+ } else {
+ opt := cmdOption{
+ Name: flag.Name,
+ DefaultValue: forceMultiLine(flag.DefValue),
+ Usage: forceMultiLine(flag.Usage),
+ }
+ result = append(result, opt)
+ }
+ })
+
+ return result
+}
diff --git a/vendor/github.com/spf13/cobra/doc/yaml_docs.md b/vendor/github.com/spf13/cobra/doc/yaml_docs.md
new file mode 100644
index 00000000..1a9b7c6a
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/doc/yaml_docs.md
@@ -0,0 +1,112 @@
+# Generating Yaml Docs For Your Own cobra.Command
+
+Generating yaml files from a cobra command is incredibly easy. An example is as follows:
+
+```go
+package main
+
+import (
+ "log"
+
+ "github.com/spf13/cobra"
+ "github.com/spf13/cobra/doc"
+)
+
+func main() {
+ cmd := &cobra.Command{
+ Use: "test",
+ Short: "my test program",
+ }
+ err := doc.GenYamlTree(cmd, "/tmp")
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+```
+
+That will get you a Yaml document `/tmp/test.yaml`.
+
+## Generate yaml docs for the entire command tree
+
+This program can actually generate docs for the kubectl command in the kubernetes project:
+
+```go
+package main
+
+import (
+ "io/ioutil"
+ "log"
+ "os"
+
+ "k8s.io/kubernetes/pkg/kubectl/cmd"
+ cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
+
+ "github.com/spf13/cobra/doc"
+)
+
+func main() {
+ kubectl := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard)
+ err := doc.GenYamlTree(kubectl, "./")
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+```
+
+This will generate a series of YAML files, one per command in the tree, in the directory specified (in this case `./`).
+
+## Generate yaml docs for a single command
+
+You may wish to have more control over the output, or to generate docs for only a single command rather than the entire command tree. In that case you may prefer to use `GenYaml` instead of `GenYamlTree`:
+
+```go
+	out := new(bytes.Buffer)
+	err := doc.GenYaml(cmd, out)
+	if err != nil {
+		log.Fatal(err)
+	}
+```
+
+This will write the yaml doc for only `cmd` into the `out` buffer.
+
+## Customize the output
+
+Both `GenYaml` and `GenYamlTree` have alternate versions with callbacks that give you some control over the output:
+
+```go
+func GenYamlTreeCustom(cmd *Command, dir string, filePrepender, linkHandler func(string) string) error {
+ //...
+}
+```
+
+```go
+func GenYamlCustom(cmd *Command, w io.Writer, linkHandler func(string) string) error {
+ //...
+}
+```
+
+The `filePrepender` callback is given the full filepath of the rendered Yaml file, and its return value is prepended to that file. A common use case is to add front matter so the generated documentation can be used with [Hugo](http://gohugo.io/):
+
+```go
+const fmTemplate = `---
+date: %s
+title: "%s"
+slug: %s
+url: %s
+---
+`
+
+filePrepender := func(filename string) string {
+ now := time.Now().Format(time.RFC3339)
+ name := filepath.Base(filename)
+ base := strings.TrimSuffix(name, path.Ext(name))
+ url := "/commands/" + strings.ToLower(base) + "/"
+ return fmt.Sprintf(fmTemplate, now, strings.Replace(base, "_", " ", -1), base, url)
+}
+```
+
+The `linkHandler` can be used to customize the rendered internal links to the commands, given a filename:
+
+```go
+linkHandler := func(name string) string {
+ base := strings.TrimSuffix(name, path.Ext(name))
+ return "/commands/" + strings.ToLower(base) + "/"
+}
+```
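+
+As with the Markdown and ReST generators, both callbacks plug into the tree variant (a sketch, assuming the definitions above and a root command `cmd`):
+
+```go
+err := doc.GenYamlTreeCustom(cmd, "./", filePrepender, linkHandler)
+if err != nil {
+	log.Fatal(err)
+}
+```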
diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml
new file mode 100644
index 00000000..9f556934
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+
+go:
+ - 1.4
+ - 1.5
+ - 1.6
+ - 1.7
+ - 1.8
+ - 1.9
+ - tip
+
+go_import_path: gopkg.in/yaml.v2
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE
new file mode 100644
index 00000000..8dada3ed
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml
new file mode 100644
index 00000000..8da58fbf
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml
@@ -0,0 +1,31 @@
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original copyright and license:
+
+ apic.go
+ emitterc.go
+ parserc.go
+ readerc.go
+ scannerc.go
+ writerc.go
+ yamlh.go
+ yamlprivateh.go
+
+Copyright (c) 2006 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/gopkg.in/yaml.v2/NOTICE b/vendor/gopkg.in/yaml.v2/NOTICE
new file mode 100644
index 00000000..866d74a7
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/NOTICE
@@ -0,0 +1,13 @@
+Copyright 2011-2016 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md
new file mode 100644
index 00000000..b50c6e87
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/README.md
@@ -0,0 +1,133 @@
+# YAML support for the Go language
+
+Introduction
+------------
+
+The yaml package enables Go programs to comfortably encode and decode YAML
+values. It was developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a
+pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
+C library to parse and generate YAML data quickly and reliably.
+
+Compatibility
+-------------
+
+The yaml package supports most of YAML 1.1 and 1.2, including support for
+anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
+implemented, and base-60 floats from YAML 1.1 are purposefully not
+supported since they're a poor design and are gone in YAML 1.2.
+
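+For instance, anchors and merge keys let one mapping inherit from another. A small sketch (the document and field names are illustrative):
+
+```Go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"gopkg.in/yaml.v2"
+)
+
+// defaults is anchored (&defaults) and merged into development (<<: *defaults).
+var doc = `
+defaults: &defaults
+  adapter: postgres
+  host: localhost
+development:
+  <<: *defaults
+  database: dev_db
+`
+
+func main() {
+	m := map[string]map[string]string{}
+	if err := yaml.Unmarshal([]byte(doc), &m); err != nil {
+		log.Fatalf("error: %v", err)
+	}
+	// "postgres" is inherited from the defaults mapping via the merge key.
+	fmt.Println(m["development"]["adapter"])
+}
+```
+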
+Installation and usage
+----------------------
+
+The import path for the package is *gopkg.in/yaml.v2*.
+
+To install it, run:
+
+ go get gopkg.in/yaml.v2
+
+API documentation
+-----------------
+
+If opened in a browser, the import path itself leads to the API documentation:
+
+ * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
+
+API stability
+-------------
+
+The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
+
+
+License
+-------
+
+The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.
+
+
+Example
+-------
+
+```Go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "gopkg.in/yaml.v2"
+)
+
+var data = `
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+`
+
+// Note: struct fields must be public in order for unmarshal to
+// correctly populate the data.
+type T struct {
+ A string
+ B struct {
+ RenamedC int `yaml:"c"`
+ D []int `yaml:",flow"`
+ }
+}
+
+func main() {
+ t := T{}
+
+ err := yaml.Unmarshal([]byte(data), &t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t:\n%v\n\n", t)
+
+ d, err := yaml.Marshal(&t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t dump:\n%s\n\n", string(d))
+
+ m := make(map[interface{}]interface{})
+
+ err = yaml.Unmarshal([]byte(data), &m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m:\n%v\n\n", m)
+
+ d, err = yaml.Marshal(&m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b:
+ c: 2
+ d:
+ - 3
+ - 4
+```
+
diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go
new file mode 100644
index 00000000..1f7e87e6
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/apic.go
@@ -0,0 +1,739 @@
+package yaml
+
+import (
+ "io"
+)
+
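+// yaml_insert_token inserts token at position pos in the parser's token queue
+// (or appends it when pos < 0), compacting the queue first when it has grown
+// to its capacity.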
+func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
+ //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
+
+ // Check if we can move the queue at the beginning of the buffer.
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
+ if parser.tokens_head != len(parser.tokens) {
+ copy(parser.tokens, parser.tokens[parser.tokens_head:])
+ }
+ parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
+ parser.tokens_head = 0
+ }
+ parser.tokens = append(parser.tokens, *token)
+ if pos < 0 {
+ return
+ }
+ copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
+ parser.tokens[parser.tokens_head+pos] = *token
+}
+
+// Create a new parser object.
+func yaml_parser_initialize(parser *yaml_parser_t) bool {
+ *parser = yaml_parser_t{
+ raw_buffer: make([]byte, 0, input_raw_buffer_size),
+ buffer: make([]byte, 0, input_buffer_size),
+ }
+ return true
+}
+
+// Destroy a parser object.
+func yaml_parser_delete(parser *yaml_parser_t) {
+ *parser = yaml_parser_t{}
+}
+
+// String read handler.
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ if parser.input_pos == len(parser.input) {
+ return 0, io.EOF
+ }
+ n = copy(buffer, parser.input[parser.input_pos:])
+ parser.input_pos += n
+ return n, nil
+}
+
+// Reader read handler.
+func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ return parser.input_reader.Read(buffer)
+}
+
+// Set a string input.
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_string_read_handler
+ parser.input = input
+ parser.input_pos = 0
+}
+
+// Set a file input.
+func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_reader_read_handler
+ parser.input_reader = r
+}
+
+// Set the source encoding.
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+ if parser.encoding != yaml_ANY_ENCODING {
+ panic("must set the encoding only once")
+ }
+ parser.encoding = encoding
+}
+
+// Create a new emitter object.
+func yaml_emitter_initialize(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{
+ buffer: make([]byte, output_buffer_size),
+ raw_buffer: make([]byte, 0, output_raw_buffer_size),
+ states: make([]yaml_emitter_state_t, 0, initial_stack_size),
+ events: make([]yaml_event_t, 0, initial_queue_size),
+ }
+}
+
+// Destroy an emitter object.
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{}
+}
+
+// String write handler.
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+ return nil
+}
+
+// yaml_writer_write_handler uses emitter.output_writer to write the
+// emitted text.
+func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ _, err := emitter.output_writer.Write(buffer)
+ return err
+}
+
+// Set a string output.
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_string_write_handler
+ emitter.output_buffer = output_buffer
+}
+
+// Set a file output.
+func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_writer_write_handler
+ emitter.output_writer = w
+}
+
+// Set the output encoding.
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+ if emitter.encoding != yaml_ANY_ENCODING {
+ panic("must set the output encoding only once")
+ }
+ emitter.encoding = encoding
+}
+
+// Set the canonical output style.
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+ emitter.canonical = canonical
+}
+
+// Set the indentation increment.
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+ if indent < 2 || indent > 9 {
+ indent = 2
+ }
+ emitter.best_indent = indent
+}
+
+// Set the preferred line width.
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+ if width < 0 {
+ width = -1
+ }
+ emitter.best_width = width
+}
+
+// Set if unescaped non-ASCII characters are allowed.
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+ emitter.unicode = unicode
+}
+
+// Set the preferred line break character.
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+ emitter.line_break = line_break
+}
+
+///*
+// * Destroy a token object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_token_delete(yaml_token_t *token)
+//{
+// assert(token); // Non-NULL token object expected.
+//
+// switch (token.type)
+// {
+// case YAML_TAG_DIRECTIVE_TOKEN:
+// yaml_free(token.data.tag_directive.handle);
+// yaml_free(token.data.tag_directive.prefix);
+// break;
+//
+// case YAML_ALIAS_TOKEN:
+// yaml_free(token.data.alias.value);
+// break;
+//
+// case YAML_ANCHOR_TOKEN:
+// yaml_free(token.data.anchor.value);
+// break;
+//
+// case YAML_TAG_TOKEN:
+// yaml_free(token.data.tag.handle);
+// yaml_free(token.data.tag.suffix);
+// break;
+//
+// case YAML_SCALAR_TOKEN:
+// yaml_free(token.data.scalar.value);
+// break;
+//
+// default:
+// break;
+// }
+//
+// memset(token, 0, sizeof(yaml_token_t));
+//}
+//
+///*
+// * Check if a string is a valid UTF-8 sequence.
+// *
+// * Check 'reader.c' for more details on UTF-8 encoding.
+// */
+//
+//static int
+//yaml_check_utf8(yaml_char_t *start, size_t length)
+//{
+// yaml_char_t *end = start+length;
+// yaml_char_t *pointer = start;
+//
+// while (pointer < end) {
+// unsigned char octet;
+// unsigned int width;
+// unsigned int value;
+// size_t k;
+//
+// octet = pointer[0];
+// width = (octet & 0x80) == 0x00 ? 1 :
+// (octet & 0xE0) == 0xC0 ? 2 :
+// (octet & 0xF0) == 0xE0 ? 3 :
+// (octet & 0xF8) == 0xF0 ? 4 : 0;
+// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
+// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
+// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
+// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
+// if (!width) return 0;
+// if (pointer+width > end) return 0;
+// for (k = 1; k < width; k ++) {
+// octet = pointer[k];
+// if ((octet & 0xC0) != 0x80) return 0;
+// value = (value << 6) + (octet & 0x3F);
+// }
+// if (!((width == 1) ||
+// (width == 2 && value >= 0x80) ||
+// (width == 3 && value >= 0x800) ||
+// (width == 4 && value >= 0x10000))) return 0;
+//
+// pointer += width;
+// }
+//
+// return 1;
+//}
+//
+
+// Create STREAM-START.
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ encoding: encoding,
+ }
+}
+
+// Create STREAM-END.
+func yaml_stream_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ }
+}
+
+// Create DOCUMENT-START.
+func yaml_document_start_event_initialize(
+ event *yaml_event_t,
+ version_directive *yaml_version_directive_t,
+ tag_directives []yaml_tag_directive_t,
+ implicit bool,
+) {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: implicit,
+ }
+}
+
+// Create DOCUMENT-END.
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ implicit: implicit,
+ }
+}
+
+///*
+// * Create ALIAS.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
+//{
+// mark yaml_mark_t = { 0, 0, 0 }
+// anchor_copy *yaml_char_t = NULL
+//
+// assert(event) // Non-NULL event object is expected.
+// assert(anchor) // Non-NULL anchor is expected.
+//
+// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
+//
+// anchor_copy = yaml_strdup(anchor)
+// if (!anchor_copy)
+// return 0
+//
+// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
+//
+// return 1
+//}
+
+// Create SCALAR.
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ anchor: anchor,
+ tag: tag,
+ value: value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-START.
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-END.
+func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ }
+ return true
+}
+
+// Create MAPPING-START.
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+}
+
+// Create MAPPING-END.
+func yaml_mapping_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ }
+}
+
+// Destroy an event object.
+func yaml_event_delete(event *yaml_event_t) {
+ *event = yaml_event_t{}
+}
+
+///*
+// * Create a document object.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_initialize(document *yaml_document_t,
+// version_directive *yaml_version_directive_t,
+// tag_directives_start *yaml_tag_directive_t,
+// tag_directives_end *yaml_tag_directive_t,
+// start_implicit int, end_implicit int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// struct {
+// start *yaml_node_t
+// end *yaml_node_t
+// top *yaml_node_t
+// } nodes = { NULL, NULL, NULL }
+// version_directive_copy *yaml_version_directive_t = NULL
+// struct {
+// start *yaml_tag_directive_t
+// end *yaml_tag_directive_t
+// top *yaml_tag_directive_t
+// } tag_directives_copy = { NULL, NULL, NULL }
+// value yaml_tag_directive_t = { NULL, NULL }
+// mark yaml_mark_t = { 0, 0, 0 }
+//
+// assert(document) // Non-NULL document object is expected.
+// assert((tag_directives_start && tag_directives_end) ||
+// (tag_directives_start == tag_directives_end))
+// // Valid tag directives are expected.
+//
+// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
+//
+// if (version_directive) {
+// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
+// if (!version_directive_copy) goto error
+// version_directive_copy.major = version_directive.major
+// version_directive_copy.minor = version_directive.minor
+// }
+//
+// if (tag_directives_start != tag_directives_end) {
+// tag_directive *yaml_tag_directive_t
+// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
+// goto error
+// for (tag_directive = tag_directives_start
+// tag_directive != tag_directives_end; tag_directive ++) {
+// assert(tag_directive.handle)
+// assert(tag_directive.prefix)
+// if (!yaml_check_utf8(tag_directive.handle,
+// strlen((char *)tag_directive.handle)))
+// goto error
+// if (!yaml_check_utf8(tag_directive.prefix,
+// strlen((char *)tag_directive.prefix)))
+// goto error
+// value.handle = yaml_strdup(tag_directive.handle)
+// value.prefix = yaml_strdup(tag_directive.prefix)
+// if (!value.handle || !value.prefix) goto error
+// if (!PUSH(&context, tag_directives_copy, value))
+// goto error
+// value.handle = NULL
+// value.prefix = NULL
+// }
+// }
+//
+// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
+// tag_directives_copy.start, tag_directives_copy.top,
+// start_implicit, end_implicit, mark, mark)
+//
+// return 1
+//
+//error:
+// STACK_DEL(&context, nodes)
+// yaml_free(version_directive_copy)
+// while (!STACK_EMPTY(&context, tag_directives_copy)) {
+// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+// }
+// STACK_DEL(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+//
+// return 0
+//}
+//
+///*
+// * Destroy a document object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_document_delete(document *yaml_document_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// tag_directive *yaml_tag_directive_t
+//
+// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// while (!STACK_EMPTY(&context, document.nodes)) {
+// node yaml_node_t = POP(&context, document.nodes)
+// yaml_free(node.tag)
+// switch (node.type) {
+// case YAML_SCALAR_NODE:
+// yaml_free(node.data.scalar.value)
+// break
+// case YAML_SEQUENCE_NODE:
+// STACK_DEL(&context, node.data.sequence.items)
+// break
+// case YAML_MAPPING_NODE:
+// STACK_DEL(&context, node.data.mapping.pairs)
+// break
+// default:
+// assert(0) // Should not happen.
+// }
+// }
+// STACK_DEL(&context, document.nodes)
+//
+// yaml_free(document.version_directive)
+// for (tag_directive = document.tag_directives.start
+// tag_directive != document.tag_directives.end
+// tag_directive++) {
+// yaml_free(tag_directive.handle)
+// yaml_free(tag_directive.prefix)
+// }
+// yaml_free(document.tag_directives.start)
+//
+// memset(document, 0, sizeof(yaml_document_t))
+//}
+//
+///**
+// * Get a document node.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_node(document *yaml_document_t, index int)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
+// return document.nodes.start + index - 1
+// }
+// return NULL
+//}
+//
+///**
+// * Get the root object.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_root_node(document *yaml_document_t)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (document.nodes.top != document.nodes.start) {
+// return document.nodes.start
+// }
+// return NULL
+//}
+//
+///*
+// * Add a scalar node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_scalar(document *yaml_document_t,
+// tag *yaml_char_t, value *yaml_char_t, length int,
+// style yaml_scalar_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// value_copy *yaml_char_t = NULL
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+// assert(value) // Non-NULL value is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (length < 0) {
+// length = strlen((char *)value)
+// }
+//
+// if (!yaml_check_utf8(value, length)) goto error
+// value_copy = yaml_malloc(length+1)
+// if (!value_copy) goto error
+// memcpy(value_copy, value, length)
+// value_copy[length] = '\0'
+//
+// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// yaml_free(tag_copy)
+// yaml_free(value_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a sequence node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_sequence(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_sequence_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_item_t
+// end *yaml_node_item_t
+// top *yaml_node_item_t
+// } items = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
+//
+// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, items)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a mapping node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_mapping(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_mapping_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_pair_t
+// end *yaml_node_pair_t
+// top *yaml_node_pair_t
+// } pairs = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
+//
+// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, pairs)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Append an item to a sequence node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_sequence_item(document *yaml_document_t,
+// sequence int, item int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// assert(document) // Non-NULL document is required.
+// assert(sequence > 0
+// && document.nodes.start + sequence <= document.nodes.top)
+// // Valid sequence id is required.
+// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
+// // A sequence node is required.
+// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
+// // Valid item id is required.
+//
+// if (!PUSH(&context,
+// document.nodes.start[sequence-1].data.sequence.items, item))
+// return 0
+//
+// return 1
+//}
+//
+///*
+// * Append a pair of a key and a value to a mapping node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_mapping_pair(document *yaml_document_t,
+// mapping int, key int, value int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// pair yaml_node_pair_t
+//
+// assert(document) // Non-NULL document is required.
+// assert(mapping > 0
+// && document.nodes.start + mapping <= document.nodes.top)
+// // Valid mapping id is required.
+// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
+// // A mapping node is required.
+// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
+// // Valid key id is required.
+// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
+// // Valid value id is required.
+//
+// pair.key = key
+// pair.value = value
+//
+// if (!PUSH(&context,
+// document.nodes.start[mapping-1].data.mapping.pairs, pair))
+// return 0
+//
+// return 1
+//}
+//
+//
diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go
new file mode 100644
index 00000000..e4e56e28
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/decode.go
@@ -0,0 +1,775 @@
+package yaml
+
+import (
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+const (
+ documentNode = 1 << iota
+ mappingNode
+ sequenceNode
+ scalarNode
+ aliasNode
+)
+
+type node struct {
+ kind int
+ line, column int
+ tag string
+ // For an alias node, alias holds the resolved alias.
+ alias *node
+ value string
+ implicit bool
+ children []*node
+ anchors map[string]*node
+}
+
+// ----------------------------------------------------------------------------
+// Parser, produces a node tree out of a libyaml event stream.
+
+type parser struct {
+ parser yaml_parser_t
+ event yaml_event_t
+ doc *node
+ doneInit bool
+}
+
+func newParser(b []byte) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+ }
+ if len(b) == 0 {
+ b = []byte{'\n'}
+ }
+ yaml_parser_set_input_string(&p.parser, b)
+ return &p
+}
+
+func newParserFromReader(r io.Reader) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+ }
+ yaml_parser_set_input_reader(&p.parser, r)
+ return &p
+}
+
+func (p *parser) init() {
+ if p.doneInit {
+ return
+ }
+ p.expect(yaml_STREAM_START_EVENT)
+ p.doneInit = true
+}
+
+func (p *parser) destroy() {
+ if p.event.typ != yaml_NO_EVENT {
+ yaml_event_delete(&p.event)
+ }
+ yaml_parser_delete(&p.parser)
+}
+
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+ if p.event.typ == yaml_NO_EVENT {
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+ }
+ if p.event.typ == yaml_STREAM_END_EVENT {
+ failf("attempted to go past the end of stream; corrupted value?")
+ }
+ if p.event.typ != e {
+ p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+ p.fail()
+ }
+ yaml_event_delete(&p.event)
+ p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+ if p.event.typ != yaml_NO_EVENT {
+ return p.event.typ
+ }
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+ return p.event.typ
+}
+
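+// fail reports the parser's current problem via failf, including the line
+// number when one is available.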
+func (p *parser) fail() {
+ var where string
+ var line int
+ if p.parser.problem_mark.line != 0 {
+ line = p.parser.problem_mark.line
+ // Scanner errors don't iterate line before returning error
+ if p.parser.error == yaml_SCANNER_ERROR {
+ line++
+ }
+ } else if p.parser.context_mark.line != 0 {
+ line = p.parser.context_mark.line
+ }
+ if line != 0 {
+ where = "line " + strconv.Itoa(line) + ": "
+ }
+ var msg string
+ if len(p.parser.problem) > 0 {
+ msg = p.parser.problem
+ } else {
+ msg = "unknown problem parsing YAML content"
+ }
+ failf("%s%s", where, msg)
+}
+
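+// anchor records n in the current document's anchor table when the event
+// carried an anchor name.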
+func (p *parser) anchor(n *node, anchor []byte) {
+ if anchor != nil {
+ p.doc.anchors[string(anchor)] = n
+ }
+}
+
+func (p *parser) parse() *node {
+ p.init()
+ switch p.peek() {
+ case yaml_SCALAR_EVENT:
+ return p.scalar()
+ case yaml_ALIAS_EVENT:
+ return p.alias()
+ case yaml_MAPPING_START_EVENT:
+ return p.mapping()
+ case yaml_SEQUENCE_START_EVENT:
+ return p.sequence()
+ case yaml_DOCUMENT_START_EVENT:
+ return p.document()
+ case yaml_STREAM_END_EVENT:
+ // Happens when attempting to decode an empty buffer.
+ return nil
+ default:
+ panic("attempted to parse unknown event: " + p.event.typ.String())
+ }
+}
+
+func (p *parser) node(kind int) *node {
+ return &node{
+ kind: kind,
+ line: p.event.start_mark.line,
+ column: p.event.start_mark.column,
+ }
+}
+
+func (p *parser) document() *node {
+ n := p.node(documentNode)
+ n.anchors = make(map[string]*node)
+ p.doc = n
+ p.expect(yaml_DOCUMENT_START_EVENT)
+ n.children = append(n.children, p.parse())
+ p.expect(yaml_DOCUMENT_END_EVENT)
+ return n
+}
+
+func (p *parser) alias() *node {
+ n := p.node(aliasNode)
+ n.value = string(p.event.anchor)
+ n.alias = p.doc.anchors[n.value]
+ if n.alias == nil {
+ failf("unknown anchor '%s' referenced", n.value)
+ }
+ p.expect(yaml_ALIAS_EVENT)
+ return n
+}
+
+func (p *parser) scalar() *node {
+ n := p.node(scalarNode)
+ n.value = string(p.event.value)
+ n.tag = string(p.event.tag)
+ n.implicit = p.event.implicit
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_SCALAR_EVENT)
+ return n
+}
+
+func (p *parser) sequence() *node {
+ n := p.node(sequenceNode)
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_SEQUENCE_START_EVENT)
+ for p.peek() != yaml_SEQUENCE_END_EVENT {
+ n.children = append(n.children, p.parse())
+ }
+ p.expect(yaml_SEQUENCE_END_EVENT)
+ return n
+}
+
+func (p *parser) mapping() *node {
+ n := p.node(mappingNode)
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_MAPPING_START_EVENT)
+ for p.peek() != yaml_MAPPING_END_EVENT {
+ n.children = append(n.children, p.parse(), p.parse())
+ }
+ p.expect(yaml_MAPPING_END_EVENT)
+ return n
+}
+
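+// Editorial note on the tree shape (illustrative, not upstream prose):
+// for a document such as
+//
+//	a: [1, 2]
+//
+// the parser above yields roughly
+//
+//	documentNode
+//	  mappingNode
+//	    scalarNode "a"
+//	    sequenceNode
+//	      scalarNode "1"
+//	      scalarNode "2"
+//
+// with mapping children stored as alternating key/value nodes.
+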
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
+
+type decoder struct {
+ doc *node
+ aliases map[*node]bool
+ mapType reflect.Type
+ terrors []string
+ strict bool
+}
+
+var (
+ mapItemType = reflect.TypeOf(MapItem{})
+ durationType = reflect.TypeOf(time.Duration(0))
+ defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
+ ifaceType = defaultMapType.Elem()
+ timeType = reflect.TypeOf(time.Time{})
+ ptrTimeType = reflect.TypeOf(&time.Time{})
+)
+
+func newDecoder(strict bool) *decoder {
+ d := &decoder{mapType: defaultMapType, strict: strict}
+ d.aliases = make(map[*node]bool)
+ return d
+}
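+
+// For orientation (hypothetical usage; Unmarshal is the package entry
+// point defined in yaml.go): decoders are driven indirectly, e.g.
+//
+//	var cfg struct {
+//		Name string `yaml:"name"`
+//	}
+//	err := Unmarshal([]byte("name: demo"), &cfg)
+//
+// which parses a node tree and walks it with d.unmarshal.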
+
+func (d *decoder) terror(n *node, tag string, out reflect.Value) {
+ if n.tag != "" {
+ tag = n.tag
+ }
+ value := n.value
+ if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
+ if len(value) > 10 {
+ value = " `" + value[:7] + "...`"
+ } else {
+ value = " `" + value + "`"
+ }
+ }
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
+}
+
+func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
+ terrlen := len(d.terrors)
+ err := u.UnmarshalYAML(func(v interface{}) (err error) {
+ defer handleErr(&err)
+ d.unmarshal(n, reflect.ValueOf(v))
+ if len(d.terrors) > terrlen {
+ issues := d.terrors[terrlen:]
+ d.terrors = d.terrors[:terrlen]
+ return &TypeError{issues}
+ }
+ return nil
+ })
+ if e, ok := err.(*TypeError); ok {
+ d.terrors = append(d.terrors, e.Errors...)
+ return false
+ }
+ if err != nil {
+ fail(err)
+ }
+ return true
+}
+
+// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
+// if a value is found to implement it.
+// It returns the initialized and dereferenced out value, whether
+// unmarshalling was already done by UnmarshalYAML, and if so whether
+// it unmarshalled successfully.
+//
+// If n holds a null value, prepare returns before doing anything.
+func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
+ if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) {
+ return out, false, false
+ }
+ again := true
+ for again {
+ again = false
+ if out.Kind() == reflect.Ptr {
+ if out.IsNil() {
+ out.Set(reflect.New(out.Type().Elem()))
+ }
+ out = out.Elem()
+ again = true
+ }
+ if out.CanAddr() {
+ if u, ok := out.Addr().Interface().(Unmarshaler); ok {
+ good = d.callUnmarshaler(n, u)
+ return out, true, good
+ }
+ }
+ }
+ return out, false, false
+}
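+
+// A minimal sketch of a custom Unmarshaler that prepare dispatches to
+// (illustrative; the Unmarshaler interface is declared in yaml.go):
+//
+//	type Port int
+//
+//	func (p *Port) UnmarshalYAML(unmarshal func(interface{}) error) error {
+//		var s string
+//		if err := unmarshal(&s); err != nil {
+//			return err
+//		}
+//		// parse s into *p here, returning an error for bad input
+//		return nil
+//	}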
+
+func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
+ switch n.kind {
+ case documentNode:
+ return d.document(n, out)
+ case aliasNode:
+ return d.alias(n, out)
+ }
+ out, unmarshaled, good := d.prepare(n, out)
+ if unmarshaled {
+ return good
+ }
+ switch n.kind {
+ case scalarNode:
+ good = d.scalar(n, out)
+ case mappingNode:
+ good = d.mapping(n, out)
+ case sequenceNode:
+ good = d.sequence(n, out)
+ default:
+ panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
+ }
+ return good
+}
+
+func (d *decoder) document(n *node, out reflect.Value) (good bool) {
+ if len(n.children) == 1 {
+ d.doc = n
+ d.unmarshal(n.children[0], out)
+ return true
+ }
+ return false
+}
+
+func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
+ if d.aliases[n] {
+ // TODO this could actually be allowed in some circumstances.
+ failf("anchor '%s' value contains itself", n.value)
+ }
+ d.aliases[n] = true
+ good = d.unmarshal(n.alias, out)
+ delete(d.aliases, n)
+ return good
+}
+
+var zeroValue reflect.Value
+
+func resetMap(out reflect.Value) {
+ for _, k := range out.MapKeys() {
+ out.SetMapIndex(k, zeroValue)
+ }
+}
+
+func (d *decoder) scalar(n *node, out reflect.Value) bool {
+ var tag string
+ var resolved interface{}
+ if n.tag == "" && !n.implicit {
+ tag = yaml_STR_TAG
+ resolved = n.value
+ } else {
+ tag, resolved = resolve(n.tag, n.value)
+ if tag == yaml_BINARY_TAG {
+ data, err := base64.StdEncoding.DecodeString(resolved.(string))
+ if err != nil {
+ failf("!!binary value contains invalid base64 data")
+ }
+ resolved = string(data)
+ }
+ }
+ if resolved == nil {
+ if out.Kind() == reflect.Map && !out.CanAddr() {
+ resetMap(out)
+ } else {
+ out.Set(reflect.Zero(out.Type()))
+ }
+ return true
+ }
+ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+ // We've resolved to exactly the type we want, so use that.
+ out.Set(resolvedv)
+ return true
+ }
+ // Perhaps we can use the value as a TextUnmarshaler to
+ // set its value.
+ if out.CanAddr() {
+ u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
+ if ok {
+ var text []byte
+ if tag == yaml_BINARY_TAG {
+ text = []byte(resolved.(string))
+ } else {
+ // We let any value be unmarshaled into TextUnmarshaler.
+ // That might be more lax than we'd like, but the
+				// TextUnmarshaler itself should reject any dubious values.
+ text = []byte(n.value)
+ }
+ err := u.UnmarshalText(text)
+ if err != nil {
+ fail(err)
+ }
+ return true
+ }
+ }
+ switch out.Kind() {
+ case reflect.String:
+ if tag == yaml_BINARY_TAG {
+ out.SetString(resolved.(string))
+ return true
+ }
+ if resolved != nil {
+ out.SetString(n.value)
+ return true
+ }
+ case reflect.Interface:
+ if resolved == nil {
+ out.Set(reflect.Zero(out.Type()))
+ } else if tag == yaml_TIMESTAMP_TAG {
+ // It looks like a timestamp but for backward compatibility
+ // reasons we set it as a string, so that code that unmarshals
+ // timestamp-like values into interface{} will continue to
+ // see a string and not a time.Time.
+ // TODO(v3) Drop this.
+ out.Set(reflect.ValueOf(n.value))
+ } else {
+ out.Set(reflect.ValueOf(resolved))
+ }
+ return true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ switch resolved := resolved.(type) {
+ case int:
+ if !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case int64:
+ if !out.OverflowInt(resolved) {
+ out.SetInt(resolved)
+ return true
+ }
+ case uint64:
+ if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case float64:
+ if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case string:
+ if out.Type() == durationType {
+ d, err := time.ParseDuration(resolved)
+ if err == nil {
+ out.SetInt(int64(d))
+ return true
+ }
+ }
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ switch resolved := resolved.(type) {
+ case int:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case int64:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case uint64:
+		if !out.OverflowUint(resolved) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case float64:
+ if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ }
+ case reflect.Bool:
+ switch resolved := resolved.(type) {
+ case bool:
+ out.SetBool(resolved)
+ return true
+ }
+ case reflect.Float32, reflect.Float64:
+ switch resolved := resolved.(type) {
+ case int:
+ out.SetFloat(float64(resolved))
+ return true
+ case int64:
+ out.SetFloat(float64(resolved))
+ return true
+ case uint64:
+ out.SetFloat(float64(resolved))
+ return true
+ case float64:
+ out.SetFloat(resolved)
+ return true
+ }
+ case reflect.Struct:
+ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+ out.Set(resolvedv)
+ return true
+ }
+ case reflect.Ptr:
+ if out.Type().Elem() == reflect.TypeOf(resolved) {
+			// TODO Does this make sense? When is out a Ptr except when decoding a nil value?
+ elem := reflect.New(out.Type().Elem())
+ elem.Elem().Set(reflect.ValueOf(resolved))
+ out.Set(elem)
+ return true
+ }
+ }
+ d.terror(n, tag, out)
+ return false
+}
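+
+// Illustrative outcomes of the resolution above (see resolve.go for the
+// tag table): "123" into an int field sets 123; "true" into a bool sets
+// true; a !!binary scalar is base64-decoded before assignment; and "30s"
+// into a time.Duration field is parsed with time.ParseDuration.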
+
+func settableValueOf(i interface{}) reflect.Value {
+ v := reflect.ValueOf(i)
+ sv := reflect.New(v.Type()).Elem()
+ sv.Set(v)
+ return sv
+}
+
+func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
+ l := len(n.children)
+
+ var iface reflect.Value
+ switch out.Kind() {
+ case reflect.Slice:
+ out.Set(reflect.MakeSlice(out.Type(), l, l))
+ case reflect.Array:
+ if l != out.Len() {
+ failf("invalid array: want %d elements but got %d", out.Len(), l)
+ }
+ case reflect.Interface:
+ // No type hints. Will have to use a generic sequence.
+ iface = out
+ out = settableValueOf(make([]interface{}, l))
+ default:
+ d.terror(n, yaml_SEQ_TAG, out)
+ return false
+ }
+ et := out.Type().Elem()
+
+ j := 0
+ for i := 0; i < l; i++ {
+ e := reflect.New(et).Elem()
+ if ok := d.unmarshal(n.children[i], e); ok {
+ out.Index(j).Set(e)
+ j++
+ }
+ }
+ if out.Kind() != reflect.Array {
+ out.Set(out.Slice(0, j))
+ }
+ if iface.IsValid() {
+ iface.Set(out)
+ }
+ return true
+}
+
+func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
+ switch out.Kind() {
+ case reflect.Struct:
+ return d.mappingStruct(n, out)
+ case reflect.Slice:
+ return d.mappingSlice(n, out)
+ case reflect.Map:
+ // okay
+ case reflect.Interface:
+ if d.mapType.Kind() == reflect.Map {
+ iface := out
+ out = reflect.MakeMap(d.mapType)
+ iface.Set(out)
+ } else {
+ slicev := reflect.New(d.mapType).Elem()
+ if !d.mappingSlice(n, slicev) {
+ return false
+ }
+ out.Set(slicev)
+ return true
+ }
+ default:
+ d.terror(n, yaml_MAP_TAG, out)
+ return false
+ }
+ outt := out.Type()
+ kt := outt.Key()
+ et := outt.Elem()
+
+ mapType := d.mapType
+	if kt == ifaceType && et == ifaceType {
+ d.mapType = outt
+ }
+
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(outt))
+ }
+ l := len(n.children)
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.children[i]) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ k := reflect.New(kt).Elem()
+ if d.unmarshal(n.children[i], k) {
+ kkind := k.Kind()
+ if kkind == reflect.Interface {
+ kkind = k.Elem().Kind()
+ }
+ if kkind == reflect.Map || kkind == reflect.Slice {
+ failf("invalid map key: %#v", k.Interface())
+ }
+ e := reflect.New(et).Elem()
+ if d.unmarshal(n.children[i+1], e) {
+ d.setMapIndex(n.children[i+1], out, k, e)
+ }
+ }
+ }
+ d.mapType = mapType
+ return true
+}
+
+func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) {
+ if d.strict && out.MapIndex(k) != zeroValue {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface()))
+ return
+ }
+ out.SetMapIndex(k, v)
+}
+
+func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
+ outt := out.Type()
+ if outt.Elem() != mapItemType {
+ d.terror(n, yaml_MAP_TAG, out)
+ return false
+ }
+
+ mapType := d.mapType
+ d.mapType = outt
+
+ var slice []MapItem
+ var l = len(n.children)
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.children[i]) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ item := MapItem{}
+ k := reflect.ValueOf(&item.Key).Elem()
+ if d.unmarshal(n.children[i], k) {
+ v := reflect.ValueOf(&item.Value).Elem()
+ if d.unmarshal(n.children[i+1], v) {
+ slice = append(slice, item)
+ }
+ }
+ }
+ out.Set(reflect.ValueOf(slice))
+ d.mapType = mapType
+ return true
+}
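+
+// Sketch of when mappingSlice applies (MapSlice and MapItem are declared
+// in yaml.go): decoding into MapSlice preserves key order, e.g.
+//
+//	var ms MapSlice
+//	_ = Unmarshal([]byte("b: 2\na: 1"), &ms)
+//	// ms == MapSlice{{Key: "b", Value: 2}, {Key: "a", Value: 1}}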
+
+func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
+ sinfo, err := getStructInfo(out.Type())
+ if err != nil {
+ panic(err)
+ }
+ name := settableValueOf("")
+ l := len(n.children)
+
+ var inlineMap reflect.Value
+ var elemType reflect.Type
+ if sinfo.InlineMap != -1 {
+ inlineMap = out.Field(sinfo.InlineMap)
+ inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
+ elemType = inlineMap.Type().Elem()
+ }
+
+ var doneFields []bool
+ if d.strict {
+ doneFields = make([]bool, len(sinfo.FieldsList))
+ }
+ for i := 0; i < l; i += 2 {
+ ni := n.children[i]
+ if isMerge(ni) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ if !d.unmarshal(ni, name) {
+ continue
+ }
+ if info, ok := sinfo.FieldsMap[name.String()]; ok {
+ if d.strict {
+ if doneFields[info.Id] {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type()))
+ continue
+ }
+ doneFields[info.Id] = true
+ }
+ var field reflect.Value
+ if info.Inline == nil {
+ field = out.Field(info.Num)
+ } else {
+ field = out.FieldByIndex(info.Inline)
+ }
+ d.unmarshal(n.children[i+1], field)
+ } else if sinfo.InlineMap != -1 {
+ if inlineMap.IsNil() {
+ inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+ }
+ value := reflect.New(elemType).Elem()
+ d.unmarshal(n.children[i+1], value)
+ d.setMapIndex(n.children[i+1], inlineMap, name, value)
+ } else if d.strict {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type()))
+ }
+ }
+ return true
+}
+
+func failWantMap() {
+ failf("map merge requires map or sequence of maps as the value")
+}
+
+func (d *decoder) merge(n *node, out reflect.Value) {
+ switch n.kind {
+ case mappingNode:
+ d.unmarshal(n, out)
+ case aliasNode:
+ an, ok := d.doc.anchors[n.value]
+ if ok && an.kind != mappingNode {
+ failWantMap()
+ }
+ d.unmarshal(n, out)
+ case sequenceNode:
+ // Step backwards as earlier nodes take precedence.
+ for i := len(n.children) - 1; i >= 0; i-- {
+ ni := n.children[i]
+ if ni.kind == aliasNode {
+ an, ok := d.doc.anchors[ni.value]
+ if ok && an.kind != mappingNode {
+ failWantMap()
+ }
+ } else if ni.kind != mappingNode {
+ failWantMap()
+ }
+ d.unmarshal(ni, out)
+ }
+ default:
+ failWantMap()
+ }
+}
+
+func isMerge(n *node) bool {
+	return n.kind == scalarNode && n.value == "<<" && (n.implicit || n.tag == yaml_MERGE_TAG)
+}
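+
+// Editorial example of the merge handling above (illustrative):
+//
+//	base: &base {a: 1, b: 2}
+//	over:
+//	  <<: *base
+//	  b: 3
+//
+// decodes over as {a: 1, b: 3}: explicit keys win, and when the merge
+// value is a sequence of maps, earlier entries take precedence, hence
+// the backwards loop in merge.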
diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go
new file mode 100644
index 00000000..a1c2cc52
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/emitterc.go
@@ -0,0 +1,1685 @@
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Flush the buffer if needed.
+func flush(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) {
+ return yaml_emitter_flush(emitter)
+ }
+ return true
+}
+
+// Put a character to the output buffer.
+func put(emitter *yaml_emitter_t, value byte) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.buffer[emitter.buffer_pos] = value
+ emitter.buffer_pos++
+ emitter.column++
+ return true
+}
+
+// Put a line break to the output buffer.
+func put_break(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ switch emitter.line_break {
+ case yaml_CR_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\r'
+ emitter.buffer_pos += 1
+ case yaml_LN_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\n'
+ emitter.buffer_pos += 1
+ case yaml_CRLN_BREAK:
+ emitter.buffer[emitter.buffer_pos+0] = '\r'
+ emitter.buffer[emitter.buffer_pos+1] = '\n'
+ emitter.buffer_pos += 2
+ default:
+ panic("unknown line break setting")
+ }
+ emitter.column = 0
+ emitter.line++
+ return true
+}
+
+// Copy a character from a string into the buffer.
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ p := emitter.buffer_pos
+ w := width(s[*i])
+ switch w {
+ case 4:
+ emitter.buffer[p+3] = s[*i+3]
+ fallthrough
+ case 3:
+ emitter.buffer[p+2] = s[*i+2]
+ fallthrough
+ case 2:
+ emitter.buffer[p+1] = s[*i+1]
+ fallthrough
+ case 1:
+ emitter.buffer[p+0] = s[*i+0]
+ default:
+ panic("unknown character width")
+ }
+ emitter.column++
+ emitter.buffer_pos += w
+ *i += w
+ return true
+}
+
+// Write a whole string into the buffer.
+func write_all(emitter *yaml_emitter_t, s []byte) bool {
+ for i := 0; i < len(s); {
+ if !write(emitter, s, &i) {
+ return false
+ }
+ }
+ return true
+}
+
+// Copy a line break character from a string into the buffer.
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if s[*i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ *i++
+ } else {
+ if !write(emitter, s, i) {
+ return false
+ }
+ emitter.column = 0
+ emitter.line++
+ }
+ return true
+}
+
+// Set an emitter error and return false.
+func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_EMITTER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Emit an event.
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.events = append(emitter.events, *event)
+ for !yaml_emitter_need_more_events(emitter) {
+ event := &emitter.events[emitter.events_head]
+ if !yaml_emitter_analyze_event(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_state_machine(emitter, event) {
+ return false
+ }
+ yaml_event_delete(event)
+ emitter.events_head++
+ }
+ return true
+}
+
+// Check if we need to accumulate more events before emitting.
+//
+// We accumulate extra events before emitting:
+// - 1 event for DOCUMENT-START
+// - 2 events for SEQUENCE-START
+// - 3 events for MAPPING-START
+//
+func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
+ if emitter.events_head == len(emitter.events) {
+ return true
+ }
+ var accumulate int
+	switch emitter.events[emitter.events_head].typ {
+	case yaml_DOCUMENT_START_EVENT:
+		accumulate = 1
+	case yaml_SEQUENCE_START_EVENT:
+		accumulate = 2
+	case yaml_MAPPING_START_EVENT:
+		accumulate = 3
+	default:
+		return false
+	}
+ if len(emitter.events)-emitter.events_head > accumulate {
+ return false
+ }
+ var level int
+ for i := emitter.events_head; i < len(emitter.events); i++ {
+ switch emitter.events[i].typ {
+ case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
+ level++
+ case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
+ level--
+ }
+ if level == 0 {
+ return false
+ }
+ }
+ return true
+}
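+
+// Why accumulate at all (a reading of the heuristic, not upstream prose):
+// the queued lookahead lets yaml_emitter_check_empty_sequence,
+// yaml_emitter_check_empty_mapping and yaml_emitter_check_simple_key
+// below inspect the next few events before any output for the current
+// event is committed.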
+
+// Append a directive to the directives stack.
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
+ }
+ }
+
+ // [Go] Do we actually need to copy this given garbage collection
+ // and the lack of deallocating destructors?
+ tag_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(tag_copy.handle, value.handle)
+ copy(tag_copy.prefix, value.prefix)
+ emitter.tag_directives = append(emitter.tag_directives, tag_copy)
+ return true
+}
+
+// Increase the indentation level.
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
+ emitter.indents = append(emitter.indents, emitter.indent)
+ if emitter.indent < 0 {
+ if flow {
+ emitter.indent = emitter.best_indent
+ } else {
+ emitter.indent = 0
+ }
+ } else if !indentless {
+ emitter.indent += emitter.best_indent
+ }
+ return true
+}
+
+// State dispatcher.
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ switch emitter.state {
+ default:
+ case yaml_EMIT_STREAM_START_STATE:
+ return yaml_emitter_emit_stream_start(emitter, event)
+
+ case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, true)
+
+ case yaml_EMIT_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, false)
+
+ case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+ return yaml_emitter_emit_document_content(emitter, event)
+
+ case yaml_EMIT_DOCUMENT_END_STATE:
+ return yaml_emitter_emit_document_end(emitter, event)
+
+ case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_END_STATE:
+ return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
+ }
+ panic("invalid emitter state")
+}
+
+// Expect STREAM-START.
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_STREAM_START_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
+ }
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = event.encoding
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = yaml_UTF8_ENCODING
+ }
+ }
+ if emitter.best_indent < 2 || emitter.best_indent > 9 {
+ emitter.best_indent = 2
+ }
+ if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
+ emitter.best_width = 80
+ }
+ if emitter.best_width < 0 {
+ emitter.best_width = 1<<31 - 1
+ }
+ if emitter.line_break == yaml_ANY_BREAK {
+ emitter.line_break = yaml_LN_BREAK
+ }
+
+ emitter.indent = -1
+ emitter.line = 0
+ emitter.column = 0
+ emitter.whitespace = true
+ emitter.indention = true
+
+ if emitter.encoding != yaml_UTF8_ENCODING {
+ if !yaml_emitter_write_bom(emitter) {
+ return false
+ }
+ }
+ emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
+ return true
+}
+
+// Expect DOCUMENT-START or STREAM-END.
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+
+ if event.typ == yaml_DOCUMENT_START_EVENT {
+
+ if event.version_directive != nil {
+ if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
+ return false
+ }
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(default_tag_directives); i++ {
+ tag_directive := &default_tag_directives[i]
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
+ return false
+ }
+ }
+
+ implicit := event.implicit
+ if !first || emitter.canonical {
+ implicit = false
+ }
+
+ if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if event.version_directive != nil {
+ implicit = false
+ if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if len(event.tag_directives) > 0 {
+ implicit = false
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ if yaml_emitter_check_empty_document(emitter) {
+ implicit = false
+ }
+ if !implicit {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
+ return false
+ }
+ if emitter.canonical {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
+ return true
+ }
+
+ if event.typ == yaml_STREAM_END_EVENT {
+ if emitter.open_ended {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_END_STATE
+ return true
+ }
+
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
+}
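+
+// Illustrative output of the directive handling above for an explicit
+// document (the tag prefix is a made-up example):
+//
+//	%YAML 1.1
+//	%TAG !e! tag:example.com,2000:
+//	---
+//
+// With no directives and an implicit document, the "---" marker is
+// omitted entirely.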
+
+// Expect the root node.
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+ return yaml_emitter_emit_node(emitter, event, true, false, false, false)
+}
+
+// Expect DOCUMENT-END.
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_DOCUMENT_END_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !event.implicit {
+ // [Go] Allocate the slice elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_DOCUMENT_START_STATE
+ emitter.tag_directives = emitter.tag_directives[:0]
+ return true
+}
+
+// Expect a flow item node.
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+
+ return true
+ }
+
+ if !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a flow key node.
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+
+ if !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a flow value node.
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block item node.
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) {
+ return false
+ }
+ }
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, false) {
+ return false
+ }
+ }
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a node.
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
+ root bool, sequence bool, mapping bool, simple_key bool) bool {
+
+ emitter.root_context = root
+ emitter.sequence_context = sequence
+ emitter.mapping_context = mapping
+ emitter.simple_key_context = simple_key
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ return yaml_emitter_emit_alias(emitter, event)
+ case yaml_SCALAR_EVENT:
+ return yaml_emitter_emit_scalar(emitter, event)
+ case yaml_SEQUENCE_START_EVENT:
+ return yaml_emitter_emit_sequence_start(emitter, event)
+ case yaml_MAPPING_START_EVENT:
+ return yaml_emitter_emit_mapping_start(emitter, event)
+ default:
+ return yaml_emitter_set_emitter_error(emitter,
+ fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ))
+ }
+}
+
+// Expect ALIAS.
+func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SCALAR.
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_select_scalar_style(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ if !yaml_emitter_process_scalar(emitter) {
+ return false
+ }
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SEQUENCE-START.
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
+ yaml_emitter_check_empty_sequence(emitter) {
+ emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+ }
+ return true
+}
+
+// Expect MAPPING-START.
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
+ yaml_emitter_check_empty_mapping(emitter) {
+ emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+ }
+ return true
+}
+
+// Check if the document content is an empty scalar.
+func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
+ return false // [Go] Huh?
+}
+
+// Check if the next events represent an empty sequence.
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
+}
+
+// Check if the next events represent an empty mapping.
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+ length := 0
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_ALIAS_EVENT:
+ length += len(emitter.anchor_data.anchor)
+ case yaml_SCALAR_EVENT:
+ if emitter.scalar_data.multiline {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix) +
+ len(emitter.scalar_data.value)
+ case yaml_SEQUENCE_START_EVENT:
+ if !yaml_emitter_check_empty_sequence(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ case yaml_MAPPING_START_EVENT:
+ if !yaml_emitter_check_empty_mapping(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ default:
+ return false
+ }
+ return length <= 128
+}
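+
+// Editorial note: keys that fail this check are emitted in the explicit
+// "? key" form by the mapping states above. The 128-byte cap is an
+// emitter choice, stricter than the 1024-character simple-key limit in
+// the YAML spec.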
+
+// Determine an acceptable scalar style.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+ if no_tag && !event.implicit && !event.quoted_implicit {
+ return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+ }
+
+ style := event.scalar_style()
+ if style == yaml_ANY_SCALAR_STYLE {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ if emitter.canonical {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ if emitter.simple_key_context && emitter.scalar_data.multiline {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+
+ if style == yaml_PLAIN_SCALAR_STYLE {
+ if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+ emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if no_tag && !event.implicit {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+ if !emitter.scalar_data.single_quoted_allowed {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+ if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+
+ if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+ emitter.tag_data.handle = []byte{'!'}
+ }
+ emitter.scalar_data.style = style
+ return true
+}
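+
+// Illustrative outcomes of the selection above under default settings:
+// "hello" stays plain; an empty scalar in flow or simple-key context is
+// forced to single quotes; a multiline value used as a simple key is
+// forced to double quotes; canonical mode double-quotes everything.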
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+ if emitter.anchor_data.anchor == nil {
+ return true
+ }
+ c := []byte{'&'}
+ if emitter.anchor_data.alias {
+ c[0] = '*'
+ }
+ if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+ return false
+ }
+ return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+ if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+ return true
+ }
+ if len(emitter.tag_data.handle) > 0 {
+ if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+ return false
+ }
+ if len(emitter.tag_data.suffix) > 0 {
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ }
+ } else {
+ // [Go] Allocate these slices elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+ return false
+ }
+ }
+ return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+ switch emitter.scalar_data.style {
+ case yaml_PLAIN_SCALAR_STYLE:
+ return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_LITERAL_SCALAR_STYLE:
+ return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+ case yaml_FOLDED_SCALAR_STYLE:
+ return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+ }
+ panic("unknown scalar style")
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+ if version_directive.major != 1 || version_directive.minor != 1 {
+ return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+ }
+ return true
+}
+
+// Check if a %TAG directive is valid.
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+ handle := tag_directive.handle
+ prefix := tag_directive.prefix
+ if len(handle) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+ }
+ if handle[0] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+ }
+ if handle[len(handle)-1] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
+ }
+ for i := 1; i < len(handle)-1; i += width(handle[i]) {
+ if !is_alpha(handle, i) {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
+ }
+ }
+ if len(prefix) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
+ }
+ return true
+}
+
+// Check if an anchor is valid.
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
+ if len(anchor) == 0 {
+ problem := "anchor value must not be empty"
+ if alias {
+ problem = "alias value must not be empty"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ for i := 0; i < len(anchor); i += width(anchor[i]) {
+ if !is_alpha(anchor, i) {
+ problem := "anchor value must contain alphanumerical characters only"
+ if alias {
+ problem = "alias value must contain alphanumerical characters only"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ }
+ emitter.anchor_data.anchor = anchor
+ emitter.anchor_data.alias = alias
+ return true
+}
+
+// Check if a tag is valid.
+func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
+ if len(tag) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
+ }
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ tag_directive := &emitter.tag_directives[i]
+ if bytes.HasPrefix(tag, tag_directive.prefix) {
+ emitter.tag_data.handle = tag_directive.handle
+ emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
+ return true
+ }
+ }
+ emitter.tag_data.suffix = tag
+ return true
+}
+
+// Check if a scalar is valid.
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ var (
+ block_indicators = false
+ flow_indicators = false
+ line_breaks = false
+ special_characters = false
+
+ leading_space = false
+ leading_break = false
+ trailing_space = false
+ trailing_break = false
+ break_space = false
+ space_break = false
+
+ preceded_by_whitespace = false
+ followed_by_whitespace = false
+ previous_space = false
+ previous_break = false
+ )
+
+ emitter.scalar_data.value = value
+
+ if len(value) == 0 {
+ emitter.scalar_data.multiline = false
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = false
+ return true
+ }
+
+ if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
+ block_indicators = true
+ flow_indicators = true
+ }
+
+ preceded_by_whitespace = true
+ for i, w := 0, 0; i < len(value); i += w {
+ w = width(value[i])
+ followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
+
+ if i == 0 {
+ switch value[i] {
+ case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+ flow_indicators = true
+ block_indicators = true
+ case '?', ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '-':
+ if followed_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ } else {
+ switch value[i] {
+ case ',', '?', '[', ']', '{', '}':
+ flow_indicators = true
+ case ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '#':
+ if preceded_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ }
+
+		if !is_printable(value, i) || (!is_ascii(value, i) && !emitter.unicode) {
+ special_characters = true
+ }
+ if is_space(value, i) {
+ if i == 0 {
+ leading_space = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_space = true
+ }
+ if previous_break {
+ break_space = true
+ }
+ previous_space = true
+ previous_break = false
+ } else if is_break(value, i) {
+ line_breaks = true
+ if i == 0 {
+ leading_break = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_break = true
+ }
+ if previous_space {
+ space_break = true
+ }
+ previous_space = false
+ previous_break = true
+ } else {
+ previous_space = false
+ previous_break = false
+ }
+
+		// [Go]: Why 'z'? It can't be the end of the string, as that's the loop condition.
+ preceded_by_whitespace = is_blankz(value, i)
+ }
+
+ emitter.scalar_data.multiline = line_breaks
+ emitter.scalar_data.flow_plain_allowed = true
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = true
+
+ if leading_space || leading_break || trailing_space || trailing_break {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if trailing_space {
+ emitter.scalar_data.block_allowed = false
+ }
+ if break_space {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ }
+ if space_break || special_characters {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ emitter.scalar_data.block_allowed = false
+ }
+ if line_breaks {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if flow_indicators {
+ emitter.scalar_data.flow_plain_allowed = false
+ }
+ if block_indicators {
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ return true
+}
+
+// Check if the event data is valid.
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ emitter.anchor_data.anchor = nil
+ emitter.tag_data.handle = nil
+ emitter.tag_data.suffix = nil
+ emitter.scalar_data.value = nil
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
+ return false
+ }
+
+ case yaml_SCALAR_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ if !yaml_emitter_analyze_scalar(emitter, event.value) {
+ return false
+ }
+
+ case yaml_SEQUENCE_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+
+ case yaml_MAPPING_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Write the BOM character.
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
+ if !flush(emitter) {
+ return false
+ }
+ pos := emitter.buffer_pos
+ emitter.buffer[pos+0] = '\xEF'
+ emitter.buffer[pos+1] = '\xBB'
+ emitter.buffer[pos+2] = '\xBF'
+ emitter.buffer_pos += 3
+ return true
+}
+
+func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
+ indent := emitter.indent
+ if indent < 0 {
+ indent = 0
+ }
+ if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ for emitter.column < indent {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ emitter.whitespace = true
+ emitter.indention = true
+ return true
+}
+
+func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, indicator) {
+ return false
+ }
+ emitter.whitespace = is_whitespace
+ emitter.indention = (emitter.indention && is_indention)
+ emitter.open_ended = false
+ return true
+}
+
+func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ for i := 0; i < len(value); {
+ var must_write bool
+ switch value[i] {
+ case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
+ must_write = true
+ default:
+ must_write = is_alpha(value, i)
+ }
+ if must_write {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ } else {
+ w := width(value[i])
+ for k := 0; k < w; k++ {
+ octet := value[i]
+ i++
+ if !put(emitter, '%') {
+ return false
+ }
+
+ c := octet >> 4
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+
+ c = octet & 0x0f
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+ }
+ }
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
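+
+// Illustrative example of the percent-encoding above: a tag suffix byte
+// outside the URI-safe set, such as the UTF-8 pair 0xC3 0xA4 ('ä'), is
+// written as "%C3%A4", while characters like '/', ':' and '.' pass
+// through verbatim.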
+
+func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+
+ emitter.whitespace = false
+ emitter.indention = false
+ if emitter.root_context {
+ emitter.open_ended = true
+ }
+
+ return true
+}
+
+func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
+ return false
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if value[i] == '\'' {
+ if !put(emitter, '\'') {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ spaces := false
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
+ return false
+ }
+
+ for i := 0; i < len(value); {
+ if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
+ is_bom(value, i) || is_break(value, i) ||
+ value[i] == '"' || value[i] == '\\' {
+
+ octet := value[i]
+
+ var w int
+ var v rune
+ switch {
+ case octet&0x80 == 0x00:
+ w, v = 1, rune(octet&0x7F)
+ case octet&0xE0 == 0xC0:
+ w, v = 2, rune(octet&0x1F)
+ case octet&0xF0 == 0xE0:
+ w, v = 3, rune(octet&0x0F)
+ case octet&0xF8 == 0xF0:
+ w, v = 4, rune(octet&0x07)
+ }
+ for k := 1; k < w; k++ {
+ octet = value[i+k]
+ v = (v << 6) + (rune(octet) & 0x3F)
+ }
+ i += w
+
+ if !put(emitter, '\\') {
+ return false
+ }
+
+ var ok bool
+ switch v {
+ case 0x00:
+ ok = put(emitter, '0')
+ case 0x07:
+ ok = put(emitter, 'a')
+ case 0x08:
+ ok = put(emitter, 'b')
+ case 0x09:
+ ok = put(emitter, 't')
+ case 0x0A:
+ ok = put(emitter, 'n')
+ case 0x0b:
+ ok = put(emitter, 'v')
+ case 0x0c:
+ ok = put(emitter, 'f')
+ case 0x0d:
+ ok = put(emitter, 'r')
+ case 0x1b:
+ ok = put(emitter, 'e')
+ case 0x22:
+ ok = put(emitter, '"')
+ case 0x5c:
+ ok = put(emitter, '\\')
+ case 0x85:
+ ok = put(emitter, 'N')
+ case 0xA0:
+ ok = put(emitter, '_')
+ case 0x2028:
+ ok = put(emitter, 'L')
+ case 0x2029:
+ ok = put(emitter, 'P')
+ default:
+ if v <= 0xFF {
+ ok = put(emitter, 'x')
+ w = 2
+ } else if v <= 0xFFFF {
+ ok = put(emitter, 'u')
+ w = 4
+ } else {
+ ok = put(emitter, 'U')
+ w = 8
+ }
+ for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
+ digit := byte((v >> uint(k)) & 0x0F)
+ if digit < 10 {
+ ok = put(emitter, digit+'0')
+ } else {
+ ok = put(emitter, digit+'A'-10)
+ }
+ }
+ }
+ if !ok {
+ return false
+ }
+ spaces = false
+ } else if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if is_space(value, i+1) {
+ if !put(emitter, '\\') {
+ return false
+ }
+ }
+ i += width(value[i])
+ } else if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = true
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
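+
+// For example: in a double-quoted scalar a tab byte is written as \t,
+// U+2028 (line separator) as \L, and any other unprintable value falls back
+// to a \xXX, \uXXXX, or \UXXXXXXXX hex escape depending on its magnitude,
+// per the switch above.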
+
+func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
+ if is_space(value, 0) || is_break(value, 0) {
+ indent_hint := []byte{'0' + byte(emitter.best_indent)}
+ if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
+ return false
+ }
+ }
+
+ emitter.open_ended = false
+
+ var chomp_hint [1]byte
+ if len(value) == 0 {
+ chomp_hint[0] = '-'
+ } else {
+ i := len(value) - 1
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if !is_break(value, i) {
+ chomp_hint[0] = '-'
+ } else if i == 0 {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ } else {
+ i--
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if is_break(value, i) {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ }
+ }
+ }
+ if chomp_hint[0] != 0 {
+ if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
+ return false
+ }
+ }
+ return true
+}
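+
+// For example: with best_indent 2, a block scalar whose content starts with
+// a space or line break gets the explicit indentation hint "2" (e.g. "|2"),
+// content with no trailing break gets the strip chomping hint "-" ("|-"),
+// and content ending in two or more breaks gets the keep hint "+" ("|+").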
+
+func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+ if !put_break(emitter) {
+ return false
+ }
+ emitter.indention = true
+ emitter.whitespace = true
+ breaks := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+
+ return true
+}
+
+func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+
+ if !put_break(emitter) {
+ return false
+ }
+ emitter.indention = true
+ emitter.whitespace = true
+
+ breaks := true
+ leading_spaces := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !breaks && !leading_spaces && value[i] == '\n' {
+ k := 0
+ for is_break(value, k) {
+ k += width(value[k])
+ }
+ if !is_blankz(value, k) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ leading_spaces = is_blank(value, i)
+ }
+ if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go
new file mode 100644
index 00000000..a14435e8
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/encode.go
@@ -0,0 +1,362 @@
+package yaml
+
+import (
+ "encoding"
+ "fmt"
+ "io"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+type encoder struct {
+ emitter yaml_emitter_t
+ event yaml_event_t
+ out []byte
+ flow bool
+ // doneInit holds whether the initial stream_start_event has been
+ // emitted.
+ doneInit bool
+}
+
+func newEncoder() *encoder {
+ e := &encoder{}
+ yaml_emitter_initialize(&e.emitter)
+ yaml_emitter_set_output_string(&e.emitter, &e.out)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ return e
+}
+
+func newEncoderWithWriter(w io.Writer) *encoder {
+ e := &encoder{}
+ yaml_emitter_initialize(&e.emitter)
+ yaml_emitter_set_output_writer(&e.emitter, w)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ return e
+}
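+
+// An illustrative drive sequence, mirroring how Marshal in yaml.go uses
+// this type:
+//
+//	e := newEncoder()
+//	defer e.destroy()
+//	e.marshalDoc("", reflect.ValueOf(map[string]int{"a": 1}))
+//	e.finish()
+//	// e.out now holds "a: 1\n"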
+
+func (e *encoder) init() {
+ if e.doneInit {
+ return
+ }
+ yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
+ e.emit()
+ e.doneInit = true
+}
+
+func (e *encoder) finish() {
+ e.emitter.open_ended = false
+ yaml_stream_end_event_initialize(&e.event)
+ e.emit()
+}
+
+func (e *encoder) destroy() {
+ yaml_emitter_delete(&e.emitter)
+}
+
+func (e *encoder) emit() {
+ // This will internally delete the e.event value.
+ e.must(yaml_emitter_emit(&e.emitter, &e.event))
+}
+
+func (e *encoder) must(ok bool) {
+ if !ok {
+ msg := e.emitter.problem
+ if msg == "" {
+ msg = "unknown problem generating YAML content"
+ }
+ failf("%s", msg)
+ }
+}
+
+func (e *encoder) marshalDoc(tag string, in reflect.Value) {
+ e.init()
+ yaml_document_start_event_initialize(&e.event, nil, nil, true)
+ e.emit()
+ e.marshal(tag, in)
+ yaml_document_end_event_initialize(&e.event, true)
+ e.emit()
+}
+
+func (e *encoder) marshal(tag string, in reflect.Value) {
+ if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
+ e.nilv()
+ return
+ }
+ iface := in.Interface()
+ switch m := iface.(type) {
+ case time.Time, *time.Time:
+ // Although time.Time implements TextMarshaler,
+ // we don't want to treat it as a string for YAML
+ // purposes because YAML has special support for
+ // timestamps.
+ case Marshaler:
+ v, err := m.MarshalYAML()
+ if err != nil {
+ fail(err)
+ }
+ if v == nil {
+ e.nilv()
+ return
+ }
+ in = reflect.ValueOf(v)
+ case encoding.TextMarshaler:
+ text, err := m.MarshalText()
+ if err != nil {
+ fail(err)
+ }
+ in = reflect.ValueOf(string(text))
+ case nil:
+ e.nilv()
+ return
+ }
+ switch in.Kind() {
+ case reflect.Interface:
+ e.marshal(tag, in.Elem())
+ case reflect.Map:
+ e.mapv(tag, in)
+ case reflect.Ptr:
+ if in.Type() == ptrTimeType {
+ e.timev(tag, in.Elem())
+ } else {
+ e.marshal(tag, in.Elem())
+ }
+ case reflect.Struct:
+ if in.Type() == timeType {
+ e.timev(tag, in)
+ } else {
+ e.structv(tag, in)
+ }
+ case reflect.Slice, reflect.Array:
+ if in.Type().Elem() == mapItemType {
+ e.itemsv(tag, in)
+ } else {
+ e.slicev(tag, in)
+ }
+ case reflect.String:
+ e.stringv(tag, in)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if in.Type() == durationType {
+ e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
+ } else {
+ e.intv(tag, in)
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ e.uintv(tag, in)
+ case reflect.Float32, reflect.Float64:
+ e.floatv(tag, in)
+ case reflect.Bool:
+ e.boolv(tag, in)
+ default:
+ panic("cannot marshal type: " + in.Type().String())
+ }
+}
+
+func (e *encoder) mapv(tag string, in reflect.Value) {
+ e.mappingv(tag, func() {
+ keys := keyList(in.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ e.marshal("", k)
+ e.marshal("", in.MapIndex(k))
+ }
+ })
+}
+
+func (e *encoder) itemsv(tag string, in reflect.Value) {
+ e.mappingv(tag, func() {
+ slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
+ for _, item := range slice {
+ e.marshal("", reflect.ValueOf(item.Key))
+ e.marshal("", reflect.ValueOf(item.Value))
+ }
+ })
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+ sinfo, err := getStructInfo(in.Type())
+ if err != nil {
+ panic(err)
+ }
+ e.mappingv(tag, func() {
+ for _, info := range sinfo.FieldsList {
+ var value reflect.Value
+ if info.Inline == nil {
+ value = in.Field(info.Num)
+ } else {
+ value = in.FieldByIndex(info.Inline)
+ }
+ if info.OmitEmpty && isZero(value) {
+ continue
+ }
+ e.marshal("", reflect.ValueOf(info.Key))
+ e.flow = info.Flow
+ e.marshal("", value)
+ }
+ if sinfo.InlineMap >= 0 {
+ m := in.Field(sinfo.InlineMap)
+ if m.Len() > 0 {
+ e.flow = false
+ keys := keyList(m.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ if _, found := sinfo.FieldsMap[k.String()]; found {
+ panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
+ }
+ e.marshal("", k)
+ e.flow = false
+ e.marshal("", m.MapIndex(k))
+ }
+ }
+ }
+ })
+}
+
+func (e *encoder) mappingv(tag string, f func()) {
+ implicit := tag == ""
+ style := yaml_BLOCK_MAPPING_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
+ e.emit()
+ f()
+ yaml_mapping_end_event_initialize(&e.event)
+ e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+ implicit := tag == ""
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+ e.emit()
+ n := in.Len()
+ for i := 0; i < n; i++ {
+ e.marshal("", in.Index(i))
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.emit()
+}
+
+// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but such strings should be marshalled quoted
+// for the time being for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+ // Fast path.
+ if s == "" {
+ return false
+ }
+ c := s[0]
+ if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+ return false
+ }
+ // Do the full match.
+ return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
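+
+// For instance, a YAML 1.1 parser reads the plain scalar 1:30 as the
+// sexagesimal integer 90 (1*60 + 30), so the Go string "1:30" must be
+// emitted quoted to round-trip as a string.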
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+ var style yaml_scalar_style_t
+ s := in.String()
+ canUsePlain := true
+ switch {
+ case !utf8.ValidString(s):
+ if tag == yaml_BINARY_TAG {
+ failf("explicitly tagged !!binary data must be base64-encoded")
+ }
+ if tag != "" {
+ failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+ }
+ // It can't be encoded directly as YAML, so use a binary tag
+ // and encode it as base64.
+ tag = yaml_BINARY_TAG
+ s = encodeBase64(s)
+ case tag == "":
+ // Check to see if it would resolve to a specific
+ // tag when encoded unquoted. If it doesn't,
+ // there's no need to quote it.
+ rtag, _ := resolve("", s)
+ canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s)
+ }
+ // Note: it's possible for user code to emit invalid YAML
+ // if it explicitly specifies a tag for a string containing
+ // text that's incompatible with that tag.
+ switch {
+ case strings.Contains(s, "\n"):
+ style = yaml_LITERAL_SCALAR_STYLE
+ case canUsePlain:
+ style = yaml_PLAIN_SCALAR_STYLE
+ default:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ e.emitScalar(s, "", tag, style)
+}
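+
+// Illustrative outcomes of the style choice above: "yes" would resolve to a
+// boolean if left plain, so it is double-quoted; any value containing a
+// newline is emitted in literal style; everything else that resolves back
+// to !!str stays plain.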
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+ var s string
+ if in.Bool() {
+ s = "true"
+ } else {
+ s = "false"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+ s := strconv.FormatInt(in.Int(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+ s := strconv.FormatUint(in.Uint(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) timev(tag string, in reflect.Value) {
+ t := in.Interface().(time.Time)
+ s := t.Format(time.RFC3339Nano)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
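+
+// time.RFC3339Nano produces timestamps such as
+// "2006-01-02T15:04:05.999999999Z", which parsers accept as the YAML
+// !!timestamp form.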
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+ // Issue #352: When formatting, use the precision of the underlying value
+ precision := 64
+ if in.Kind() == reflect.Float32 {
+ precision = 32
+ }
+
+ s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
+ switch s {
+ case "+Inf":
+ s = ".inf"
+ case "-Inf":
+ s = "-.inf"
+ case "NaN":
+ s = ".nan"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
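+
+// For example, float32(0.1) is formatted at 32-bit precision as "0.1",
+// whereas formatting the same value at 64-bit precision would produce
+// "0.10000000149011612".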
+
+func (e *encoder) nilv() {
+ e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
+ implicit := tag == ""
+ e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+ e.emit()
+}
diff --git a/vendor/gopkg.in/yaml.v2/go.mod b/vendor/gopkg.in/yaml.v2/go.mod
new file mode 100644
index 00000000..1934e876
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/go.mod
@@ -0,0 +1,5 @@
+module "gopkg.in/yaml.v2"
+
+require (
+ "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405
+)
diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go
new file mode 100644
index 00000000..81d05dfe
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/parserc.go
@@ -0,0 +1,1095 @@
+package yaml
+
+import (
+ "bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// implicit_document ::= block_node DOCUMENT-END*
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// | properties (block_content | indentless_block_sequence)?
+// | block_content
+// | indentless_block_sequence
+// block_node ::= ALIAS
+// | properties block_content?
+// | block_content
+// flow_node ::= ALIAS
+// | properties flow_content?
+// | flow_content
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// block_content ::= block_collection | flow_collection | SCALAR
+// flow_content ::= flow_collection | SCALAR
+// block_collection ::= block_sequence | block_mapping
+// flow_collection ::= flow_sequence | flow_mapping
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// block_mapping ::= BLOCK-MAPPING_START
+// ((KEY block_node_or_indentless_sequence?)?
+// (VALUE block_node_or_indentless_sequence?)?)*
+// BLOCK-END
+// flow_sequence ::= FLOW-SEQUENCE-START
+// (flow_sequence_entry FLOW-ENTRY)*
+// flow_sequence_entry?
+// FLOW-SEQUENCE-END
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// flow_mapping ::= FLOW-MAPPING-START
+// (flow_mapping_entry FLOW-ENTRY)*
+// flow_mapping_entry?
+// FLOW-MAPPING-END
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
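+//
+// As an illustration, the document "a: [b, c]" exercises block_mapping with
+// a flow_sequence value, yielding the event stream STREAM-START,
+// DOCUMENT-START, MAPPING-START, SCALAR("a"), SEQUENCE-START, SCALAR("b"),
+// SCALAR("c"), SEQUENCE-END, MAPPING-END, DOCUMENT-END, STREAM-END.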
+
+// Peek the next token in the token queue.
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+ if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
+ return &parser.tokens[parser.tokens_head]
+ }
+ return nil
+}
+
+// Remove the next token from the queue (must be called after peek_token).
+func skip_token(parser *yaml_parser_t) {
+ parser.token_available = false
+ parser.tokens_parsed++
+ parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
+ parser.tokens_head++
+}
+
+// Get the next event.
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+ // Erase the event object.
+ *event = yaml_event_t{}
+
+ // No events after the end of the stream or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
+ return true
+ }
+
+ // Generate the next event.
+ return yaml_parser_state_machine(parser, event)
+}
+
+// Set parser error.
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+// State dispatcher.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+ //trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+ switch parser.state {
+ case yaml_PARSE_STREAM_START_STATE:
+ return yaml_parser_parse_stream_start(parser, event)
+
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, true)
+
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, false)
+
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return yaml_parser_parse_document_content(parser, event)
+
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return yaml_parser_parse_document_end(parser, event)
+
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, true, false)
+
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return yaml_parser_parse_node(parser, event, true, true)
+
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, false, false)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_block_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+ default:
+ panic("invalid parser state")
+ }
+}
+
+// Parse the production:
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_STREAM_START_TOKEN {
+ return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+ }
+ parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ encoding: token.encoding,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ // Parse extra document end indicators.
+ if !implicit {
+ for token.typ == yaml_DOCUMENT_END_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+ token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+ token.typ != yaml_DOCUMENT_START_TOKEN &&
+ token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an implicit document.
+ if !yaml_parser_process_directives(parser, nil, nil) {
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ } else if token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an explicit document.
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+ start_mark := token.start_mark
+ if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+ return false
+ }
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_DOCUMENT_START_TOKEN {
+ yaml_parser_set_parser_error(parser,
+ "did not find expected <document start>", token.start_mark)
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+ end_mark := token.end_mark
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: false,
+ }
+ skip_token(parser)
+
+ } else {
+ // Parse the stream end.
+ parser.state = yaml_PARSE_END_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ }
+
+ return true
+}
+
+// Parse the productions:
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// ***********
+//
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
+ token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
+ token.typ == yaml_DOCUMENT_START_TOKEN ||
+ token.typ == yaml_DOCUMENT_END_TOKEN ||
+ token.typ == yaml_STREAM_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ return yaml_parser_process_empty_scalar(parser, event,
+ token.start_mark)
+ }
+ return yaml_parser_parse_node(parser, event, true, false)
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *************
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ implicit := true
+ if token.typ == yaml_DOCUMENT_END_TOKEN {
+ end_mark = token.end_mark
+ skip_token(parser)
+ implicit = false
+ }
+
+ parser.tag_directives = parser.tag_directives[:0]
+
+ parser.state = yaml_PARSE_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ implicit: implicit,
+ }
+ return true
+}
+
+// Parse the productions:
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// *****
+// | properties (block_content | indentless_block_sequence)?
+// ********** *
+// | block_content | indentless_block_sequence
+// *
+// block_node ::= ALIAS
+// *****
+// | properties block_content?
+// ********** *
+// | block_content
+// *
+// flow_node ::= ALIAS
+// *****
+// | properties flow_content?
+// ********** *
+// | flow_content
+// *
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// *************************
+// block_content ::= block_collection | flow_collection | SCALAR
+// ******
+// flow_content ::= flow_collection | SCALAR
+// ******
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
+ //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_ALIAS_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ *event = yaml_event_t{
+ typ: yaml_ALIAS_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ anchor: token.value,
+ }
+ skip_token(parser)
+ return true
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ var tag_token bool
+ var tag_handle, tag_suffix, anchor []byte
+ var tag_mark yaml_mark_t
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ } else if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ start_mark = token.start_mark
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ var tag []byte
+ if tag_token {
+ if len(tag_handle) == 0 {
+ tag = tag_suffix
+ tag_suffix = nil
+ } else {
+ for i := range parser.tag_directives {
+ if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
+ tag = append([]byte(nil), parser.tag_directives[i].prefix...)
+ tag = append(tag, tag_suffix...)
+ break
+ }
+ }
+ if len(tag) == 0 {
+ yaml_parser_set_parser_error_context(parser,
+ "while parsing a node", start_mark,
+ "found undefined tag handle", tag_mark)
+ return false
+ }
+ }
+ }
+
+ implicit := len(tag) == 0
+ if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_SCALAR_TOKEN {
+ var plain_implicit, quoted_implicit bool
+ end_mark = token.end_mark
+ if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
+ plain_implicit = true
+ } else if len(tag) == 0 {
+ quoted_implicit = true
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ value: token.value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(token.style),
+ }
+ skip_token(parser)
+ return true
+ }
+ if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
+ // [Go] Some of the events below can be merged as they differ only on style.
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
+ }
+ return true
+ }
+ if len(anchor) > 0 || len(tag) > 0 {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ quoted_implicit: false,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+ }
+
+ context := "while parsing a flow node"
+ if block {
+ context = "while parsing a block node"
+ }
+ yaml_parser_set_parser_error_context(parser, context, start_mark,
+ "did not find expected node content", token.start_mark)
+ return false
+}
+
+// Parse the productions:
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// ******************** *********** * *********
+//
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ }
+ if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block collection", context_mark,
+ "did not find expected '-' indicator", token.start_mark)
+}
+
+// Parse the productions:
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// *********** *
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
+ token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ }
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+// *******************
+// ((KEY block_node_or_indentless_sequence?)?
+// *** *
+// (VALUE block_node_or_indentless_sequence?)?)*
+//
+// BLOCK-END
+// *********
+//
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ } else if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block mapping", context_mark,
+ "did not find expected key", token.start_mark)
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+// ((KEY block_node_or_indentless_sequence?)?
+// (VALUE block_node_or_indentless_sequence?)?)*
+// ***** *
+// BLOCK-END
+//
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence ::= FLOW-SEQUENCE-START
+// *******************
+// (flow_sequence_entry FLOW-ENTRY)*
+// * **********
+// flow_sequence_entry?
+// *
+// FLOW-SEQUENCE-END
+// *****************
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow sequence", context_mark,
+ "did not find expected ',' or ']'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ implicit: true,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ skip_token(parser)
+ return true
+ } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ mark := token.end_mark
+ skip_token(parser)
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// ***** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// flow_mapping ::= FLOW-MAPPING-START
+// ******************
+// (flow_mapping_entry FLOW-ENTRY)*
+// * **********
+// flow_mapping_entry?
+// ******************
+// FLOW-MAPPING-END
+// ****************
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * *** *
+//
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow mapping", context_mark,
+ "did not find expected ',' or '}'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ } else {
+ parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * ***** *
+//
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if empty {
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Generate an empty scalar event.
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: mark,
+ end_mark: mark,
+ value: nil, // Empty
+ implicit: true,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+}
+
+var default_tag_directives = []yaml_tag_directive_t{
+ {[]byte("!"), []byte("!")},
+ {[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
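+
+// With these defaults, the shorthand tag "!!str" expands to
+// "tag:yaml.org,2002:str", while a primary-handle tag such as "!foo"
+// stays "!foo".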
+
+// Parse directives.
+func yaml_parser_process_directives(parser *yaml_parser_t,
+ version_directive_ref **yaml_version_directive_t,
+ tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+ if version_directive != nil {
+ yaml_parser_set_parser_error(parser,
+ "found duplicate %YAML directive", token.start_mark)
+ return false
+ }
+ if token.major != 1 || token.minor != 1 {
+ yaml_parser_set_parser_error(parser,
+ "found incompatible YAML document", token.start_mark)
+ return false
+ }
+ version_directive = &yaml_version_directive_t{
+ major: token.major,
+ minor: token.minor,
+ }
+ } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ value := yaml_tag_directive_t{
+ handle: token.value,
+ prefix: token.prefix,
+ }
+ if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+ return false
+ }
+ tag_directives = append(tag_directives, value)
+ }
+
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+
+ for i := range default_tag_directives {
+ if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+ return false
+ }
+ }
+
+ if version_directive_ref != nil {
+ *version_directive_ref = version_directive
+ }
+ if tag_directives_ref != nil {
+ *tag_directives_ref = tag_directives
+ }
+ return true
+}
+
+// Append a tag directive to the directives stack.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+ for i := range parser.tag_directives {
+ if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+ }
+ }
+
+ // [Go] I suspect the copy is unnecessary. This was likely done
+ // because there was no way to track ownership of the data.
+ value_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(value_copy.handle, value.handle)
+ copy(value_copy.prefix, value.prefix)
+ parser.tag_directives = append(parser.tag_directives, value_copy)
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go
new file mode 100644
index 00000000..7c1f5fac
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/readerc.go
@@ -0,0 +1,412 @@
+package yaml
+
+import (
+ "io"
+)
+
+// Set the reader error and return 0.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+ parser.error = yaml_READER_ERROR
+ parser.problem = problem
+ parser.problem_offset = offset
+ parser.problem_value = value
+ return false
+}
+
+// Byte order marks.
+const (
+ bom_UTF8 = "\xef\xbb\xbf"
+ bom_UTF16LE = "\xff\xfe"
+ bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+ // Ensure that we have enough bytes in the raw buffer.
+ for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+ if !yaml_parser_update_raw_buffer(parser) {
+ return false
+ }
+ }
+
+ // Determine the encoding.
+ buf := parser.raw_buffer
+ pos := parser.raw_buffer_pos
+ avail := len(buf) - pos
+ if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+ parser.encoding = yaml_UTF16LE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+ parser.encoding = yaml_UTF16BE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+ parser.encoding = yaml_UTF8_ENCODING
+ parser.raw_buffer_pos += 3
+ parser.offset += 3
+ } else {
+ parser.encoding = yaml_UTF8_ENCODING
+ }
+ return true
+}
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+ size_read := 0
+
+ // Return if the raw buffer is full.
+ if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+ return true
+ }
+
+ // Return on EOF.
+ if parser.eof {
+ return true
+ }
+
+ // Move the remaining bytes in the raw buffer to the beginning.
+ if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+ copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+ }
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+ parser.raw_buffer_pos = 0
+
+ // Call the read handler to fill the buffer.
+ size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+ if err == io.EOF {
+ parser.eof = true
+ } else if err != nil {
+ return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+ }
+ return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+ if parser.read_handler == nil {
+ panic("read handler must be set")
+ }
+
+ // [Go] This function was changed to guarantee the requested length size at EOF.
+ // The fact we need to do this is pretty awful, but the description above implies
+ // that to be the case, and there are tests that depend on it.
+
+ // If the EOF flag is set and the raw buffer is empty, do nothing.
+ if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+ // [Go] ACTUALLY! Read the documentation of this function above.
+ // This is just broken. To return true, we need to have the
+ // given length in the buffer. Not doing that means every single
+ // check that calls this function to make sure the buffer has a
+ // given length is either panicking (in Go) or accessing invalid
+ // memory (in C).
+ //return true
+ }
+
+ // Return if the buffer contains enough characters.
+ if parser.unread >= length {
+ return true
+ }
+
+ // Determine the input encoding if it is not known yet.
+ if parser.encoding == yaml_ANY_ENCODING {
+ if !yaml_parser_determine_encoding(parser) {
+ return false
+ }
+ }
+
+ // Move the unread characters to the beginning of the buffer.
+ buffer_len := len(parser.buffer)
+ if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+ copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+ buffer_len -= parser.buffer_pos
+ parser.buffer_pos = 0
+ } else if parser.buffer_pos == buffer_len {
+ buffer_len = 0
+ parser.buffer_pos = 0
+ }
+
+ // Open the whole buffer for writing, and cut it before returning.
+ parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+ // Fill the buffer until it has enough characters.
+ first := true
+ for parser.unread < length {
+
+ // Fill the raw buffer if necessary.
+ if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+ if !yaml_parser_update_raw_buffer(parser) {
+ parser.buffer = parser.buffer[:buffer_len]
+ return false
+ }
+ }
+ first = false
+
+ // Decode the raw buffer.
+ inner:
+ for parser.raw_buffer_pos != len(parser.raw_buffer) {
+ var value rune
+ var width int
+
+ raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+ // Decode the next character.
+ switch parser.encoding {
+ case yaml_UTF8_ENCODING:
+ // Decode a UTF-8 character. Check RFC 3629
+ // (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+ //
+ // The following table (taken from the RFC) is used for
+ // decoding.
+ //
+ // Char. number range | UTF-8 octet sequence
+ // (hexadecimal) | (binary)
+ // --------------------+------------------------------------
+ // 0000 0000-0000 007F | 0xxxxxxx
+ // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+ // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+ // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ //
+ // Additionally, the characters in the range 0xD800-0xDFFF
+ // are prohibited as they are reserved for use with UTF-16
+ // surrogate pairs.
+
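+ // Worked example: 'é' (U+00E9) arrives as the octets 0xC3 0xA9; the
+ // leading octet matches 110xxxxx, so the width is 2 and the decoded
+ // value is (0xC3&0x1F)<<6 + (0xA9&0x3F) = 0xE9.
+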
+ // Determine the length of the UTF-8 sequence.
+ octet := parser.raw_buffer[parser.raw_buffer_pos]
+ switch {
+ case octet&0x80 == 0x00:
+ width = 1
+ case octet&0xE0 == 0xC0:
+ width = 2
+ case octet&0xF0 == 0xE0:
+ width = 3
+ case octet&0xF8 == 0xF0:
+ width = 4
+ default:
+ // The leading octet is invalid.
+ return yaml_parser_set_reader_error(parser,
+ "invalid leading UTF-8 octet",
+ parser.offset, int(octet))
+ }
+
+ // Check if the raw buffer contains an incomplete character.
+ if width > raw_unread {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-8 octet sequence",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Decode the leading octet.
+ switch {
+ case octet&0x80 == 0x00:
+ value = rune(octet & 0x7F)
+ case octet&0xE0 == 0xC0:
+ value = rune(octet & 0x1F)
+ case octet&0xF0 == 0xE0:
+ value = rune(octet & 0x0F)
+ case octet&0xF8 == 0xF0:
+ value = rune(octet & 0x07)
+ default:
+ value = 0
+ }
+
+ // Check and decode the trailing octets.
+ for k := 1; k < width; k++ {
+ octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+ // Check if the octet is valid.
+ if (octet & 0xC0) != 0x80 {
+ return yaml_parser_set_reader_error(parser,
+ "invalid trailing UTF-8 octet",
+ parser.offset+k, int(octet))
+ }
+
+ // Decode the octet.
+ value = (value << 6) + rune(octet&0x3F)
+ }
+
+ // Check the length of the sequence against the value.
+ switch {
+ case width == 1:
+ case width == 2 && value >= 0x80:
+ case width == 3 && value >= 0x800:
+ case width == 4 && value >= 0x10000:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "invalid length of a UTF-8 sequence",
+ parser.offset, -1)
+ }
+
+ // Check the range of the value.
+ if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
+ return yaml_parser_set_reader_error(parser,
+ "invalid Unicode character",
+ parser.offset, int(value))
+ }
+
+ case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
+ var low, high int
+ if parser.encoding == yaml_UTF16LE_ENCODING {
+ low, high = 0, 1
+ } else {
+ low, high = 1, 0
+ }
+
+ // The UTF-16 encoding is not as simple as one might
+ // naively think. Check RFC 2781
+ // (http://www.ietf.org/rfc/rfc2781.txt).
+ //
+ // Normally, two subsequent bytes describe a Unicode
+ // character. However a special technique (called a
+ // surrogate pair) is used for specifying character
+ // values larger than 0xFFFF.
+ //
+ // A surrogate pair consists of two pseudo-characters:
+ // high surrogate area (0xD800-0xDBFF)
+ // low surrogate area (0xDC00-0xDFFF)
+ //
+ // The following formulas are used for decoding
+ // and encoding characters using surrogate pairs:
+ //
+ // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
+ // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
+ // W1 = 110110yyyyyyyyyy
+ // W2 = 110111xxxxxxxxxx
+ //
+ // where U is the character value, W1 is the high surrogate
+ // area, W2 is the low surrogate area.
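+ //
+ // Worked example: U+1F600 has U' = 0xF600, so W1 = 0xD800+0x3D = 0xD83D
+ // and W2 = 0xDC00+0x200 = 0xDE00.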
+
+ // Check for incomplete UTF-16 character.
+ if raw_unread < 2 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 character",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the character.
+ value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+ // Check for unexpected low surrogate area.
+ if value&0xFC00 == 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "unexpected low surrogate area",
+ parser.offset, int(value))
+ }
+
+ // Check for a high surrogate area.
+ if value&0xFC00 == 0xD800 {
+ width = 4
+
+ // Check for incomplete surrogate pair.
+ if raw_unread < 4 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 surrogate pair",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the next character.
+ value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+ // Check for a low surrogate area.
+ if value2&0xFC00 != 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "expected low surrogate area",
+ parser.offset+2, int(value2))
+ }
+
+ // Generate the value of the surrogate pair.
+ value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+ } else {
+ width = 2
+ }
+
+ default:
+ panic("impossible")
+ }
+
+ // Check if the character is in the allowed range:
+ // #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
+ // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
+ // | [#x10000-#x10FFFF] (32 bit)
+ switch {
+ case value == 0x09:
+ case value == 0x0A:
+ case value == 0x0D:
+ case value >= 0x20 && value <= 0x7E:
+ case value == 0x85:
+ case value >= 0xA0 && value <= 0xD7FF:
+ case value >= 0xE000 && value <= 0xFFFD:
+ case value >= 0x10000 && value <= 0x10FFFF:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "control characters are not allowed",
+ parser.offset, int(value))
+ }
+
+ // Move the raw pointers.
+ parser.raw_buffer_pos += width
+ parser.offset += width
+
+ // Finally put the character into the buffer.
+ if value <= 0x7F {
+ // 0000 0000-0000 007F . 0xxxxxxx
+ parser.buffer[buffer_len+0] = byte(value)
+ buffer_len += 1
+ } else if value <= 0x7FF {
+ // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+ parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+ buffer_len += 2
+ } else if value <= 0xFFFF {
+ // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+ buffer_len += 3
+ } else {
+ // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+ buffer_len += 4
+ }
+
+ parser.unread++
+ }
+
+ // On EOF, put NUL into the buffer and return.
+ if parser.eof {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ parser.unread++
+ break
+ }
+ }
+	// [Go] Read the documentation of this function above. To return true,
+	// we need to have the given length in the buffer. Not doing that means
+	// every caller that uses this function to ensure the buffer holds a
+	// given length would either panic (in Go) or access invalid memory (as
+	// the original C code would). This happens here due to the EOF above
+	// breaking early.
+ for buffer_len < length {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ }
+ parser.buffer = parser.buffer[:buffer_len]
+ return true
+}
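+
+// decodeSurrogatePair is an illustrative sketch, not called by the reader
+// above: it applies the surrogate-pair formulas documented in
+// yaml_parser_update_buffer, combining a high surrogate w1 (0xD800-0xDBFF)
+// and a low surrogate w2 (0xDC00-0xDFFF) into a single character value.
+// For example, decodeSurrogatePair(0xD83D, 0xDE00) yields 0x1F600.
+func decodeSurrogatePair(w1, w2 rune) rune {
+	// U = 0x10000 + (W1-0xD800)*0x400 + (W2-0xDC00), written with the
+	// same bit masks the reader uses above.
+	return 0x10000 + ((w1 & 0x3FF) << 10) + (w2 & 0x3FF)
+}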
diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go
new file mode 100644
index 00000000..6c151db6
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/resolve.go
@@ -0,0 +1,258 @@
+package yaml
+
+import (
+ "encoding/base64"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type resolveMapItem struct {
+ value interface{}
+ tag string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+ t := resolveTable
+ t[int('+')] = 'S' // Sign
+ t[int('-')] = 'S'
+ for _, c := range "0123456789" {
+ t[int(c)] = 'D' // Digit
+ }
+ for _, c := range "yYnNtTfFoO~" {
+ t[int(c)] = 'M' // In map
+ }
+ t[int('.')] = '.' // Float (potentially in map)
+
+ var resolveMapList = []struct {
+ v interface{}
+ tag string
+ l []string
+ }{
+ {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
+ {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
+ {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
+ {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
+ {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
+ {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
+ {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
+ {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
+ {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
+ {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
+ {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
+ {"<<", yaml_MERGE_TAG, []string{"<<"}},
+ }
+
+ m := resolveMap
+ for _, item := range resolveMapList {
+ for _, s := range item.l {
+ m[s] = resolveMapItem{item.v, item.tag}
+ }
+ }
+}
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+ // TODO This can easily be made faster and produce less garbage.
+ if strings.HasPrefix(tag, longTagPrefix) {
+ return "!!" + tag[len(longTagPrefix):]
+ }
+ return tag
+}
+
+func longTag(tag string) string {
+ if strings.HasPrefix(tag, "!!") {
+ return longTagPrefix + tag[2:]
+ }
+ return tag
+}
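+
+// For illustration (a sketch, not used elsewhere in the package): shortTag
+// and longTag are inverses on the standard tags, e.g.
+//
+//	shortTag("tag:yaml.org,2002:str") == "!!str"
+//	longTag("!!str") == "tag:yaml.org,2002:str"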
+
+func resolvableTag(tag string) bool {
+ switch tag {
+ case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG:
+ return true
+ }
+ return false
+}
+
+var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?$`)
+
+func resolve(tag string, in string) (rtag string, out interface{}) {
+ if !resolvableTag(tag) {
+ return tag, in
+ }
+
+ defer func() {
+ switch tag {
+ case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
+ return
+ case yaml_FLOAT_TAG:
+ if rtag == yaml_INT_TAG {
+ switch v := out.(type) {
+ case int64:
+ rtag = yaml_FLOAT_TAG
+ out = float64(v)
+ return
+ case int:
+ rtag = yaml_FLOAT_TAG
+ out = float64(v)
+ return
+ }
+ }
+ }
+ failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+ }()
+
+ // Any data is accepted as a !!str or !!binary.
+ // Otherwise, the prefix is enough of a hint about what it might be.
+ hint := byte('N')
+ if in != "" {
+ hint = resolveTable[in[0]]
+ }
+ if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
+ // Handle things we can lookup in a map.
+ if item, ok := resolveMap[in]; ok {
+ return item.tag, item.value
+ }
+
+ // Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+ // are purposefully unsupported here. They're still quoted on
+	// the way out for compatibility with other parsers, though.
+
+ switch hint {
+ case 'M':
+ // We've already checked the map above.
+
+ case '.':
+ // Not in the map, so maybe a normal float.
+ floatv, err := strconv.ParseFloat(in, 64)
+ if err == nil {
+ return yaml_FLOAT_TAG, floatv
+ }
+
+ case 'D', 'S':
+ // Int, float, or timestamp.
+ // Only try values as a timestamp if the value is unquoted or there's an explicit
+ // !!timestamp tag.
+ if tag == "" || tag == yaml_TIMESTAMP_TAG {
+ t, ok := parseTimestamp(in)
+ if ok {
+ return yaml_TIMESTAMP_TAG, t
+ }
+ }
+
+ plain := strings.Replace(in, "_", "", -1)
+ intv, err := strconv.ParseInt(plain, 0, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, int(intv)
+ } else {
+ return yaml_INT_TAG, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain, 0, 64)
+ if err == nil {
+ return yaml_INT_TAG, uintv
+ }
+ if yamlStyleFloat.MatchString(plain) {
+ floatv, err := strconv.ParseFloat(plain, 64)
+ if err == nil {
+ return yaml_FLOAT_TAG, floatv
+ }
+ }
+ if strings.HasPrefix(plain, "0b") {
+ intv, err := strconv.ParseInt(plain[2:], 2, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, int(intv)
+ } else {
+ return yaml_INT_TAG, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+ if err == nil {
+ return yaml_INT_TAG, uintv
+ }
+ } else if strings.HasPrefix(plain, "-0b") {
+ intv, err := strconv.ParseInt("-" + plain[3:], 2, 64)
+ if err == nil {
+ if true || intv == int64(int(intv)) {
+ return yaml_INT_TAG, int(intv)
+ } else {
+ return yaml_INT_TAG, intv
+ }
+ }
+ }
+ default:
+ panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
+ }
+ }
+ return yaml_STR_TAG, in
+}
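+
+// Illustrative expectations for resolve with an empty tag, assuming the
+// table and parsing rules above (a sketch, not a test fixture):
+//
+//	resolve("", "true")       -> (yaml_BOOL_TAG, true)
+//	resolve("", "12_345")     -> (yaml_INT_TAG, 12345)
+//	resolve("", ".inf")       -> (yaml_FLOAT_TAG, math.Inf(+1))
+//	resolve("", "2015-02-24") -> (yaml_TIMESTAMP_TAG, a time.Time)
+//	resolve("", "hello")      -> (yaml_STR_TAG, "hello")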
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+ const lineLen = 70
+ encLen := base64.StdEncoding.EncodedLen(len(s))
+ lines := encLen/lineLen + 1
+ buf := make([]byte, encLen*2+lines)
+ in := buf[0:encLen]
+ out := buf[encLen:]
+ base64.StdEncoding.Encode(in, []byte(s))
+ k := 0
+ for i := 0; i < len(in); i += lineLen {
+ j := i + lineLen
+ if j > len(in) {
+ j = len(in)
+ }
+ k += copy(out[k:], in[i:j])
+ if lines > 1 {
+ out[k] = '\n'
+ k++
+ }
+ }
+ return string(out[:k])
+}
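+
+// For example (illustrative): encodeBase64("hello") returns "aGVsbG8=" on a
+// single line, while inputs whose encoding exceeds the 70-byte line length
+// are split into newline-separated chunks of at most 70 characters.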
+
+// This is a subset of the formats allowed by the regular expression
+// defined at http://yaml.org/type/timestamp.html.
+var allowedTimestampFormats = []string{
+ "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields.
+ "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+ "2006-1-2 15:4:5.999999999", // space separated with no time zone
+ "2006-1-2", // date only
+ // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
+ // from the set of examples.
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+ // TODO write code to check all the formats supported by
+ // http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+ // Quick check: all date formats start with YYYY-.
+ i := 0
+ for ; i < len(s); i++ {
+ if c := s[i]; c < '0' || c > '9' {
+ break
+ }
+ }
+ if i != 4 || i == len(s) || s[i] != '-' {
+ return time.Time{}, false
+ }
+ for _, format := range allowedTimestampFormats {
+ if t, err := time.Parse(format, s); err == nil {
+ return t, true
+ }
+ }
+ return time.Time{}, false
+}
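+
+// Illustrative behaviour, assuming the formats listed above:
+//
+//	parseTimestamp("2015-02-24T18:19:39Z") -> ok (RFC3339)
+//	parseTimestamp("2015-2-3")             -> ok (short date fields)
+//	parseTimestamp("15:04:05")             -> not ok (no leading YYYY-)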
diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go
new file mode 100644
index 00000000..077fd1dd
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/scannerc.go
@@ -0,0 +1,2696 @@
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descent parser (or
+// LL(1) parser, as it is usually called).
+//
+// Actually there are two aspects of Scanning that might be called "clever";
+// the rest is quite straightforward. These are "block collection start" and
+// "simple keys". Both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+// STREAM-START(encoding) # The stream start.
+// STREAM-END # The stream end.
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+//      FLOW-MAPPING-START              # '{'
+//      FLOW-MAPPING-END                # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+// 'a scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// STREAM-END
+//
+// 2. An explicit document:
+//
+// ---
+// 'a scalar'
+// ...
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-END
+// STREAM-END
+//
+// 3. Several documents in a stream:
+//
+// 'a scalar'
+// ---
+// 'another scalar'
+// ---
+// 'yet another scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("another scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("yet another scalar",single-quoted)
+// STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tag, and scalars:
+//
+// ALIAS(anchor)
+// ANCHOR(anchor)
+// TAG(handle,suffix)
+// SCALAR(value,style)
+//
+// The following series of examples illustrate the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+// &A [ *A ]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// ANCHOR("A")
+// FLOW-SEQUENCE-START
+// ALIAS("A")
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A tagged scalar:
+//
+// !!float "3.14" # A good approximation.
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// TAG("!!","float")
+// SCALAR("3.14",double-quoted)
+// STREAM-END
+//
+// 3. Various scalar styles:
+//
+// --- # Implicit empty plain scalars do not produce tokens.
+// --- a plain scalar
+// --- 'a single-quoted scalar'
+// --- "a double-quoted scalar"
+// --- |-
+// a literal scalar
+// --- >-
+// a folded
+// scalar
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// DOCUMENT-START
+// SCALAR("a plain scalar",plain)
+// DOCUMENT-START
+// SCALAR("a single-quoted scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("a double-quoted scalar",double-quoted)
+// DOCUMENT-START
+// SCALAR("a literal scalar",literal)
+// DOCUMENT-START
+// SCALAR("a folded scalar",folded)
+// STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+// FLOW-SEQUENCE-START
+// FLOW-SEQUENCE-END
+// FLOW-MAPPING-START
+// FLOW-MAPPING-END
+// FLOW-ENTRY
+// KEY
+// VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// correspondingly. FLOW-ENTRY represents the ',' indicator. Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+// [item 1, item 2, item 3]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-SEQUENCE-START
+// SCALAR("item 1",plain)
+// FLOW-ENTRY
+// SCALAR("item 2",plain)
+// FLOW-ENTRY
+// SCALAR("item 3",plain)
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A flow mapping:
+//
+// {
+// a simple key: a value, # Note that the KEY token is produced.
+// ? a complex key: another value,
+// }
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// FLOW-ENTRY
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// FLOW-ENTRY
+// FLOW-MAPPING-END
+// STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+// BLOCK-SEQUENCE-START
+// BLOCK-MAPPING-START
+// BLOCK-END
+// BLOCK-ENTRY
+// KEY
+// VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes an indentation decrease that ends a block
+// collection (cf. the DEDENT token in Python). However, YAML has some syntax
+// peculiarities that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' correspondingly.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+// - item 1
+// - item 2
+// -
+// - item 3.1
+// - item 3.2
+// -
+// key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 3.1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 3.2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Block mappings:
+//
+// a simple key: a value # The KEY token is produced here.
+// ? a complex key
+// : another value
+// a mapping:
+// key 1: value 1
+// key 2: value 2
+// a sequence:
+// - item 1
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// KEY
+// SCALAR("a mapping",plain)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+// - - item 1
+// - item 2
+// - key 1: value 1
+// key 2: value 2
+// - ? complex key
+// : complex value
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("complex key")
+// VALUE
+// SCALAR("complex value")
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Collections in a mapping:
+//
+// ? a sequence
+// : - item 1
+// - item 2
+// ? a mapping
+// : key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a mapping",plain)
+// VALUE
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+// key:
+// - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key",plain)
+// VALUE
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+//      BLOCK-END
+//      STREAM-END
+//
+
+// Ensure that the buffer contains the required number of characters.
+// Return true on success, false on failure (reader error or memory error).
+func cache(parser *yaml_parser_t, length int) bool {
+ // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
+ return parser.unread >= length || yaml_parser_update_buffer(parser, length)
+}
+
+// Advance the buffer pointer.
+func skip(parser *yaml_parser_t) {
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+func skip_line(parser *yaml_parser_t) {
+ if is_crlf(parser.buffer, parser.buffer_pos) {
+ parser.mark.index += 2
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread -= 2
+ parser.buffer_pos += 2
+ } else if is_break(parser.buffer, parser.buffer_pos) {
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+ }
+}
+
+// Copy a character to a string buffer and advance pointers.
+func read(parser *yaml_parser_t, s []byte) []byte {
+ w := width(parser.buffer[parser.buffer_pos])
+ if w == 0 {
+ panic("invalid character sequence")
+ }
+ if len(s) == 0 {
+ s = make([]byte, 0, 32)
+ }
+ if w == 1 && len(s)+w <= cap(s) {
+ s = s[:len(s)+1]
+ s[len(s)-1] = parser.buffer[parser.buffer_pos]
+ parser.buffer_pos++
+ } else {
+ s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+ parser.buffer_pos += w
+ }
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ return s
+}
+
+// Copy a line break character to a string buffer and advance pointers.
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+ buf := parser.buffer
+ pos := parser.buffer_pos
+ switch {
+ case buf[pos] == '\r' && buf[pos+1] == '\n':
+ // CR LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ parser.mark.index++
+ parser.unread--
+ case buf[pos] == '\r' || buf[pos] == '\n':
+ // CR|LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 1
+ case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
+ // NEL . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
+ // LS|PS . LS|PS
+		s = append(s, buf[pos:pos+3]...)
+ parser.buffer_pos += 3
+ default:
+ return s
+ }
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ return s
+}
+
+// Get the next token.
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Erase the token object.
+ *token = yaml_token_t{} // [Go] Is this necessary?
+
+ // No tokens after STREAM-END or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+ return true
+ }
+
+ // Ensure that the tokens queue contains enough tokens.
+ if !parser.token_available {
+ if !yaml_parser_fetch_more_tokens(parser) {
+ return false
+ }
+ }
+
+ // Fetch the next token from the queue.
+ *token = parser.tokens[parser.tokens_head]
+ parser.tokens_head++
+ parser.tokens_parsed++
+ parser.token_available = false
+
+ if token.typ == yaml_STREAM_END_TOKEN {
+ parser.stream_end_produced = true
+ }
+ return true
+}
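+
+// Illustrative use of the scanner from inside this package (a sketch; the
+// yaml_parser_* types are internal, and the initialization and input
+// helpers live in apic.go):
+//
+//	var parser yaml_parser_t
+//	yaml_parser_initialize(&parser)
+//	yaml_parser_set_input_string(&parser, []byte("a: 1"))
+//	var token yaml_token_t
+//	for yaml_parser_scan(&parser, &token) {
+//	    if token.typ == yaml_STREAM_END_TOKEN {
+//	        break
+//	    }
+//	}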
+
+// Set the scanner error and return false.
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+ parser.error = yaml_SCANNER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = parser.mark
+ return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+ context := "while parsing a tag"
+ if directive {
+ context = "while parsing a %TAG directive"
+ }
+ return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
+func trace(args ...interface{}) func() {
+ pargs := append([]interface{}{"+++"}, args...)
+ fmt.Println(pargs...)
+ pargs = append([]interface{}{"---"}, args...)
+ return func() { fmt.Println(pargs...) }
+}
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+ // While we need more tokens to fetch, do it.
+ for {
+ // Check if we really need to fetch more tokens.
+ need_more_tokens := false
+
+ if parser.tokens_head == len(parser.tokens) {
+ // Queue is empty.
+ need_more_tokens = true
+ } else {
+ // Check if any potential simple key may occupy the head position.
+ if !yaml_parser_stale_simple_keys(parser) {
+ return false
+ }
+
+ for i := range parser.simple_keys {
+ simple_key := &parser.simple_keys[i]
+ if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
+ need_more_tokens = true
+ break
+ }
+ }
+ }
+
+ // We are finished.
+ if !need_more_tokens {
+ break
+ }
+ // Fetch the next token.
+ if !yaml_parser_fetch_next_token(parser) {
+ return false
+ }
+ }
+
+ parser.token_available = true
+ return true
+}
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
+ // Ensure that the buffer is initialized.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we just started scanning. Fetch STREAM-START then.
+ if !parser.stream_start_produced {
+ return yaml_parser_fetch_stream_start(parser)
+ }
+
+ // Eat whitespaces and comments until we reach the next token.
+ if !yaml_parser_scan_to_next_token(parser) {
+ return false
+ }
+
+ // Remove obsolete potential simple keys.
+ if !yaml_parser_stale_simple_keys(parser) {
+ return false
+ }
+
+ // Check the indentation level against the current column.
+ if !yaml_parser_unroll_indent(parser, parser.mark.column) {
+ return false
+ }
+
+ // Ensure that the buffer contains at least 4 characters. 4 is the length
+ // of the longest indicators ('--- ' and '... ').
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ // Is it the end of the stream?
+ if is_z(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_fetch_stream_end(parser)
+ }
+
+ // Is it a directive?
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+ return yaml_parser_fetch_directive(parser)
+ }
+
+ buf := parser.buffer
+ pos := parser.buffer_pos
+
+ // Is it the document start indicator?
+ if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+ }
+
+ // Is it the document end indicator?
+ if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
+ }
+
+ // Is it the flow sequence start indicator?
+ if buf[pos] == '[' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
+ }
+
+ // Is it the flow mapping start indicator?
+ if parser.buffer[parser.buffer_pos] == '{' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
+ }
+
+ // Is it the flow sequence end indicator?
+ if parser.buffer[parser.buffer_pos] == ']' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_SEQUENCE_END_TOKEN)
+ }
+
+ // Is it the flow mapping end indicator?
+ if parser.buffer[parser.buffer_pos] == '}' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_MAPPING_END_TOKEN)
+ }
+
+ // Is it the flow entry indicator?
+ if parser.buffer[parser.buffer_pos] == ',' {
+ return yaml_parser_fetch_flow_entry(parser)
+ }
+
+ // Is it the block entry indicator?
+ if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
+ return yaml_parser_fetch_block_entry(parser)
+ }
+
+ // Is it the key indicator?
+ if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_key(parser)
+ }
+
+ // Is it the value indicator?
+ if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_value(parser)
+ }
+
+ // Is it an alias?
+ if parser.buffer[parser.buffer_pos] == '*' {
+ return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
+ }
+
+ // Is it an anchor?
+ if parser.buffer[parser.buffer_pos] == '&' {
+ return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
+ }
+
+ // Is it a tag?
+ if parser.buffer[parser.buffer_pos] == '!' {
+ return yaml_parser_fetch_tag(parser)
+ }
+
+ // Is it a literal scalar?
+ if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, true)
+ }
+
+ // Is it a folded scalar?
+ if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, false)
+ }
+
+ // Is it a single-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ return yaml_parser_fetch_flow_scalar(parser, true)
+ }
+
+ // Is it a double-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '"' {
+ return yaml_parser_fetch_flow_scalar(parser, false)
+ }
+
+ // Is it a plain scalar?
+ //
+	// A plain scalar may start with any non-blank character except
+ //
+ // '-', '?', ':', ',', '[', ']', '{', '}',
+ // '#', '&', '*', '!', '|', '>', '\'', '\"',
+ // '%', '@', '`'.
+ //
+ // In the block context (and, for the '-' indicator, in the flow context
+ // too), it may also start with the characters
+ //
+ // '-', '?', ':'
+ //
+ // if it is followed by a non-space character.
+ //
+ // The last rule is more restrictive than the specification requires.
+ // [Go] Make this logic more reasonable.
+ //switch parser.buffer[parser.buffer_pos] {
+ //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
+ //}
+ if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
+ parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
+ parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
+ (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level == 0 &&
+ (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
+ !is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_plain_scalar(parser)
+ }
+
+ // If we don't determine the token type so far, it is an error.
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning for the next token", parser.mark,
+ "found character that cannot start any token")
+}
+
+// Check the list of potential simple keys and remove the positions that
+// cannot contain simple keys anymore.
+func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool {
+ // Check for a potential simple key for each flow level.
+ for i := range parser.simple_keys {
+ simple_key := &parser.simple_keys[i]
+
+ // The specification requires that a simple key
+ //
+ // - is limited to a single line,
+ // - is shorter than 1024 characters.
+ if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) {
+
+ // Check if the potential simple key to be removed is required.
+ if simple_key.required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", simple_key.mark,
+ "could not find expected ':'")
+ }
+ simple_key.possible = false
+ }
+ }
+ return true
+}
+
+// Check if a simple key may start at the current position and add it if
+// needed.
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+ // A simple key is required at the current position if the scanner is in
+ // the block context and the current column coincides with the indentation
+ // level.
+
+ required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+	// If the current position may start a simple key, save it.
+ if parser.simple_key_allowed {
+ simple_key := yaml_simple_key_t{
+ possible: true,
+ required: required,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ }
+ simple_key.mark = parser.mark
+
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+ parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+ }
+ return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+ i := len(parser.simple_keys) - 1
+ if parser.simple_keys[i].possible {
+ // If the key is required, it is an error.
+ if parser.simple_keys[i].required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", parser.simple_keys[i].mark,
+ "could not find expected ':'")
+ }
+ }
+ // Remove the key from the stack.
+ parser.simple_keys[i].possible = false
+ return true
+}
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+ // Reset the simple key on the next level.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+ // Increase the flow level.
+ parser.flow_level++
+ return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+ if parser.flow_level > 0 {
+ parser.flow_level--
+ parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
+ }
+ return true
+}
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ if parser.indent < column {
+ // Push the current indentation level to the stack and set the new
+ // indentation level.
+ parser.indents = append(parser.indents, parser.indent)
+ parser.indent = column
+
+ // Create a token and insert it into the queue.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: mark,
+ end_mark: mark,
+ }
+ if number > -1 {
+ number -= parser.tokens_parsed
+ }
+ yaml_insert_token(parser, number, &token)
+ }
+ return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less than or equal to the column. For each indentation level, append
+// the BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ // Loop through the indentation levels in the stack.
+ for parser.indent > column {
+ // Create a token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+
+ // Pop the indentation level.
+ parser.indent = parser.indents[len(parser.indents)-1]
+ parser.indents = parser.indents[:len(parser.indents)-1]
+ }
+ return true
+}
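+
+// For example (illustrative), scanning the document
+//
+//	a:
+//	  b: 1
+//	c: 2
+//
+// rolls the indent from -1 to 0 at "a" and from 0 to 2 at "b" (emitting a
+// BLOCK-MAPPING-START each time), then unrolls from 2 back to 0 at "c",
+// emitting one BLOCK-END.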
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+ // Set the initial indentation.
+ parser.indent = -1
+
+ // Initialize the simple key stack.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+ // A simple key is allowed at the beginning of the stream.
+ parser.simple_key_allowed = true
+
+ // We have started.
+ parser.stream_start_produced = true
+
+ // Create the STREAM-START token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_START_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ encoding: parser.encoding,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+ // Force new line.
+ if parser.mark.column != 0 {
+ parser.mark.column = 0
+ parser.mark.line++
+ }
+
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the STREAM-END token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+	// Create the VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+ token := yaml_token_t{}
+ if !yaml_parser_scan_directive(parser, &token) {
+ return false
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+ start_mark := parser.mark
+
+ skip(parser)
+ skip(parser)
+ skip(parser)
+
+ end_mark := parser.mark
+
+ // Create the DOCUMENT-START or DOCUMENT-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // The indicators '[' and '{' may start a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // Increase the flow level.
+ if !yaml_parser_increase_flow_level(parser) {
+ return false
+ }
+
+ // A simple key may follow the indicators '[' and '{'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset any potential simple key on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Decrease the flow level.
+ if !yaml_parser_decrease_flow_level(parser) {
+ return false
+ }
+
+ // No simple keys after the indicators ']' and '}'.
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after ','.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_FLOW_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+ // Check if the scanner is in the block context.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new entry.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "block sequence entries are not allowed in this context")
+ }
+ // Add the BLOCK-SEQUENCE-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+ return false
+ }
+ } else {
+		// It is an error for the '-' indicator to occur in the flow context,
+		// but we let the Parser detect and report it, because the Parser is
+		// able to point to the context.
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '-'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the BLOCK-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+ // In the block context, additional checks are required.
+ if parser.flow_level == 0 {
+		// Check if we are allowed to start a new key (not necessarily simple).
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping keys are not allowed in this context")
+ }
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '?' in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the KEY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+ simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+ // Have we found a simple key?
+ if simple_key.possible {
+ // Create the KEY token and insert it into the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: simple_key.mark,
+ end_mark: simple_key.mark,
+ }
+ yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+ // In the block context, we may need to add the BLOCK-MAPPING-START token.
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+ simple_key.token_number,
+ yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+ return false
+ }
+
+ // Remove the simple key.
+ simple_key.possible = false
+
+ // A simple key cannot follow another simple key.
+ parser.simple_key_allowed = false
+
+ } else {
+ // The ':' indicator follows a complex key.
+
+ // In the block context, extra checks are required.
+ if parser.flow_level == 0 {
+
+ // Check if we are allowed to start a complex value.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping values are not allowed in this context")
+ }
+
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Simple keys after ':' are allowed in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+ }
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the VALUE token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_VALUE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
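+
+// For example (illustrative): in "foo: 1" the scanner saves the position of
+// "foo" as a potential simple key just before scanning the plain scalar;
+// only when it reaches ':' does yaml_parser_fetch_value retroactively
+// insert the KEY token (and, in the block context, a BLOCK-MAPPING-START)
+// at the saved token position.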
+
+// Produce the ALIAS or ANCHOR token.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // An anchor or an alias could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow an anchor or an alias.
+ parser.simple_key_allowed = false
+
+ // Create the ALIAS or ANCHOR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_anchor(parser, &token, typ) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the TAG token.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+ // A tag could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a tag.
+ parser.simple_key_allowed = false
+
+ // Create the TAG token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_tag(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+ // Remove any potential simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // A simple key may follow a block scalar.
+ parser.simple_key_allowed = true
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+ // A plain scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a flow scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+ // A plain scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+	// A simple key cannot follow a plain scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_plain_scalar(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+	// Until the next token is found.
+ for {
+ // Allow the BOM mark to start a line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ }
+
+ // Eat whitespaces.
+ // Tabs are allowed:
+ // - in the flow context
+ // - in the block context, but not at the beginning of the line or
+ // after '-', '?', or ':' (complex value).
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Eat a comment until a line break.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // If it is a line break, eat it.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+
+ // In the block context, a new line may start a simple key.
+ if parser.flow_level == 0 {
+ parser.simple_key_allowed = true
+ }
+ } else {
+ break // We have found a token.
+ }
+ }
+
+ return true
+}
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Eat '%'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the directive name.
+ var name []byte
+ if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+ return false
+ }
+
+ // Is it a YAML directive?
+ if bytes.Equal(name, []byte("YAML")) {
+ // Scan the VERSION directive value.
+ var major, minor int8
+ if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a VERSION-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_VERSION_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ major: major,
+ minor: minor,
+ }
+
+ // Is it a TAG directive?
+ } else if bytes.Equal(name, []byte("TAG")) {
+ // Scan the TAG directive value.
+ var handle, prefix []byte
+ if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a TAG-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ prefix: prefix,
+ }
+
+ // Unknown directive.
+ } else {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unknown directive name")
+ return false
+ }
+
+ // Eat the rest of the line including any comments.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+ // Consume the directive name.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ var s []byte
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the name is empty.
+ if len(s) == 0 {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "could not find expected directive name")
+ return false
+ }
+
+	// Check for a blank character after the name.
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unexpected non-alphabetical character")
+ return false
+ }
+ *name = s
+ return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the major version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+ return false
+ }
+
+ // Eat '.'.
+ if parser.buffer[parser.buffer_pos] != '.' {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected digit or '.' character")
+ }
+
+ skip(parser)
+
+ // Consume the minor version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+ return false
+ }
+ return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^
+// %YAML 1.1 # a comment \n
+// ^
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+	// Repeat while the next character is a digit.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var value, length int8
+ for is_digit(parser.buffer, parser.buffer_pos) {
+ // Check if the number is too long.
+ length++
+ if length > max_number_length {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "found extremely long version number")
+ }
+ value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the number was present.
+ if length == 0 {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected version number")
+ }
+ *number = value
+ return true
+}
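+
+// For example (illustrative): given "%YAML 1.1", this yields major == 1 and
+// minor == 1; a component longer than max_number_length digits (e.g.
+// "%YAML 123.1") is rejected as an extremely long version number.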
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+ var handle_value, prefix_value []byte
+
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
+ return false
+ }
+
+ // Expect a whitespace.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace")
+ return false
+ }
+
+ // Eat whitespaces.
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a prefix.
+ if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
+ return false
+ }
+
+ // Expect a whitespace or line break.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ *handle = handle_value
+ *prefix = prefix_value
+ return true
+}
+
+func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
+ var s []byte
+
+ // Eat the indicator character.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the value.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ end_mark := parser.mark
+
+ /*
+ * Check if length of the anchor is greater than 0 and it is followed by
+ * a whitespace character or one of the indicators:
+ *
+ * '?', ':', ',', ']', '}', '%', '@', '`'.
+ */
+
+ if len(s) == 0 ||
+ !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
+ parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '`') {
+ context := "while scanning an alias"
+ if typ == yaml_ANCHOR_TOKEN {
+ context = "while scanning an anchor"
+ }
+ yaml_parser_set_scanner_error(parser, context, start_mark,
+ "did not find expected alphabetic or numeric character")
+ return false
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ }
+
+ return true
+}
+
+/*
+ * Scan a TAG token.
+ */
+
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+ var handle, suffix []byte
+
+ start_mark := parser.mark
+
+ // Check if the tag is in the canonical form.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ if parser.buffer[parser.buffer_pos+1] == '<' {
+ // Keep the handle as ''
+
+ // Eat '!<'
+ skip(parser)
+ skip(parser)
+
+ // Consume the tag value.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+
+ // Check for '>' and eat it.
+ if parser.buffer[parser.buffer_pos] != '>' {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find the expected '>'")
+ return false
+ }
+
+ skip(parser)
+ } else {
+ // The tag has either the '!suffix' or the '!handle!suffix' form.
+
+ // First, try to scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+ return false
+ }
+
+		// Check if it is, indeed, a handle.
+ if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+ // Scan the suffix now.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+ } else {
+ // It wasn't a handle after all. Scan the rest of the tag.
+ if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+ return false
+ }
+
+ // Set the handle to '!'.
+ handle = []byte{'!'}
+
+ // A special case: the '!' tag. Set the handle to '' and the
+ // suffix to '!'.
+ if len(suffix) == 0 {
+ handle, suffix = suffix, handle
+ }
+ }
+ }
+
+ // Check the character which ends the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ suffix: suffix,
+ }
+ return true
+}
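+
+// For illustration (editor's sketch, not part of the original source), the
+// scanner above produces the following handle/suffix pairs:
+//
+//     !<tag:yaml.org,2002:str>   verbatim: handle "", suffix "tag:yaml.org,2002:str"
+//     !!str                      shorthand: handle "!!", suffix "str"
+//     !local                     local: handle "!", suffix "local"
+//     !                          non-specific: handle "", suffix "!" (the special case)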
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+ // Check the initial '!' character.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] != '!' {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+
+ var s []byte
+
+ // Copy the '!' character.
+ s = read(parser, s)
+
+ // Copy all subsequent alphabetical and numerical characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the trailing character is '!' and copy it.
+ if parser.buffer[parser.buffer_pos] == '!' {
+ s = read(parser, s)
+ } else {
+ // It's either the '!' tag or not really a tag handle. If it's a %TAG
+		// directive, it's an error. If it's a tag token, it must be part of the URI.
+ if directive && string(s) != "!" {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+ }
+
+ *handle = s
+ return true
+}
+
+// Scan a tag.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+ //size_t length = head ? strlen((char *)head) : 0
+ var s []byte
+ hasTag := len(head) > 0
+
+ // Copy the head if needed.
+ //
+ // Note that we don't copy the leading '!' character.
+ if len(head) > 1 {
+ s = append(s, head[1:]...)
+ }
+
+ // Scan the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // The set of characters that may appear in URI is as follows:
+ //
+ // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+ // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+ // '%'.
+ // [Go] Convert this into more reasonable logic.
+ for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+ parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+ parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+ parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+ parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+ parser.buffer[parser.buffer_pos] == '%' {
+ // Check if it is a URI-escape sequence.
+ if parser.buffer[parser.buffer_pos] == '%' {
+ if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+ return false
+ }
+ } else {
+ s = read(parser, s)
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ hasTag = true
+ }
+
+ if !hasTag {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected tag URI")
+ return false
+ }
+ *uri = s
+ return true
+}
+
+// Decode a URI-escape sequence corresponding to a single UTF-8 character.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+ // Decode the required number of characters.
+ w := 1024
+ for w > 0 {
+ // Check for a URI-escaped octet.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+
+ if !(parser.buffer[parser.buffer_pos] == '%' &&
+ is_hex(parser.buffer, parser.buffer_pos+1) &&
+ is_hex(parser.buffer, parser.buffer_pos+2)) {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find URI escaped octet")
+ }
+
+ // Get the octet.
+ octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+ // If it is the leading octet, determine the length of the UTF-8 sequence.
+ if w == 1024 {
+ w = width(octet)
+ if w == 0 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect leading UTF-8 octet")
+ }
+ } else {
+ // Check if the trailing octet is correct.
+ if octet&0xC0 != 0x80 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect trailing UTF-8 octet")
+ }
+ }
+
+ // Copy the octet and move the pointers.
+ *s = append(*s, octet)
+ skip(parser)
+ skip(parser)
+ skip(parser)
+ w--
+ }
+ return true
+}
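+
+// For illustration (editor's sketch): the escape sequence "%C3%A9" decodes to
+// the bytes 0xC3 0xA9, the UTF-8 encoding of 'é'. The leading octet fixes the
+// sequence length via width(), and every continuation octet must match the
+// 10xxxxxx pattern enforced by the octet&0xC0 == 0x80 check above.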
+
+// Scan a block scalar.
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+ // Eat the indicator '|' or '>'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the additional block scalar indicators.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check for a chomping indicator.
+ var chomping, increment int
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ // Set the chomping method and eat the indicator.
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+
+ // Check for an indentation indicator.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_digit(parser.buffer, parser.buffer_pos) {
+ // Check that the indentation is greater than 0.
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+
+ // Get the indentation level and eat the indicator.
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+ }
+
+ } else if is_digit(parser.buffer, parser.buffer_pos) {
+ // Do the same as above, but in the opposite order.
+
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+ }
+ }
+
+ // Eat whitespaces and comments to the end of the line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ end_mark := parser.mark
+
+ // Set the indentation level if it was specified.
+ var indent int
+ if increment > 0 {
+ if parser.indent >= 0 {
+ indent = parser.indent + increment
+ } else {
+ indent = increment
+ }
+ }
+
+ // Scan the leading line breaks and determine the indentation level if needed.
+ var s, leading_break, trailing_breaks []byte
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+
+ // Scan the block scalar content.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var leading_blank, trailing_blank bool
+ for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+ // We are at the beginning of a non-empty line.
+
+ // Is it a trailing whitespace?
+ trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Check if we need to fold the leading line break.
+ if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+ // Do we need to join the lines by space?
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ }
+ } else {
+ s = append(s, leading_break...)
+ }
+ leading_break = leading_break[:0]
+
+ // Append the remaining line breaks.
+ s = append(s, trailing_breaks...)
+ trailing_breaks = trailing_breaks[:0]
+
+ // Is it a leading whitespace?
+ leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Consume the current line.
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ leading_break = read_line(parser, leading_break)
+
+ // Eat the following indentation spaces and line breaks.
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+ }
+
+ // Chomp the tail.
+ if chomping != -1 {
+ s = append(s, leading_break...)
+ }
+ if chomping == 1 {
+ s = append(s, trailing_breaks...)
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_LITERAL_SCALAR_STYLE,
+ }
+ if !literal {
+ token.style = yaml_FOLDED_SCALAR_STYLE
+ }
+ return true
+}
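+
+// For illustration (editor's sketch), the block scalar headers parsed above
+// combine a style indicator with optional chomping/indentation modifiers,
+// in either order:
+//
+//     |     literal; clip: keep a single trailing newline (chomping == 0)
+//     >-    folded; strip all trailing newlines (chomping == -1)
+//     |+    literal; keep all trailing newlines (chomping == +1)
+//     |2    literal; content indented 2 columns past the parent (increment == 2)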
+
+// Scan indentation spaces and line breaks for a block scalar. Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+ *end_mark = parser.mark
+
+ // Eat the indentation spaces and line breaks.
+ max_indent := 0
+ for {
+ // Eat the indentation spaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.mark.column > max_indent {
+ max_indent = parser.mark.column
+ }
+
+		// Check for a tab character messing up the indentation.
+ if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found a tab character where an indentation space is expected")
+ }
+
+ // Have we found a non-empty line?
+ if !is_break(parser.buffer, parser.buffer_pos) {
+ break
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ // [Go] Should really be returning breaks instead.
+ *breaks = read_line(parser, *breaks)
+ *end_mark = parser.mark
+ }
+
+ // Determine the indentation level if needed.
+ if *indent == 0 {
+ *indent = max_indent
+ if *indent < parser.indent+1 {
+ *indent = parser.indent + 1
+ }
+ if *indent < 1 {
+ *indent = 1
+ }
+ }
+ return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+ // Eat the left quote.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the content of the quoted scalar.
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ for {
+ // Check that there are no document indicators at the beginning of the line.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected document indicator")
+ return false
+ }
+
+ // Check for EOF.
+ if is_z(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected end of stream")
+ return false
+ }
+
+ // Consume non-blank characters.
+ leading_blanks := false
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+ if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+				// It is an escaped single quote.
+ s = append(s, '\'')
+ skip(parser)
+ skip(parser)
+
+ } else if single && parser.buffer[parser.buffer_pos] == '\'' {
+ // It is a right single quote.
+ break
+ } else if !single && parser.buffer[parser.buffer_pos] == '"' {
+ // It is a right double quote.
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+ // It is an escaped line break.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+ skip(parser)
+ skip_line(parser)
+ leading_blanks = true
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+ // It is an escape sequence.
+ code_length := 0
+
+ // Check the escape character.
+ switch parser.buffer[parser.buffer_pos+1] {
+ case '0':
+ s = append(s, 0)
+ case 'a':
+ s = append(s, '\x07')
+ case 'b':
+ s = append(s, '\x08')
+ case 't', '\t':
+ s = append(s, '\x09')
+ case 'n':
+ s = append(s, '\x0A')
+ case 'v':
+ s = append(s, '\x0B')
+ case 'f':
+ s = append(s, '\x0C')
+ case 'r':
+ s = append(s, '\x0D')
+ case 'e':
+ s = append(s, '\x1B')
+ case ' ':
+ s = append(s, '\x20')
+ case '"':
+ s = append(s, '"')
+ case '\'':
+ s = append(s, '\'')
+ case '\\':
+ s = append(s, '\\')
+ case 'N': // NEL (#x85)
+ s = append(s, '\xC2')
+ s = append(s, '\x85')
+ case '_': // #xA0
+ s = append(s, '\xC2')
+ s = append(s, '\xA0')
+ case 'L': // LS (#x2028)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA8')
+ case 'P': // PS (#x2029)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA9')
+ case 'x':
+ code_length = 2
+ case 'u':
+ code_length = 4
+ case 'U':
+ code_length = 8
+ default:
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found unknown escape character")
+ return false
+ }
+
+ skip(parser)
+ skip(parser)
+
+ // Consume an arbitrary escape code.
+ if code_length > 0 {
+ var value int
+
+ // Scan the character value.
+ if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+ return false
+ }
+ for k := 0; k < code_length; k++ {
+ if !is_hex(parser.buffer, parser.buffer_pos+k) {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+							start_mark, "did not find expected hexadecimal number")
+ return false
+ }
+ value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+ }
+
+ // Check the value and write the character.
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found invalid Unicode character escape code")
+ return false
+ }
+ if value <= 0x7F {
+ s = append(s, byte(value))
+ } else if value <= 0x7FF {
+ s = append(s, byte(0xC0+(value>>6)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else if value <= 0xFFFF {
+ s = append(s, byte(0xE0+(value>>12)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else {
+ s = append(s, byte(0xF0+(value>>18)))
+ s = append(s, byte(0x80+((value>>12)&0x3F)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ }
+
+ // Advance the pointer.
+ for k := 0; k < code_length; k++ {
+ skip(parser)
+ }
+ }
+ } else {
+ // It is a non-escaped non-blank character.
+ s = read(parser, s)
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we are at the end of the scalar.
+ if single {
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ break
+ }
+ } else {
+ if parser.buffer[parser.buffer_pos] == '"' {
+ break
+ }
+ }
+
+ // Consume blank characters.
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Join the whitespaces or fold line breaks.
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if len(leading_break) > 0 && leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Eat the right quote.
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_SINGLE_QUOTED_SCALAR_STYLE,
+ }
+ if !single {
+ token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ return true
+}
+
+// Scan a plain scalar.
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ var leading_blanks bool
+ var indent = parser.indent + 1
+
+ start_mark := parser.mark
+ end_mark := parser.mark
+
+ // Consume the content of the plain scalar.
+ for {
+ // Check for a document indicator.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ break
+ }
+
+ // Check for a comment.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ break
+ }
+
+ // Consume non-blank characters.
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+ // Check for indicators that may end a plain scalar.
+ if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level > 0 &&
+ (parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}')) {
+ break
+ }
+
+ // Check if we need to join whitespaces and breaks.
+ if leading_blanks || len(whitespaces) > 0 {
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ leading_blanks = false
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Copy the character.
+ s = read(parser, s)
+
+ end_mark = parser.mark
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ // Is it the end?
+ if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+ break
+ }
+
+ // Consume blank characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+
+ // Check for tab characters that abuse indentation.
+ if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+ start_mark, "found a tab character that violates indentation")
+ return false
+ }
+
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check indentation level.
+ if parser.flow_level == 0 && parser.mark.column < indent {
+ break
+ }
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_PLAIN_SCALAR_STYLE,
+ }
+
+ // Note that we change the 'simple_key_allowed' flag.
+ if leading_blanks {
+ parser.simple_key_allowed = true
+ }
+ return true
+}
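+
+// For illustration (editor's sketch), plain scalars end at a ':' followed by
+// a blank, at a '#' that starts a comment after whitespace, at a document
+// indicator in column 0, or, inside flow context, at one of ',', '?', '[',
+// ']', '{', '}':
+//
+//     key: plain value here      # the comment is not part of the scalar
+//     [ one, two ]               # ',' and ']' terminate each flow entry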
diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go
new file mode 100644
index 00000000..4c45e660
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/sorter.go
@@ -0,0 +1,113 @@
+package yaml
+
+import (
+ "reflect"
+ "unicode"
+)
+
+type keyList []reflect.Value
+
+func (l keyList) Len() int { return len(l) }
+func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l keyList) Less(i, j int) bool {
+ a := l[i]
+ b := l[j]
+ ak := a.Kind()
+ bk := b.Kind()
+ for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
+ a = a.Elem()
+ ak = a.Kind()
+ }
+ for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
+ b = b.Elem()
+ bk = b.Kind()
+ }
+ af, aok := keyFloat(a)
+ bf, bok := keyFloat(b)
+ if aok && bok {
+ if af != bf {
+ return af < bf
+ }
+ if ak != bk {
+ return ak < bk
+ }
+ return numLess(a, b)
+ }
+ if ak != reflect.String || bk != reflect.String {
+ return ak < bk
+ }
+ ar, br := []rune(a.String()), []rune(b.String())
+ for i := 0; i < len(ar) && i < len(br); i++ {
+ if ar[i] == br[i] {
+ continue
+ }
+ al := unicode.IsLetter(ar[i])
+ bl := unicode.IsLetter(br[i])
+ if al && bl {
+ return ar[i] < br[i]
+ }
+ if al || bl {
+ return bl
+ }
+ var ai, bi int
+ var an, bn int64
+ if ar[i] == '0' || br[i] == '0' {
+ for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
+ if ar[j] != '0' {
+ an = 1
+ bn = 1
+ break
+ }
+ }
+ }
+ for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
+ an = an*10 + int64(ar[ai]-'0')
+ }
+ for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
+ bn = bn*10 + int64(br[bi]-'0')
+ }
+ if an != bn {
+ return an < bn
+ }
+ if ai != bi {
+ return ai < bi
+ }
+ return ar[i] < br[i]
+ }
+ return len(ar) < len(br)
+}
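+
+// For illustration (editor's sketch): Less implements a natural ordering.
+// Numeric and bool keys compare by value and sort before string keys
+// (reflect.Int sorts before reflect.String), and digit runs within strings
+// compare numerically, so keys order as:
+//
+//     2 < 10 < "a2" < "a10" < "b1"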
+
+// keyFloat returns a float value for v if it is a number or bool,
+// and reports whether it is.
+func keyFloat(v reflect.Value) (f float64, ok bool) {
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return float64(v.Int()), true
+ case reflect.Float32, reflect.Float64:
+ return v.Float(), true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return float64(v.Uint()), true
+ case reflect.Bool:
+ if v.Bool() {
+ return 1, true
+ }
+ return 0, true
+ }
+ return 0, false
+}
+
+// numLess reports whether a < b.
+// a and b must have the same kind.
+func numLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return a.Int() < b.Int()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ }
+ panic("not a number")
+}
diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go
new file mode 100644
index 00000000..a2dde608
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/writerc.go
@@ -0,0 +1,26 @@
+package yaml
+
+// Set the writer error and return false.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_WRITER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Flush the output buffer.
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+ if emitter.write_handler == nil {
+ panic("write handler not set")
+ }
+
+ // Check if the buffer is empty.
+ if emitter.buffer_pos == 0 {
+ return true
+ }
+
+ if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+ }
+ emitter.buffer_pos = 0
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go
new file mode 100644
index 00000000..de85aa4c
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yaml.go
@@ -0,0 +1,466 @@
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+// https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// MapSlice encodes and decodes as a YAML map.
+// The order of keys is preserved when encoding and decoding.
+type MapSlice []MapItem
+
+// MapItem is an item in a MapSlice.
+type MapItem struct {
+ Key, Value interface{}
+}
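+
+// For example (editor's illustration, assuming the Marshal API below):
+//
+//     ms := yaml.MapSlice{
+//         {Key: "b", Value: 1},
+//         {Key: "a", Value: 2},
+//     }
+//     out, _ := yaml.Marshal(ms) // "b: 1\na: 2\n", key order preserved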
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
+// method receives a function that may be called to unmarshal the original
+// YAML value into a field or variable. It is safe to call the unmarshal
+// function parameter more than once if necessary.
+type Unmarshaler interface {
+ UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+ MarshalYAML() (interface{}, error)
+}
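+
+// For example (editor's illustration; the Duration wrapper is hypothetical),
+// a type can round-trip through a custom scalar representation:
+//
+//     type Duration struct{ D time.Duration }
+//
+//     func (d Duration) MarshalYAML() (interface{}, error) {
+//         return d.D.String(), nil // emit "1m30s" instead of a struct
+//     }
+//
+//     func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
+//         var s string
+//         if err := unmarshal(&s); err != nil {
+//             return err
+//         }
+//         v, err := time.ParseDuration(s)
+//         d.D = v
+//         return err
+//     }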
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// var t T
+// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+ return unmarshal(in, out, false)
+}
+
+// UnmarshalStrict is like Unmarshal except that any fields that are found
+// in the data that do not have corresponding struct members, or mapping
+// keys that are duplicates, will result in
+// an error.
+func UnmarshalStrict(in []byte, out interface{}) (err error) {
+ return unmarshal(in, out, true)
+}
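+
+// For example (editor's illustration): given "a: 1\nc: 3" and the type T from
+// the Unmarshal example above, Unmarshal silently drops the unknown key "c",
+// while UnmarshalStrict returns a *yaml.TypeError reporting the unknown field.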
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+ strict bool
+ parser *parser
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{
+ parser: newParserFromReader(r),
+ }
+}
+
+// SetStrict sets whether strict decoding behaviour is enabled when
+// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict.
+func (dec *Decoder) SetStrict(strict bool) {
+ dec.strict = strict
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+ d := newDecoder(dec.strict)
+ defer handleErr(&err)
+ node := dec.parser.parse()
+ if node == nil {
+ return io.EOF
+ }
+ out := reflect.ValueOf(v)
+ if out.Kind() == reflect.Ptr && !out.IsNil() {
+ out = out.Elem()
+ }
+ d.unmarshal(node, out)
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
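+
+// For example, streaming every document until EOF (editor's illustration):
+//
+//     dec := yaml.NewDecoder(r)
+//     for {
+//         var doc map[string]interface{}
+//         err := dec.Decode(&doc)
+//         if err == io.EOF {
+//             break
+//         }
+//         if err != nil {
+//             return err
+//         }
+//         // use doc
+//     }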
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+ defer handleErr(&err)
+ d := newDecoder(strict)
+ p := newParser(in)
+ defer p.destroy()
+ node := p.parse()
+ if node != nil {
+ v := reflect.ValueOf(out)
+ if v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ d.unmarshal(node, v)
+ }
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+// omitempty Only include the field if it's not set to the zero
+// value for the type or to empty slices or maps.
+// Zero valued structs will be omitted if all their public
+// fields are zero, unless they implement an IsZero
+// method (see the IsZeroer interface type), in which
+// case the field will be included if that method returns true.
+//
+// flow Marshal using a flow style (useful for structs,
+// sequences and maps).
+//
+// inline Inline the field, which must be a struct or a map,
+// causing all of its fields or keys to be processed as if
+// they were part of the outer struct. For maps, keys must
+// not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+ defer handleErr(&err)
+ e := newEncoder()
+ defer e.destroy()
+ e.marshalDoc("", reflect.ValueOf(in))
+ e.finish()
+ out = e.out
+ return
+}
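+
+// For example (editor's illustration; the exact output shape is approximate),
+// combining the flow and inline flags described above:
+//
+//     type Point struct {
+//         X, Y int
+//     }
+//     type T struct {
+//         P     Point             `yaml:"p,flow"`
+//         Extra map[string]string `yaml:",inline"`
+//     }
+//     out, _ := yaml.Marshal(&T{Point{1, 2}, map[string]string{"k": "v"}})
+//     // yields "p: {x: 1, y: 2}\nk: v\n"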
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+ encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ encoder: newEncoderWithWriter(w),
+ }
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent documents will be preceded
+// by a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+ defer handleErr(&err)
+ e.encoder.marshalDoc("", reflect.ValueOf(v))
+ return nil
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+ defer handleErr(&err)
+ e.encoder.finish()
+ return nil
+}
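+
+// For example, writing a two-document stream (editor's illustration):
+//
+//     var buf bytes.Buffer
+//     enc := yaml.NewEncoder(&buf)
+//     enc.Encode(map[string]int{"a": 1})
+//     enc.Encode(map[string]int{"b": 2})
+//     enc.Close()
+//     // buf now holds "a: 1\n---\nb: 2\n"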
+
+func handleErr(err *error) {
+ if v := recover(); v != nil {
+ if e, ok := v.(yamlError); ok {
+ *err = e.err
+ } else {
+ panic(v)
+ }
+ }
+}
+
+type yamlError struct {
+ err error
+}
+
+func fail(err error) {
+ panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+ panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+ Errors []string
+}
+
+func (e *TypeError) Error() string {
+ return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+ FieldsMap map[string]fieldInfo
+ FieldsList []fieldInfo
+
+	// InlineMap is the index of the field in the struct that
+	// contains an ,inline map, or -1 if there is none.
+ InlineMap int
+}
+
+type fieldInfo struct {
+ Key string
+ Num int
+ OmitEmpty bool
+ Flow bool
+ // Id holds the unique field identifier, so we can cheaply
+ // check for field duplicates without maintaining an extra map.
+ Id int
+
+ // Inline holds the field index if the field is part of an inlined struct.
+ Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+ fieldMapMutex.RLock()
+ sinfo, found := structMap[st]
+ fieldMapMutex.RUnlock()
+ if found {
+ return sinfo, nil
+ }
+
+ n := st.NumField()
+ fieldsMap := make(map[string]fieldInfo)
+ fieldsList := make([]fieldInfo, 0, n)
+ inlineMap := -1
+ for i := 0; i != n; i++ {
+ field := st.Field(i)
+ if field.PkgPath != "" && !field.Anonymous {
+ continue // Private field
+ }
+
+ info := fieldInfo{Num: i}
+
+ tag := field.Tag.Get("yaml")
+ if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+ tag = string(field.Tag)
+ }
+ if tag == "-" {
+ continue
+ }
+
+ inline := false
+ fields := strings.Split(tag, ",")
+ if len(fields) > 1 {
+ for _, flag := range fields[1:] {
+ switch flag {
+ case "omitempty":
+ info.OmitEmpty = true
+ case "flow":
+ info.Flow = true
+ case "inline":
+ inline = true
+ default:
+					return nil, fmt.Errorf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
+ }
+ }
+ tag = fields[0]
+ }
+
+ if inline {
+ switch field.Type.Kind() {
+ case reflect.Map:
+ if inlineMap >= 0 {
+ return nil, errors.New("Multiple ,inline maps in struct " + st.String())
+ }
+ if field.Type.Key() != reflect.TypeOf("") {
+ return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
+ }
+ inlineMap = info.Num
+ case reflect.Struct:
+ sinfo, err := getStructInfo(field.Type)
+ if err != nil {
+ return nil, err
+ }
+ for _, finfo := range sinfo.FieldsList {
+ if _, found := fieldsMap[finfo.Key]; found {
+ msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+ if finfo.Inline == nil {
+ finfo.Inline = []int{i, finfo.Num}
+ } else {
+ finfo.Inline = append([]int{i}, finfo.Inline...)
+ }
+ finfo.Id = len(fieldsList)
+ fieldsMap[finfo.Key] = finfo
+ fieldsList = append(fieldsList, finfo)
+ }
+ default:
+ //return nil, errors.New("Option ,inline needs a struct value or map field")
+ return nil, errors.New("Option ,inline needs a struct value field")
+ }
+ continue
+ }
+
+ if tag != "" {
+ info.Key = tag
+ } else {
+ info.Key = strings.ToLower(field.Name)
+ }
+
+ if _, found = fieldsMap[info.Key]; found {
+ msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+
+ info.Id = len(fieldsList)
+ fieldsList = append(fieldsList, info)
+ fieldsMap[info.Key] = info
+ }
+
+ sinfo = &structInfo{
+ FieldsMap: fieldsMap,
+ FieldsList: fieldsList,
+ InlineMap: inlineMap,
+ }
+
+ fieldMapMutex.Lock()
+ structMap[st] = sinfo
+ fieldMapMutex.Unlock()
+ return sinfo, nil
+}
+
+// IsZeroer is used to check whether an object is zero to
+// determine whether it should be omitted when marshaling
+// with the omitempty flag. One notable implementation
+// is time.Time.
+type IsZeroer interface {
+ IsZero() bool
+}
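+
+// For example (editor's illustration): time.Time implements IsZero, so a
+// time.Time field tagged `yaml:"t,omitempty"` is omitted exactly when its
+// IsZero method reports true, rather than only when every exported field
+// of the struct value happens to be zero.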
+
+func isZero(v reflect.Value) bool {
+ kind := v.Kind()
+ if z, ok := v.Interface().(IsZeroer); ok {
+ if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
+ return true
+ }
+ return z.IsZero()
+ }
+ switch kind {
+ case reflect.String:
+ return len(v.String()) == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ case reflect.Slice:
+ return v.Len() == 0
+ case reflect.Map:
+ return v.Len() == 0
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Struct:
+ vt := v.Type()
+ for i := v.NumField() - 1; i >= 0; i-- {
+ if vt.Field(i).PkgPath != "" {
+ continue // Private field
+ }
+ if !isZero(v.Field(i)) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go
new file mode 100644
index 00000000..e25cee56
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yamlh.go
@@ -0,0 +1,738 @@
+package yaml
+
+import (
+ "fmt"
+ "io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+ major int8 // The major version number.
+ minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+ handle []byte // The tag handle.
+ prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+ // Let the parser choose the encoding.
+ yaml_ANY_ENCODING yaml_encoding_t = iota
+
+ yaml_UTF8_ENCODING // The default UTF-8 encoding.
+ yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+ yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+ // Let the parser choose the break type.
+ yaml_ANY_BREAK yaml_break_t = iota
+
+ yaml_CR_BREAK // Use CR for line breaks (Mac style).
+ yaml_LN_BREAK // Use LN for line breaks (Unix style).
+ yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+ // No error is produced.
+ yaml_NO_ERROR yaml_error_type_t = iota
+
+ yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
+ yaml_READER_ERROR // Cannot read or decode the input stream.
+ yaml_SCANNER_ERROR // Cannot scan the input stream.
+ yaml_PARSER_ERROR // Cannot parse the input stream.
+ yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+ yaml_WRITER_ERROR // Cannot write to the output stream.
+ yaml_EMITTER_ERROR // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+ index int // The position index.
+ line int // The position line.
+ column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+ yaml_PLAIN_SCALAR_STYLE // The plain scalar style.
+ yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+ yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+ yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
+ yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+ yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+ yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+ yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+ yaml_FLOW_MAPPING_STYLE // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+ // An empty token.
+ yaml_NO_TOKEN yaml_token_type_t = iota
+
+ yaml_STREAM_START_TOKEN // A STREAM-START token.
+ yaml_STREAM_END_TOKEN // A STREAM-END token.
+
+ yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+ yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
+ yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
+ yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
+
+ yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+ yaml_BLOCK_END_TOKEN // A BLOCK-END token.
+
+ yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+ yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
+ yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
+ yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
+
+ yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+ yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
+ yaml_KEY_TOKEN // A KEY token.
+ yaml_VALUE_TOKEN // A VALUE token.
+
+ yaml_ALIAS_TOKEN // An ALIAS token.
+ yaml_ANCHOR_TOKEN // An ANCHOR token.
+ yaml_TAG_TOKEN // A TAG token.
+ yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+ switch tt {
+ case yaml_NO_TOKEN:
+ return "yaml_NO_TOKEN"
+ case yaml_STREAM_START_TOKEN:
+ return "yaml_STREAM_START_TOKEN"
+ case yaml_STREAM_END_TOKEN:
+ return "yaml_STREAM_END_TOKEN"
+ case yaml_VERSION_DIRECTIVE_TOKEN:
+ return "yaml_VERSION_DIRECTIVE_TOKEN"
+ case yaml_TAG_DIRECTIVE_TOKEN:
+ return "yaml_TAG_DIRECTIVE_TOKEN"
+ case yaml_DOCUMENT_START_TOKEN:
+ return "yaml_DOCUMENT_START_TOKEN"
+ case yaml_DOCUMENT_END_TOKEN:
+ return "yaml_DOCUMENT_END_TOKEN"
+ case yaml_BLOCK_SEQUENCE_START_TOKEN:
+ return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+ case yaml_BLOCK_MAPPING_START_TOKEN:
+ return "yaml_BLOCK_MAPPING_START_TOKEN"
+ case yaml_BLOCK_END_TOKEN:
+ return "yaml_BLOCK_END_TOKEN"
+ case yaml_FLOW_SEQUENCE_START_TOKEN:
+ return "yaml_FLOW_SEQUENCE_START_TOKEN"
+ case yaml_FLOW_SEQUENCE_END_TOKEN:
+ return "yaml_FLOW_SEQUENCE_END_TOKEN"
+ case yaml_FLOW_MAPPING_START_TOKEN:
+ return "yaml_FLOW_MAPPING_START_TOKEN"
+ case yaml_FLOW_MAPPING_END_TOKEN:
+ return "yaml_FLOW_MAPPING_END_TOKEN"
+ case yaml_BLOCK_ENTRY_TOKEN:
+ return "yaml_BLOCK_ENTRY_TOKEN"
+ case yaml_FLOW_ENTRY_TOKEN:
+ return "yaml_FLOW_ENTRY_TOKEN"
+ case yaml_KEY_TOKEN:
+ return "yaml_KEY_TOKEN"
+ case yaml_VALUE_TOKEN:
+ return "yaml_VALUE_TOKEN"
+ case yaml_ALIAS_TOKEN:
+ return "yaml_ALIAS_TOKEN"
+ case yaml_ANCHOR_TOKEN:
+ return "yaml_ANCHOR_TOKEN"
+ case yaml_TAG_TOKEN:
+ return "yaml_TAG_TOKEN"
+ case yaml_SCALAR_TOKEN:
+ return "yaml_SCALAR_TOKEN"
+ }
+ return "<unknown token>"
+}
+
+// The token structure.
+type yaml_token_t struct {
+ // The token type.
+ typ yaml_token_type_t
+
+ // The start/end of the token.
+ start_mark, end_mark yaml_mark_t
+
+ // The stream encoding (for yaml_STREAM_START_TOKEN).
+ encoding yaml_encoding_t
+
+ // The alias/anchor/scalar value or tag/tag directive handle
+ // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+ value []byte
+
+ // The tag suffix (for yaml_TAG_TOKEN).
+ suffix []byte
+
+ // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+ prefix []byte
+
+ // The scalar style (for yaml_SCALAR_TOKEN).
+ style yaml_scalar_style_t
+
+ // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+ major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+ // An empty event.
+ yaml_NO_EVENT yaml_event_type_t = iota
+
+ yaml_STREAM_START_EVENT // A STREAM-START event.
+ yaml_STREAM_END_EVENT // A STREAM-END event.
+ yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+ yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
+ yaml_ALIAS_EVENT // An ALIAS event.
+ yaml_SCALAR_EVENT // A SCALAR event.
+ yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+ yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
+ yaml_MAPPING_START_EVENT // A MAPPING-START event.
+ yaml_MAPPING_END_EVENT // A MAPPING-END event.
+)
+
+var eventStrings = []string{
+ yaml_NO_EVENT: "none",
+ yaml_STREAM_START_EVENT: "stream start",
+ yaml_STREAM_END_EVENT: "stream end",
+ yaml_DOCUMENT_START_EVENT: "document start",
+ yaml_DOCUMENT_END_EVENT: "document end",
+ yaml_ALIAS_EVENT: "alias",
+ yaml_SCALAR_EVENT: "scalar",
+ yaml_SEQUENCE_START_EVENT: "sequence start",
+ yaml_SEQUENCE_END_EVENT: "sequence end",
+ yaml_MAPPING_START_EVENT: "mapping start",
+ yaml_MAPPING_END_EVENT: "mapping end",
+}
+
+func (e yaml_event_type_t) String() string {
+ if e < 0 || int(e) >= len(eventStrings) {
+ return fmt.Sprintf("unknown event %d", e)
+ }
+ return eventStrings[e]
+}
+
+// The event structure.
+type yaml_event_t struct {
+
+ // The event type.
+ typ yaml_event_type_t
+
+ // The start and end of the event.
+ start_mark, end_mark yaml_mark_t
+
+ // The document encoding (for yaml_STREAM_START_EVENT).
+ encoding yaml_encoding_t
+
+ // The version directive (for yaml_DOCUMENT_START_EVENT).
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+ tag_directives []yaml_tag_directive_t
+
+ // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+ anchor []byte
+
+ // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ tag []byte
+
+ // The scalar value (for yaml_SCALAR_EVENT).
+ value []byte
+
+ // Is the document start/end indicator implicit, or the tag optional?
+ // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+ implicit bool
+
+ // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+ quoted_implicit bool
+
+ // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+ yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
+ yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
+ yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
+ yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
+ yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
+ yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+ yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+ yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
+
+ // Not in original libyaml.
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+ yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
+
+ yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
+ yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+ yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+ // An empty node.
+ yaml_NO_NODE yaml_node_type_t = iota
+
+ yaml_SCALAR_NODE // A scalar node.
+ yaml_SEQUENCE_NODE // A sequence node.
+ yaml_MAPPING_NODE // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+ key int // The key of the element.
+ value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+ typ yaml_node_type_t // The node type.
+ tag []byte // The node tag.
+
+ // The node data.
+
+ // The scalar parameters (for yaml_SCALAR_NODE).
+ scalar struct {
+ value []byte // The scalar value.
+ length int // The length of the scalar value.
+ style yaml_scalar_style_t // The scalar style.
+ }
+
+ // The sequence parameters (for YAML_SEQUENCE_NODE).
+ sequence struct {
+ items_data []yaml_node_item_t // The stack of sequence items.
+ style yaml_sequence_style_t // The sequence style.
+ }
+
+ // The mapping parameters (for yaml_MAPPING_NODE).
+ mapping struct {
+ pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
+ pairs_start *yaml_node_pair_t // The beginning of the stack.
+ pairs_end *yaml_node_pair_t // The end of the stack.
+ pairs_top *yaml_node_pair_t // The top of the stack.
+ style yaml_mapping_style_t // The mapping style.
+ }
+
+ start_mark yaml_mark_t // The beginning of the node.
+ end_mark yaml_mark_t // The end of the node.
+
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+ // The document nodes.
+ nodes []yaml_node_t
+
+ // The version directive.
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives.
+ tag_directives_data []yaml_tag_directive_t
+ tag_directives_start int // The beginning of the tag directives list.
+ tag_directives_end int // The end of the tag directives list.
+
+ start_implicit int // Is the document start indicator implicit?
+ end_implicit int // Is the document end indicator implicit?
+
+ // The start/end of the document.
+ start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from
+// the source. The handler should write no more than len(buffer) bytes into
+// the buffer and return the number of bytes written.
+//
+// On success, the handler returns the number of bytes read and a nil error.
+// At the end of the input it returns io.EOF, possibly alongside the final
+// bytes read. Any other error aborts parsing with a reader error.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+ possible bool // Is a simple key possible?
+ required bool // Is a simple key required?
+ token_number int // The number of the token.
+ mark yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+ yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+ yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
+ yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
+ yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
+ yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+ yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
+ yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
+ yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
+ yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
+ yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
+ yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+ yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
+ yaml_PARSE_END_STATE // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+ switch ps {
+ case yaml_PARSE_STREAM_START_STATE:
+ return "yaml_PARSE_STREAM_START_STATE"
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return "yaml_PARSE_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return "yaml_PARSE_DOCUMENT_END_STATE"
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_STATE"
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return "yaml_PARSE_FLOW_NODE_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+ case yaml_PARSE_END_STATE:
+ return "yaml_PARSE_END_STATE"
+ }
+ return "<unknown parser state>"
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+ anchor []byte // The anchor.
+ index int // The node id.
+ mark yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+
+ problem string // Error description.
+
+	// The byte about which the problem occurred.
+	problem_offset int
+	problem_value  int         // The problematic value (-1 means none).
+	problem_mark   yaml_mark_t // The position of the problem.
+
+ // The error context.
+ context string
+ context_mark yaml_mark_t
+
+ // Reader stuff
+
+ read_handler yaml_read_handler_t // Read handler.
+
+ input_reader io.Reader // File input data.
+ input []byte // String input data.
+ input_pos int
+
+ eof bool // EOF flag
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ unread int // The number of unread characters in the buffer.
+
+ raw_buffer []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the raw buffer.
+
+ encoding yaml_encoding_t // The input encoding.
+
+ offset int // The offset of the current position (in bytes).
+ mark yaml_mark_t // The mark of the current position.
+
+ // Scanner stuff
+
+ stream_start_produced bool // Have we started to scan the input stream?
+ stream_end_produced bool // Have we reached the end of the input stream?
+
+ flow_level int // The number of unclosed '[' and '{' indicators.
+
+ tokens []yaml_token_t // The tokens queue.
+ tokens_head int // The head of the tokens queue.
+ tokens_parsed int // The number of tokens fetched from the queue.
+	token_available bool         // Does the tokens queue contain a token ready for dequeueing?
+
+ indent int // The current indentation level.
+ indents []int // The indentation levels stack.
+
+ simple_key_allowed bool // May a simple key occur at the current position?
+ simple_keys []yaml_simple_key_t // The stack of simple keys.
+
+ // Parser stuff
+
+ state yaml_parser_state_t // The current parser state.
+ states []yaml_parser_state_t // The parser states stack.
+ marks []yaml_mark_t // The stack of marks.
+ tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+ // Dumper stuff
+
+ aliases []yaml_alias_data_t // The alias data.
+
+ document *yaml_document_t // The currently parsed document.
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the
+// accumulated characters to the output. The handler should write all of
+// buffer to the output.
+//
+// [in,out] emitter The emitter whose output is being written, as configured
+//                  by yaml_emitter_set_output().
+// [in]     buffer  The bytes to be written.
+//
+// On success, the handler returns nil; a non-nil error reports a write
+// failure.
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
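+
+// A minimal sketch (editorial, not part of the upstream port): a write
+// handler can forward the emitter's accumulated bytes to the io.Writer
+// stored on the emitter; io.Writer guarantees a non-nil error whenever
+// fewer than len(buffer) bytes are written, so returning err is enough.
+func example_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+	// emitter.output_writer holds the io.Writer supplied by the caller.
+	_, err := emitter.output_writer.Write(buffer)
+	return err
+}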
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+ // Expect STREAM-START.
+ yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+ yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
+ yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
+ yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
+ yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
+ yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
+ yaml_EMIT_END_STATE // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_emitter_ family of functions.
+type yaml_emitter_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+ problem string // Error description.
+
+ // Writer stuff
+
+ write_handler yaml_write_handler_t // Write handler.
+
+ output_buffer *[]byte // String output data.
+ output_writer io.Writer // File output data.
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ raw_buffer []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the raw buffer.
+
+ encoding yaml_encoding_t // The stream encoding.
+
+ // Emitter stuff
+
+	canonical   bool         // Is the output in the canonical style?
+ best_indent int // The number of indentation spaces.
+ best_width int // The preferred width of the output lines.
+ unicode bool // Allow unescaped non-ASCII characters?
+ line_break yaml_break_t // The preferred line break.
+
+ state yaml_emitter_state_t // The current emitter state.
+ states []yaml_emitter_state_t // The stack of states.
+
+ events []yaml_event_t // The event queue.
+ events_head int // The head of the event queue.
+
+ indents []int // The stack of indentation levels.
+
+ tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+ indent int // The current indentation level.
+
+ flow_level int // The current flow level.
+
+ root_context bool // Is it the document root context?
+ sequence_context bool // Is it a sequence context?
+ mapping_context bool // Is it a mapping context?
+ simple_key_context bool // Is it a simple mapping key context?
+
+ line int // The current line.
+ column int // The current column.
+	whitespace bool // Was the last character whitespace?
+	indention  bool // Was the last character an indentation character (' ', '-', '?', ':')?
+	open_ended bool // Is an explicit document end indicator required?
+
+ // Anchor analysis.
+ anchor_data struct {
+ anchor []byte // The anchor value.
+ alias bool // Is it an alias?
+ }
+
+ // Tag analysis.
+ tag_data struct {
+ handle []byte // The tag handle.
+ suffix []byte // The tag suffix.
+ }
+
+ // Scalar analysis.
+ scalar_data struct {
+ value []byte // The scalar value.
+ multiline bool // Does the scalar contain line breaks?
+		flow_plain_allowed    bool // Can the scalar be expressed in the flow plain style?
+ block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+ single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+ block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+ style yaml_scalar_style_t // The output style.
+ }
+
+ // Dumper stuff
+
+	opened bool // Has the stream been opened?
+	closed bool // Has the stream been closed?
+
+ // The information associated with the document nodes.
+ anchors *struct {
+ references int // The number of references.
+ anchor int // The anchor id.
+		serialized bool // Has the node been emitted?
+ }
+
+ last_anchor_id int // The last assigned anchor id.
+
+ document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
new file mode 100644
index 00000000..8110ce3c
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+ // The size of the input raw buffer.
+ input_raw_buffer_size = 512
+
+ // The size of the input buffer.
+ // It should be possible to decode the whole raw buffer.
+ input_buffer_size = input_raw_buffer_size * 3
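+	// (Editorial note, not in upstream libyaml: the factor of 3 is a safe
+	// upper bound for re-encoding the raw buffer as UTF-8, since a 2-byte
+	// UTF-16 unit decodes to at most 3 UTF-8 bytes.)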
+
+ // The size of the output buffer.
+ output_buffer_size = 128
+
+ // The size of the output raw buffer.
+ // It should be possible to encode the whole output buffer.
+ output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+ // The size of other stacks and queues.
+ initial_stack_size = 16
+ initial_queue_size = 16
+ initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+ return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+ bi := b[i]
+ if bi >= 'A' && bi <= 'F' {
+ return int(bi) - 'A' + 10
+ }
+ if bi >= 'a' && bi <= 'f' {
+ return int(bi) - 'a' + 10
+ }
+ return int(bi) - '0'
+}
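+
+// A minimal sketch (editorial): is_hex and as_hex combine to decode a
+// two-digit hex escape such as the NN in \xNN. The bounds check is the
+// caller's responsibility in the scanner; it is made explicit here.
+func example_parse_hex_byte(b []byte, i int) (byte, bool) {
+	if i+1 >= len(b) || !is_hex(b, i) || !is_hex(b, i+1) {
+		return 0, false
+	}
+	return byte(as_hex(b, i)<<4 | as_hex(b, i+1)), true
+}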
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+ return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+ return ((b[i] == 0x0A) || // . == #x0A
+ (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+		(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
+ (b[i] > 0xC2 && b[i] < 0xED) ||
+ (b[i] == 0xED && b[i+1] < 0xA0) ||
+ (b[i] == 0xEE) ||
+ (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+ !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+ !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+ return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
+func is_bom(b []byte, i int) bool {
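+	// Note (editorial): i is accepted for symmetry with the other checks,
+	// but the test always looks at the start of the buffer.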
+ return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+ return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+ return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+ //return is_space(b, i) || is_tab(b, i)
+ return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+ return (b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+ return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+ //return is_break(b, i) || is_z(b, i)
+ return ( // is_break:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ // is_z:
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+ //return is_space(b, i) || is_breakz(b, i)
+ return ( // is_space:
+ b[i] == ' ' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+ //return is_blank(b, i) || is_breakz(b, i)
+ return ( // is_blank:
+ b[i] == ' ' || b[i] == '\t' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Determine the width of the character.
+func width(b byte) int {
+ // Don't replace these by a switch without first
+ // confirming that it is being inlined.
+ if b&0x80 == 0x00 {
+ return 1
+ }
+ if b&0xE0 == 0xC0 {
+ return 2
+ }
+ if b&0xF0 == 0xE0 {
+ return 3
+ }
+ if b&0xF8 == 0xF0 {
+ return 4
+ }
+ return 0
+}
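+
+// A minimal sketch (editorial): because width reports the byte length of a
+// UTF-8 sequence from its leading byte, a buffer can be walked character by
+// character without fully decoding it. A result of 0 (a continuation or
+// invalid leading byte) is stepped over one byte at a time here, an
+// editorial choice that keeps the loop terminating.
+func example_count_chars(b []byte) int {
+	n := 0
+	for i := 0; i < len(b); n++ {
+		w := width(b[i])
+		if w == 0 {
+			w = 1 // skip a malformed byte rather than loop forever
+		}
+		i += w
+	}
+	return n
+}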