-rw-r--r--  Gopkg.lock  76
-rw-r--r--  bridge/github/github.go  24
-rw-r--r--  bridge/github/import.go  124
-rw-r--r--  vendor/github.com/golang/protobuf/AUTHORS  3
-rw-r--r--  vendor/github.com/golang/protobuf/CONTRIBUTORS  3
-rw-r--r--  vendor/github.com/golang/protobuf/LICENSE  28
-rw-r--r--  vendor/github.com/golang/protobuf/proto/clone.go  253
-rw-r--r--  vendor/github.com/golang/protobuf/proto/decode.go  428
-rw-r--r--  vendor/github.com/golang/protobuf/proto/discard.go  350
-rw-r--r--  vendor/github.com/golang/protobuf/proto/encode.go  203
-rw-r--r--  vendor/github.com/golang/protobuf/proto/equal.go  300
-rw-r--r--  vendor/github.com/golang/protobuf/proto/extensions.go  543
-rw-r--r--  vendor/github.com/golang/protobuf/proto/lib.go  979
-rw-r--r--  vendor/github.com/golang/protobuf/proto/message_set.go  314
-rw-r--r--  vendor/github.com/golang/protobuf/proto/pointer_reflect.go  357
-rw-r--r--  vendor/github.com/golang/protobuf/proto/pointer_unsafe.go  308
-rw-r--r--  vendor/github.com/golang/protobuf/proto/properties.go  544
-rw-r--r--  vendor/github.com/golang/protobuf/proto/table_marshal.go  2767
-rw-r--r--  vendor/github.com/golang/protobuf/proto/table_merge.go  654
-rw-r--r--  vendor/github.com/golang/protobuf/proto/table_unmarshal.go  2051
-rw-r--r--  vendor/github.com/golang/protobuf/proto/text.go  843
-rw-r--r--  vendor/github.com/golang/protobuf/proto/text_parser.go  880
-rw-r--r--  vendor/github.com/shurcooL/githubv4/.travis.yml  16
-rw-r--r--  vendor/github.com/shurcooL/githubv4/LICENSE  21
-rw-r--r--  vendor/github.com/shurcooL/githubv4/README.md  408
-rw-r--r--  vendor/github.com/shurcooL/githubv4/doc.go  13
-rw-r--r--  vendor/github.com/shurcooL/githubv4/enum.go  529
-rw-r--r--  vendor/github.com/shurcooL/githubv4/gen.go  192
-rw-r--r--  vendor/github.com/shurcooL/githubv4/githubv4.go  56
-rw-r--r--  vendor/github.com/shurcooL/githubv4/input.go  483
-rw-r--r--  vendor/github.com/shurcooL/githubv4/scalar.go  139
-rw-r--r--  vendor/github.com/shurcooL/go/browser/LICENSE  27
-rw-r--r--  vendor/github.com/shurcooL/go/ctxhttp/ctxhttp.go  78
-rw-r--r--  vendor/github.com/shurcooL/graphql/.travis.yml  16
-rw-r--r--  vendor/github.com/shurcooL/graphql/LICENSE  21
-rw-r--r--  vendor/github.com/shurcooL/graphql/README.md  293
-rw-r--r--  vendor/github.com/shurcooL/graphql/doc.go  11
-rw-r--r--  vendor/github.com/shurcooL/graphql/graphql.go  119
-rw-r--r--  vendor/github.com/shurcooL/graphql/ident/ident.go  240
-rw-r--r--  vendor/github.com/shurcooL/graphql/internal/jsonutil/graphql.go  308
-rw-r--r--  vendor/github.com/shurcooL/graphql/query.go  131
-rw-r--r--  vendor/github.com/shurcooL/graphql/scalar.go  51
-rw-r--r--  vendor/golang.org/x/net/AUTHORS  3
-rw-r--r--  vendor/golang.org/x/net/CONTRIBUTORS  3
-rw-r--r--  vendor/golang.org/x/net/LICENSE  27
-rw-r--r--  vendor/golang.org/x/net/PATENTS  22
-rw-r--r--  vendor/golang.org/x/net/context/context.go  56
-rw-r--r--  vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go  74
-rw-r--r--  vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go  147
-rw-r--r--  vendor/golang.org/x/net/context/go17.go  72
-rw-r--r--  vendor/golang.org/x/net/context/go19.go  20
-rw-r--r--  vendor/golang.org/x/net/context/pre_go17.go  300
-rw-r--r--  vendor/golang.org/x/net/context/pre_go19.go  109
-rw-r--r--  vendor/golang.org/x/oauth2/.travis.yml  13
-rw-r--r--  vendor/golang.org/x/oauth2/AUTHORS  3
-rw-r--r--  vendor/golang.org/x/oauth2/CONTRIBUTING.md  26
-rw-r--r--  vendor/golang.org/x/oauth2/CONTRIBUTORS  3
-rw-r--r--  vendor/golang.org/x/oauth2/LICENSE  27
-rw-r--r--  vendor/golang.org/x/oauth2/README.md  77
-rw-r--r--  vendor/golang.org/x/oauth2/internal/client_appengine.go  13
-rw-r--r--  vendor/golang.org/x/oauth2/internal/doc.go  6
-rw-r--r--  vendor/golang.org/x/oauth2/internal/oauth2.go  37
-rw-r--r--  vendor/golang.org/x/oauth2/internal/token.go  272
-rw-r--r--  vendor/golang.org/x/oauth2/internal/transport.go  34
-rw-r--r--  vendor/golang.org/x/oauth2/oauth2.go  362
-rw-r--r--  vendor/golang.org/x/oauth2/token.go  175
-rw-r--r--  vendor/golang.org/x/oauth2/transport.go  144
-rw-r--r--  vendor/google.golang.org/appengine/LICENSE  202
-rw-r--r--  vendor/google.golang.org/appengine/internal/api.go  660
-rw-r--r--  vendor/google.golang.org/appengine/internal/api_classic.go  169
-rw-r--r--  vendor/google.golang.org/appengine/internal/api_common.go  123
-rw-r--r--  vendor/google.golang.org/appengine/internal/api_pre17.go  682
-rw-r--r--  vendor/google.golang.org/appengine/internal/app_id.go  28
-rw-r--r--  vendor/google.golang.org/appengine/internal/base/api_base.pb.go  308
-rw-r--r--  vendor/google.golang.org/appengine/internal/base/api_base.proto  33
-rw-r--r--  vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go  4367
-rwxr-xr-x  vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto  551
-rw-r--r--  vendor/google.golang.org/appengine/internal/identity.go  14
-rw-r--r--  vendor/google.golang.org/appengine/internal/identity_classic.go  57
-rw-r--r--  vendor/google.golang.org/appengine/internal/identity_vm.go  134
-rw-r--r--  vendor/google.golang.org/appengine/internal/internal.go  110
-rw-r--r--  vendor/google.golang.org/appengine/internal/log/log_service.pb.go  1313
-rw-r--r--  vendor/google.golang.org/appengine/internal/log/log_service.proto  150
-rw-r--r--  vendor/google.golang.org/appengine/internal/main.go  15
-rw-r--r--  vendor/google.golang.org/appengine/internal/main_vm.go  48
-rw-r--r--  vendor/google.golang.org/appengine/internal/metadata.go  60
-rw-r--r--  vendor/google.golang.org/appengine/internal/net.go  56
-rwxr-xr-x  vendor/google.golang.org/appengine/internal/regen.sh  40
-rw-r--r--  vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go  361
-rw-r--r--  vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto  44
-rw-r--r--  vendor/google.golang.org/appengine/internal/transaction.go  115
-rw-r--r--  vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go  527
-rw-r--r--  vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto  64
-rw-r--r--  vendor/google.golang.org/appengine/urlfetch/urlfetch.go  210
94 files changed, 27635 insertions, 15 deletions
diff --git a/Gopkg.lock b/Gopkg.lock
index c4363835..375a31ff 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -66,6 +66,14 @@
version = "v1.7.0"
[[projects]]
+ digest = "1:97df918963298c287643883209a2c3f642e6593379f97ab400c2a2e219ab647d"
+ name = "github.com/golang/protobuf"
+ packages = ["proto"]
+ pruneopts = "UT"
+ revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
+ version = "v1.2.0"
+
+[[projects]]
digest = "1:c79fb010be38a59d657c48c6ba1d003a8aa651fa56b579d959d74573b7dff8e1"
name = "github.com/gorilla/context"
packages = ["."]
@@ -182,6 +190,34 @@
[[projects]]
branch = "master"
+ digest = "1:a1562fb3021983f533a27d02ebf36f1bc1ab327660d611d6e948970b54087792"
+ name = "github.com/shurcooL/githubv4"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "b5f70540eee0ebfb6a27b52fc5b131be76415539"
+
+[[projects]]
+ branch = "master"
+ digest = "1:ddaa79b37808fbd09a1c3f484bba6320c884664dfc25ccc5f2d7d5a82222177c"
+ name = "github.com/shurcooL/go"
+ packages = ["ctxhttp"]
+ pruneopts = "UT"
+ revision = "9e1955d9fb6e1ee2345ba1f5e71669263e719e27"
+
+[[projects]]
+ branch = "master"
+ digest = "1:b69bc7c0c0489f6f3467b737e243620b1a545dbeaf7623a9432b658f7b7ecc58"
+ name = "github.com/shurcooL/graphql"
+ packages = [
+ ".",
+ "ident",
+ "internal/jsonutil",
+ ]
+ pruneopts = "UT"
+ revision = "365899397c9ad12805631fe4c9b2a64be9d74818"
+
+[[projects]]
+ branch = "master"
digest = "1:9d29b003dc5f98647a5dd6754d65c07171fcd35761102ea56ecd3d6993adee7f"
name = "github.com/shurcooL/httpfs"
packages = [
@@ -252,6 +288,28 @@
[[projects]]
branch = "master"
+ digest = "1:d6b719875cf8091fbab38527d81d34e71f4521b9ee9ccfbd4a32cff2ac5af96e"
+ name = "golang.org/x/net"
+ packages = [
+ "context",
+ "context/ctxhttp",
+ ]
+ pruneopts = "UT"
+ revision = "2f5d2388922f370f4355f327fcf4cfe9f5583908"
+
+[[projects]]
+ branch = "master"
+ digest = "1:363b547c971a2b07474c598b6e9ebcb238d556d8a27f37b3895ad20cd50e7281"
+ name = "golang.org/x/oauth2"
+ packages = [
+ ".",
+ "internal",
+ ]
+ pruneopts = "UT"
+ revision = "d2e6202438beef2727060aa7cabdd924d92ebfd9"
+
+[[projects]]
+ branch = "master"
digest = "1:3364d01296ce7eeca363e3d530ae63a2092d6f8efb85fb3d101e8f6d7de83452"
name = "golang.org/x/sys"
packages = [
@@ -277,6 +335,22 @@
revision = "a434f64ace81347eff0fb4a32bc80a235e0ad762"
[[projects]]
+ digest = "1:6247f76e55a1e1a5c19a81e2d4b4dff6730461eeb5bbb0a16dd4a8ec8637ee93"
+ name = "google.golang.org/appengine"
+ packages = [
+ "internal",
+ "internal/base",
+ "internal/datastore",
+ "internal/log",
+ "internal/remote_api",
+ "internal/urlfetch",
+ "urlfetch",
+ ]
+ pruneopts = "UT"
+ revision = "ae0ab99deb4dc413a2b4bd6c8bdd0eb67f1e4d06"
+ version = "v1.2.0"
+
+[[projects]]
digest = "1:342378ac4dcb378a5448dd723f0784ae519383532f5e70ade24132c4c8693202"
name = "gopkg.in/yaml.v2"
packages = ["."]
@@ -300,6 +374,7 @@
"github.com/jroimartin/gocui",
"github.com/phayes/freeport",
"github.com/pkg/errors",
+ "github.com/shurcooL/githubv4",
"github.com/shurcooL/httpfs/filter",
"github.com/shurcooL/vfsgen",
"github.com/skratchdot/open-golang/open",
@@ -308,6 +383,7 @@
"github.com/vektah/gqlparser",
"github.com/vektah/gqlparser/ast",
"golang.org/x/crypto/ssh/terminal",
+ "golang.org/x/oauth2",
]
solver-name = "gps-cdcl"
solver-version = 1
diff --git a/bridge/github/github.go b/bridge/github/github.go
index 4f7a3b94..a8371637 100644
--- a/bridge/github/github.go
+++ b/bridge/github/github.go
@@ -1,10 +1,11 @@
package github
import (
- "fmt"
+ "context"
"github.com/MichaelMure/git-bug/bridge/core"
- "github.com/MichaelMure/git-bug/cache"
+ "github.com/shurcooL/githubv4"
+ "golang.org/x/oauth2"
)
func init() {
@@ -25,18 +26,11 @@ func (*Github) Exporter() core.Exporter {
return nil
}
-type githubImporter struct{}
+func buildClient(conf core.Configuration) *githubv4.Client {
+ src := oauth2.StaticTokenSource(
+ &oauth2.Token{AccessToken: conf[keyToken]},
+ )
+ httpClient := oauth2.NewClient(context.TODO(), src)
-func (*githubImporter) ImportAll(repo *cache.RepoCache, conf core.Configuration) error {
- fmt.Println(conf)
- fmt.Println("IMPORT ALL")
-
- return nil
-}
-
-func (*githubImporter) Import(repo *cache.RepoCache, conf core.Configuration, id string) error {
- fmt.Println(conf)
- fmt.Println("IMPORT")
-
- return nil
+ return githubv4.NewClient(httpClient)
}
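
The buildClient helper above wires a static OAuth2 token into a shurcooL/githubv4 GraphQL client. For reference, a minimal, self-contained sketch of exercising such a client follows; the viewer query and the placeholder token are illustrative only and are not part of this change.

package main

import (
	"context"
	"fmt"

	"github.com/shurcooL/githubv4"
	"golang.org/x/oauth2"
)

func main() {
	// Build an authenticated API v4 client the same way buildClient does,
	// here from a hard-coded placeholder token instead of core.Configuration.
	src := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "placeholder-token"})
	client := githubv4.NewClient(oauth2.NewClient(context.Background(), src))

	// Smallest useful query: the login of the authenticated user.
	var q struct {
		Viewer struct {
			Login githubv4.String
		}
	}
	if err := client.Query(context.Background(), &q, nil); err != nil {
		fmt.Println("query failed:", err)
		return
	}
	fmt.Println("authenticated as", q.Viewer.Login)
}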
diff --git a/bridge/github/import.go b/bridge/github/import.go
new file mode 100644
index 00000000..0e6b03d2
--- /dev/null
+++ b/bridge/github/import.go
@@ -0,0 +1,124 @@
+package github
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/MichaelMure/git-bug/bridge/core"
+ "github.com/MichaelMure/git-bug/cache"
+ "github.com/shurcooL/githubv4"
+)
+
+type githubImporter struct{}
+
+func (*githubImporter) ImportAll(repo *cache.RepoCache, conf core.Configuration) error {
+ client := buildClient(conf)
+
+ type Event struct {
+ CreatedAt githubv4.DateTime
+ Actor struct {
+ Login githubv4.String
+ AvatarUrl githubv4.String
+ }
+ }
+
+ var q struct {
+ Repository struct {
+ Issues struct {
+ Nodes []struct {
+ Title string
+ Timeline struct {
+ Nodes []struct {
+ Typename githubv4.String `graphql:"__typename"`
+
+ // Issue
+ IssueComment struct {
+ Author struct {
+ Login githubv4.String
+ AvatarUrl githubv4.String
+ }
+ BodyText githubv4.String
+ CreatedAt githubv4.DateTime
+
+ // TODO: edition
+ } `graphql:"... on IssueComment"`
+
+ // Label
+ LabeledEvent struct {
+ Event
+ Label struct {
+ Color githubv4.String
+ Name githubv4.String
+ }
+ } `graphql:"... on LabeledEvent"`
+ UnlabeledEvent struct {
+ Event
+ Label struct {
+ Color githubv4.String
+ Name githubv4.String
+ }
+ } `graphql:"... on UnlabeledEvent"`
+
+ // Status
+ ClosedEvent struct {
+ Event
+ } `graphql:"... on ClosedEvent"`
+ ReopenedEvent struct {
+ Event
+ } `graphql:"... on ReopenedEvent"`
+
+ // Title
+ RenamedTitleEvent struct {
+ Event
+ CurrentTitle githubv4.String
+ PreviousTitle githubv4.String
+ } `graphql:"... on RenamedTitleEvent"`
+ }
+ PageInfo struct {
+ EndCursor githubv4.String
+ HasNextPage bool
+ }
+ } `graphql:"timeline(first: $timelineFirst, after: $timelineAfter)"`
+ }
+ PageInfo struct {
+ EndCursor githubv4.String
+ HasNextPage bool
+ }
+ } `graphql:"issues(first: $issueFirst, after: $issueAfter)"`
+ } `graphql:"repository(owner: $owner, name: $name)"`
+ }
+
+ variables := map[string]interface{}{
+ "owner": githubv4.String(conf[keyUser]),
+ "name": githubv4.String(conf[keyProject]),
+ "issueFirst": githubv4.Int(1),
+ "issueAfter": (*githubv4.String)(nil),
+ "timelineFirst": githubv4.Int(10),
+ "timelineAfter": (*githubv4.String)(nil),
+ }
+
+ for {
+ err := client.Query(context.TODO(), &q, variables)
+ if err != nil {
+ return err
+ }
+
+ for _, event := range q.Repository.Issues.Nodes[0].Timeline.Nodes {
+ fmt.Println(event)
+ }
+
+ if !q.Repository.Issues.Nodes[0].Timeline.PageInfo.HasNextPage {
+ break
+ }
+ variables["timelineAfter"] = githubv4.NewString(q.Repository.Issues.Nodes[0].Timeline.PageInfo.EndCursor)
+ }
+
+ return nil
+}
+
+func (*githubImporter) Import(repo *cache.RepoCache, conf core.Configuration, id string) error {
+ fmt.Println(conf)
+ fmt.Println("IMPORT")
+
+ return nil
+}
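
The query struct in ImportAll is translated into a GraphQL document by shurcooL/graphql: field names are lower-cased, graphql:"..." tags override the generated selection, and the "... on X" tags become inline fragments, one per timeline item type, with the embedded Event fields inlined. Roughly, and modulo the exact formatting the library generates, the struct above corresponds to a query of the following shape, shown here as a Go raw string for reference (fragment bodies abridged).

// Approximate shape of the generated query; names follow the struct tags above.
const approximateQuery = `
query($owner: String!, $name: String!, $issueFirst: Int!, $issueAfter: String,
      $timelineFirst: Int!, $timelineAfter: String) {
  repository(owner: $owner, name: $name) {
    issues(first: $issueFirst, after: $issueAfter) {
      nodes {
        title
        timeline(first: $timelineFirst, after: $timelineAfter) {
          nodes {
            __typename
            ... on IssueComment { author { login avatarUrl } bodyText createdAt }
            ... on LabeledEvent { createdAt actor { login avatarUrl } label { color name } }
            # ... the remaining event fragments follow the same pattern ...
          }
          pageInfo { endCursor hasNextPage }
        }
      }
      pageInfo { endCursor hasNextPage }
    }
  }
}`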
diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS
new file mode 100644
index 00000000..15167cd7
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS
new file mode 100644
index 00000000..1c4577e9
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
new file mode 100644
index 00000000..0f646931
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/LICENSE
@@ -0,0 +1,28 @@
+Copyright 2010 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go
new file mode 100644
index 00000000..3cd3249f
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/clone.go
@@ -0,0 +1,253 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+ "fmt"
+ "log"
+ "reflect"
+ "strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(src Message) Message {
+ in := reflect.ValueOf(src)
+ if in.IsNil() {
+ return src
+ }
+ out := reflect.New(in.Type().Elem())
+ dst := out.Interface().(Message)
+ Merge(dst, src)
+ return dst
+}
+
+// Merger is the interface representing objects that can merge messages of the same type.
+type Merger interface {
+ // Merge merges src into this message.
+ // Required and optional fields that are set in src will be set to that value in dst.
+ // Elements of repeated fields will be appended.
+ //
+ // Merge may panic if called with a different argument type than the receiver.
+ Merge(src Message)
+}
+
+// generatedMerger is the custom merge method that generated protos will have.
+// We must add this method since a generated Merge method will conflict with
+// many existing protos that have a Merge data field already defined.
+type generatedMerger interface {
+ XXX_Merge(src Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+ if m, ok := dst.(Merger); ok {
+ m.Merge(src)
+ return
+ }
+
+ in := reflect.ValueOf(src)
+ out := reflect.ValueOf(dst)
+ if out.IsNil() {
+ panic("proto: nil destination")
+ }
+ if in.Type() != out.Type() {
+ panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
+ }
+ if in.IsNil() {
+ return // Merge from nil src is a noop
+ }
+ if m, ok := dst.(generatedMerger); ok {
+ m.XXX_Merge(src)
+ return
+ }
+ mergeStruct(out.Elem(), in.Elem())
+}
+
+func mergeStruct(out, in reflect.Value) {
+ sprop := GetProperties(in.Type())
+ for i := 0; i < in.NumField(); i++ {
+ f := in.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+ }
+
+ if emIn, err := extendable(in.Addr().Interface()); err == nil {
+ emOut, _ := extendable(out.Addr().Interface())
+ mIn, muIn := emIn.extensionsRead()
+ if mIn != nil {
+ mOut := emOut.extensionsWrite()
+ muIn.Lock()
+ mergeExtension(mOut, mIn)
+ muIn.Unlock()
+ }
+ }
+
+ uf := in.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return
+ }
+ uin := uf.Bytes()
+ if len(uin) > 0 {
+ out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+ }
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+ if in.Type() == protoMessageType {
+ if !in.IsNil() {
+ if out.IsNil() {
+ out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+ } else {
+ Merge(out.Interface().(Message), in.Interface().(Message))
+ }
+ }
+ return
+ }
+ switch in.Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ if !viaPtr && isProto3Zero(in) {
+ return
+ }
+ out.Set(in)
+ case reflect.Interface:
+ // Probably a oneof field; copy non-nil values.
+ if in.IsNil() {
+ return
+ }
+ // Allocate destination if it is not set, or set to a different type.
+ // Otherwise we will merge as normal.
+ if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+ out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+ }
+ mergeAny(out.Elem(), in.Elem(), false, nil)
+ case reflect.Map:
+ if in.Len() == 0 {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(in.Type()))
+ }
+ // For maps with value types of *T or []byte we need to deep copy each value.
+ elemKind := in.Type().Elem().Kind()
+ for _, key := range in.MapKeys() {
+ var val reflect.Value
+ switch elemKind {
+ case reflect.Ptr:
+ val = reflect.New(in.Type().Elem().Elem())
+ mergeAny(val, in.MapIndex(key), false, nil)
+ case reflect.Slice:
+ val = in.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ default:
+ val = in.MapIndex(key)
+ }
+ out.SetMapIndex(key, val)
+ }
+ case reflect.Ptr:
+ if in.IsNil() {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.New(in.Elem().Type()))
+ }
+ mergeAny(out.Elem(), in.Elem(), true, nil)
+ case reflect.Slice:
+ if in.IsNil() {
+ return
+ }
+ if in.Type().Elem().Kind() == reflect.Uint8 {
+ // []byte is a scalar bytes field, not a repeated field.
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value, and should not
+ // be merged.
+ if prop != nil && prop.proto3 && in.Len() == 0 {
+ return
+ }
+
+ // Make a deep copy.
+ // Append to []byte{} instead of []byte(nil) so that we never end up
+ // with a nil result.
+ out.SetBytes(append([]byte{}, in.Bytes()...))
+ return
+ }
+ n := in.Len()
+ if out.IsNil() {
+ out.Set(reflect.MakeSlice(in.Type(), 0, n))
+ }
+ switch in.Type().Elem().Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ out.Set(reflect.AppendSlice(out, in))
+ default:
+ for i := 0; i < n; i++ {
+ x := reflect.Indirect(reflect.New(in.Type().Elem()))
+ mergeAny(x, in.Index(i), false, nil)
+ out.Set(reflect.Append(out, x))
+ }
+ }
+ case reflect.Struct:
+ mergeStruct(out, in)
+ default:
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to copy %v", in)
+ }
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+ for extNum, eIn := range in {
+ eOut := Extension{desc: eIn.desc}
+ if eIn.value != nil {
+ v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+ mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+ eOut.value = v.Interface()
+ }
+ if eIn.enc != nil {
+ eOut.enc = make([]byte, len(eIn.enc))
+ copy(eOut.enc, eIn.enc)
+ }
+
+ out[extNum] = eOut
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
new file mode 100644
index 00000000..d9aa3c42
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/decode.go
@@ -0,0 +1,428 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "io"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero if there is not enough.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+ for shift := uint(0); shift < 64; shift += 7 {
+ if n >= len(buf) {
+ return 0, 0
+ }
+ b := uint64(buf[n])
+ n++
+ x |= (b & 0x7F) << shift
+ if (b & 0x80) == 0 {
+ return x, n
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ return 0, 0
+}
+
+func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
+ i := p.index
+ l := len(p.buf)
+
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= l {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ b := p.buf[i]
+ i++
+ x |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ p.index = i
+ return
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ err = errOverflow
+ return
+}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+ i := p.index
+ buf := p.buf
+
+ if i >= len(buf) {
+ return 0, io.ErrUnexpectedEOF
+ } else if buf[i] < 0x80 {
+ p.index++
+ return uint64(buf[i]), nil
+ } else if len(buf)-i < 10 {
+ return p.decodeVarintSlow()
+ }
+
+ var b uint64
+ // we already checked the first byte
+ x = uint64(buf[i]) - 0x80
+ i++
+
+ b = uint64(buf[i])
+ i++
+ x += b << 7
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 7
+
+ b = uint64(buf[i])
+ i++
+ x += b << 14
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 14
+
+ b = uint64(buf[i])
+ i++
+ x += b << 21
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 21
+
+ b = uint64(buf[i])
+ i++
+ x += b << 28
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 28
+
+ b = uint64(buf[i])
+ i++
+ x += b << 35
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 35
+
+ b = uint64(buf[i])
+ i++
+ x += b << 42
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 42
+
+ b = uint64(buf[i])
+ i++
+ x += b << 49
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 49
+
+ b = uint64(buf[i])
+ i++
+ x += b << 56
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 56
+
+ b = uint64(buf[i])
+ i++
+ x += b << 63
+ if b&0x80 == 0 {
+ goto done
+ }
+ // x -= 0x80 << 63 // Always zero.
+
+ return 0, errOverflow
+
+done:
+ p.index = i
+ return x, nil
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 8
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-8])
+ x |= uint64(p.buf[i-7]) << 8
+ x |= uint64(p.buf[i-6]) << 16
+ x |= uint64(p.buf[i-5]) << 24
+ x |= uint64(p.buf[i-4]) << 32
+ x |= uint64(p.buf[i-3]) << 40
+ x |= uint64(p.buf[i-2]) << 48
+ x |= uint64(p.buf[i-1]) << 56
+ return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 4
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-4])
+ x |= uint64(p.buf[i-3]) << 8
+ x |= uint64(p.buf[i-2]) << 16
+ x |= uint64(p.buf[i-1]) << 24
+ return
+}
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+ return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+ return
+}
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+ n, err := p.DecodeVarint()
+ if err != nil {
+ return nil, err
+ }
+
+ nb := int(n)
+ if nb < 0 {
+ return nil, fmt.Errorf("proto: bad byte length %d", nb)
+ }
+ end := p.index + nb
+ if end < p.index || end > len(p.buf) {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ if !alloc {
+ // todo: check if can get more uses of alloc=false
+ buf = p.buf[p.index:end]
+ p.index += nb
+ return
+ }
+
+ buf = make([]byte, nb)
+ copy(buf, p.buf[p.index:])
+ p.index += nb
+ return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+ buf, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return
+ }
+ return string(buf), nil
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves. The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+// Unmarshal implementations should not clear the receiver.
+// Any unmarshaled data should be merged into the receiver.
+// Callers of Unmarshal that do not want to retain existing data
+// should Reset the receiver before calling Unmarshal.
+type Unmarshaler interface {
+ Unmarshal([]byte) error
+}
+
+// newUnmarshaler is the interface representing objects that can
+// unmarshal themselves. The semantics are identical to Unmarshaler.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newUnmarshaler interface {
+ XXX_Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+ pb.Reset()
+ if u, ok := pb.(newUnmarshaler); ok {
+ return u.XXX_Unmarshal(buf)
+ }
+ if u, ok := pb.(Unmarshaler); ok {
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+ if u, ok := pb.(newUnmarshaler); ok {
+ return u.XXX_Unmarshal(buf)
+ }
+ if u, ok := pb.(Unmarshaler); ok {
+	// NOTE: The history of proto has unfortunately been inconsistent about
+ // whether Unmarshaler should or should not implicitly clear itself.
+ // Some implementations do, most do not.
+ // Thus, calling this here may or may not do what people want.
+ //
+ // See https://github.com/golang/protobuf/issues/424
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
+}
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+ enc, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+// StartGroup tag is already consumed. This function consumes
+// EndGroup tag.
+func (p *Buffer) DecodeGroup(pb Message) error {
+ b := p.buf[p.index:]
+ x, y := findEndGroup(b)
+ if x < 0 {
+ return io.ErrUnexpectedEOF
+ }
+ err := Unmarshal(b[:x], pb)
+ p.index += y
+ return err
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb. If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+//
+// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
+func (p *Buffer) Unmarshal(pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(newUnmarshaler); ok {
+ err := u.XXX_Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
+ if u, ok := pb.(Unmarshaler); ok {
+	// NOTE: The history of proto has unfortunately been inconsistent about
+ // whether Unmarshaler should or should not implicitly clear itself.
+ // Some implementations do, most do not.
+ // Thus, calling this here may or may not do what people want.
+ //
+ // See https://github.com/golang/protobuf/issues/424
+ err := u.Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
+
+ // Slow workaround for messages that aren't Unmarshalers.
+ // This includes some hand-coded .pb.go files and
+ // bootstrap protos.
+ // TODO: fix all of those and then add Unmarshal to
+ // the Message interface. Then:
+ // The cast above and code below can be deleted.
+ // The old unmarshaler can be deleted.
+ // Clients can call Unmarshal directly (can already do that, actually).
+ var info InternalMessageInfo
+ err := info.Unmarshal(pb, p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+}
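
The zigzag helpers above undo the sint32/sint64 wire mapping (0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ...). A small worked sketch using the Buffer decoder; proto.NewBuffer comes from lib.go, vendored in the same commit.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// 0x03 is the varint for 3, and zigzag-decoding 3 yields -2,
	// per the mapping 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ...
	buf := proto.NewBuffer([]byte{0x03})
	x, err := buf.DecodeZigzag64()
	if err != nil {
		panic(err)
	}
	fmt.Println(int64(x)) // -2
}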
diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go
new file mode 100644
index 00000000..dea2617c
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/discard.go
@@ -0,0 +1,350 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2017 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "sync/atomic"
+)
+
+type generatedDiscarder interface {
+ XXX_DiscardUnknown()
+}
+
+// DiscardUnknown recursively discards all unknown fields from this message
+// and all embedded messages.
+//
+// When unmarshaling a message with unrecognized fields, the tags and values
+// of such fields are preserved in the Message. This allows a later call to
+// marshal to be able to produce a message that continues to have those
+// unrecognized fields. To avoid this, DiscardUnknown is used to
+// explicitly clear the unknown fields after unmarshaling.
+//
+// For proto2 messages, the unknown fields of message extensions are only
+// discarded from messages that have been accessed via GetExtension.
+func DiscardUnknown(m Message) {
+ if m, ok := m.(generatedDiscarder); ok {
+ m.XXX_DiscardUnknown()
+ return
+ }
+	// TODO: Dynamically populate an InternalMessageInfo for legacy messages,
+ // but the master branch has no implementation for InternalMessageInfo,
+ // so it would be more work to replicate that approach.
+ discardLegacy(m)
+}
+
+// DiscardUnknown recursively discards all unknown fields.
+func (a *InternalMessageInfo) DiscardUnknown(m Message) {
+ di := atomicLoadDiscardInfo(&a.discard)
+ if di == nil {
+ di = getDiscardInfo(reflect.TypeOf(m).Elem())
+ atomicStoreDiscardInfo(&a.discard, di)
+ }
+ di.discard(toPointer(&m))
+}
+
+type discardInfo struct {
+ typ reflect.Type
+
+ initialized int32 // 0: only typ is valid, 1: everything is valid
+ lock sync.Mutex
+
+ fields []discardFieldInfo
+ unrecognized field
+}
+
+type discardFieldInfo struct {
+ field field // Offset of field, guaranteed to be valid
+ discard func(src pointer)
+}
+
+var (
+ discardInfoMap = map[reflect.Type]*discardInfo{}
+ discardInfoLock sync.Mutex
+)
+
+func getDiscardInfo(t reflect.Type) *discardInfo {
+ discardInfoLock.Lock()
+ defer discardInfoLock.Unlock()
+ di := discardInfoMap[t]
+ if di == nil {
+ di = &discardInfo{typ: t}
+ discardInfoMap[t] = di
+ }
+ return di
+}
+
+func (di *discardInfo) discard(src pointer) {
+ if src.isNil() {
+ return // Nothing to do.
+ }
+
+ if atomic.LoadInt32(&di.initialized) == 0 {
+ di.computeDiscardInfo()
+ }
+
+ for _, fi := range di.fields {
+ sfp := src.offset(fi.field)
+ fi.discard(sfp)
+ }
+
+ // For proto2 messages, only discard unknown fields in message extensions
+ // that have been accessed via GetExtension.
+ if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
+ // Ignore lock since DiscardUnknown is not concurrency safe.
+ emm, _ := em.extensionsRead()
+ for _, mx := range emm {
+ if m, ok := mx.value.(Message); ok {
+ DiscardUnknown(m)
+ }
+ }
+ }
+
+ if di.unrecognized.IsValid() {
+ *src.offset(di.unrecognized).toBytes() = nil
+ }
+}
+
+func (di *discardInfo) computeDiscardInfo() {
+ di.lock.Lock()
+ defer di.lock.Unlock()
+ if di.initialized != 0 {
+ return
+ }
+ t := di.typ
+ n := t.NumField()
+
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+
+ dfi := discardFieldInfo{field: toField(&f)}
+ tf := f.Type
+
+ // Unwrap tf to get its most basic type.
+ var isPointer, isSlice bool
+ if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+ isSlice = true
+ tf = tf.Elem()
+ }
+ if tf.Kind() == reflect.Ptr {
+ isPointer = true
+ tf = tf.Elem()
+ }
+ if isPointer && isSlice && tf.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
+ }
+
+ switch tf.Kind() {
+ case reflect.Struct:
+ switch {
+ case !isPointer:
+ panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
+ case isSlice: // E.g., []*pb.T
+ di := getDiscardInfo(tf)
+ dfi.discard = func(src pointer) {
+ sps := src.getPointerSlice()
+ for _, sp := range sps {
+ if !sp.isNil() {
+ di.discard(sp)
+ }
+ }
+ }
+ default: // E.g., *pb.T
+ di := getDiscardInfo(tf)
+ dfi.discard = func(src pointer) {
+ sp := src.getPointer()
+ if !sp.isNil() {
+ di.discard(sp)
+ }
+ }
+ }
+ case reflect.Map:
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
+ default: // E.g., map[K]V
+ if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
+ dfi.discard = func(src pointer) {
+ sm := src.asPointerTo(tf).Elem()
+ if sm.Len() == 0 {
+ return
+ }
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ DiscardUnknown(val.Interface().(Message))
+ }
+ }
+ } else {
+ dfi.discard = func(pointer) {} // Noop
+ }
+ }
+ case reflect.Interface:
+ // Must be oneof field.
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
+ default: // E.g., interface{}
+ // TODO: Make this faster?
+ dfi.discard = func(src pointer) {
+ su := src.asPointerTo(tf).Elem()
+ if !su.IsNil() {
+ sv := su.Elem().Elem().Field(0)
+ if sv.Kind() == reflect.Ptr && sv.IsNil() {
+ return
+ }
+ switch sv.Type().Kind() {
+ case reflect.Ptr: // Proto struct (e.g., *T)
+ DiscardUnknown(sv.Interface().(Message))
+ }
+ }
+ }
+ }
+ default:
+ continue
+ }
+ di.fields = append(di.fields, dfi)
+ }
+
+ di.unrecognized = invalidField
+ if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+ if f.Type != reflect.TypeOf([]byte{}) {
+ panic("expected XXX_unrecognized to be of type []byte")
+ }
+ di.unrecognized = toField(&f)
+ }
+
+ atomic.StoreInt32(&di.initialized, 1)
+}
+
+func discardLegacy(m Message) {
+ v := reflect.ValueOf(m)
+ if v.Kind() != reflect.Ptr || v.IsNil() {
+ return
+ }
+ v = v.Elem()
+ if v.Kind() != reflect.Struct {
+ return
+ }
+ t := v.Type()
+
+ for i := 0; i < v.NumField(); i++ {
+ f := t.Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ vf := v.Field(i)
+ tf := f.Type
+
+ // Unwrap tf to get its most basic type.
+ var isPointer, isSlice bool
+ if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+ isSlice = true
+ tf = tf.Elem()
+ }
+ if tf.Kind() == reflect.Ptr {
+ isPointer = true
+ tf = tf.Elem()
+ }
+ if isPointer && isSlice && tf.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
+ }
+
+ switch tf.Kind() {
+ case reflect.Struct:
+ switch {
+ case !isPointer:
+ panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
+ case isSlice: // E.g., []*pb.T
+ for j := 0; j < vf.Len(); j++ {
+ discardLegacy(vf.Index(j).Interface().(Message))
+ }
+ default: // E.g., *pb.T
+ discardLegacy(vf.Interface().(Message))
+ }
+ case reflect.Map:
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
+ default: // E.g., map[K]V
+ tv := vf.Type().Elem()
+ if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
+ for _, key := range vf.MapKeys() {
+ val := vf.MapIndex(key)
+ discardLegacy(val.Interface().(Message))
+ }
+ }
+ }
+ case reflect.Interface:
+ // Must be oneof field.
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
+ default: // E.g., test_proto.isCommunique_Union interface
+ if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
+ vf = vf.Elem() // E.g., *test_proto.Communique_Msg
+ if !vf.IsNil() {
+ vf = vf.Elem() // E.g., test_proto.Communique_Msg
+ vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
+ if vf.Kind() == reflect.Ptr {
+ discardLegacy(vf.Interface().(Message))
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
+ if vf.Type() != reflect.TypeOf([]byte{}) {
+ panic("expected XXX_unrecognized to be of type []byte")
+ }
+ vf.Set(reflect.ValueOf([]byte(nil)))
+ }
+
+ // For proto2 messages, only discard unknown fields in message extensions
+ // that have been accessed via GetExtension.
+ if em, err := extendable(m); err == nil {
+ // Ignore lock since discardLegacy is not concurrency safe.
+ emm, _ := em.extensionsRead()
+ for _, mx := range emm {
+ if m, ok := mx.value.(Message); ok {
+ discardLegacy(m)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
new file mode 100644
index 00000000..3abfed2c
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/encode.go
@@ -0,0 +1,203 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "errors"
+ "reflect"
+)
+
+var (
+ // errRepeatedHasNil is the error returned if Marshal is called with
+ // a struct with a repeated field containing a nil element.
+ errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+ // errOneofHasNil is the error returned if Marshal is called with
+ // a struct with a oneof field containing a nil element.
+ errOneofHasNil = errors.New("proto: oneof field has nil value")
+
+ // ErrNil is the error returned if Marshal is called with nil.
+ ErrNil = errors.New("proto: Marshal called with nil")
+
+ // ErrTooLarge is the error returned if Marshal is called with a
+ // message that encodes to >2GB.
+ ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+ var buf [maxVarintBytes]byte
+ var n int
+ for n = 0; x > 127; n++ {
+ buf[n] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ buf[n] = uint8(x)
+ n++
+ return buf[0:n]
+}
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+ for x >= 1<<7 {
+ p.buf = append(p.buf, uint8(x&0x7f|0x80))
+ x >>= 7
+ }
+ p.buf = append(p.buf, uint8(x))
+ return nil
+}
+
+// SizeVarint returns the varint encoding size of an integer.
+func SizeVarint(x uint64) int {
+ switch {
+ case x < 1<<7:
+ return 1
+ case x < 1<<14:
+ return 2
+ case x < 1<<21:
+ return 3
+ case x < 1<<28:
+ return 4
+ case x < 1<<35:
+ return 5
+ case x < 1<<42:
+ return 6
+ case x < 1<<49:
+ return 7
+ case x < 1<<56:
+ return 8
+ case x < 1<<63:
+ return 9
+ }
+ return 10
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24),
+ uint8(x>>32),
+ uint8(x>>40),
+ uint8(x>>48),
+ uint8(x>>56))
+ return nil
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24))
+ return nil
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+ p.EncodeVarint(uint64(len(b)))
+ p.buf = append(p.buf, b...)
+ return nil
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+ p.EncodeVarint(uint64(len(s)))
+ p.buf = append(p.buf, s...)
+ return nil
+}
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+ Marshal() ([]byte, error)
+}
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+ siz := Size(pb)
+ p.EncodeVarint(uint64(siz))
+ return p.Marshal(pb)
+}
+
+// All protocol buffer fields are nillable, but be careful.
+func isNil(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return v.IsNil()
+ }
+ return false
+}
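
EncodeVarint and SizeVarint above, together with DecodeVarint from decode.go, round-trip as in the following sketch; the byte values are worked out in the comment.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// 300 = 0b1_0010_1100: the low 7 bits (0101100 = 0x2C) are emitted first
	// with the continuation bit set (0xAC), then the remaining bits (0x02).
	enc := proto.EncodeVarint(300)
	fmt.Printf("% x\n", enc) // ac 02

	fmt.Println(proto.SizeVarint(300)) // 2

	x, n := proto.DecodeVarint(enc)
	fmt.Println(x, n) // 300 2
}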
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
new file mode 100644
index 00000000..d4db5a1c
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/equal.go
@@ -0,0 +1,300 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+ "bytes"
+ "log"
+ "reflect"
+ "strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+ - Two messages are equal iff they are the same type,
+ corresponding fields are equal, unknown field sets
+ are equal, and extensions sets are equal.
+ - Two set scalar fields are equal iff their values are equal.
+ If the fields are of a floating-point type, remember that
+ NaN != x for all x, including NaN. If the message is defined
+ in a proto3 .proto file, fields are not "set"; specifically,
+ zero length proto3 "bytes" fields are equal (nil == {}).
+ - Two repeated fields are equal iff their lengths are the same,
+ and their corresponding elements are equal. Note a "bytes" field,
+ although represented by []byte, is not a repeated field and the
+ rule for the scalar fields described above applies.
+ - Two unset fields are equal.
+ - Two unknown field sets are equal if their current
+ encoded state is equal.
+ - Two extension sets are equal iff they have corresponding
+ elements that are pairwise equal.
+ - Two map fields are equal iff their lengths are the same,
+ and they contain the same set of elements. Zero-length map
+ fields are equal.
+  - Every other combination of things is not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
+func Equal(a, b Message) bool {
+ if a == nil || b == nil {
+ return a == b
+ }
+ v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ if v1.Kind() == reflect.Ptr {
+ if v1.IsNil() {
+ return v2.IsNil()
+ }
+ if v2.IsNil() {
+ return false
+ }
+ v1, v2 = v1.Elem(), v2.Elem()
+ }
+ if v1.Kind() != reflect.Struct {
+ return false
+ }
+ return equalStruct(v1, v2)
+}
+
+// v1 and v2 are known to have the same type.
+func equalStruct(v1, v2 reflect.Value) bool {
+ sprop := GetProperties(v1.Type())
+ for i := 0; i < v1.NumField(); i++ {
+ f := v1.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ f1, f2 := v1.Field(i), v2.Field(i)
+ if f.Type.Kind() == reflect.Ptr {
+ if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+ // both unset
+ continue
+ } else if n1 != n2 {
+ // set/unset mismatch
+ return false
+ }
+ f1, f2 = f1.Elem(), f2.Elem()
+ }
+ if !equalAny(f1, f2, sprop.Prop[i]) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_InternalExtensions")
+ if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_extensions")
+ if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+ return false
+ }
+ }
+
+ uf := v1.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return true
+ }
+
+ u1 := uf.Bytes()
+ u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+ return bytes.Equal(u1, u2)
+}
+
+// v1 and v2 are known to have the same type.
+// prop may be nil.
+func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
+ if v1.Type() == protoMessageType {
+ m1, _ := v1.Interface().(Message)
+ m2, _ := v2.Interface().(Message)
+ return Equal(m1, m2)
+ }
+ switch v1.Kind() {
+ case reflect.Bool:
+ return v1.Bool() == v2.Bool()
+ case reflect.Float32, reflect.Float64:
+ return v1.Float() == v2.Float()
+ case reflect.Int32, reflect.Int64:
+ return v1.Int() == v2.Int()
+ case reflect.Interface:
+ // Probably a oneof field; compare the inner values.
+ n1, n2 := v1.IsNil(), v2.IsNil()
+ if n1 || n2 {
+ return n1 == n2
+ }
+ e1, e2 := v1.Elem(), v2.Elem()
+ if e1.Type() != e2.Type() {
+ return false
+ }
+ return equalAny(e1, e2, nil)
+ case reflect.Map:
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for _, key := range v1.MapKeys() {
+ val2 := v2.MapIndex(key)
+ if !val2.IsValid() {
+ // This key was not found in the second map.
+ return false
+ }
+ if !equalAny(v1.MapIndex(key), val2, nil) {
+ return false
+ }
+ }
+ return true
+ case reflect.Ptr:
+ // Maps may have nil values in them, so check for nil.
+ if v1.IsNil() && v2.IsNil() {
+ return true
+ }
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return equalAny(v1.Elem(), v2.Elem(), prop)
+ case reflect.Slice:
+ if v1.Type().Elem().Kind() == reflect.Uint8 {
+ // short circuit: []byte
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value.
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
+ return true
+ }
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+ }
+
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for i := 0; i < v1.Len(); i++ {
+ if !equalAny(v1.Index(i), v2.Index(i), prop) {
+ return false
+ }
+ }
+ return true
+ case reflect.String:
+ return v1.Interface().(string) == v2.Interface().(string)
+ case reflect.Struct:
+ return equalStruct(v1, v2)
+ case reflect.Uint32, reflect.Uint64:
+ return v1.Uint() == v2.Uint()
+ }
+
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to compare %v", v1)
+ return false
+}
+
+// base is the struct type that the extensions are based on.
+// x1 and x2 are InternalExtensions.
+func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
+ em1, _ := x1.extensionsRead()
+ em2, _ := x2.extensionsRead()
+ return equalExtMap(base, em1, em2)
+}
+
+func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
+ if len(em1) != len(em2) {
+ return false
+ }
+
+ for extNum, e1 := range em1 {
+ e2, ok := em2[extNum]
+ if !ok {
+ return false
+ }
+
+ m1, m2 := e1.value, e2.value
+
+ if m1 == nil && m2 == nil {
+ // Both have only encoded form.
+ if bytes.Equal(e1.enc, e2.enc) {
+ continue
+ }
+ // The bytes are different, but the extensions might still be
+ // equal. We need to decode them to compare.
+ }
+
+ if m1 != nil && m2 != nil {
+ // Both are unencoded.
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ continue
+ }
+
+ // At least one is encoded. To do a semantically correct comparison
+ // we need to unmarshal them first.
+ var desc *ExtensionDesc
+ if m := extensionMaps[base]; m != nil {
+ desc = m[extNum]
+ }
+ if desc == nil {
+ // If both have only encoded form and the bytes are the same,
+ // it is handled above. We get here when the bytes are different.
+ // We don't know how to decode it, so just compare them as byte
+ // slices.
+ log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+ return false
+ }
+ var err error
+ if m1 == nil {
+ m1, err = decodeExtension(e1.enc, desc)
+ }
+ if m2 == nil && err == nil {
+ m2, err = decodeExtension(e2.enc, desc)
+ }
+ if err != nil {
+ // The encoded form is invalid.
+ log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+ return false
+ }
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
new file mode 100644
index 00000000..816a3b9d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -0,0 +1,543 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message and no default value is declared for it.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+ Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer generated by the current
+// proto compiler that may be extended.
+type extendableProto interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ extensionsWrite() map[int32]Extension
+ extensionsRead() (map[int32]Extension, sync.Locker)
+}
+
+// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
+// version of the proto compiler that may be extended.
+type extendableProtoV1 interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ ExtensionMap() map[int32]Extension
+}
+
+// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
+type extensionAdapter struct {
+ extendableProtoV1
+}
+
+func (e extensionAdapter) extensionsWrite() map[int32]Extension {
+ return e.ExtensionMap()
+}
+
+func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
+ return e.ExtensionMap(), notLocker{}
+}
+
+// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
+type notLocker struct{}
+
+func (n notLocker) Lock() {}
+func (n notLocker) Unlock() {}
+
+// extendable returns the extendableProto interface for the given generated proto message.
+// If the proto message has the old extension format, it returns a wrapper that implements
+// the extendableProto interface.
+func extendable(p interface{}) (extendableProto, error) {
+ switch p := p.(type) {
+ case extendableProto:
+ if isNilPtr(p) {
+ return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+ }
+ return p, nil
+ case extendableProtoV1:
+ if isNilPtr(p) {
+ return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+ }
+ return extensionAdapter{p}, nil
+ }
+ // Don't allocate a specific error containing %T:
+ // this is the hot path for Clone and MarshalText.
+ return nil, errNotExtendable
+}
+
+var errNotExtendable = errors.New("proto: not an extendable proto.Message")
+
+func isNilPtr(x interface{}) bool {
+ v := reflect.ValueOf(x)
+ return v.Kind() == reflect.Ptr && v.IsNil()
+}
+
+// XXX_InternalExtensions is an internal representation of proto extensions.
+//
+// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
+// thus gaining the unexported extensionsWrite and extensionsRead methods, which can be called only from the proto package.
+//
+// The methods of XXX_InternalExtensions are not concurrency safe in general,
+// but calls to logically read-only methods such as has and get may be executed concurrently.
+type XXX_InternalExtensions struct {
+ // The struct must be indirect so that if a user inadvertently copies a
+ // generated message and its embedded XXX_InternalExtensions, they
+ // avoid the mayhem of a copied mutex.
+ //
+ // The mutex serializes all logically read-only operations to p.extensionMap.
+ // It is up to the client to ensure that write operations to p.extensionMap are
+ // mutually exclusive with other accesses.
+ p *struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ }
+}
+
+// extensionsWrite returns the extension map, creating it on first use.
+func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
+ if e.p == nil {
+ e.p = new(struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ })
+ e.p.extensionMap = make(map[int32]Extension)
+ }
+ return e.p.extensionMap
+}
+
+// extensionsRead returns the extensions map for read-only use. It may be nil.
+// The caller must hold the returned mutex's lock when accessing elements within the map.
+func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
+ if e.p == nil {
+ return nil, nil
+ }
+ return e.p.extensionMap, &e.p.mu
+}
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+ ExtendedType Message // nil pointer to the type that is being extended
+ ExtensionType interface{} // nil pointer to the extension type
+ Field int32 // field number
+ Name string // fully-qualified name of extension, for text formatting
+ Tag string // protobuf tag style
+ Filename string // name of the file in which the extension is defined
+}
+
+func (ed *ExtensionDesc) repeated() bool {
+ t := reflect.TypeOf(ed.ExtensionType)
+ return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+ // When an extension is stored in a message using SetExtension
+ // only desc and value are set. When the message is marshaled
+ // enc will be set to the encoded form of the message.
+ //
+ // When a message is unmarshaled and contains extensions, each
+ // extension will have only enc set. When such an extension is
+ // accessed using GetExtension (or GetExtensions) desc and value
+ // will be set.
+ desc *ExtensionDesc
+ value interface{}
+ enc []byte
+}
+
+// SetRawExtension is for testing only.
+func SetRawExtension(base Message, id int32, b []byte) {
+ epb, err := extendable(base)
+ if err != nil {
+ return
+ }
+ extmap := epb.extensionsWrite()
+ extmap[id] = Extension{enc: b}
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extendableProto, field int32) bool {
+ for _, er := range pb.ExtensionRangeArray() {
+ if er.Start <= field && field <= er.End {
+ return true
+ }
+ }
+ return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+ var pbi interface{} = pb
+ // Check the extended type.
+ if ea, ok := pbi.(extensionAdapter); ok {
+ pbi = ea.extendableProtoV1
+ }
+ if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
+ return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
+ }
+ // Check the range.
+ if !isExtensionField(pb, extension.Field) {
+ return errors.New("proto: bad extension number; not in declared ranges")
+ }
+ return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+ base reflect.Type
+ field int32
+}
+
+var extProp = struct {
+ sync.RWMutex
+ m map[extPropKey]*Properties
+}{
+ m: make(map[extPropKey]*Properties),
+}
+
+func extensionProperties(ed *ExtensionDesc) *Properties {
+ key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+ extProp.RLock()
+ if prop, ok := extProp.m[key]; ok {
+ extProp.RUnlock()
+ return prop
+ }
+ extProp.RUnlock()
+
+ extProp.Lock()
+ defer extProp.Unlock()
+ // Check again.
+ if prop, ok := extProp.m[key]; ok {
+ return prop
+ }
+
+ prop := new(Properties)
+ prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+ extProp.m[key] = prop
+ return prop
+}
+
+// HasExtension returns whether the given extension is present in pb.
+func HasExtension(pb Message, extension *ExtensionDesc) bool {
+ // TODO: Check types, field numbers, etc.?
+ epb, err := extendable(pb)
+ if err != nil {
+ return false
+ }
+ extmap, mu := epb.extensionsRead()
+ if extmap == nil {
+ return false
+ }
+ mu.Lock()
+ _, ok := extmap[extension.Field]
+ mu.Unlock()
+ return ok
+}
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb Message, extension *ExtensionDesc) {
+ epb, err := extendable(pb)
+ if err != nil {
+ return
+ }
+ // TODO: Check types, field numbers, etc.?
+ extmap := epb.extensionsWrite()
+ delete(extmap, extension.Field)
+}
+
+// GetExtension retrieves a proto2 extended field from pb.
+//
+// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
+// then GetExtension parses the encoded field and returns a Go value of the specified type.
+// If the field is not present, then the default value is returned (if one is specified),
+// otherwise ErrMissingExtension is reported.
+//
+// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
+// then GetExtension returns the raw encoded bytes of the field extension.
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
+ epb, err := extendable(pb)
+ if err != nil {
+ return nil, err
+ }
+
+ if extension.ExtendedType != nil {
+ // can only check type if this is a complete descriptor
+ if err := checkExtensionTypes(epb, extension); err != nil {
+ return nil, err
+ }
+ }
+
+ emap, mu := epb.extensionsRead()
+ if emap == nil {
+ return defaultExtensionValue(extension)
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ e, ok := emap[extension.Field]
+ if !ok {
+ // defaultExtensionValue returns the default value or
+ // ErrMissingExtension if there is no default.
+ return defaultExtensionValue(extension)
+ }
+
+ if e.value != nil {
+ // Already decoded. Check the descriptor, though.
+ if e.desc != extension {
+ // This shouldn't happen. If it does, it means that
+ // GetExtension was called twice with two different
+ // descriptors with the same field number.
+ return nil, errors.New("proto: descriptor conflict")
+ }
+ return e.value, nil
+ }
+
+ if extension.ExtensionType == nil {
+ // incomplete descriptor
+ return e.enc, nil
+ }
+
+ v, err := decodeExtension(e.enc, extension)
+ if err != nil {
+ return nil, err
+ }
+
+ // Remember the decoded version and drop the encoded version.
+ // That way it is safe to mutate what we return.
+ e.value = v
+ e.desc = extension
+ e.enc = nil
+ emap[extension.Field] = e
+ return e.value, nil
+}
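A hedged sketch of the read path, assuming an extension descriptor pb.E_Foo of Go type
*string registered against the message type of msg (the descriptor name is invented for
illustration):

    v, err := proto.GetExtension(msg, pb.E_Foo)
    switch {
    case err == proto.ErrMissingExtension:
        // extension absent and no default declared
    case err != nil:
        // wrong extended type, bad encoding, etc.
    default:
        fmt.Println(*v.(*string)) // decoded value; it is cached on msg for later calls
    }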
+
+// defaultExtensionValue returns the default value for extension.
+// If no default for an extension is defined ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+ if extension.ExtensionType == nil {
+ // incomplete descriptor, so no default
+ return nil, ErrMissingExtension
+ }
+
+ t := reflect.TypeOf(extension.ExtensionType)
+ props := extensionProperties(extension)
+
+ sf, _, err := fieldDefault(t, props)
+ if err != nil {
+ return nil, err
+ }
+
+ if sf == nil || sf.value == nil {
+ // There is no default value.
+ return nil, ErrMissingExtension
+ }
+
+ if t.Kind() != reflect.Ptr {
+ // We do not need to return a Ptr, we can directly return sf.value.
+ return sf.value, nil
+ }
+
+ // We need to return an interface{} that is a pointer to sf.value.
+ value := reflect.New(t).Elem()
+ value.Set(reflect.New(value.Type().Elem()))
+ if sf.kind == reflect.Int32 {
+ // We may have an int32 or an enum, but the underlying data is int32.
+ // Since we can't set an int32 into a non int32 reflect.value directly
+ // set it as a int32.
+ value.Elem().SetInt(int64(sf.value.(int32)))
+ } else {
+ value.Elem().Set(reflect.ValueOf(sf.value))
+ }
+ return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+ t := reflect.TypeOf(extension.ExtensionType)
+ unmarshal := typeUnmarshaler(t, extension.Tag)
+
+ // t is a pointer to a struct, pointer to basic type or a slice.
+ // Allocate space to store the pointer/slice.
+ value := reflect.New(t).Elem()
+
+ var err error
+ for {
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ wire := int(x) & 7
+
+ b, err = unmarshal(b, valToPointer(value.Addr()), wire)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(b) == 0 {
+ break
+ }
+ }
+ return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+ epb, err := extendable(pb)
+ if err != nil {
+ return nil, err
+ }
+ extensions = make([]interface{}, len(es))
+ for i, e := range es {
+ extensions[i], err = GetExtension(epb, e)
+ if err == ErrMissingExtension {
+ err = nil
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
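GetExtensions applies the same contract per element; a short sketch with two hypothetical
descriptors:

    vals, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{pb.E_Foo, pb.E_Bar})
    if err != nil {
        // a real type or decoding error; plain ErrMissingExtension is swallowed above
    }
    // vals[i] is nil where the corresponding extension is absent and has no default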
+
+// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
+// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
+// just the Field field, which defines the extension's field number.
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
+ epb, err := extendable(pb)
+ if err != nil {
+ return nil, err
+ }
+ registeredExtensions := RegisteredExtensions(pb)
+
+ emap, mu := epb.extensionsRead()
+ if emap == nil {
+ return nil, nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ extensions := make([]*ExtensionDesc, 0, len(emap))
+ for extid, e := range emap {
+ desc := e.desc
+ if desc == nil {
+ desc = registeredExtensions[extid]
+ if desc == nil {
+ desc = &ExtensionDesc{Field: extid}
+ }
+ }
+
+ extensions = append(extensions, desc)
+ }
+ return extensions, nil
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
+func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
+ epb, err := extendable(pb)
+ if err != nil {
+ return err
+ }
+ if err := checkExtensionTypes(epb, extension); err != nil {
+ return err
+ }
+ typ := reflect.TypeOf(extension.ExtensionType)
+ if typ != reflect.TypeOf(value) {
+ return errors.New("proto: bad extension value type")
+ }
+ // nil extension values need to be caught early, because the
+ // encoder can't distinguish an ErrNil due to a nil extension
+ // from an ErrNil due to a missing field. Extensions are
+ // always optional, so the encoder would just swallow the error
+ // and drop all the extensions from the encoded message.
+ if reflect.ValueOf(value).IsNil() {
+ return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+ }
+
+ extmap := epb.extensionsWrite()
+ extmap[extension.Field] = Extension{desc: extension, value: value}
+ return nil
+}
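And the write path, under the same assumed pb.E_Foo descriptor; the value's dynamic type
must match ExtensionDesc.ExtensionType exactly, so the pointer helpers from lib.go are the
usual way to build it:

    if err := proto.SetExtension(msg, pb.E_Foo, proto.String("bar")); err != nil {
        log.Fatal(err)
    }
    fmt.Println(proto.HasExtension(msg, pb.E_Foo)) // true
    proto.ClearExtension(msg, pb.E_Foo)
    fmt.Println(proto.HasExtension(msg, pb.E_Foo)) // false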
+
+// ClearAllExtensions clears all extensions from pb.
+func ClearAllExtensions(pb Message) {
+ epb, err := extendable(pb)
+ if err != nil {
+ return
+ }
+ m := epb.extensionsWrite()
+ for k := range m {
+ delete(m, k)
+ }
+}
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+func RegisterExtension(desc *ExtensionDesc) {
+ st := reflect.TypeOf(desc.ExtendedType).Elem()
+ m := extensionMaps[st]
+ if m == nil {
+ m = make(map[int32]*ExtensionDesc)
+ extensionMaps[st] = m
+ }
+ if _, ok := m[desc.Field]; ok {
+ panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+ }
+ m[desc.Field] = desc
+}
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+ return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
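For orientation, generated code for an "extend" declaration typically builds an
ExtensionDesc and registers it from an init function inside the generated package; the
field number, names, and tag below are invented for illustration, not taken from any real
.proto file:

    var E_Foo = &proto.ExtensionDesc{
        ExtendedType:  (*Test)(nil),   // nil pointer to the extended message type
        ExtensionType: (*string)(nil), // nil pointer to the extension's Go type
        Field:         100,
        Name:          "example.foo",
        Tag:           "bytes,100,opt,name=foo",
        Filename:      "test.proto",
    }

    func init() {
        proto.RegisterExtension(E_Foo)
    }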
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
new file mode 100644
index 00000000..75565cc6
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/lib.go
@@ -0,0 +1,979 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers. It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+ - Names are turned from snake_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed by the enclosing message's name, or by the
+ enum's type name if it is a top-level enum. Enum types have a String
+ method, and an Enum method to assist in message construction.
+ - Nested messages, groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any), followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Oneof field sets are given a single field in their message,
+ with distinguished wrapper types for each possible field value.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+ - Non-repeated fields of non-message type are values instead of pointers.
+ - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+ package example;
+
+ enum FOO { X = 17; }
+
+ message Test {
+ required string label = 1;
+ optional int32 type = 2 [default=77];
+ repeated int64 reps = 3;
+ optional group OptionalGroup = 4 {
+ required string RequiredField = 5;
+ }
+ oneof union {
+ int32 number = 6;
+ string name = 7;
+ }
+ }
+
+The resulting file, test.pb.go, is:
+
+ package example
+
+ import proto "github.com/golang/protobuf/proto"
+ import math "math"
+
+ type FOO int32
+ const (
+ FOO_X FOO = 17
+ )
+ var FOO_name = map[int32]string{
+ 17: "X",
+ }
+ var FOO_value = map[string]int32{
+ "X": 17,
+ }
+
+ func (x FOO) Enum() *FOO {
+ p := new(FOO)
+ *p = x
+ return p
+ }
+ func (x FOO) String() string {
+ return proto.EnumName(FOO_name, int32(x))
+ }
+ func (x *FOO) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FOO_value, data)
+ if err != nil {
+ return err
+ }
+ *x = FOO(value)
+ return nil
+ }
+
+ type Test struct {
+ Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+ Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+ Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+ Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+ // Types that are valid to be assigned to Union:
+ // *Test_Number
+ // *Test_Name
+ Union isTest_Union `protobuf_oneof:"union"`
+ XXX_unrecognized []byte `json:"-"`
+ }
+ func (m *Test) Reset() { *m = Test{} }
+ func (m *Test) String() string { return proto.CompactTextString(m) }
+ func (*Test) ProtoMessage() {}
+
+ type isTest_Union interface {
+ isTest_Union()
+ }
+
+ type Test_Number struct {
+ Number int32 `protobuf:"varint,6,opt,name=number"`
+ }
+ type Test_Name struct {
+ Name string `protobuf:"bytes,7,opt,name=name"`
+ }
+
+ func (*Test_Number) isTest_Union() {}
+ func (*Test_Name) isTest_Union() {}
+
+ func (m *Test) GetUnion() isTest_Union {
+ if m != nil {
+ return m.Union
+ }
+ return nil
+ }
+ const Default_Test_Type int32 = 77
+
+ func (m *Test) GetLabel() string {
+ if m != nil && m.Label != nil {
+ return *m.Label
+ }
+ return ""
+ }
+
+ func (m *Test) GetType() int32 {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_Test_Type
+ }
+
+ func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+ if m != nil {
+ return m.Optionalgroup
+ }
+ return nil
+ }
+
+ type Test_OptionalGroup struct {
+ RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+ }
+ func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
+ func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+ func (m *Test_OptionalGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+ }
+
+ func (m *Test) GetNumber() int32 {
+ if x, ok := m.GetUnion().(*Test_Number); ok {
+ return x.Number
+ }
+ return 0
+ }
+
+ func (m *Test) GetName() string {
+ if x, ok := m.GetUnion().(*Test_Name); ok {
+ return x.Name
+ }
+ return ""
+ }
+
+ func init() {
+ proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+ }
+
+To create and play with a Test object:
+
+ package main
+
+ import (
+ "log"
+
+ "github.com/golang/protobuf/proto"
+ pb "./example.pb"
+ )
+
+ func main() {
+ test := &pb.Test{
+ Label: proto.String("hello"),
+ Type: proto.Int32(17),
+ Reps: []int64{1, 2, 3},
+ Optionalgroup: &pb.Test_OptionalGroup{
+ RequiredField: proto.String("good bye"),
+ },
+ Union: &pb.Test_Name{"fred"},
+ }
+ data, err := proto.Marshal(test)
+ if err != nil {
+ log.Fatal("marshaling error: ", err)
+ }
+ newTest := &pb.Test{}
+ err = proto.Unmarshal(data, newTest)
+ if err != nil {
+ log.Fatal("unmarshaling error: ", err)
+ }
+ // Now test and newTest contain the same data.
+ if test.GetLabel() != newTest.GetLabel() {
+ log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+ }
+ // Use a type switch to determine which oneof was set.
+ switch u := test.Union.(type) {
+ case *pb.Test_Number: // u.Number contains the number.
+ case *pb.Test_Name: // u.Name contains the string.
+ }
+ // etc.
+ }
+*/
+package proto
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+)
+
+// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
+// Marshal reports this when a required field is not initialized.
+// Unmarshal reports this when a required field is missing from the wire data.
+type RequiredNotSetError struct{ field string }
+
+func (e *RequiredNotSetError) Error() string {
+ if e.field == "" {
+ return fmt.Sprintf("proto: required field not set")
+ }
+ return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+func (e *RequiredNotSetError) RequiredNotSet() bool {
+ return true
+}
+
+type invalidUTF8Error struct{ field string }
+
+func (e *invalidUTF8Error) Error() string {
+ if e.field == "" {
+ return "proto: invalid UTF-8 detected"
+ }
+ return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
+}
+func (e *invalidUTF8Error) InvalidUTF8() bool {
+ return true
+}
+
+// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
+// This error should not be exposed to the external API as such errors should
+// be recreated with the field information.
+var errInvalidUTF8 = &invalidUTF8Error{}
+
+// isNonFatal reports whether the error is either a RequiredNotSet error
+// or an InvalidUTF8 error.
+func isNonFatal(err error) bool {
+ if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
+ return true
+ }
+ if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
+ return true
+ }
+ return false
+}
+
+type nonFatal struct{ E error }
+
+// Merge merges err into nf and reports whether the error was absorbed:
+// it returns true for nil or non-fatal errors, and false for fatal errors.
+func (nf *nonFatal) Merge(err error) (ok bool) {
+ if err == nil {
+ return true // not an error
+ }
+ if !isNonFatal(err) {
+ return false // fatal error
+ }
+ if nf.E == nil {
+ nf.E = err // store first instance of non-fatal error
+ }
+ return true
+}
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+ Reset()
+ String() string
+ ProtoMessage()
+}
+
+// Stats records allocation details about the protocol buffer encoders
+// and decoders. Useful for tuning the library itself.
+type Stats struct {
+ Emalloc uint64 // mallocs in encode
+ Dmalloc uint64 // mallocs in decode
+ Encode uint64 // number of encodes
+ Decode uint64 // number of decodes
+ Chit uint64 // number of cache hits
+ Cmiss uint64 // number of cache misses
+ Size uint64 // number of sizes
+}
+
+// Set to true to enable stats collection.
+const collectStats = false
+
+var stats Stats
+
+// GetStats returns a copy of the global Stats structure.
+func GetStats() Stats { return stats }
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers. It may be reused between invocations to
+// reduce memory usage. It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+ buf []byte // encode/decode byte stream
+ index int // read point
+
+ deterministic bool
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+ return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+ p.buf = p.buf[0:0] // for reading/writing
+ p.index = 0 // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+ p.buf = s
+ p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
+
+// SetDeterministic sets whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+// - Repeated serialization of a message will return the same bytes.
+// - Different processes of the same binary (which may be executing on
+// different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexicographic order. This is an implementation detail and
+// subject to change.
+func (p *Buffer) SetDeterministic(deterministic bool) {
+ p.deterministic = deterministic
+}
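A sketch of requesting deterministic output through a Buffer (Buffer.Marshal itself is
defined in encode.go elsewhere in this diff; msg is any generated Message, and log is
assumed imported):

    var buf proto.Buffer
    buf.SetDeterministic(true)
    if err := buf.Marshal(msg); err != nil {
        log.Fatal(err)
    }
    data := buf.Bytes() // map entries, if any, are emitted in sorted key order
    _ = data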
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+ return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+ p := new(int32)
+ *p = int32(v)
+ return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+ return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+ return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+ return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+ return &v
+}
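Taken together, these helpers are how optional proto2 scalar fields are populated, since
such fields are pointers; using the Test message from the package comment above:

    msg := &pb.Test{
        Label: proto.String("hello"), // *string
        Type:  proto.Int32(17),       // *int32
        Reps:  []int64{1, 2, 3},      // repeated fields remain plain slices
    }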
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name. Given an enum map and a value, it returns a useful string.
+func EnumName(m map[int32]string, v int32) string {
+ s, ok := m[v]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(v))
+}
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+ if data[0] == '"' {
+ // New style: enums are strings.
+ var repr string
+ if err := json.Unmarshal(data, &repr); err != nil {
+ return -1, err
+ }
+ val, ok := m[repr]
+ if !ok {
+ return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+ }
+ return val, nil
+ }
+ // Old style: enums are ints.
+ var val int32
+ if err := json.Unmarshal(data, &val); err != nil {
+ return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+ }
+ return val, nil
+}
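Both JSON forms decode through the same call; a sketch using the FOO enum and FOO_value map
from the package comment:

    v1, _ := proto.UnmarshalJSONEnum(FOO_value, []byte(`"X"`), "FOO") // symbolic form -> 17
    v2, _ := proto.UnmarshalJSONEnum(FOO_value, []byte(`17`), "FOO")  // numeric form  -> 17
    _ = FOO(v1) == FOO(v2)                                            // caller casts to the enum type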
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+ var u uint64
+
+ obuf := p.buf
+ index := p.index
+ p.buf = b
+ p.index = 0
+ depth := 0
+
+ fmt.Printf("\n--- %s ---\n", s)
+
+out:
+ for {
+ for i := 0; i < depth; i++ {
+ fmt.Print(" ")
+ }
+
+ index := p.index
+ if index == len(p.buf) {
+ break
+ }
+
+ op, err := p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: fetching op err %v\n", index, err)
+ break out
+ }
+ tag := op >> 3
+ wire := op & 7
+
+ switch wire {
+ default:
+ fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+ index, tag, wire)
+ break out
+
+ case WireBytes:
+ var r []byte
+
+ r, err = p.DecodeRawBytes(false)
+ if err != nil {
+ break out
+ }
+ fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+ if len(r) <= 6 {
+ for i := 0; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ } else {
+ for i := 0; i < 3; i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ fmt.Printf(" ..")
+ for i := len(r) - 3; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ }
+ fmt.Printf("\n")
+
+ case WireFixed32:
+ u, err = p.DecodeFixed32()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+ case WireFixed64:
+ u, err = p.DecodeFixed64()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+ case WireVarint:
+ u, err = p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+ case WireStartGroup:
+ fmt.Printf("%3d: t=%3d start\n", index, tag)
+ depth++
+
+ case WireEndGroup:
+ depth--
+ fmt.Printf("%3d: t=%3d end\n", index, tag)
+ }
+ }
+
+ if depth != 0 {
+ fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+ }
+ fmt.Printf("\n")
+
+ p.buf = obuf
+ p.index = index
+}
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+func SetDefaults(pb Message) {
+ setDefaults(reflect.ValueOf(pb), true, false)
+}
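A sketch of the effect on the Test message from the package comment, whose type field is
declared with [default=77]:

    msg := &pb.Test{Label: proto.String("x")}
    proto.SetDefaults(msg)
    fmt.Println(*msg.Type) // 77: unset scalar with a declared default is materialized
    fmt.Println(msg.Reps)  // []: fields without declared defaults are left untouched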
+
+// v is a pointer to a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+ v = v.Elem()
+
+ defaultMu.RLock()
+ dm, ok := defaults[v.Type()]
+ defaultMu.RUnlock()
+ if !ok {
+ dm = buildDefaultMessage(v.Type())
+ defaultMu.Lock()
+ defaults[v.Type()] = dm
+ defaultMu.Unlock()
+ }
+
+ for _, sf := range dm.scalars {
+ f := v.Field(sf.index)
+ if !f.IsNil() {
+ // field already set
+ continue
+ }
+ dv := sf.value
+ if dv == nil && !zeros {
+ // no explicit default, and don't want to set zeros
+ continue
+ }
+ fptr := f.Addr().Interface() // **T
+ // TODO: Consider batching the allocations we do here.
+ switch sf.kind {
+ case reflect.Bool:
+ b := new(bool)
+ if dv != nil {
+ *b = dv.(bool)
+ }
+ *(fptr.(**bool)) = b
+ case reflect.Float32:
+ f := new(float32)
+ if dv != nil {
+ *f = dv.(float32)
+ }
+ *(fptr.(**float32)) = f
+ case reflect.Float64:
+ f := new(float64)
+ if dv != nil {
+ *f = dv.(float64)
+ }
+ *(fptr.(**float64)) = f
+ case reflect.Int32:
+ // might be an enum
+ if ft := f.Type(); ft != int32PtrType {
+ // enum
+ f.Set(reflect.New(ft.Elem()))
+ if dv != nil {
+ f.Elem().SetInt(int64(dv.(int32)))
+ }
+ } else {
+ // int32 field
+ i := new(int32)
+ if dv != nil {
+ *i = dv.(int32)
+ }
+ *(fptr.(**int32)) = i
+ }
+ case reflect.Int64:
+ i := new(int64)
+ if dv != nil {
+ *i = dv.(int64)
+ }
+ *(fptr.(**int64)) = i
+ case reflect.String:
+ s := new(string)
+ if dv != nil {
+ *s = dv.(string)
+ }
+ *(fptr.(**string)) = s
+ case reflect.Uint8:
+ // exceptional case: []byte
+ var b []byte
+ if dv != nil {
+ db := dv.([]byte)
+ b = make([]byte, len(db))
+ copy(b, db)
+ } else {
+ b = []byte{}
+ }
+ *(fptr.(*[]byte)) = b
+ case reflect.Uint32:
+ u := new(uint32)
+ if dv != nil {
+ *u = dv.(uint32)
+ }
+ *(fptr.(**uint32)) = u
+ case reflect.Uint64:
+ u := new(uint64)
+ if dv != nil {
+ *u = dv.(uint64)
+ }
+ *(fptr.(**uint64)) = u
+ default:
+ log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+ }
+ }
+
+ for _, ni := range dm.nested {
+ f := v.Field(ni)
+ // f is *T or []*T or map[T]*T
+ switch f.Kind() {
+ case reflect.Ptr:
+ if f.IsNil() {
+ continue
+ }
+ setDefaults(f, recur, zeros)
+
+ case reflect.Slice:
+ for i := 0; i < f.Len(); i++ {
+ e := f.Index(i)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+
+ case reflect.Map:
+ for _, k := range f.MapKeys() {
+ e := f.MapIndex(k)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+ }
+ }
+}
+
+var (
+ // defaults maps a protocol buffer struct type to its default-value information:
+ // the scalar fields with proto-declared defaults and the indices of its nested message fields.
+ defaultMu sync.RWMutex
+ defaults = make(map[reflect.Type]defaultMessage)
+
+ int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+ scalars []scalarField
+ nested []int // struct field index of nested messages
+}
+
+type scalarField struct {
+ index int // struct field index
+ kind reflect.Kind // element type (the T in *T or []T)
+ value interface{} // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+ sprop := GetProperties(t)
+ for _, prop := range sprop.Prop {
+ fi, ok := sprop.decoderTags.get(prop.Tag)
+ if !ok {
+ // XXX_unrecognized
+ continue
+ }
+ ft := t.Field(fi).Type
+
+ sf, nested, err := fieldDefault(ft, prop)
+ switch {
+ case err != nil:
+ log.Print(err)
+ case nested:
+ dm.nested = append(dm.nested, fi)
+ case sf != nil:
+ sf.index = fi
+ dm.scalars = append(dm.scalars, *sf)
+ }
+ }
+
+ return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field cannot have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+ var canHaveDefault bool
+ switch ft.Kind() {
+ case reflect.Ptr:
+ if ft.Elem().Kind() == reflect.Struct {
+ nestedMessage = true
+ } else {
+ canHaveDefault = true // proto2 scalar field
+ }
+
+ case reflect.Slice:
+ switch ft.Elem().Kind() {
+ case reflect.Ptr:
+ nestedMessage = true // repeated message
+ case reflect.Uint8:
+ canHaveDefault = true // bytes field
+ }
+
+ case reflect.Map:
+ if ft.Elem().Kind() == reflect.Ptr {
+ nestedMessage = true // map with message values
+ }
+ }
+
+ if !canHaveDefault {
+ if nestedMessage {
+ return nil, true, nil
+ }
+ return nil, false, nil
+ }
+
+ // We now know that ft is a pointer or slice.
+ sf = &scalarField{kind: ft.Elem().Kind()}
+
+ // scalar fields without defaults
+ if !prop.HasDefault {
+ return sf, false, nil
+ }
+
+ // a scalar field: either *T or []byte
+ switch ft.Elem().Kind() {
+ case reflect.Bool:
+ x, err := strconv.ParseBool(prop.Default)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Float32:
+ x, err := strconv.ParseFloat(prop.Default, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+ }
+ sf.value = float32(x)
+ case reflect.Float64:
+ x, err := strconv.ParseFloat(prop.Default, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Int32:
+ x, err := strconv.ParseInt(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+ }
+ sf.value = int32(x)
+ case reflect.Int64:
+ x, err := strconv.ParseInt(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.String:
+ sf.value = prop.Default
+ case reflect.Uint8:
+ // []byte (not *uint8)
+ sf.value = []byte(prop.Default)
+ case reflect.Uint32:
+ x, err := strconv.ParseUint(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+ }
+ sf.value = uint32(x)
+ case reflect.Uint64:
+ x, err := strconv.ParseUint(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ default:
+ return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+ }
+
+ return sf, false, nil
+}
+
+// mapKeys returns a sort.Interface to be used for sorting the map keys.
+// Map fields may have key types of non-float scalars, strings and enums.
+func mapKeys(vs []reflect.Value) sort.Interface {
+ s := mapKeySorter{vs: vs}
+
+ // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
+ if len(vs) == 0 {
+ return s
+ }
+ switch vs[0].Kind() {
+ case reflect.Int32, reflect.Int64:
+ s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+ case reflect.Uint32, reflect.Uint64:
+ s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+ case reflect.Bool:
+ s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
+ case reflect.String:
+ s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
+ default:
+ panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
+ }
+
+ return s
+}
+
+type mapKeySorter struct {
+ vs []reflect.Value
+ less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+ return s.less(s.vs[i], s.vs[j])
+}
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint32, reflect.Uint64:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.String:
+ return v.String() == ""
+ }
+ return false
+}
+
+// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+// to assert that the generated code is compatible with this version of the proto package.
+const ProtoPackageIsVersion2 = true
+
+// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+// to assert that the generated code is compatible with this version of the proto package.
+const ProtoPackageIsVersion1 = true
+
+// InternalMessageInfo is a type used internally by generated .pb.go files.
+// This type is not intended to be used by non-generated code.
+// This type is not subject to any compatibility guarantee.
+type InternalMessageInfo struct {
+ marshal *marshalInfo
+ unmarshal *unmarshalInfo
+ merge *mergeInfo
+ discard *discardInfo
+}
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
new file mode 100644
index 00000000..3b6ca41d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -0,0 +1,314 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+ "sync"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
+
+type _MessageSet_Item struct {
+ TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
+ Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+ Item []*_MessageSet_Item `protobuf:"group,1,rep"`
+ XXX_unrecognized []byte
+ // TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+ MessageTypeId() int32
+}
+
+func (ms *messageSet) find(pb Message) *_MessageSet_Item {
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return nil
+ }
+ id := mti.MessageTypeId()
+ for _, item := range ms.Item {
+ if *item.TypeId == id {
+ return item
+ }
+ }
+ return nil
+}
+
+func (ms *messageSet) Has(pb Message) bool {
+ return ms.find(pb) != nil
+}
+
+func (ms *messageSet) Unmarshal(pb Message) error {
+ if item := ms.find(pb); item != nil {
+ return Unmarshal(item.Message, pb)
+ }
+ if _, ok := pb.(messageTypeIder); !ok {
+ return errNoMessageTypeID
+ }
+ return nil // TODO: return error instead?
+}
+
+func (ms *messageSet) Marshal(pb Message) error {
+ msg, err := Marshal(pb)
+ if err != nil {
+ return err
+ }
+ if item := ms.find(pb); item != nil {
+ // reuse existing item
+ item.Message = msg
+ return nil
+ }
+
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return errNoMessageTypeID
+ }
+
+ mtid := mti.MessageTypeId()
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: &mtid,
+ Message: msg,
+ })
+ return nil
+}
+
+func (ms *messageSet) Reset() { *ms = messageSet{} }
+func (ms *messageSet) String() string { return CompactTextString(ms) }
+func (*messageSet) ProtoMessage() {}
+
+// Support for the message_set_wire_format message option.
+
+func skipVarint(buf []byte) []byte {
+ i := 0
+ for ; buf[i]&0x80 != 0; i++ {
+ }
+ return buf[i+1:]
+}
+
+// MarshalMessageSet encodes the extension map represented by exts in the message set wire format.
+// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSet(exts interface{}) ([]byte, error) {
+ return marshalMessageSet(exts, false)
+}
+
+// marshalMessageSet implements MarshalMessageSet above, with an option to turn deterministic marshaling on or off.
+func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) {
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ var u marshalInfo
+ siz := u.sizeMessageSet(exts)
+ b := make([]byte, 0, siz)
+ return u.appendMessageSet(b, exts, deterministic)
+
+ case map[int32]Extension:
+ // This is an old-style extension map.
+ // Wrap it in a new-style XXX_InternalExtensions.
+ ie := XXX_InternalExtensions{
+ p: &struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ }{
+ extensionMap: exts,
+ },
+ }
+
+ var u marshalInfo
+ siz := u.sizeMessageSet(&ie)
+ b := make([]byte, 0, siz)
+ return u.appendMessageSet(b, &ie, deterministic)
+
+ default:
+ return nil, errors.New("proto: not an extension map")
+ }
+}
+
+// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSet(buf []byte, exts interface{}) error {
+ var m map[int32]Extension
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ m = exts.extensionsWrite()
+ case map[int32]Extension:
+ m = exts
+ default:
+ return errors.New("proto: not an extension map")
+ }
+
+ ms := new(messageSet)
+ if err := Unmarshal(buf, ms); err != nil {
+ return err
+ }
+ for _, item := range ms.Item {
+ id := *item.TypeId
+ msg := item.Message
+
+ // Restore wire type and field number varint, plus length varint.
+ // Be careful to preserve duplicate items.
+ b := EncodeVarint(uint64(id)<<3 | WireBytes)
+ if ext, ok := m[id]; ok {
+ // Existing data; rip off the tag and length varint
+ // so we join the new data correctly.
+ // We can assume that ext.enc is set because we are unmarshaling.
+ o := ext.enc[len(b):] // skip wire type and field number
+ _, n := DecodeVarint(o) // calculate length of length varint
+ o = o[n:] // skip length varint
+ msg = append(o, msg...) // join old data and new data
+ }
+ b = append(b, EncodeVarint(uint64(len(msg)))...)
+ b = append(b, msg...)
+
+ m[id] = Extension{enc: b}
+ }
+ return nil
+}
+
+// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
+// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
+ var m map[int32]Extension
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ var mu sync.Locker
+ m, mu = exts.extensionsRead()
+ if m != nil {
+ // Keep the extensions map locked until we're done marshaling to prevent
+ // races between marshaling and unmarshaling the lazily-{en,de}coded
+ // values.
+ mu.Lock()
+ defer mu.Unlock()
+ }
+ case map[int32]Extension:
+ m = exts
+ default:
+ return nil, errors.New("proto: not an extension map")
+ }
+ var b bytes.Buffer
+ b.WriteByte('{')
+
+ // Process the map in key order for deterministic output.
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
+
+ for i, id := range ids {
+ ext := m[id]
+ msd, ok := messageSetMap[id]
+ if !ok {
+ // Unknown type; we can't render it, so skip it.
+ continue
+ }
+
+ if i > 0 && b.Len() > 1 {
+ b.WriteByte(',')
+ }
+
+ fmt.Fprintf(&b, `"[%s]":`, msd.name)
+
+ x := ext.value
+ if x == nil {
+ x = reflect.New(msd.t.Elem()).Interface()
+ if err := Unmarshal(ext.enc, x.(Message)); err != nil {
+ return nil, err
+ }
+ }
+ d, err := json.Marshal(x)
+ if err != nil {
+ return nil, err
+ }
+ b.Write(d)
+ }
+ b.WriteByte('}')
+ return b.Bytes(), nil
+}
+
+// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
+// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
+ // Common-case fast path.
+ if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
+ return nil
+ }
+
+ // This is fairly tricky, and it's not clear that it is needed.
+ return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
+}
+
+// A global registry of types that can be used in a MessageSet.
+
+var messageSetMap = make(map[int32]messageSetDesc)
+
+type messageSetDesc struct {
+ t reflect.Type // pointer to struct
+ name string
+}
+
+// RegisterMessageSetType is called from the generated code.
+func RegisterMessageSetType(m Message, fieldNum int32, name string) {
+ messageSetMap[fieldNum] = messageSetDesc{
+ t: reflect.TypeOf(m),
+ name: name,
+ }
+}
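+
+// Illustrative sketch of a generated registration call (the type, field number
+// and name here are hypothetical, not taken from any real .proto file):
+//
+//	func init() {
+//		RegisterMessageSetType((*MyExtension)(nil), 12345, "example.MyExtension")
+//	}
+//
+// After such a call, MarshalMessageSetJSON above can resolve field 12345 back to
+// its name and concrete type.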
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
new file mode 100644
index 00000000..b6cad908
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,357 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build purego appengine js
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+ "reflect"
+ "sync"
+)
+
+const unsafeAllowed = false
+
+// A field identifies a field in a struct, accessible from a pointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// zeroField is a noop when calling pointer.offset.
+var zeroField = field([]int{})
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// The pointer type is for the table-driven decoder.
+// The implementation here uses a reflect.Value of pointer type to
+// create a generic pointer. In pointer_unsafe.go we use unsafe
+// instead of reflect to implement the same (but faster) interface.
+type pointer struct {
+ v reflect.Value
+}
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+ return pointer{v: reflect.ValueOf(*i)}
+}
+
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr bool) pointer {
+ v := reflect.ValueOf(*i)
+ u := reflect.New(v.Type())
+ u.Elem().Set(v)
+ return pointer{v: u}
+}
+
+// valToPointer converts v to a pointer. v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+ return pointer{v: v}
+}
+
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+ return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
+}
+
+func (p pointer) isNil() bool {
+ return p.v.IsNil()
+}
+
+// grow updates the slice s in place to make it one element longer.
+// s must be addressable.
+// Returns the (addressable) new element.
+func grow(s reflect.Value) reflect.Value {
+ n, m := s.Len(), s.Cap()
+ if n < m {
+ s.SetLen(n + 1)
+ } else {
+ s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
+ }
+ return s.Index(n)
+}
+
+func (p pointer) toInt64() *int64 {
+ return p.v.Interface().(*int64)
+}
+func (p pointer) toInt64Ptr() **int64 {
+ return p.v.Interface().(**int64)
+}
+func (p pointer) toInt64Slice() *[]int64 {
+ return p.v.Interface().(*[]int64)
+}
+
+var int32ptr = reflect.TypeOf((*int32)(nil))
+
+func (p pointer) toInt32() *int32 {
+ return p.v.Convert(int32ptr).Interface().(*int32)
+}
+
+// The toInt32Ptr/Slice methods don't work because of enums.
+// Instead, we must use set/get methods for the int32ptr/slice case.
+/*
+ func (p pointer) toInt32Ptr() **int32 {
+ return p.v.Interface().(**int32)
+}
+ func (p pointer) toInt32Slice() *[]int32 {
+ return p.v.Interface().(*[]int32)
+}
+*/
+func (p pointer) getInt32Ptr() *int32 {
+ if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+ // raw int32 type
+ return p.v.Elem().Interface().(*int32)
+ }
+ // an enum
+ return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
+}
+func (p pointer) setInt32Ptr(v int32) {
+ // Allocate value in a *int32. Possibly convert that to a *enum.
+ // Then assign it to a **int32 or **enum.
+ // Note: we can convert *int32 to *enum, but we can't convert
+ // **int32 to **enum!
+ p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
+}
+
+// getInt32Slice copies []int32 from p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getInt32Slice() []int32 {
+ if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+ // raw int32 type
+ return p.v.Elem().Interface().([]int32)
+ }
+ // an enum
+ // Allocate a []int32, then assign []enum's values into it.
+ // Note: we can't convert []enum to []int32.
+ slice := p.v.Elem()
+ s := make([]int32, slice.Len())
+ for i := 0; i < slice.Len(); i++ {
+ s[i] = int32(slice.Index(i).Int())
+ }
+ return s
+}
+
+// setInt32Slice copies []int32 into p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setInt32Slice(v []int32) {
+ if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+ // raw int32 type
+ p.v.Elem().Set(reflect.ValueOf(v))
+ return
+ }
+ // an enum
+ // Allocate a []enum, then assign []int32's values into it.
+ // Note: we can't convert []enum to []int32.
+ slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
+ for i, x := range v {
+ slice.Index(i).SetInt(int64(x))
+ }
+ p.v.Elem().Set(slice)
+}
+func (p pointer) appendInt32Slice(v int32) {
+ grow(p.v.Elem()).SetInt(int64(v))
+}
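+
+// For illustration (Color is a hypothetical enum type, not part of this package):
+// Go allows converting a single *Color to *int32, but not a []Color to []int32,
+// which is why the slice accessors above copy element by element.
+//
+//	type Color int32
+//	var c Color
+//	_ = (*int32)(&c)          // a single pointer converts directly
+//	// _ = []int32([]Color{}) // ...but a slice conversion does not compile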
+
+func (p pointer) toUint64() *uint64 {
+ return p.v.Interface().(*uint64)
+}
+func (p pointer) toUint64Ptr() **uint64 {
+ return p.v.Interface().(**uint64)
+}
+func (p pointer) toUint64Slice() *[]uint64 {
+ return p.v.Interface().(*[]uint64)
+}
+func (p pointer) toUint32() *uint32 {
+ return p.v.Interface().(*uint32)
+}
+func (p pointer) toUint32Ptr() **uint32 {
+ return p.v.Interface().(**uint32)
+}
+func (p pointer) toUint32Slice() *[]uint32 {
+ return p.v.Interface().(*[]uint32)
+}
+func (p pointer) toBool() *bool {
+ return p.v.Interface().(*bool)
+}
+func (p pointer) toBoolPtr() **bool {
+ return p.v.Interface().(**bool)
+}
+func (p pointer) toBoolSlice() *[]bool {
+ return p.v.Interface().(*[]bool)
+}
+func (p pointer) toFloat64() *float64 {
+ return p.v.Interface().(*float64)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+ return p.v.Interface().(**float64)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+ return p.v.Interface().(*[]float64)
+}
+func (p pointer) toFloat32() *float32 {
+ return p.v.Interface().(*float32)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+ return p.v.Interface().(**float32)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+ return p.v.Interface().(*[]float32)
+}
+func (p pointer) toString() *string {
+ return p.v.Interface().(*string)
+}
+func (p pointer) toStringPtr() **string {
+ return p.v.Interface().(**string)
+}
+func (p pointer) toStringSlice() *[]string {
+ return p.v.Interface().(*[]string)
+}
+func (p pointer) toBytes() *[]byte {
+ return p.v.Interface().(*[]byte)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+ return p.v.Interface().(*[][]byte)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+ return p.v.Interface().(*XXX_InternalExtensions)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+ return p.v.Interface().(*map[int32]Extension)
+}
+func (p pointer) getPointer() pointer {
+ return pointer{v: p.v.Elem()}
+}
+func (p pointer) setPointer(q pointer) {
+ p.v.Elem().Set(q.v)
+}
+func (p pointer) appendPointer(q pointer) {
+ grow(p.v.Elem()).Set(q.v)
+}
+
+// getPointerSlice copies []*T from p as a new []pointer.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getPointerSlice() []pointer {
+ if p.v.IsNil() {
+ return nil
+ }
+ n := p.v.Elem().Len()
+ s := make([]pointer, n)
+ for i := 0; i < n; i++ {
+ s[i] = pointer{v: p.v.Elem().Index(i)}
+ }
+ return s
+}
+
+// setPointerSlice copies []pointer into p as a new []*T.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setPointerSlice(v []pointer) {
+ if v == nil {
+ p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
+ return
+ }
+ s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
+ for _, p := range v {
+ s = reflect.Append(s, p.v)
+ }
+ p.v.Elem().Set(s)
+}
+
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed by p.
+func (p pointer) getInterfacePointer() pointer {
+ if p.v.Elem().IsNil() {
+ return pointer{v: p.v.Elem()}
+ }
+ return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
+}
+
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+ // TODO: check that p.v.Type().Elem() == t?
+ return p.v
+}
+
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
+}
+
+var atomicLock sync.Mutex
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 00000000..d55a335d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,308 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !purego,!appengine,!js
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+ "reflect"
+ "sync/atomic"
+ "unsafe"
+)
+
+const unsafeAllowed = true
+
+// A field identifies a field in a struct, accessible from a pointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// zeroField is a noop when calling pointer.offset.
+const zeroField = field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+ return f != invalidField
+}
+
+// The pointer type below is for the new table-driven encoder/decoder.
+// The implementation here uses unsafe.Pointer to create a generic pointer.
+// In pointer_reflect.go we use reflect instead of unsafe to implement
+// the same (but slower) interface.
+type pointer struct {
+ p unsafe.Pointer
+}
+
+// size of pointer
+var ptrSize = unsafe.Sizeof(uintptr(0))
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+ // Super-tricky - read pointer out of data word of interface value.
+ // Saves ~25ns over the equivalent:
+ // return valToPointer(reflect.ValueOf(*i))
+ return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+}
+
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr bool) pointer {
+ // Super-tricky - read or get the address of data word of interface value.
+ if isptr {
+ // The interface is of pointer type, thus it is a direct interface.
+ // The data word is the pointer data itself. We take its address.
+ return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
+ }
+ // The interface is not of pointer type. The data word is the pointer
+ // to the data.
+ return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+}
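+
+// For illustration (this relies on the runtime's current two-word interface
+// representation, an implementation detail rather than a documented guarantee):
+//
+//	w := (*[2]unsafe.Pointer)(unsafe.Pointer(i)) // [type word, data word]
+//	data := w[1]                                 // what toPointer/toAddrPointer read
+//	_ = data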
+
+// valToPointer converts v to a pointer. v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+ return pointer{p: unsafe.Pointer(v.Pointer())}
+}
+
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+ // For safety, we should panic if !f.IsValid; however, calling panic causes
+ // this to no longer be inlineable, which is a serious performance cost.
+ /*
+ if !f.IsValid() {
+ panic("invalid field")
+ }
+ */
+ return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
+}
+
+func (p pointer) isNil() bool {
+ return p.p == nil
+}
+
+func (p pointer) toInt64() *int64 {
+ return (*int64)(p.p)
+}
+func (p pointer) toInt64Ptr() **int64 {
+ return (**int64)(p.p)
+}
+func (p pointer) toInt64Slice() *[]int64 {
+ return (*[]int64)(p.p)
+}
+func (p pointer) toInt32() *int32 {
+ return (*int32)(p.p)
+}
+
+// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
+/*
+ func (p pointer) toInt32Ptr() **int32 {
+ return (**int32)(p.p)
+ }
+ func (p pointer) toInt32Slice() *[]int32 {
+ return (*[]int32)(p.p)
+ }
+*/
+func (p pointer) getInt32Ptr() *int32 {
+ return *(**int32)(p.p)
+}
+func (p pointer) setInt32Ptr(v int32) {
+ *(**int32)(p.p) = &v
+}
+
+// getInt32Slice loads a []int32 from p.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getInt32Slice() []int32 {
+ return *(*[]int32)(p.p)
+}
+
+// setInt32Slice stores a []int32 to p.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setInt32Slice(v []int32) {
+ *(*[]int32)(p.p) = v
+}
+
+// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
+func (p pointer) appendInt32Slice(v int32) {
+ s := (*[]int32)(p.p)
+ *s = append(*s, v)
+}
+
+func (p pointer) toUint64() *uint64 {
+ return (*uint64)(p.p)
+}
+func (p pointer) toUint64Ptr() **uint64 {
+ return (**uint64)(p.p)
+}
+func (p pointer) toUint64Slice() *[]uint64 {
+ return (*[]uint64)(p.p)
+}
+func (p pointer) toUint32() *uint32 {
+ return (*uint32)(p.p)
+}
+func (p pointer) toUint32Ptr() **uint32 {
+ return (**uint32)(p.p)
+}
+func (p pointer) toUint32Slice() *[]uint32 {
+ return (*[]uint32)(p.p)
+}
+func (p pointer) toBool() *bool {
+ return (*bool)(p.p)
+}
+func (p pointer) toBoolPtr() **bool {
+ return (**bool)(p.p)
+}
+func (p pointer) toBoolSlice() *[]bool {
+ return (*[]bool)(p.p)
+}
+func (p pointer) toFloat64() *float64 {
+ return (*float64)(p.p)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+ return (**float64)(p.p)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+ return (*[]float64)(p.p)
+}
+func (p pointer) toFloat32() *float32 {
+ return (*float32)(p.p)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+ return (**float32)(p.p)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+ return (*[]float32)(p.p)
+}
+func (p pointer) toString() *string {
+ return (*string)(p.p)
+}
+func (p pointer) toStringPtr() **string {
+ return (**string)(p.p)
+}
+func (p pointer) toStringSlice() *[]string {
+ return (*[]string)(p.p)
+}
+func (p pointer) toBytes() *[]byte {
+ return (*[]byte)(p.p)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+ return (*[][]byte)(p.p)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+ return (*XXX_InternalExtensions)(p.p)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+ return (*map[int32]Extension)(p.p)
+}
+
+// getPointerSlice loads []*T from p as a []pointer.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getPointerSlice() []pointer {
+ // Super-tricky - p should point to a []*T where T is a
+ // message type. We load it as []pointer.
+ return *(*[]pointer)(p.p)
+}
+
+// setPointerSlice stores []pointer into p as a []*T.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setPointerSlice(v []pointer) {
+ // Super-tricky - p should point to a []*T where T is a
+ // message type. We store it as []pointer.
+ *(*[]pointer)(p.p) = v
+}
+
+// getPointer loads the pointer at p and returns it.
+func (p pointer) getPointer() pointer {
+ return pointer{p: *(*unsafe.Pointer)(p.p)}
+}
+
+// setPointer stores the pointer q at p.
+func (p pointer) setPointer(q pointer) {
+ *(*unsafe.Pointer)(p.p) = q.p
+}
+
+// append q to the slice pointed to by p.
+func (p pointer) appendPointer(q pointer) {
+ s := (*[]unsafe.Pointer)(p.p)
+ *s = append(*s, q.p)
+}
+
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed by p.
+func (p pointer) getInterfacePointer() pointer {
+ // Super-tricky - read pointer out of data word of interface value.
+ return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
+}
+
+// asPointerTo returns a reflect.Value that is a pointer to an
+// object of type t stored at p.
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+ return reflect.NewAt(t, p.p)
+}
+
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+ return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+ return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+ return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+ return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 00000000..50b99b83
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,544 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+ WireVarint = 0
+ WireFixed64 = 1
+ WireBytes = 2
+ WireStartGroup = 3
+ WireEndGroup = 4
+ WireFixed32 = 5
+)
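+
+// For illustration (consistent with how these constants are combined elsewhere in
+// this package): a field's key on the wire is its tag number shifted left by three
+// bits, OR'ed with its wire type, so tag 1 with WireVarint encodes as the byte 0x08.
+//
+//	key := uint64(tag)<<3 | uint64(WireVarint)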
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+ fastTags []int
+ slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+ if t > 0 && t < tagMapFastLimit {
+ if t >= len(p.fastTags) {
+ return 0, false
+ }
+ fi := p.fastTags[t]
+ return fi, fi >= 0
+ }
+ fi, ok := p.slowTags[t]
+ return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+ if t > 0 && t < tagMapFastLimit {
+ for len(p.fastTags) < t+1 {
+ p.fastTags = append(p.fastTags, -1)
+ }
+ p.fastTags[t] = fi
+ return
+ }
+ if p.slowTags == nil {
+ p.slowTags = make(map[int]int)
+ }
+ p.slowTags[t] = fi
+}
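+
+// Usage sketch (illustrative): small tag numbers hit the fast slice path.
+//
+//	var tm tagMap
+//	tm.put(3, 0)        // proto tag 3 -> struct field index 0
+//	fi, ok := tm.get(3) // fi == 0, ok == true
+//	_, _ = fi, ok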
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+ Prop []*Properties // properties for each field
+ reqCount int // required count
+ decoderTags tagMap // map from proto tag to struct field number
+ decoderOrigNames map[string]int // map from original name to struct field number
+ order []int // list of struct field numbers in tag order
+
+ // OneofTypes contains information about the oneof fields in this message.
+ // It is keyed by the original name of a field.
+ OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+ Type reflect.Type // pointer to generated struct type for this oneof field
+ Field int // struct field number of the containing oneof in the message
+ Prop *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+ return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+ Name string // name of the field, for error messages
+ OrigName string // original name before protocol compiler (always set)
+ JSONName string // name to use for JSON; determined by protoc
+ Wire string
+ WireType int
+ Tag int
+ Required bool
+ Optional bool
+ Repeated bool
+ Packed bool // relevant for repeated primitives only
+ Enum string // set for enum types only
+ proto3 bool // whether this is known to be a proto3 field
+ oneof bool // whether this is a oneof field
+
+ Default string // default value
+ HasDefault bool // whether an explicit default was provided
+
+ stype reflect.Type // set for struct types only
+ sprop *StructProperties // set for struct types only
+
+ mtype reflect.Type // set for map types only
+ MapKeyProp *Properties // set for map types only
+ MapValProp *Properties // set for map types only
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+ s := p.Wire
+ s += ","
+ s += strconv.Itoa(p.Tag)
+ if p.Required {
+ s += ",req"
+ }
+ if p.Optional {
+ s += ",opt"
+ }
+ if p.Repeated {
+ s += ",rep"
+ }
+ if p.Packed {
+ s += ",packed"
+ }
+ s += ",name=" + p.OrigName
+ if p.JSONName != p.OrigName {
+ s += ",json=" + p.JSONName
+ }
+ if p.proto3 {
+ s += ",proto3"
+ }
+ if p.oneof {
+ s += ",oneof"
+ }
+ if len(p.Enum) > 0 {
+ s += ",enum=" + p.Enum
+ }
+ if p.HasDefault {
+ s += ",def=" + p.Default
+ }
+ return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+ // "bytes,49,opt,name=foo,def=hello!"
+ fields := strings.Split(s, ",") // breaks def=, but handled below.
+ if len(fields) < 2 {
+ fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+ return
+ }
+
+ p.Wire = fields[0]
+ switch p.Wire {
+ case "varint":
+ p.WireType = WireVarint
+ case "fixed32":
+ p.WireType = WireFixed32
+ case "fixed64":
+ p.WireType = WireFixed64
+ case "zigzag32":
+ p.WireType = WireVarint
+ case "zigzag64":
+ p.WireType = WireVarint
+ case "bytes", "group":
+ p.WireType = WireBytes
+ // no numeric converter for non-numeric types
+ default:
+ fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+ return
+ }
+
+ var err error
+ p.Tag, err = strconv.Atoi(fields[1])
+ if err != nil {
+ return
+ }
+
+outer:
+ for i := 2; i < len(fields); i++ {
+ f := fields[i]
+ switch {
+ case f == "req":
+ p.Required = true
+ case f == "opt":
+ p.Optional = true
+ case f == "rep":
+ p.Repeated = true
+ case f == "packed":
+ p.Packed = true
+ case strings.HasPrefix(f, "name="):
+ p.OrigName = f[5:]
+ case strings.HasPrefix(f, "json="):
+ p.JSONName = f[5:]
+ case strings.HasPrefix(f, "enum="):
+ p.Enum = f[5:]
+ case f == "proto3":
+ p.proto3 = true
+ case f == "oneof":
+ p.oneof = true
+ case strings.HasPrefix(f, "def="):
+ p.HasDefault = true
+ p.Default = f[4:] // rest of string
+ if i+1 < len(fields) {
+ // Commas aren't escaped, and def is always last.
+ p.Default += "," + strings.Join(fields[i+1:], ",")
+ break outer
+ }
+ }
+ }
+}
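+
+// Example (illustrative): parsing the tag shown in the comment above,
+//
+//	var p Properties
+//	p.Parse("bytes,49,opt,name=foo,def=hello!")
+//
+// leaves p.Wire == "bytes", p.Tag == 49, p.Optional == true,
+// p.OrigName == "foo", p.HasDefault == true and p.Default == "hello!".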
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// setFieldProps initializes the field properties for submessages and maps.
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+ switch t1 := typ; t1.Kind() {
+ case reflect.Ptr:
+ if t1.Elem().Kind() == reflect.Struct {
+ p.stype = t1.Elem()
+ }
+
+ case reflect.Slice:
+ if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
+ p.stype = t2.Elem()
+ }
+
+ case reflect.Map:
+ p.mtype = t1
+ p.MapKeyProp = &Properties{}
+ p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+ p.MapValProp = &Properties{}
+ vtype := p.mtype.Elem()
+ if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+ // The value type is not a message (*T) or bytes ([]byte),
+ // so we need encoders for the pointer to this type.
+ vtype = reflect.PtrTo(vtype)
+ }
+ p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+ }
+
+ if p.stype != nil {
+ if lockGetProp {
+ p.sprop = GetProperties(p.stype)
+ } else {
+ p.sprop = getPropertiesLocked(p.stype)
+ }
+ }
+}
+
+var (
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+)
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+ p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+ // "bytes,49,opt,def=hello!"
+ p.Name = name
+ p.OrigName = name
+ if tag == "" {
+ return
+ }
+ p.Parse(tag)
+ p.setFieldProps(typ, f, lockGetProp)
+}
+
+var (
+ propertiesMu sync.RWMutex
+ propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+ if t.Kind() != reflect.Struct {
+ panic("proto: type must have kind struct")
+ }
+
+ // Most calls to GetProperties in a long-running program will be
+ // retrieving details for types we have seen before.
+ propertiesMu.RLock()
+ sprop, ok := propertiesMap[t]
+ propertiesMu.RUnlock()
+ if ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return sprop
+ }
+
+ propertiesMu.Lock()
+ sprop = getPropertiesLocked(t)
+ propertiesMu.Unlock()
+ return sprop
+}
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+ if prop, ok := propertiesMap[t]; ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return prop
+ }
+ if collectStats {
+ stats.Cmiss++
+ }
+
+ prop := new(StructProperties)
+ // in case of recursive protos, fill this in now.
+ propertiesMap[t] = prop
+
+ // build properties
+ prop.Prop = make([]*Properties, t.NumField())
+ prop.order = make([]int, t.NumField())
+
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ p := new(Properties)
+ name := f.Name
+ p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+ oneof := f.Tag.Get("protobuf_oneof") // special case
+ if oneof != "" {
+ // Oneof fields don't use the traditional protobuf tag.
+ p.OrigName = oneof
+ }
+ prop.Prop[i] = p
+ prop.order[i] = i
+ if debug {
+ print(i, " ", f.Name, " ", t.String(), " ")
+ if p.Tag > 0 {
+ print(p.String())
+ }
+ print("\n")
+ }
+ }
+
+ // Re-order prop.order.
+ sort.Sort(prop)
+
+ type oneofMessage interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+ }
+ if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
+ var oots []interface{}
+ _, _, _, oots = om.XXX_OneofFuncs()
+
+ // Interpret oneof metadata.
+ prop.OneofTypes = make(map[string]*OneofProperties)
+ for _, oot := range oots {
+ oop := &OneofProperties{
+ Type: reflect.ValueOf(oot).Type(), // *T
+ Prop: new(Properties),
+ }
+ sft := oop.Type.Elem().Field(0)
+ oop.Prop.Name = sft.Name
+ oop.Prop.Parse(sft.Tag.Get("protobuf"))
+ // There will be exactly one interface field that
+ // this new value is assignable to.
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Type.Kind() != reflect.Interface {
+ continue
+ }
+ if !oop.Type.AssignableTo(f.Type) {
+ continue
+ }
+ oop.Field = i
+ break
+ }
+ prop.OneofTypes[oop.Prop.OrigName] = oop
+ }
+ }
+
+ // build required counts
+ // build tags
+ reqCount := 0
+ prop.decoderOrigNames = make(map[string]int)
+ for i, p := range prop.Prop {
+ if strings.HasPrefix(p.Name, "XXX_") {
+ // Internal fields should not appear in tags/origNames maps.
+ // They are handled specially when encoding and decoding.
+ continue
+ }
+ if p.Required {
+ reqCount++
+ }
+ prop.decoderTags.put(p.Tag, i)
+ prop.decoderOrigNames[p.OrigName] = i
+ }
+ prop.reqCount = reqCount
+
+ return prop
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+ if _, ok := enumValueMaps[typeName]; ok {
+ panic("proto: duplicate enum registered: " + typeName)
+ }
+ enumValueMaps[typeName] = valueMap
+}
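+
+// Illustrative sketch of the generated registration call (the enum name and
+// values here are hypothetical, not from any real .proto file):
+//
+//	func init() {
+//		RegisterEnum("example.Color", map[int32]string{0: "RED", 1: "BLUE"},
+//			map[string]int32{"RED": 0, "BLUE": 1})
+//	}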
+
+// EnumValueMap returns the mapping from names to integers of the
+// enum type enumType, or nil if not found.
+func EnumValueMap(enumType string) map[string]int32 {
+ return enumValueMaps[enumType]
+}
+
+// A registry of all linked message types.
+// The string is a fully-qualified proto name ("pkg.Message").
+var (
+ protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers
+ protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types
+ revProtoTypes = make(map[reflect.Type]string)
+)
+
+// RegisterType is called from generated code and maps from the fully qualified
+// proto name to the type (pointer to struct) of the protocol buffer.
+func RegisterType(x Message, name string) {
+ if _, ok := protoTypedNils[name]; ok {
+ // TODO: Some day, make this a panic.
+ log.Printf("proto: duplicate proto type registered: %s", name)
+ return
+ }
+ t := reflect.TypeOf(x)
+ if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
+ // Generated code always calls RegisterType with nil x.
+ // This check is just for extra safety.
+ protoTypedNils[name] = x
+ } else {
+ protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
+ }
+ revProtoTypes[t] = name
+}
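+
+// Illustrative sketch of the generated registration call (MyMessage is a
+// hypothetical generated type):
+//
+//	func init() {
+//		RegisterType((*MyMessage)(nil), "example.MyMessage")
+//	}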
+
+// RegisterMapType is called from generated code and maps from the fully qualified
+// proto name to the native map type of the proto map definition.
+func RegisterMapType(x interface{}, name string) {
+ if reflect.TypeOf(x).Kind() != reflect.Map {
+ panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
+ }
+ if _, ok := protoMapTypes[name]; ok {
+ log.Printf("proto: duplicate proto type registered: %s", name)
+ return
+ }
+ t := reflect.TypeOf(x)
+ protoMapTypes[name] = t
+ revProtoTypes[t] = name
+}
+
+// MessageName returns the fully-qualified proto name for the given message type.
+func MessageName(x Message) string {
+ type xname interface {
+ XXX_MessageName() string
+ }
+ if m, ok := x.(xname); ok {
+ return m.XXX_MessageName()
+ }
+ return revProtoTypes[reflect.TypeOf(x)]
+}
+
+// MessageType returns the message type (pointer to struct) for a named message.
+// The type is not guaranteed to implement proto.Message if the name refers to a
+// map entry.
+func MessageType(name string) reflect.Type {
+ if t, ok := protoTypedNils[name]; ok {
+ return reflect.TypeOf(t)
+ }
+ return protoMapTypes[name]
+}
+
+// A registry of all linked proto files.
+var (
+ protoFiles = make(map[string][]byte) // file name => fileDescriptor
+)
+
+// RegisterFile is called from generated code and maps from the
+// full file name of a .proto file to its compressed FileDescriptorProto.
+func RegisterFile(filename string, fileDescriptor []byte) {
+ protoFiles[filename] = fileDescriptor
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
+func FileDescriptor(filename string) []byte { return protoFiles[filename] }
diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go
new file mode 100644
index 00000000..b1679449
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go
@@ -0,0 +1,2767 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "unicode/utf8"
+)
+
+// a sizer takes a pointer to a field and the size of its tag, and computes the size
+// of the encoded data.
+type sizer func(pointer, int) int
+
+// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format),
+// marshals the field to the end of the slice, and returns the slice and error (if any).
+type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
+
+// marshalInfo is the information used for marshaling a message.
+type marshalInfo struct {
+ typ reflect.Type
+ fields []*marshalFieldInfo
+ unrecognized field // offset of XXX_unrecognized
+ extensions field // offset of XXX_InternalExtensions
+ v1extensions field // offset of XXX_extensions
+ sizecache field // offset of XXX_sizecache
+ initialized int32 // 0 -- only typ is set, 1 -- fully initialized
+ messageset bool // uses message set wire format
+ hasmarshaler bool // has custom marshaler
+ sync.RWMutex // protect extElems map, also for initialization
+ extElems map[int32]*marshalElemInfo // info of extension elements
+}
+
+// marshalFieldInfo is the information used for marshaling a field of a message.
+type marshalFieldInfo struct {
+ field field
+ wiretag uint64 // tag in wire format
+ tagsize int // size of tag in wire format
+ sizer sizer
+ marshaler marshaler
+ isPointer bool
+ required bool // field is required
+ name string // name of the field, for error reporting
+ oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
+}
+
+// marshalElemInfo is the information used for marshaling an extension or oneof element.
+type marshalElemInfo struct {
+ wiretag uint64 // tag in wire format
+ tagsize int // size of tag in wire format
+ sizer sizer
+ marshaler marshaler
+ isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
+}
+
+var (
+ marshalInfoMap = map[reflect.Type]*marshalInfo{}
+ marshalInfoLock sync.Mutex
+)
+
+// getMarshalInfo returns the information to marshal a given type of message.
+// The info it returns is not necessarily initialized.
+// t is the type of the message (NOT the pointer to it).
+func getMarshalInfo(t reflect.Type) *marshalInfo {
+ marshalInfoLock.Lock()
+ u, ok := marshalInfoMap[t]
+ if !ok {
+ u = &marshalInfo{typ: t}
+ marshalInfoMap[t] = u
+ }
+ marshalInfoLock.Unlock()
+ return u
+}
+
+// Size is the entry point from generated code,
+// and should be ONLY called by generated code.
+// It computes the size of encoded data of msg.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Size(msg Message) int {
+ u := getMessageMarshalInfo(msg, a)
+ ptr := toPointer(&msg)
+ if ptr.isNil() {
+ // We get here if msg is a typed nil ((*SomeMessage)(nil)),
+ // so it satisfies the interface, and msg == nil wouldn't
+ // catch it. We don't want to crash in this case.
+ return 0
+ }
+ return u.size(ptr)
+}
+
+// Marshal is the entry point from generated code,
+// and should be ONLY called by generated code.
+// It marshals msg to the end of b.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
+ u := getMessageMarshalInfo(msg, a)
+ ptr := toPointer(&msg)
+ if ptr.isNil() {
+ // We get here if msg is a typed nil ((*SomeMessage)(nil)),
+ // so it satisfies the interface, and msg == nil wouldn't
+ // catch it. We don't want to crash in this case.
+ return b, ErrNil
+ }
+ return u.marshal(b, ptr, deterministic)
+}
+
+func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
+ // u := a.marshal, but atomically.
+ // We use an atomic here to ensure memory consistency.
+ u := atomicLoadMarshalInfo(&a.marshal)
+ if u == nil {
+ // Get marshal information from type of message.
+ t := reflect.ValueOf(msg).Type()
+ if t.Kind() != reflect.Ptr {
+ panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
+ }
+ u = getMarshalInfo(t.Elem())
+ // Store it in the cache for later users.
+ // a.marshal = u, but atomically.
+ atomicStoreMarshalInfo(&a.marshal, u)
+ }
+ return u
+}
+
+// size is the main function to compute the size of the encoded data of a message.
+// ptr is the pointer to the message.
+func (u *marshalInfo) size(ptr pointer) int {
+ if atomic.LoadInt32(&u.initialized) == 0 {
+ u.computeMarshalInfo()
+ }
+
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ if u.hasmarshaler {
+ m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+ b, _ := m.Marshal()
+ return len(b)
+ }
+
+ n := 0
+ for _, f := range u.fields {
+ if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+ // nil pointer always marshals to nothing
+ continue
+ }
+ n += f.sizer(ptr.offset(f.field), f.tagsize)
+ }
+ if u.extensions.IsValid() {
+ e := ptr.offset(u.extensions).toExtensions()
+ if u.messageset {
+ n += u.sizeMessageSet(e)
+ } else {
+ n += u.sizeExtensions(e)
+ }
+ }
+ if u.v1extensions.IsValid() {
+ m := *ptr.offset(u.v1extensions).toOldExtensions()
+ n += u.sizeV1Extensions(m)
+ }
+ if u.unrecognized.IsValid() {
+ s := *ptr.offset(u.unrecognized).toBytes()
+ n += len(s)
+ }
+ // cache the result for use in marshal
+ if u.sizecache.IsValid() {
+ atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
+ }
+ return n
+}
+
+// cachedsize gets the size from the cache. If there is no cache (i.e. the message is not generated),
+// it falls back to computing the size.
+func (u *marshalInfo) cachedsize(ptr pointer) int {
+ if u.sizecache.IsValid() {
+ return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
+ }
+ return u.size(ptr)
+}
+
+// marshal is the main function to marshal a message. It takes a byte slice and appends
+// the encoded data to the end of the slice, returns the slice and error (if any).
+// ptr is the pointer to the message.
+// If deterministic is true, map is marshaled in deterministic order.
+func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
+ if atomic.LoadInt32(&u.initialized) == 0 {
+ u.computeMarshalInfo()
+ }
+
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ if u.hasmarshaler {
+ m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+ b1, err := m.Marshal()
+ b = append(b, b1...)
+ return b, err
+ }
+
+ var err, errLater error
+ // The old marshaler encodes extensions at beginning.
+ if u.extensions.IsValid() {
+ e := ptr.offset(u.extensions).toExtensions()
+ if u.messageset {
+ b, err = u.appendMessageSet(b, e, deterministic)
+ } else {
+ b, err = u.appendExtensions(b, e, deterministic)
+ }
+ if err != nil {
+ return b, err
+ }
+ }
+ if u.v1extensions.IsValid() {
+ m := *ptr.offset(u.v1extensions).toOldExtensions()
+ b, err = u.appendV1Extensions(b, m, deterministic)
+ if err != nil {
+ return b, err
+ }
+ }
+ for _, f := range u.fields {
+ if f.required {
+ if ptr.offset(f.field).getPointer().isNil() {
+ // Required field is not set.
+ // We record the error but keep going, to give a complete marshaling.
+ if errLater == nil {
+ errLater = &RequiredNotSetError{f.name}
+ }
+ continue
+ }
+ }
+ if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+ // nil pointer always marshals to nothing
+ continue
+ }
+ b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic)
+ if err != nil {
+ if err1, ok := err.(*RequiredNotSetError); ok {
+ // Required field in submessage is not set.
+ // We record the error but keep going, to give a complete marshaling.
+ if errLater == nil {
+ errLater = &RequiredNotSetError{f.name + "." + err1.field}
+ }
+ continue
+ }
+ if err == errRepeatedHasNil {
+ err = errors.New("proto: repeated field " + f.name + " has nil element")
+ }
+ if err == errInvalidUTF8 {
+ if errLater == nil {
+ fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+ errLater = &invalidUTF8Error{fullName}
+ }
+ continue
+ }
+ return b, err
+ }
+ }
+ if u.unrecognized.IsValid() {
+ s := *ptr.offset(u.unrecognized).toBytes()
+ b = append(b, s...)
+ }
+ return b, errLater
+}
+
+// computeMarshalInfo initializes the marshal info.
+func (u *marshalInfo) computeMarshalInfo() {
+ u.Lock()
+ defer u.Unlock()
+ if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock
+ return
+ }
+
+ t := u.typ
+ u.unrecognized = invalidField
+ u.extensions = invalidField
+ u.v1extensions = invalidField
+ u.sizecache = invalidField
+
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ if reflect.PtrTo(t).Implements(marshalerType) {
+ u.hasmarshaler = true
+ atomic.StoreInt32(&u.initialized, 1)
+ return
+ }
+
+ // get oneof implementers
+ var oneofImplementers []interface{}
+ if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
+ _, _, _, oneofImplementers = m.XXX_OneofFuncs()
+ }
+
+ n := t.NumField()
+
+ // deal with XXX fields first
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if !strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ switch f.Name {
+ case "XXX_sizecache":
+ u.sizecache = toField(&f)
+ case "XXX_unrecognized":
+ u.unrecognized = toField(&f)
+ case "XXX_InternalExtensions":
+ u.extensions = toField(&f)
+ u.messageset = f.Tag.Get("protobuf_messageset") == "1"
+ case "XXX_extensions":
+ u.v1extensions = toField(&f)
+ case "XXX_NoUnkeyedLiteral":
+ // nothing to do
+ default:
+ panic("unknown XXX field: " + f.Name)
+ }
+ n--
+ }
+
+ // normal fields
+ fields := make([]marshalFieldInfo, n) // batch allocation
+ u.fields = make([]*marshalFieldInfo, 0, n)
+ for i, j := 0, 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ field := &fields[j]
+ j++
+ field.name = f.Name
+ u.fields = append(u.fields, field)
+ if f.Tag.Get("protobuf_oneof") != "" {
+ field.computeOneofFieldInfo(&f, oneofImplementers)
+ continue
+ }
+ if f.Tag.Get("protobuf") == "" {
+ // field has no tag (not in generated message), ignore it
+ u.fields = u.fields[:len(u.fields)-1]
+ j--
+ continue
+ }
+ field.computeMarshalFieldInfo(&f)
+ }
+
+ // fields are marshaled in tag order on the wire.
+ sort.Sort(byTag(u.fields))
+
+ atomic.StoreInt32(&u.initialized, 1)
+}
+
+// helper for sorting fields by tag
+type byTag []*marshalFieldInfo
+
+func (a byTag) Len() int { return len(a) }
+func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag }
+
+// getExtElemInfo returns the information to marshal an extension element.
+// The info it returns is initialized.
+func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
+ // get from cache first
+ u.RLock()
+ e, ok := u.extElems[desc.Field]
+ u.RUnlock()
+ if ok {
+ return e
+ }
+
+ t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct
+ tags := strings.Split(desc.Tag, ",")
+ tag, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("tag is not an integer")
+ }
+ wt := wiretype(tags[0])
+ sizer, marshaler := typeMarshaler(t, tags, false, false)
+ e = &marshalElemInfo{
+ wiretag: uint64(tag)<<3 | wt,
+ tagsize: SizeVarint(uint64(tag) << 3),
+ sizer: sizer,
+ marshaler: marshaler,
+ isptr: t.Kind() == reflect.Ptr,
+ }
+
+ // update cache
+ u.Lock()
+ if u.extElems == nil {
+ u.extElems = make(map[int32]*marshalElemInfo)
+ }
+ u.extElems[desc.Field] = e
+ u.Unlock()
+ return e
+}
+
+// computeMarshalFieldInfo fills up the information to marshal a field.
+func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
+ // parse protobuf tag of the field.
+ // tag has format of "bytes,49,opt,name=foo,def=hello!"
+ tags := strings.Split(f.Tag.Get("protobuf"), ",")
+ if tags[0] == "" {
+ return
+ }
+ tag, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("tag is not an integer")
+ }
+ wt := wiretype(tags[0])
+ if tags[2] == "req" {
+ fi.required = true
+ }
+ fi.setTag(f, tag, wt)
+ fi.setMarshaler(f, tags)
+}
+
+func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
+ fi.field = toField(f)
+ fi.wiretag = 1<<31 - 1 // Use a large tag number so oneofs sort at the end. This tag will not appear on the wire.
+ fi.isPointer = true
+ fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
+ fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
+
+ ityp := f.Type // interface type
+ for _, o := range oneofImplementers {
+ t := reflect.TypeOf(o)
+ if !t.Implements(ityp) {
+ continue
+ }
+ sf := t.Elem().Field(0) // oneof implementer is a struct with a single field
+ tags := strings.Split(sf.Tag.Get("protobuf"), ",")
+ tag, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("tag is not an integer")
+ }
+ wt := wiretype(tags[0])
+ sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value
+ fi.oneofElems[t.Elem()] = &marshalElemInfo{
+ wiretag: uint64(tag)<<3 | wt,
+ tagsize: SizeVarint(uint64(tag) << 3),
+ sizer: sizer,
+ marshaler: marshaler,
+ }
+ }
+}
+
+type oneofMessage interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+}
+
+// wiretype returns the wire encoding of the type.
+func wiretype(encoding string) uint64 {
+ switch encoding {
+ case "fixed32":
+ return WireFixed32
+ case "fixed64":
+ return WireFixed64
+ case "varint", "zigzag32", "zigzag64":
+ return WireVarint
+ case "bytes":
+ return WireBytes
+ case "group":
+ return WireStartGroup
+ }
+ panic("unknown wire type " + encoding)
+}
+
+// setTag fills up the tag (in wire format) and its size in the info of a field.
+func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) {
+ fi.field = toField(f)
+ fi.wiretag = uint64(tag)<<3 | wt
+ fi.tagsize = SizeVarint(uint64(tag) << 3)
+}
+
+// setMarshaler fills up the sizer and marshaler in the info of a field.
+func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) {
+ switch f.Type.Kind() {
+ case reflect.Map:
+ // map field
+ fi.isPointer = true
+ fi.sizer, fi.marshaler = makeMapMarshaler(f)
+ return
+ case reflect.Ptr, reflect.Slice:
+ fi.isPointer = true
+ }
+ fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false)
+}
+
+// typeMarshaler returns the sizer and marshaler of a given field.
+// t is the type of the field.
+// tags is the generated "protobuf" tag of the field.
+// If nozero is true, zero value is not marshaled to the wire.
+// If oneof is true, it is a oneof field.
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) {
+ encoding := tags[0]
+
+ pointer := false
+ slice := false
+ if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+ slice = true
+ t = t.Elem()
+ }
+ if t.Kind() == reflect.Ptr {
+ pointer = true
+ t = t.Elem()
+ }
+
+ packed := false
+ proto3 := false
+ validateUTF8 := true
+ for i := 2; i < len(tags); i++ {
+ if tags[i] == "packed" {
+ packed = true
+ }
+ if tags[i] == "proto3" {
+ proto3 = true
+ }
+ }
+ validateUTF8 = validateUTF8 && proto3
+
+ switch t.Kind() {
+ case reflect.Bool:
+ if pointer {
+ return sizeBoolPtr, appendBoolPtr
+ }
+ if slice {
+ if packed {
+ return sizeBoolPackedSlice, appendBoolPackedSlice
+ }
+ return sizeBoolSlice, appendBoolSlice
+ }
+ if nozero {
+ return sizeBoolValueNoZero, appendBoolValueNoZero
+ }
+ return sizeBoolValue, appendBoolValue
+ case reflect.Uint32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return sizeFixed32Ptr, appendFixed32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixed32PackedSlice, appendFixed32PackedSlice
+ }
+ return sizeFixed32Slice, appendFixed32Slice
+ }
+ if nozero {
+ return sizeFixed32ValueNoZero, appendFixed32ValueNoZero
+ }
+ return sizeFixed32Value, appendFixed32Value
+ case "varint":
+ if pointer {
+ return sizeVarint32Ptr, appendVarint32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarint32PackedSlice, appendVarint32PackedSlice
+ }
+ return sizeVarint32Slice, appendVarint32Slice
+ }
+ if nozero {
+ return sizeVarint32ValueNoZero, appendVarint32ValueNoZero
+ }
+ return sizeVarint32Value, appendVarint32Value
+ }
+ case reflect.Int32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return sizeFixedS32Ptr, appendFixedS32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixedS32PackedSlice, appendFixedS32PackedSlice
+ }
+ return sizeFixedS32Slice, appendFixedS32Slice
+ }
+ if nozero {
+ return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero
+ }
+ return sizeFixedS32Value, appendFixedS32Value
+ case "varint":
+ if pointer {
+ return sizeVarintS32Ptr, appendVarintS32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarintS32PackedSlice, appendVarintS32PackedSlice
+ }
+ return sizeVarintS32Slice, appendVarintS32Slice
+ }
+ if nozero {
+ return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero
+ }
+ return sizeVarintS32Value, appendVarintS32Value
+ case "zigzag32":
+ if pointer {
+ return sizeZigzag32Ptr, appendZigzag32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeZigzag32PackedSlice, appendZigzag32PackedSlice
+ }
+ return sizeZigzag32Slice, appendZigzag32Slice
+ }
+ if nozero {
+ return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero
+ }
+ return sizeZigzag32Value, appendZigzag32Value
+ }
+ case reflect.Uint64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return sizeFixed64Ptr, appendFixed64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixed64PackedSlice, appendFixed64PackedSlice
+ }
+ return sizeFixed64Slice, appendFixed64Slice
+ }
+ if nozero {
+ return sizeFixed64ValueNoZero, appendFixed64ValueNoZero
+ }
+ return sizeFixed64Value, appendFixed64Value
+ case "varint":
+ if pointer {
+ return sizeVarint64Ptr, appendVarint64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarint64PackedSlice, appendVarint64PackedSlice
+ }
+ return sizeVarint64Slice, appendVarint64Slice
+ }
+ if nozero {
+ return sizeVarint64ValueNoZero, appendVarint64ValueNoZero
+ }
+ return sizeVarint64Value, appendVarint64Value
+ }
+ case reflect.Int64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return sizeFixedS64Ptr, appendFixedS64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixedS64PackedSlice, appendFixedS64PackedSlice
+ }
+ return sizeFixedS64Slice, appendFixedS64Slice
+ }
+ if nozero {
+ return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero
+ }
+ return sizeFixedS64Value, appendFixedS64Value
+ case "varint":
+ if pointer {
+ return sizeVarintS64Ptr, appendVarintS64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarintS64PackedSlice, appendVarintS64PackedSlice
+ }
+ return sizeVarintS64Slice, appendVarintS64Slice
+ }
+ if nozero {
+ return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero
+ }
+ return sizeVarintS64Value, appendVarintS64Value
+ case "zigzag64":
+ if pointer {
+ return sizeZigzag64Ptr, appendZigzag64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeZigzag64PackedSlice, appendZigzag64PackedSlice
+ }
+ return sizeZigzag64Slice, appendZigzag64Slice
+ }
+ if nozero {
+ return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero
+ }
+ return sizeZigzag64Value, appendZigzag64Value
+ }
+ case reflect.Float32:
+ if pointer {
+ return sizeFloat32Ptr, appendFloat32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFloat32PackedSlice, appendFloat32PackedSlice
+ }
+ return sizeFloat32Slice, appendFloat32Slice
+ }
+ if nozero {
+ return sizeFloat32ValueNoZero, appendFloat32ValueNoZero
+ }
+ return sizeFloat32Value, appendFloat32Value
+ case reflect.Float64:
+ if pointer {
+ return sizeFloat64Ptr, appendFloat64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFloat64PackedSlice, appendFloat64PackedSlice
+ }
+ return sizeFloat64Slice, appendFloat64Slice
+ }
+ if nozero {
+ return sizeFloat64ValueNoZero, appendFloat64ValueNoZero
+ }
+ return sizeFloat64Value, appendFloat64Value
+ case reflect.String:
+ if validateUTF8 {
+ if pointer {
+ return sizeStringPtr, appendUTF8StringPtr
+ }
+ if slice {
+ return sizeStringSlice, appendUTF8StringSlice
+ }
+ if nozero {
+ return sizeStringValueNoZero, appendUTF8StringValueNoZero
+ }
+ return sizeStringValue, appendUTF8StringValue
+ }
+ if pointer {
+ return sizeStringPtr, appendStringPtr
+ }
+ if slice {
+ return sizeStringSlice, appendStringSlice
+ }
+ if nozero {
+ return sizeStringValueNoZero, appendStringValueNoZero
+ }
+ return sizeStringValue, appendStringValue
+ case reflect.Slice:
+ if slice {
+ return sizeBytesSlice, appendBytesSlice
+ }
+ if oneof {
+ // Oneof bytes field may also have "proto3" tag.
+ // We want to marshal it as a oneof field. Do this
+ // check before the proto3 check.
+ return sizeBytesOneof, appendBytesOneof
+ }
+ if proto3 {
+ return sizeBytes3, appendBytes3
+ }
+ return sizeBytes, appendBytes
+ case reflect.Struct:
+ switch encoding {
+ case "group":
+ if slice {
+ return makeGroupSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeGroupMarshaler(getMarshalInfo(t))
+ case "bytes":
+ if slice {
+ return makeMessageSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeMessageMarshaler(getMarshalInfo(t))
+ }
+ }
+ panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding))
+}
+
+// Below are functions to size/marshal a specific type of a field.
+// They are stored in the field's info, and called by function pointers.
+// They have type sizer or marshaler.
+
+func sizeFixed32Value(_ pointer, tagsize int) int {
+ return 4 + tagsize
+}
+func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixed32Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixed32Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ return (4 + tagsize) * len(s)
+}
+func sizeFixed32PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFixedS32Value(_ pointer, tagsize int) int {
+ return 4 + tagsize
+}
+func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixedS32Ptr(ptr pointer, tagsize int) int {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixedS32Slice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ return (4 + tagsize) * len(s)
+}
+func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFloat32Value(_ pointer, tagsize int) int {
+ return 4 + tagsize
+}
+func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int {
+ v := math.Float32bits(*ptr.toFloat32())
+ if v == 0 {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFloat32Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toFloat32Ptr()
+ if p == nil {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFloat32Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat32Slice()
+ return (4 + tagsize) * len(s)
+}
+func sizeFloat32PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFixed64Value(_ pointer, tagsize int) int {
+ return 8 + tagsize
+}
+func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixed64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixed64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ return (8 + tagsize) * len(s)
+}
+func sizeFixed64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFixedS64Value(_ pointer, tagsize int) int {
+ return 8 + tagsize
+}
+func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixedS64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixedS64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ return (8 + tagsize) * len(s)
+}
+func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFloat64Value(_ pointer, tagsize int) int {
+ return 8 + tagsize
+}
+func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int {
+ v := math.Float64bits(*ptr.toFloat64())
+ if v == 0 {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFloat64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toFloat64Ptr()
+ if p == nil {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFloat64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat64Slice()
+ return (8 + tagsize) * len(s)
+}
+func sizeFloat64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeVarint32Value(ptr pointer, tagsize int) int {
+ v := *ptr.toUint32()
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarint32Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v)) + tagsize
+ }
+ return n
+}
+func sizeVarint32PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS32Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32Ptr(ptr pointer, tagsize int) int {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS32Slice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v)) + tagsize
+ }
+ return n
+}
+func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarint64Value(ptr pointer, tagsize int) int {
+ v := *ptr.toUint64()
+ return SizeVarint(v) + tagsize
+}
+func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(v) + tagsize
+}
+func sizeVarint64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(*p) + tagsize
+}
+func sizeVarint64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(v) + tagsize
+ }
+ return n
+}
+func sizeVarint64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(v)
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS64Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v)) + tagsize
+ }
+ return n
+}
+func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeZigzag32Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Ptr(ptr pointer, tagsize int) int {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return 0
+ }
+ v := *p
+ return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Slice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+ }
+ return n
+}
+func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeZigzag64Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return 0
+ }
+ v := *p
+ return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+ }
+ return n
+}
+func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
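+
+// Worked example of the zigzag mapping used by the sizers above (illustrative
+// only): (v<<1)^(v>>31) for 32-bit values (>>63 for 64-bit) maps 0 -> 0,
+// -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, so small negative values stay small on
+// the wire instead of costing the ten bytes a sign-extended -1 takes as a
+// plain varint.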
+func sizeBoolValue(_ pointer, tagsize int) int {
+ return 1 + tagsize
+}
+func sizeBoolValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toBool()
+ if !v {
+ return 0
+ }
+ return 1 + tagsize
+}
+func sizeBoolPtr(ptr pointer, tagsize int) int {
+ p := *ptr.toBoolPtr()
+ if p == nil {
+ return 0
+ }
+ return 1 + tagsize
+}
+func sizeBoolSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toBoolSlice()
+ return (1 + tagsize) * len(s)
+}
+func sizeBoolPackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toBoolSlice()
+ if len(s) == 0 {
+ return 0
+ }
+ return len(s) + SizeVarint(uint64(len(s))) + tagsize
+}
+func sizeStringValue(ptr pointer, tagsize int) int {
+ v := *ptr.toString()
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toString()
+ if v == "" {
+ return 0
+ }
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringPtr(ptr pointer, tagsize int) int {
+ p := *ptr.toStringPtr()
+ if p == nil {
+ return 0
+ }
+ v := *p
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toStringSlice()
+ n := 0
+ for _, v := range s {
+ n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+ }
+ return n
+}
+func sizeBytes(ptr pointer, tagsize int) int {
+ v := *ptr.toBytes()
+ if v == nil {
+ return 0
+ }
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytes3(ptr pointer, tagsize int) int {
+ v := *ptr.toBytes()
+ if len(v) == 0 {
+ return 0
+ }
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesOneof(ptr pointer, tagsize int) int {
+ v := *ptr.toBytes()
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toBytesSlice()
+ n := 0
+ for _, v := range s {
+ n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+ }
+ return n
+}
+
+// appendFixed32 appends an encoded fixed32 to b.
+func appendFixed32(b []byte, v uint32) []byte {
+ b = append(b,
+ byte(v),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24))
+ return b
+}
+
+// appendFixed64 appends an encoded fixed64 to b.
+func appendFixed64(b []byte, v uint64) []byte {
+ b = append(b,
+ byte(v),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24),
+ byte(v>>32),
+ byte(v>>40),
+ byte(v>>48),
+ byte(v>>56))
+ return b
+}
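+
+// Illustrative byte layout (not part of the vendored file): both fixed widths
+// are little-endian, so appendFixed32(b, 0x01020304) appends the bytes
+// 04 03 02 01, and appendFixed64 does the same over eight bytes.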
+
+// appendVarint appends an encoded varint to b.
+func appendVarint(b []byte, v uint64) []byte {
+ // TODO: make 1-byte (maybe 2-byte) case inline-able, once we
+ // have non-leaf inliner.
+ switch {
+ case v < 1<<7:
+ b = append(b, byte(v))
+ case v < 1<<14:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte(v>>7))
+ case v < 1<<21:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte(v>>14))
+ case v < 1<<28:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte(v>>21))
+ case v < 1<<35:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte(v>>28))
+ case v < 1<<42:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte(v>>35))
+ case v < 1<<49:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte(v>>42))
+ case v < 1<<56:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte(v>>49))
+ case v < 1<<63:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte((v>>49)&0x7f|0x80),
+ byte(v>>56))
+ default:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte((v>>49)&0x7f|0x80),
+ byte((v>>56)&0x7f|0x80),
+ 1)
+ }
+ return b
+}
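+
+// Worked example for the cases above (illustrative only): v = 300 satisfies
+// v < 1<<14, so it is appended as two bytes: byte(300&0x7f|0x80) = 0xac
+// followed by byte(300>>7) = 0x02.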
+
+func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, *p)
+ return b, nil
+}
+func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ }
+ return b, nil
+}
+func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(4*len(s)))
+ for _, v := range s {
+ b = appendFixed32(b, v)
+ }
+ return b, nil
+}
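+
+// Illustrative note on the packed form above (not part of the vendored file):
+// wiretag&^7|WireBytes rewrites the key to wire type 2, so a packed repeated
+// fixed32 field is emitted as the key, the total payload length 4*len(s) as a
+// varint, and then the raw little-endian values back to back.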
+func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(v))
+ return b, nil
+}
+func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(v))
+ return b, nil
+}
+func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(*p))
+ return b, nil
+}
+func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(v))
+ }
+ return b, nil
+}
+func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(4*len(s)))
+ for _, v := range s {
+ b = appendFixed32(b, uint32(v))
+ }
+ return b, nil
+}
+func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float32bits(*ptr.toFloat32())
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float32bits(*ptr.toFloat32())
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toFloat32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, math.Float32bits(*p))
+ return b, nil
+}
+func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, math.Float32bits(v))
+ }
+ return b, nil
+}
+func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(4*len(s)))
+ for _, v := range s {
+ b = appendFixed32(b, math.Float32bits(v))
+ }
+ return b, nil
+}
+func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, *p)
+ return b, nil
+}
+func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ }
+ return b, nil
+}
+func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(8*len(s)))
+ for _, v := range s {
+ b = appendFixed64(b, v)
+ }
+ return b, nil
+}
+func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(v))
+ return b, nil
+}
+func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(v))
+ return b, nil
+}
+func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(*p))
+ return b, nil
+}
+func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(v))
+ }
+ return b, nil
+}
+func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(8*len(s)))
+ for _, v := range s {
+ b = appendFixed64(b, uint64(v))
+ }
+ return b, nil
+}
+func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float64bits(*ptr.toFloat64())
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float64bits(*ptr.toFloat64())
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toFloat64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, math.Float64bits(*p))
+ return b, nil
+}
+func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, math.Float64bits(v))
+ }
+ return b, nil
+}
+func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(8*len(s)))
+ for _, v := range s {
+ b = appendFixed64(b, math.Float64bits(v))
+ }
+ return b, nil
+}
+func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(*p))
+ return b, nil
+}
+func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(*p))
+ return b, nil
+}
+func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, v)
+ return b, nil
+}
+func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, v)
+ return b, nil
+}
+func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, *p)
+ return b, nil
+}
+func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, v)
+ }
+ return b, nil
+}
+func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(v)
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, v)
+ }
+ return b, nil
+}
+func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(*p))
+ return b, nil
+}
+func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ return b, nil
+}
+func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ return b, nil
+}
+func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ v := *p
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ return b, nil
+}
+func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ }
+ return b, nil
+}
+func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ }
+ return b, nil
+}
+func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ return b, nil
+}
+func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ return b, nil
+}
+func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ v := *p
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ return b, nil
+}
+func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ }
+ return b, nil
+}
+func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ }
+ return b, nil
+}
+func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBool()
+ b = appendVarint(b, wiretag)
+ if v {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ return b, nil
+}
+func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBool()
+ if !v {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = append(b, 1)
+ return b, nil
+}
+
+func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toBoolPtr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ if *p {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ return b, nil
+}
+func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toBoolSlice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ if v {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ }
+ return b, nil
+}
+func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toBoolSlice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(len(s)))
+ for _, v := range s {
+ if v {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ }
+ return b, nil
+}
+func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toString()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toString()
+ if v == "" {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toStringPtr()
+ if p == nil {
+ return b, nil
+ }
+ v := *p
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toStringSlice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ }
+ return b, nil
+}
+func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
+ v := *ptr.toString()
+ if !utf8.ValidString(v) {
+ invalidUTF8 = true
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
+ return b, nil
+}
+func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
+ v := *ptr.toString()
+ if v == "" {
+ return b, nil
+ }
+ if !utf8.ValidString(v) {
+ invalidUTF8 = true
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
+ return b, nil
+}
+func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
+ p := *ptr.toStringPtr()
+ if p == nil {
+ return b, nil
+ }
+ v := *p
+ if !utf8.ValidString(v) {
+ invalidUTF8 = true
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
+ return b, nil
+}
+func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
+ s := *ptr.toStringSlice()
+ for _, v := range s {
+ if !utf8.ValidString(v) {
+ invalidUTF8 = true
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ }
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
+ return b, nil
+}
+func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBytes()
+ if v == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBytes()
+ if len(v) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBytes()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toBytesSlice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ }
+ return b, nil
+}
+
+// makeGroupMarshaler returns the sizer and marshaler for a group.
+// u is the marshal info of the underlying message.
+func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return 0
+ }
+ return u.size(p) + 2*tagsize
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return b, nil
+ }
+ var err error
+ b = appendVarint(b, wiretag) // start group
+ b, err = u.marshal(b, p, deterministic)
+ b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+ return b, err
+ }
+}
+
+// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice.
+// u is the marshal info of the underlying message.
+func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getPointerSlice()
+ n := 0
+ for _, v := range s {
+ if v.isNil() {
+ continue
+ }
+ n += u.size(v) + 2*tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getPointerSlice()
+ var err error
+ var nerr nonFatal
+ for _, v := range s {
+ if v.isNil() {
+ return b, errRepeatedHasNil
+ }
+ b = appendVarint(b, wiretag) // start group
+ b, err = u.marshal(b, v, deterministic)
+ b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+ if !nerr.Merge(err) {
+ if err == ErrNil {
+ err = errRepeatedHasNil
+ }
+ return b, err
+ }
+ }
+ return b, nerr.E
+ }
+}
+
+// makeMessageMarshaler returns the sizer and marshaler for a message field.
+// u is the marshal info of the message.
+func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return 0
+ }
+ siz := u.size(p)
+ return siz + SizeVarint(uint64(siz)) + tagsize
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ siz := u.cachedsize(p)
+ b = appendVarint(b, uint64(siz))
+ return u.marshal(b, p, deterministic)
+ }
+}
+
+// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice.
+// u is the marshal info of the message.
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getPointerSlice()
+ n := 0
+ for _, v := range s {
+ if v.isNil() {
+ continue
+ }
+ siz := u.size(v)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getPointerSlice()
+ var err error
+ var nerr nonFatal
+ for _, v := range s {
+ if v.isNil() {
+ return b, errRepeatedHasNil
+ }
+ b = appendVarint(b, wiretag)
+ siz := u.cachedsize(v)
+ b = appendVarint(b, uint64(siz))
+ b, err = u.marshal(b, v, deterministic)
+
+ if !nerr.Merge(err) {
+ if err == ErrNil {
+ err = errRepeatedHasNil
+ }
+ return b, err
+ }
+ }
+ return b, nerr.E
+ }
+}
+
+// makeMapMarshaler returns the sizer and marshaler for a map field.
+// f is the pointer to the reflect data structure of the field.
+func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
+ // figure out key and value type
+ t := f.Type
+ keyType := t.Key()
+ valType := t.Elem()
+ keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",")
+ valTags := strings.Split(f.Tag.Get("protobuf_val"), ",")
+ keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map
+ valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map
+ keyWireTag := 1<<3 | wiretype(keyTags[0])
+ valWireTag := 2<<3 | wiretype(valTags[0])
+
+ // We create an interface to get the addresses of the map key and value.
+ // If value is pointer-typed, the interface is a direct interface, the
+ // idata itself is the value. Otherwise, the idata is the pointer to the
+ // value.
+ // Key cannot be pointer-typed.
+ valIsPtr := valType.Kind() == reflect.Ptr
+
+ // If value is a message with nested maps, calling
+ // valSizer in marshal may be quadratic. We should use
+ // cached version in marshal (but not in size).
+ // If value is not message type, we don't have size cache,
+ // but it cannot be nested either. Just use valSizer.
+ valCachedSizer := valSizer
+ if valIsPtr && valType.Elem().Kind() == reflect.Struct {
+ u := getMarshalInfo(valType.Elem())
+ valCachedSizer = func(ptr pointer, tagsize int) int {
+ // Same as message sizer, but use cache.
+ p := ptr.getPointer()
+ if p.isNil() {
+ return 0
+ }
+ siz := u.cachedsize(p)
+ return siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ }
+ return func(ptr pointer, tagsize int) int {
+ m := ptr.asPointerTo(t).Elem() // the map
+ n := 0
+ for _, k := range m.MapKeys() {
+ ki := k.Interface()
+ vi := m.MapIndex(k).Interface()
+ kaddr := toAddrPointer(&ki, false) // pointer to key
+ vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
+ siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) {
+ m := ptr.asPointerTo(t).Elem() // the map
+ var err error
+ keys := m.MapKeys()
+ if len(keys) > 1 && deterministic {
+ sort.Sort(mapKeys(keys))
+ }
+
+ var nerr nonFatal
+ for _, k := range keys {
+ ki := k.Interface()
+ vi := m.MapIndex(k).Interface()
+ kaddr := toAddrPointer(&ki, false) // pointer to key
+ vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
+ b = appendVarint(b, tag)
+ siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+ b = appendVarint(b, uint64(siz))
+ b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ b, err = valMarshaler(b, vaddr, valWireTag, deterministic)
+ if err != ErrNil && !nerr.Merge(err) { // allow nil value in map
+ return b, err
+ }
+ }
+ return b, nerr.E
+ }
+}
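+
+// Illustrative encoding (not part of the vendored file): a map<string,int32>
+// entry {"a": 7} is emitted as one length-delimited entry whose body is a
+// pseudo-message with the key as field 1 (0x0a 0x01 0x61) and the value as
+// field 2 (0x10 0x07), matching keyWireTag and valWireTag above.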
+
+// makeOneOfMarshaler returns the sizer and marshaler for a oneof field.
+// fi is the marshal info of the field.
+// f is the pointer to the reflect data structure of the field.
+func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) {
+ // Oneof field is an interface. We need to get the actual data type on the fly.
+ t := f.Type
+ return func(ptr pointer, _ int) int {
+ p := ptr.getInterfacePointer()
+ if p.isNil() {
+ return 0
+ }
+ v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+ telem := v.Type()
+ e := fi.oneofElems[telem]
+ return e.sizer(p, e.tagsize)
+ },
+ func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) {
+ p := ptr.getInterfacePointer()
+ if p.isNil() {
+ return b, nil
+ }
+ v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+ telem := v.Type()
+ if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() {
+ return b, errOneofHasNil
+ }
+ e := fi.oneofElems[telem]
+ return e.marshaler(b, p, e.wiretag, deterministic)
+ }
+}
+
+// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field.
+func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return 0
+ }
+ mu.Lock()
+
+ n := 0
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ n += ei.sizer(p, ei.tagsize)
+ }
+ mu.Unlock()
+ return n
+}
+
+// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b.
+func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return b, nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+
+ var err error
+ var nerr nonFatal
+
+ // Fast-path for common cases: zero or one extensions.
+ // Don't bother sorting the keys.
+ if len(m) <= 1 {
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ b = append(b, e.enc...)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ }
+ return b, nerr.E
+ }
+
+ // Sort the keys to provide a deterministic encoding.
+ // Not sure this is required, but the old code does it.
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, k := range keys {
+ e := m[int32(k)]
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ b = append(b, e.enc...)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ }
+ return b, nerr.E
+}
+
+// message set format is:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
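+//
+// As a worked illustration (not from the vendored file), one encoded Item is:
+//   0x0b            start-group key (field 1)
+//   0x10 <type_id>  type_id as a varint (field 2)
+//   0x1a <len> ...  the length-delimited message (field 3)
+//   0x0c            end-group key (field 1)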
+
+// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field
+// in message set format (above).
+func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return 0
+ }
+ mu.Lock()
+
+ n := 0
+ for id, e := range m {
+ n += 2 // start group, end group. tag = 1 (size=1)
+ n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1)
+
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+ siz := len(msgWithLen)
+ n += siz + 1 // message, tag = 3 (size=1)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ n += ei.sizer(p, 1) // message, tag = 3 (size=1)
+ }
+ mu.Unlock()
+ return n
+}
+
+// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above)
+// to the end of byte slice b.
+func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return b, nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+
+ var err error
+ var nerr nonFatal
+
+ // Fast-path for common cases: zero or one extensions.
+ // Don't bother sorting the keys.
+ if len(m) <= 1 {
+ for id, e := range m {
+ b = append(b, 1<<3|WireStartGroup)
+ b = append(b, 2<<3|WireVarint)
+ b = appendVarint(b, uint64(id))
+
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+ b = append(b, 3<<3|WireBytes)
+ b = append(b, msgWithLen...)
+ b = append(b, 1<<3|WireEndGroup)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ b = append(b, 1<<3|WireEndGroup)
+ }
+ return b, nerr.E
+ }
+
+ // Sort the keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, id := range keys {
+ e := m[int32(id)]
+ b = append(b, 1<<3|WireStartGroup)
+ b = append(b, 2<<3|WireVarint)
+ b = appendVarint(b, uint64(id))
+
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+ b = append(b, 3<<3|WireBytes)
+ b = append(b, msgWithLen...)
+ b = append(b, 1<<3|WireEndGroup)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+ b = append(b, 1<<3|WireEndGroup)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ }
+ return b, nerr.E
+}
+
+// sizeV1Extensions computes the size of encoded data for a V1-API extension field.
+func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
+ if m == nil {
+ return 0
+ }
+
+ n := 0
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ n += ei.sizer(p, ei.tagsize)
+ }
+ return n
+}
+
+// appendV1Extensions marshals a V1-API extension field to the end of byte slice b.
+func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) {
+ if m == nil {
+ return b, nil
+ }
+
+ // Sort the keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ var err error
+ var nerr nonFatal
+ for _, k := range keys {
+ e := m[int32(k)]
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ b = append(b, e.enc...)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ }
+ return b, nerr.E
+}
+
+// newMarshaler is the interface representing objects that can marshal themselves.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newMarshaler interface {
+ XXX_Size() int
+ XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
+}
+
+// Size returns the encoded size of a protocol buffer message.
+// This is the main entry point.
+func Size(pb Message) int {
+ if m, ok := pb.(newMarshaler); ok {
+ return m.XXX_Size()
+ }
+ if m, ok := pb.(Marshaler); ok {
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ b, _ := m.Marshal()
+ return len(b)
+ }
+ // in case somehow we didn't generate the wrapper
+ if pb == nil {
+ return 0
+ }
+ var info InternalMessageInfo
+ return info.Size(pb)
+}
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, returning the data.
+// This is the main entry point.
+func Marshal(pb Message) ([]byte, error) {
+ if m, ok := pb.(newMarshaler); ok {
+ siz := m.XXX_Size()
+ b := make([]byte, 0, siz)
+ return m.XXX_Marshal(b, false)
+ }
+ if m, ok := pb.(Marshaler); ok {
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ return m.Marshal()
+ }
+ // in case somehow we didn't generate the wrapper
+ if pb == nil {
+ return nil, ErrNil
+ }
+ var info InternalMessageInfo
+ siz := info.Size(pb)
+ b := make([]byte, 0, siz)
+ return info.Marshal(b, pb, false)
+}
+
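Marshal and Size above dispatch in order: generated XXX_Marshal/XXX_Size, then a legacy self-marshaling Marshaler, then the reflection-based table marshaler. A hedged usage sketch that exercises the Marshaler branch; rawMsg is an illustrative type of ours, not something generated code produces:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// rawMsg implements proto.Message and proto.Marshaler, so proto.Marshal
// takes the "message can marshal itself" branch rather than the
// table-driven path used for generated messages.
type rawMsg struct{ wire []byte }

func (m *rawMsg) Reset()                   { m.wire = nil }
func (m *rawMsg) String() string           { return fmt.Sprintf("raw:%x", m.wire) }
func (m *rawMsg) ProtoMessage()            {}
func (m *rawMsg) Marshal() ([]byte, error) { return m.wire, nil }

func main() {
	m := &rawMsg{wire: []byte{0x08, 0x96, 0x01}} // field 1, varint 150
	b, err := proto.Marshal(m)
	fmt.Println(b, err)        // [8 150 1] <nil>
	fmt.Println(proto.Size(m)) // 3
}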
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+// This is an alternative entry point. It is not necessary to use
+// a Buffer for most applications.
+func (p *Buffer) Marshal(pb Message) error {
+ var err error
+ if m, ok := pb.(newMarshaler); ok {
+ siz := m.XXX_Size()
+ p.grow(siz) // make sure buf has enough capacity
+ p.buf, err = m.XXX_Marshal(p.buf, p.deterministic)
+ return err
+ }
+ if m, ok := pb.(Marshaler); ok {
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ b, err := m.Marshal()
+ p.buf = append(p.buf, b...)
+ return err
+ }
+ // in case somehow we didn't generate the wrapper
+ if pb == nil {
+ return ErrNil
+ }
+ var info InternalMessageInfo
+ siz := info.Size(pb)
+ p.grow(siz) // make sure buf has enough capacity
+ p.buf, err = info.Marshal(p.buf, pb, p.deterministic)
+ return err
+}
+
+// grow grows the buffer's capacity, if necessary, to guarantee space for
+// another n bytes. After grow(n), at least n bytes can be written to the
+// buffer without another allocation.
+func (p *Buffer) grow(n int) {
+ need := len(p.buf) + n
+ if need <= cap(p.buf) {
+ return
+ }
+ newCap := len(p.buf) * 2
+ if newCap < need {
+ newCap = need
+ }
+ p.buf = append(make([]byte, 0, newCap), p.buf...)
+}
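grow keeps appends amortized by at least doubling the backing array whenever more room is needed. A standalone sketch of the same policy, assuming nothing beyond the logic shown above (growBuf is our name):

package main

import "fmt"

// growBuf mirrors (*Buffer).grow: keep the existing bytes and reallocate to
// max(2*len, len+n) capacity only when the current capacity is too small.
func growBuf(buf []byte, n int) []byte {
	need := len(buf) + n
	if need <= cap(buf) {
		return buf
	}
	newCap := len(buf) * 2
	if newCap < need {
		newCap = need
	}
	return append(make([]byte, 0, newCap), buf...)
}

func main() {
	var b []byte
	for i := 0; i < 6; i++ {
		b = growBuf(b, 3)
		b = append(b, 1, 2, 3)
		fmt.Println(len(b), cap(b)) // capacity grows geometrically
	}
}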
diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go
new file mode 100644
index 00000000..5525def6
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_merge.go
@@ -0,0 +1,654 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "sync/atomic"
+)
+
+// Merge merges the src message into dst.
+// This assumes that dst and src are of the same type and are non-nil.
+func (a *InternalMessageInfo) Merge(dst, src Message) {
+ mi := atomicLoadMergeInfo(&a.merge)
+ if mi == nil {
+ mi = getMergeInfo(reflect.TypeOf(dst).Elem())
+ atomicStoreMergeInfo(&a.merge, mi)
+ }
+ mi.merge(toPointer(&dst), toPointer(&src))
+}
+
+type mergeInfo struct {
+ typ reflect.Type
+
+ initialized int32 // 0: only typ is valid, 1: everything is valid
+ lock sync.Mutex
+
+ fields []mergeFieldInfo
+ unrecognized field // Offset of XXX_unrecognized
+}
+
+type mergeFieldInfo struct {
+ field field // Offset of field, guaranteed to be valid
+
+ // isPointer reports whether the value in the field is a pointer.
+ // This is true for the following situations:
+ // * Pointer to struct
+ // * Pointer to basic type (proto2 only)
+ // * Slice (first value in slice header is a pointer)
+ // * String (first value in string header is a pointer)
+ isPointer bool
+
+ // basicWidth reports the width of the field assuming that it is directly
+ // embedded in the struct (as is the case for basic types in proto3).
+ // The possible values are:
+ // 0: invalid
+ // 1: bool
+ // 4: int32, uint32, float32
+ // 8: int64, uint64, float64
+ basicWidth int
+
+ // Where dst and src are pointers to the types being merged.
+ merge func(dst, src pointer)
+}
+
+var (
+ mergeInfoMap = map[reflect.Type]*mergeInfo{}
+ mergeInfoLock sync.Mutex
+)
+
+func getMergeInfo(t reflect.Type) *mergeInfo {
+ mergeInfoLock.Lock()
+ defer mergeInfoLock.Unlock()
+ mi := mergeInfoMap[t]
+ if mi == nil {
+ mi = &mergeInfo{typ: t}
+ mergeInfoMap[t] = mi
+ }
+ return mi
+}
+
+// merge merges src into dst assuming they are both of type *mi.typ.
+func (mi *mergeInfo) merge(dst, src pointer) {
+ if dst.isNil() {
+ panic("proto: nil destination")
+ }
+ if src.isNil() {
+ return // Nothing to do.
+ }
+
+ if atomic.LoadInt32(&mi.initialized) == 0 {
+ mi.computeMergeInfo()
+ }
+
+ for _, fi := range mi.fields {
+ sfp := src.offset(fi.field)
+
+ // As an optimization, we can avoid the merge function call cost
+ // if we know for sure that the source will have no effect
+ // by checking if it is the zero value.
+ if unsafeAllowed {
+ if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
+ continue
+ }
+ if fi.basicWidth > 0 {
+ switch {
+ case fi.basicWidth == 1 && !*sfp.toBool():
+ continue
+ case fi.basicWidth == 4 && *sfp.toUint32() == 0:
+ continue
+ case fi.basicWidth == 8 && *sfp.toUint64() == 0:
+ continue
+ }
+ }
+ }
+
+ dfp := dst.offset(fi.field)
+ fi.merge(dfp, sfp)
+ }
+
+ // TODO: Make this faster?
+ out := dst.asPointerTo(mi.typ).Elem()
+ in := src.asPointerTo(mi.typ).Elem()
+ if emIn, err := extendable(in.Addr().Interface()); err == nil {
+ emOut, _ := extendable(out.Addr().Interface())
+ mIn, muIn := emIn.extensionsRead()
+ if mIn != nil {
+ mOut := emOut.extensionsWrite()
+ muIn.Lock()
+ mergeExtension(mOut, mIn)
+ muIn.Unlock()
+ }
+ }
+
+ if mi.unrecognized.IsValid() {
+ if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
+ *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
+ }
+ }
+}
+
+func (mi *mergeInfo) computeMergeInfo() {
+ mi.lock.Lock()
+ defer mi.lock.Unlock()
+ if mi.initialized != 0 {
+ return
+ }
+ t := mi.typ
+ n := t.NumField()
+
+ props := GetProperties(t)
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+
+ mfi := mergeFieldInfo{field: toField(&f)}
+ tf := f.Type
+
+ // As an optimization, we can avoid the merge function call cost
+ // if we know for sure that the source will have no effect
+ // by checking if it is the zero value.
+ if unsafeAllowed {
+ switch tf.Kind() {
+ case reflect.Ptr, reflect.Slice, reflect.String:
+ // As a special case, we assume slices and strings are pointers
+				// since we know that the first field in the SliceHeader or
+ // StringHeader is a data pointer.
+ mfi.isPointer = true
+ case reflect.Bool:
+ mfi.basicWidth = 1
+ case reflect.Int32, reflect.Uint32, reflect.Float32:
+ mfi.basicWidth = 4
+ case reflect.Int64, reflect.Uint64, reflect.Float64:
+ mfi.basicWidth = 8
+ }
+ }
+
+ // Unwrap tf to get at its most basic type.
+ var isPointer, isSlice bool
+ if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+ isSlice = true
+ tf = tf.Elem()
+ }
+ if tf.Kind() == reflect.Ptr {
+ isPointer = true
+ tf = tf.Elem()
+ }
+ if isPointer && isSlice && tf.Kind() != reflect.Struct {
+ panic("both pointer and slice for basic type in " + tf.Name())
+ }
+
+ switch tf.Kind() {
+ case reflect.Int32:
+ switch {
+ case isSlice: // E.g., []int32
+ mfi.merge = func(dst, src pointer) {
+ // NOTE: toInt32Slice is not defined (see pointer_reflect.go).
+ /*
+ sfsp := src.toInt32Slice()
+ if *sfsp != nil {
+ dfsp := dst.toInt32Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+								*dfsp = []int32{}
+ }
+ }
+ */
+ sfs := src.getInt32Slice()
+ if sfs != nil {
+ dfs := dst.getInt32Slice()
+ dfs = append(dfs, sfs...)
+ if dfs == nil {
+ dfs = []int32{}
+ }
+ dst.setInt32Slice(dfs)
+ }
+ }
+ case isPointer: // E.g., *int32
+ mfi.merge = func(dst, src pointer) {
+ // NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
+ /*
+ sfpp := src.toInt32Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toInt32Ptr()
+ if *dfpp == nil {
+ *dfpp = Int32(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ */
+ sfp := src.getInt32Ptr()
+ if sfp != nil {
+ dfp := dst.getInt32Ptr()
+ if dfp == nil {
+ dst.setInt32Ptr(*sfp)
+ } else {
+ *dfp = *sfp
+ }
+ }
+ }
+ default: // E.g., int32
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toInt32(); v != 0 {
+ *dst.toInt32() = v
+ }
+ }
+ }
+ case reflect.Int64:
+ switch {
+ case isSlice: // E.g., []int64
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toInt64Slice()
+ if *sfsp != nil {
+ dfsp := dst.toInt64Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []int64{}
+ }
+ }
+ }
+ case isPointer: // E.g., *int64
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toInt64Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toInt64Ptr()
+ if *dfpp == nil {
+ *dfpp = Int64(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., int64
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toInt64(); v != 0 {
+ *dst.toInt64() = v
+ }
+ }
+ }
+ case reflect.Uint32:
+ switch {
+ case isSlice: // E.g., []uint32
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toUint32Slice()
+ if *sfsp != nil {
+ dfsp := dst.toUint32Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []uint32{}
+ }
+ }
+ }
+ case isPointer: // E.g., *uint32
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toUint32Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toUint32Ptr()
+ if *dfpp == nil {
+ *dfpp = Uint32(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., uint32
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toUint32(); v != 0 {
+ *dst.toUint32() = v
+ }
+ }
+ }
+ case reflect.Uint64:
+ switch {
+ case isSlice: // E.g., []uint64
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toUint64Slice()
+ if *sfsp != nil {
+ dfsp := dst.toUint64Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []uint64{}
+ }
+ }
+ }
+ case isPointer: // E.g., *uint64
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toUint64Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toUint64Ptr()
+ if *dfpp == nil {
+ *dfpp = Uint64(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., uint64
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toUint64(); v != 0 {
+ *dst.toUint64() = v
+ }
+ }
+ }
+ case reflect.Float32:
+ switch {
+ case isSlice: // E.g., []float32
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toFloat32Slice()
+ if *sfsp != nil {
+ dfsp := dst.toFloat32Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []float32{}
+ }
+ }
+ }
+ case isPointer: // E.g., *float32
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toFloat32Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toFloat32Ptr()
+ if *dfpp == nil {
+ *dfpp = Float32(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., float32
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toFloat32(); v != 0 {
+ *dst.toFloat32() = v
+ }
+ }
+ }
+ case reflect.Float64:
+ switch {
+ case isSlice: // E.g., []float64
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toFloat64Slice()
+ if *sfsp != nil {
+ dfsp := dst.toFloat64Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []float64{}
+ }
+ }
+ }
+ case isPointer: // E.g., *float64
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toFloat64Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toFloat64Ptr()
+ if *dfpp == nil {
+ *dfpp = Float64(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., float64
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toFloat64(); v != 0 {
+ *dst.toFloat64() = v
+ }
+ }
+ }
+ case reflect.Bool:
+ switch {
+ case isSlice: // E.g., []bool
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toBoolSlice()
+ if *sfsp != nil {
+ dfsp := dst.toBoolSlice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []bool{}
+ }
+ }
+ }
+ case isPointer: // E.g., *bool
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toBoolPtr()
+ if *sfpp != nil {
+ dfpp := dst.toBoolPtr()
+ if *dfpp == nil {
+ *dfpp = Bool(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., bool
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toBool(); v {
+ *dst.toBool() = v
+ }
+ }
+ }
+ case reflect.String:
+ switch {
+ case isSlice: // E.g., []string
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toStringSlice()
+ if *sfsp != nil {
+ dfsp := dst.toStringSlice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []string{}
+ }
+ }
+ }
+ case isPointer: // E.g., *string
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toStringPtr()
+ if *sfpp != nil {
+ dfpp := dst.toStringPtr()
+ if *dfpp == nil {
+ *dfpp = String(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., string
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toString(); v != "" {
+ *dst.toString() = v
+ }
+ }
+ }
+ case reflect.Slice:
+ isProto3 := props.Prop[i].proto3
+ switch {
+ case isPointer:
+ panic("bad pointer in byte slice case in " + tf.Name())
+ case tf.Elem().Kind() != reflect.Uint8:
+ panic("bad element kind in byte slice case in " + tf.Name())
+ case isSlice: // E.g., [][]byte
+ mfi.merge = func(dst, src pointer) {
+ sbsp := src.toBytesSlice()
+ if *sbsp != nil {
+ dbsp := dst.toBytesSlice()
+ for _, sb := range *sbsp {
+ if sb == nil {
+ *dbsp = append(*dbsp, nil)
+ } else {
+ *dbsp = append(*dbsp, append([]byte{}, sb...))
+ }
+ }
+ if *dbsp == nil {
+ *dbsp = [][]byte{}
+ }
+ }
+ }
+ default: // E.g., []byte
+ mfi.merge = func(dst, src pointer) {
+ sbp := src.toBytes()
+ if *sbp != nil {
+ dbp := dst.toBytes()
+ if !isProto3 || len(*sbp) > 0 {
+ *dbp = append([]byte{}, *sbp...)
+ }
+ }
+ }
+ }
+ case reflect.Struct:
+ switch {
+ case !isPointer:
+ panic(fmt.Sprintf("message field %s without pointer", tf))
+ case isSlice: // E.g., []*pb.T
+ mi := getMergeInfo(tf)
+ mfi.merge = func(dst, src pointer) {
+ sps := src.getPointerSlice()
+ if sps != nil {
+ dps := dst.getPointerSlice()
+ for _, sp := range sps {
+ var dp pointer
+ if !sp.isNil() {
+ dp = valToPointer(reflect.New(tf))
+ mi.merge(dp, sp)
+ }
+ dps = append(dps, dp)
+ }
+ if dps == nil {
+ dps = []pointer{}
+ }
+ dst.setPointerSlice(dps)
+ }
+ }
+ default: // E.g., *pb.T
+ mi := getMergeInfo(tf)
+ mfi.merge = func(dst, src pointer) {
+ sp := src.getPointer()
+ if !sp.isNil() {
+ dp := dst.getPointer()
+ if dp.isNil() {
+ dp = valToPointer(reflect.New(tf))
+ dst.setPointer(dp)
+ }
+ mi.merge(dp, sp)
+ }
+ }
+ }
+ case reflect.Map:
+ switch {
+ case isPointer || isSlice:
+ panic("bad pointer or slice in map case in " + tf.Name())
+ default: // E.g., map[K]V
+ mfi.merge = func(dst, src pointer) {
+ sm := src.asPointerTo(tf).Elem()
+ if sm.Len() == 0 {
+ return
+ }
+ dm := dst.asPointerTo(tf).Elem()
+ if dm.IsNil() {
+ dm.Set(reflect.MakeMap(tf))
+ }
+
+ switch tf.Elem().Kind() {
+ case reflect.Ptr: // Proto struct (e.g., *T)
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ val = reflect.ValueOf(Clone(val.Interface().(Message)))
+ dm.SetMapIndex(key, val)
+ }
+ case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ dm.SetMapIndex(key, val)
+ }
+ default: // Basic type (e.g., string)
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ dm.SetMapIndex(key, val)
+ }
+ }
+ }
+ }
+ case reflect.Interface:
+ // Must be oneof field.
+ switch {
+ case isPointer || isSlice:
+ panic("bad pointer or slice in interface case in " + tf.Name())
+ default: // E.g., interface{}
+ // TODO: Make this faster?
+ mfi.merge = func(dst, src pointer) {
+ su := src.asPointerTo(tf).Elem()
+ if !su.IsNil() {
+ du := dst.asPointerTo(tf).Elem()
+ typ := su.Elem().Type()
+ if du.IsNil() || du.Elem().Type() != typ {
+ du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
+ }
+ sv := su.Elem().Elem().Field(0)
+ if sv.Kind() == reflect.Ptr && sv.IsNil() {
+ return
+ }
+ dv := du.Elem().Elem().Field(0)
+ if dv.Kind() == reflect.Ptr && dv.IsNil() {
+ dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
+ }
+ switch sv.Type().Kind() {
+ case reflect.Ptr: // Proto struct (e.g., *T)
+ Merge(dv.Interface().(Message), sv.Interface().(Message))
+ case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+ dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
+ default: // Basic type (e.g., string)
+ dv.Set(sv)
+ }
+ }
+ }
+ }
+ default:
+ panic(fmt.Sprintf("merger not found for type:%s", tf))
+ }
+ mi.fields = append(mi.fields, mfi)
+ }
+
+ mi.unrecognized = invalidField
+ if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+ if f.Type != reflect.TypeOf([]byte{}) {
+ panic("expected XXX_unrecognized to be of type []byte")
+ }
+ mi.unrecognized = toField(&f)
+ }
+
+ atomic.StoreInt32(&mi.initialized, 1)
+}
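The per-kind closures built in computeMergeInfo reduce, for scalars and repeated fields, to two simple rules: a scalar source only overwrites the destination when it is non-zero (or non-nil), and a repeated source is appended. A minimal restatement under that reading; the helper names are ours:

package main

import "fmt"

// mergeInt64 mirrors the default scalar rule: a zero source leaves dst alone.
func mergeInt64(dst *int64, src int64) {
	if src != 0 {
		*dst = src
	}
}

// mergeInt64Slice mirrors the repeated-field rule: source elements append.
func mergeInt64Slice(dst *[]int64, src []int64) {
	if src != nil {
		*dst = append(*dst, src...)
	}
}

func main() {
	d := int64(7)
	mergeInt64(&d, 0)
	fmt.Println(d) // 7

	ds := []int64{1}
	mergeInt64Slice(&ds, []int64{2, 3})
	fmt.Println(ds) // [1 2 3]
}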
diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
new file mode 100644
index 00000000..ebf1caa5
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
@@ -0,0 +1,2051 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "unicode/utf8"
+)
+
+// Unmarshal is the entry point from the generated .pb.go files.
+// This function is not intended to be used by non-generated code.
+// This function is not subject to any compatibility guarantee.
+// msg contains a pointer to a protocol buffer struct.
+// b is the data to be unmarshaled into the protocol buffer.
+// a is a pointer to a place to store cached unmarshal information.
+func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error {
+ // Load the unmarshal information for this message type.
+ // The atomic load ensures memory consistency.
+ u := atomicLoadUnmarshalInfo(&a.unmarshal)
+ if u == nil {
+ // Slow path: find unmarshal info for msg, update a with it.
+ u = getUnmarshalInfo(reflect.TypeOf(msg).Elem())
+ atomicStoreUnmarshalInfo(&a.unmarshal, u)
+ }
+ // Then do the unmarshaling.
+ err := u.unmarshal(toPointer(&msg), b)
+ return err
+}
+
+type unmarshalInfo struct {
+ typ reflect.Type // type of the protobuf struct
+
+ // 0 = only typ field is initialized
+ // 1 = completely initialized
+ initialized int32
+ lock sync.Mutex // prevents double initialization
+ dense []unmarshalFieldInfo // fields indexed by tag #
+ sparse map[uint64]unmarshalFieldInfo // fields indexed by tag #
+ reqFields []string // names of required fields
+ reqMask uint64 // 1<<len(reqFields)-1
+ unrecognized field // offset of []byte to put unrecognized data (or invalidField if we should throw it away)
+ extensions field // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist
+ oldExtensions field // offset of old-form extensions field (of type map[int]Extension)
+ extensionRanges []ExtensionRange // if non-nil, implies extensions field is valid
+ isMessageSet bool // if true, implies extensions field is valid
+}
+
+// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
+// It decodes the field, stores it at f, and returns the unused bytes.
+// w is the wire encoding.
+// b is the data after the tag and wire encoding have been read.
+type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
+
+type unmarshalFieldInfo struct {
+ // location of the field in the proto message structure.
+ field field
+
+ // function to unmarshal the data for the field.
+ unmarshal unmarshaler
+
+ // if a required field, contains a single set bit at this field's index in the required field list.
+ reqMask uint64
+
+ name string // name of the field, for error reporting
+}
+
+var (
+ unmarshalInfoMap = map[reflect.Type]*unmarshalInfo{}
+ unmarshalInfoLock sync.Mutex
+)
+
+// getUnmarshalInfo returns the data structure which can be
+// subsequently used to unmarshal a message of the given type.
+// t is the type of the message (note: not pointer to message).
+func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
+ // It would be correct to return a new unmarshalInfo
+ // unconditionally. We would end up allocating one
+ // per occurrence of that type as a message or submessage.
+ // We use a cache here just to reduce memory usage.
+ unmarshalInfoLock.Lock()
+ defer unmarshalInfoLock.Unlock()
+ u := unmarshalInfoMap[t]
+ if u == nil {
+ u = &unmarshalInfo{typ: t}
+ // Note: we just set the type here. The rest of the fields
+ // will be initialized on first use.
+ unmarshalInfoMap[t] = u
+ }
+ return u
+}
+
+// unmarshal does the main work of unmarshaling a message.
+// u provides type information used to unmarshal the message.
+// m is a pointer to a protocol buffer message.
+// b is a byte stream to unmarshal into m.
+// This is the top routine used when recursively unmarshaling submessages.
+func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
+ if atomic.LoadInt32(&u.initialized) == 0 {
+ u.computeUnmarshalInfo()
+ }
+ if u.isMessageSet {
+ return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+ }
+ var reqMask uint64 // bitmask of required fields we've seen.
+ var errLater error
+ for len(b) > 0 {
+ // Read tag and wire type.
+ // Special case 1 and 2 byte varints.
+ var x uint64
+ if b[0] < 128 {
+ x = uint64(b[0])
+ b = b[1:]
+ } else if len(b) >= 2 && b[1] < 128 {
+ x = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ b = b[2:]
+ } else {
+ var n int
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ }
+ tag := x >> 3
+ wire := int(x) & 7
+
+ // Dispatch on the tag to one of the unmarshal* functions below.
+ var f unmarshalFieldInfo
+ if tag < uint64(len(u.dense)) {
+ f = u.dense[tag]
+ } else {
+ f = u.sparse[tag]
+ }
+ if fn := f.unmarshal; fn != nil {
+ var err error
+ b, err = fn(b, m.offset(f.field), wire)
+ if err == nil {
+ reqMask |= f.reqMask
+ continue
+ }
+ if r, ok := err.(*RequiredNotSetError); ok {
+ // Remember this error, but keep parsing. We need to produce
+ // a full parse even if a required field is missing.
+ if errLater == nil {
+ errLater = r
+ }
+ reqMask |= f.reqMask
+ continue
+ }
+ if err != errInternalBadWireType {
+ if err == errInvalidUTF8 {
+ if errLater == nil {
+ fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+ errLater = &invalidUTF8Error{fullName}
+ }
+ continue
+ }
+ return err
+ }
+ // Fragments with bad wire type are treated as unknown fields.
+ }
+
+ // Unknown tag.
+ if !u.unrecognized.IsValid() {
+ // Don't keep unrecognized data; just skip it.
+ var err error
+ b, err = skipField(b, wire)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+ // Keep unrecognized data around.
+ // maybe in extensions, maybe in the unrecognized field.
+ z := m.offset(u.unrecognized).toBytes()
+ var emap map[int32]Extension
+ var e Extension
+ for _, r := range u.extensionRanges {
+ if uint64(r.Start) <= tag && tag <= uint64(r.End) {
+ if u.extensions.IsValid() {
+ mp := m.offset(u.extensions).toExtensions()
+ emap = mp.extensionsWrite()
+ e = emap[int32(tag)]
+ z = &e.enc
+ break
+ }
+ if u.oldExtensions.IsValid() {
+ p := m.offset(u.oldExtensions).toOldExtensions()
+ emap = *p
+ if emap == nil {
+ emap = map[int32]Extension{}
+ *p = emap
+ }
+ e = emap[int32(tag)]
+ z = &e.enc
+ break
+ }
+ panic("no extensions field available")
+ }
+ }
+
+ // Use wire type to skip data.
+ var err error
+ b0 := b
+ b, err = skipField(b, wire)
+ if err != nil {
+ return err
+ }
+ *z = encodeVarint(*z, tag<<3|uint64(wire))
+ *z = append(*z, b0[:len(b0)-len(b)]...)
+
+ if emap != nil {
+ emap[int32(tag)] = e
+ }
+ }
+ if reqMask != u.reqMask && errLater == nil {
+ // A required field of this message is missing.
+ for _, n := range u.reqFields {
+ if reqMask&1 == 0 {
+ errLater = &RequiredNotSetError{n}
+ }
+ reqMask >>= 1
+ }
+ }
+ return errLater
+}
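The loop above splits each decoded key into tag = x>>3 and wire = x&7, with fast paths for one- and two-byte varints. A self-contained sketch of that key decoding (decodeKey is our name and, unlike decodeVarint, it omits the overlong-varint guard):

package main

import "fmt"

// decodeKey reads one base-128 varint from b and splits it into field tag
// and wire type, the same way the unmarshal loop does. n == 0 means b was
// truncated.
func decodeKey(b []byte) (tag uint64, wire int, n int) {
	var x uint64
	var shift uint
	for i, c := range b {
		x |= uint64(c&0x7f) << shift
		if c < 0x80 {
			return x >> 3, int(x) & 7, i + 1
		}
		shift += 7
	}
	return 0, 0, 0
}

func main() {
	fmt.Println(decodeKey([]byte{0x08}))       // 1 0 1: field 1, varint
	fmt.Println(decodeKey([]byte{0x92, 0x01})) // 18 2 2: field 18, bytes
}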
+
+// computeUnmarshalInfo fills in u with information for use
+// in unmarshaling protocol buffers of type u.typ.
+func (u *unmarshalInfo) computeUnmarshalInfo() {
+ u.lock.Lock()
+ defer u.lock.Unlock()
+ if u.initialized != 0 {
+ return
+ }
+ t := u.typ
+ n := t.NumField()
+
+ // Set up the "not found" value for the unrecognized byte buffer.
+ // This is the default for proto3.
+ u.unrecognized = invalidField
+ u.extensions = invalidField
+ u.oldExtensions = invalidField
+
+ // List of the generated type and offset for each oneof field.
+ type oneofField struct {
+ ityp reflect.Type // interface type of oneof field
+ field field // offset in containing message
+ }
+ var oneofFields []oneofField
+
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ if f.Name == "XXX_unrecognized" {
+ // The byte slice used to hold unrecognized input is special.
+ if f.Type != reflect.TypeOf(([]byte)(nil)) {
+ panic("bad type for XXX_unrecognized field: " + f.Type.Name())
+ }
+ u.unrecognized = toField(&f)
+ continue
+ }
+ if f.Name == "XXX_InternalExtensions" {
+ // Ditto here.
+ if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) {
+ panic("bad type for XXX_InternalExtensions field: " + f.Type.Name())
+ }
+ u.extensions = toField(&f)
+ if f.Tag.Get("protobuf_messageset") == "1" {
+ u.isMessageSet = true
+ }
+ continue
+ }
+ if f.Name == "XXX_extensions" {
+ // An older form of the extensions field.
+ if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) {
+ panic("bad type for XXX_extensions field: " + f.Type.Name())
+ }
+ u.oldExtensions = toField(&f)
+ continue
+ }
+ if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" {
+ continue
+ }
+
+ oneof := f.Tag.Get("protobuf_oneof")
+ if oneof != "" {
+ oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)})
+ // The rest of oneof processing happens below.
+ continue
+ }
+
+ tags := f.Tag.Get("protobuf")
+ tagArray := strings.Split(tags, ",")
+ if len(tagArray) < 2 {
+ panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags)
+ }
+ tag, err := strconv.Atoi(tagArray[1])
+ if err != nil {
+ panic("protobuf tag field not an integer: " + tagArray[1])
+ }
+
+ name := ""
+ for _, tag := range tagArray[3:] {
+ if strings.HasPrefix(tag, "name=") {
+ name = tag[5:]
+ }
+ }
+
+ // Extract unmarshaling function from the field (its type and tags).
+ unmarshal := fieldUnmarshaler(&f)
+
+ // Required field?
+ var reqMask uint64
+ if tagArray[2] == "req" {
+ bit := len(u.reqFields)
+ u.reqFields = append(u.reqFields, name)
+ reqMask = uint64(1) << uint(bit)
+ // TODO: if we have more than 64 required fields, we end up
+ // not verifying that all required fields are present.
+ // Fix this, perhaps using a count of required fields?
+ }
+
+ // Store the info in the correct slot in the message.
+ u.setTag(tag, toField(&f), unmarshal, reqMask, name)
+ }
+
+ // Find any types associated with oneof fields.
+ // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it?
+ fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs")
+ if fn.IsValid() {
+ res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{}
+ for i := res.Len() - 1; i >= 0; i-- {
+ v := res.Index(i) // interface{}
+ tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X
+ typ := tptr.Elem() // Msg_X
+
+ f := typ.Field(0) // oneof implementers have one field
+ baseUnmarshal := fieldUnmarshaler(&f)
+ tags := strings.Split(f.Tag.Get("protobuf"), ",")
+ fieldNum, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("protobuf tag field not an integer: " + tags[1])
+ }
+ var name string
+ for _, tag := range tags {
+ if strings.HasPrefix(tag, "name=") {
+ name = strings.TrimPrefix(tag, "name=")
+ break
+ }
+ }
+
+ // Find the oneof field that this struct implements.
+ // Might take O(n^2) to process all of the oneofs, but who cares.
+ for _, of := range oneofFields {
+ if tptr.Implements(of.ityp) {
+ // We have found the corresponding interface for this struct.
+ // That lets us know where this struct should be stored
+ // when we encounter it during unmarshaling.
+ unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+ u.setTag(fieldNum, of.field, unmarshal, 0, name)
+ }
+ }
+ }
+ }
+
+ // Get extension ranges, if any.
+ fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+ if fn.IsValid() {
+ if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
+ panic("a message with extensions, but no extensions field in " + t.Name())
+ }
+ u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
+ }
+
+ // Explicitly disallow tag 0. This will ensure we flag an error
+ // when decoding a buffer of all zeros. Without this code, we
+ // would decode and skip an all-zero buffer of even length.
+ // [0 0] is [tag=0/wiretype=varint varint-encoded-0].
+ u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
+ return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
+ }, 0, "")
+
+ // Set mask for required field check.
+ u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
+
+ atomic.StoreInt32(&u.initialized, 1)
+}
+
+// setTag stores the unmarshal information for the given tag.
+// tag = tag # for field
+// field/unmarshal = unmarshal info for that field.
+// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
+// name = short name of the field.
+func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
+ i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
+ n := u.typ.NumField()
+ if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
+ for len(u.dense) <= tag {
+ u.dense = append(u.dense, unmarshalFieldInfo{})
+ }
+ u.dense[tag] = i
+ return
+ }
+ if u.sparse == nil {
+ u.sparse = map[uint64]unmarshalFieldInfo{}
+ }
+ u.sparse[uint64(tag)] = i
+}
+
+// fieldUnmarshaler returns an unmarshaler for the given field.
+func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
+ if f.Type.Kind() == reflect.Map {
+ return makeUnmarshalMap(f)
+ }
+ return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
+}
+
+// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
+ tagArray := strings.Split(tags, ",")
+ encoding := tagArray[0]
+ name := "unknown"
+ proto3 := false
+ validateUTF8 := true
+ for _, tag := range tagArray[3:] {
+ if strings.HasPrefix(tag, "name=") {
+ name = tag[5:]
+ }
+ if tag == "proto3" {
+ proto3 = true
+ }
+ }
+ validateUTF8 = validateUTF8 && proto3
+
+ // Figure out packaging (pointer, slice, or both)
+ slice := false
+ pointer := false
+ if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+ slice = true
+ t = t.Elem()
+ }
+ if t.Kind() == reflect.Ptr {
+ pointer = true
+ t = t.Elem()
+ }
+
+ // We'll never have both pointer and slice for basic types.
+ if pointer && slice && t.Kind() != reflect.Struct {
+ panic("both pointer and slice for basic type in " + t.Name())
+ }
+
+ switch t.Kind() {
+ case reflect.Bool:
+ if pointer {
+ return unmarshalBoolPtr
+ }
+ if slice {
+ return unmarshalBoolSlice
+ }
+ return unmarshalBoolValue
+ case reflect.Int32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return unmarshalFixedS32Ptr
+ }
+ if slice {
+ return unmarshalFixedS32Slice
+ }
+ return unmarshalFixedS32Value
+ case "varint":
+ // this could be int32 or enum
+ if pointer {
+ return unmarshalInt32Ptr
+ }
+ if slice {
+ return unmarshalInt32Slice
+ }
+ return unmarshalInt32Value
+ case "zigzag32":
+ if pointer {
+ return unmarshalSint32Ptr
+ }
+ if slice {
+ return unmarshalSint32Slice
+ }
+ return unmarshalSint32Value
+ }
+ case reflect.Int64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return unmarshalFixedS64Ptr
+ }
+ if slice {
+ return unmarshalFixedS64Slice
+ }
+ return unmarshalFixedS64Value
+ case "varint":
+ if pointer {
+ return unmarshalInt64Ptr
+ }
+ if slice {
+ return unmarshalInt64Slice
+ }
+ return unmarshalInt64Value
+ case "zigzag64":
+ if pointer {
+ return unmarshalSint64Ptr
+ }
+ if slice {
+ return unmarshalSint64Slice
+ }
+ return unmarshalSint64Value
+ }
+ case reflect.Uint32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return unmarshalFixed32Ptr
+ }
+ if slice {
+ return unmarshalFixed32Slice
+ }
+ return unmarshalFixed32Value
+ case "varint":
+ if pointer {
+ return unmarshalUint32Ptr
+ }
+ if slice {
+ return unmarshalUint32Slice
+ }
+ return unmarshalUint32Value
+ }
+ case reflect.Uint64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return unmarshalFixed64Ptr
+ }
+ if slice {
+ return unmarshalFixed64Slice
+ }
+ return unmarshalFixed64Value
+ case "varint":
+ if pointer {
+ return unmarshalUint64Ptr
+ }
+ if slice {
+ return unmarshalUint64Slice
+ }
+ return unmarshalUint64Value
+ }
+ case reflect.Float32:
+ if pointer {
+ return unmarshalFloat32Ptr
+ }
+ if slice {
+ return unmarshalFloat32Slice
+ }
+ return unmarshalFloat32Value
+ case reflect.Float64:
+ if pointer {
+ return unmarshalFloat64Ptr
+ }
+ if slice {
+ return unmarshalFloat64Slice
+ }
+ return unmarshalFloat64Value
+ case reflect.Map:
+ panic("map type in typeUnmarshaler in " + t.Name())
+ case reflect.Slice:
+ if pointer {
+ panic("bad pointer in slice case in " + t.Name())
+ }
+ if slice {
+ return unmarshalBytesSlice
+ }
+ return unmarshalBytesValue
+ case reflect.String:
+ if validateUTF8 {
+ if pointer {
+ return unmarshalUTF8StringPtr
+ }
+ if slice {
+ return unmarshalUTF8StringSlice
+ }
+ return unmarshalUTF8StringValue
+ }
+ if pointer {
+ return unmarshalStringPtr
+ }
+ if slice {
+ return unmarshalStringSlice
+ }
+ return unmarshalStringValue
+ case reflect.Struct:
+ // message or group field
+ if !pointer {
+ panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding))
+ }
+ switch encoding {
+ case "bytes":
+ if slice {
+ return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name)
+ }
+ return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name)
+ case "group":
+ if slice {
+ return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name)
+ }
+ return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name)
+ }
+ }
+ panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding))
+}
+
+// Below are all the unmarshalers for individual fields of various types.
+
+func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ *f.toInt64() = v
+ return b, nil
+}
+
+func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ *f.toInt64Ptr() = &v
+ return b, nil
+}
+
+func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ return b, nil
+}
+
+func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ *f.toInt64() = v
+ return b, nil
+}
+
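The sint64 decoders above undo zigzag encoding, which maps signed values to unsigned so that small negatives stay small on the wire. A round-trip sketch using the same decode expression (the encode helper is ours):

package main

import "fmt"

func zigzagEncode64(n int64) uint64 { return uint64(n<<1) ^ uint64(n>>63) }

// zigzagDecode64 is the expression used by unmarshalSint64*: shift the
// magnitude down and sign-extend the parity bit.
func zigzagDecode64(x uint64) int64 { return int64(x>>1) ^ int64(x)<<63>>63 }

func main() {
	for _, n := range []int64{0, -1, 1, -2, 2, -64, 63} {
		x := zigzagEncode64(n)
		fmt.Println(n, "->", x, "->", zigzagDecode64(x))
	}
}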
+func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ *f.toInt64Ptr() = &v
+ return b, nil
+}
+
+func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ return b, nil
+}
+
+func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ *f.toUint64() = v
+ return b, nil
+}
+
+func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ *f.toUint64Ptr() = &v
+ return b, nil
+}
+
+func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ return b, nil
+}
+
+func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ *f.toInt32() = v
+ return b, nil
+}
+
+func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ f.setInt32Ptr(v)
+ return b, nil
+}
+
+func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ f.appendInt32Slice(v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ f.appendInt32Slice(v)
+ return b, nil
+}
+
+func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ *f.toInt32() = v
+ return b, nil
+}
+
+func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ f.setInt32Ptr(v)
+ return b, nil
+}
+
+func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ f.appendInt32Slice(v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ f.appendInt32Slice(v)
+ return b, nil
+}
+
+func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ *f.toUint32() = v
+ return b, nil
+}
+
+func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ *f.toUint32Ptr() = &v
+ return b, nil
+}
+
+func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ return b, nil
+}
+
+func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ *f.toUint64() = v
+ return b[8:], nil
+}
+
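The shift-and-or chains above are plain little-endian decoding of eight bytes; a quick sketch showing they agree with encoding/binary (manual is our name):

package main

import (
	"encoding/binary"
	"fmt"
)

// manual reproduces the expression used by the fixed64 unmarshalers above.
func manual(b []byte) uint64 {
	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}

func main() {
	b := []byte{1, 2, 3, 4, 5, 6, 7, 8}
	fmt.Printf("%#x\n%#x\n", manual(b), binary.LittleEndian.Uint64(b)) // both 0x807060504030201
}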
+func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ *f.toUint64Ptr() = &v
+ return b[8:], nil
+}
+
+func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ b = b[8:]
+ }
+ return res, nil
+ }
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ return b[8:], nil
+}
+
+func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ *f.toInt64() = v
+ return b[8:], nil
+}
+
+func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ *f.toInt64Ptr() = &v
+ return b[8:], nil
+}
+
+func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ b = b[8:]
+ }
+ return res, nil
+ }
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ return b[8:], nil
+}
+
+func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ *f.toUint32() = v
+ return b[4:], nil
+}
+
+func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ *f.toUint32Ptr() = &v
+ return b[4:], nil
+}
+
+func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ b = b[4:]
+ }
+ return res, nil
+ }
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ return b[4:], nil
+}
+
+func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ *f.toInt32() = v
+ return b[4:], nil
+}
+
+func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ f.setInt32Ptr(v)
+ return b[4:], nil
+}
+
+func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ f.appendInt32Slice(v)
+ b = b[4:]
+ }
+ return res, nil
+ }
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ f.appendInt32Slice(v)
+ return b[4:], nil
+}
+
+func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ // Note: any length varint is allowed, even though any sane
+ // encoder will use one byte.
+ // See https://github.com/golang/protobuf/issues/76
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // TODO: check if x>1? Tests seem to indicate no.
+ v := x != 0
+ *f.toBool() = v
+ return b[n:], nil
+}
+
+func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := x != 0
+ *f.toBoolPtr() = &v
+ return b[n:], nil
+}
+
+func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := x != 0
+ s := f.toBoolSlice()
+ *s = append(*s, v)
+ b = b[n:]
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := x != 0
+ s := f.toBoolSlice()
+ *s = append(*s, v)
+ return b[n:], nil
+}
+
+func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ *f.toFloat64() = v
+ return b[8:], nil
+}
+
+func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ *f.toFloat64Ptr() = &v
+ return b[8:], nil
+}
+
+func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ s := f.toFloat64Slice()
+ *s = append(*s, v)
+ b = b[8:]
+ }
+ return res, nil
+ }
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ s := f.toFloat64Slice()
+ *s = append(*s, v)
+ return b[8:], nil
+}
+
+func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ *f.toFloat32() = v
+ return b[4:], nil
+}
+
+func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ *f.toFloat32Ptr() = &v
+ return b[4:], nil
+}
+
+func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ s := f.toFloat32Slice()
+ *s = append(*s, v)
+ b = b[4:]
+ }
+ return res, nil
+ }
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ s := f.toFloat32Slice()
+ *s = append(*s, v)
+ return b[4:], nil
+}
+
+func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ *f.toString() = v
+ return b[x:], nil
+}
+
+func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ *f.toStringPtr() = &v
+ return b[x:], nil
+}
+
+func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ s := f.toStringSlice()
+ *s = append(*s, v)
+ return b[x:], nil
+}
+
+func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ *f.toString() = v
+ if !utf8.ValidString(v) {
+ return b[x:], errInvalidUTF8
+ }
+ return b[x:], nil
+}
+
+func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ *f.toStringPtr() = &v
+ if !utf8.ValidString(v) {
+ return b[x:], errInvalidUTF8
+ }
+ return b[x:], nil
+}
+
+func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ s := f.toStringSlice()
+ *s = append(*s, v)
+ if !utf8.ValidString(v) {
+ return b[x:], errInvalidUTF8
+ }
+ return b[x:], nil
+}
+
+var emptyBuf [0]byte
+
+func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // The use of append here is a trick which avoids the zeroing
+ // that would be required if we used a make/copy pair.
+ // We append to emptyBuf instead of nil because we want
+ // a non-nil result even when the length is 0.
+ v := append(emptyBuf[:], b[:x]...)
+ *f.toBytes() = v
+ return b[x:], nil
+}
+
+func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := append(emptyBuf[:], b[:x]...)
+ s := f.toBytesSlice()
+ *s = append(*s, v)
+ return b[x:], nil
+}
+
+func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // First read the message field to see if something is there.
+ // The semantics of multiple submessages are weird. Instead of
+ // the last one winning (as it is for all other fields), multiple
+ // submessages are merged.
+ v := f.getPointer()
+ if v.isNil() {
+ v = valToPointer(reflect.New(sub.typ))
+ f.setPointer(v)
+ }
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ return b[x:], err
+ }
+}
+
+func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := valToPointer(reflect.New(sub.typ))
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ f.appendPointer(v)
+ return b[x:], err
+ }
+}
+
+func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireStartGroup {
+ return b, errInternalBadWireType
+ }
+ x, y := findEndGroup(b)
+ if x < 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := f.getPointer()
+ if v.isNil() {
+ v = valToPointer(reflect.New(sub.typ))
+ f.setPointer(v)
+ }
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ return b[y:], err
+ }
+}
+
+func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireStartGroup {
+ return b, errInternalBadWireType
+ }
+ x, y := findEndGroup(b)
+ if x < 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := valToPointer(reflect.New(sub.typ))
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ f.appendPointer(v)
+ return b[y:], err
+ }
+}
+
+func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
+ t := f.Type
+ kt := t.Key()
+ vt := t.Elem()
+ unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key"))
+ unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val"))
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ // The map entry is a submessage. Figure out how big it is.
+ if w != WireBytes {
+ return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes)
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ r := b[x:] // unused data to return
+ b = b[:x] // data for map entry
+
+ // Note: we could use #keys * #values ~= 200 functions
+ // to do map decoding without reflection. Probably not worth it.
+ // Maps will be somewhat slow. Oh well.
+
+ // Read key and value from data.
+ var nerr nonFatal
+ k := reflect.New(kt)
+ v := reflect.New(vt)
+ for len(b) > 0 {
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ wire := int(x) & 7
+ b = b[n:]
+
+ var err error
+ switch x >> 3 {
+ case 1:
+ b, err = unmarshalKey(b, valToPointer(k), wire)
+ case 2:
+ b, err = unmarshalVal(b, valToPointer(v), wire)
+ default:
+ err = errInternalBadWireType // skip unknown tag
+ }
+
+ if nerr.Merge(err) {
+ continue
+ }
+ if err != errInternalBadWireType {
+ return nil, err
+ }
+
+ // Skip past unknown fields.
+ b, err = skipField(b, wire)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Get map, allocate if needed.
+ m := f.asPointerTo(t).Elem() // an addressable map[K]T
+ if m.IsNil() {
+ m.Set(reflect.MakeMap(t))
+ }
+
+ // Insert into map.
+ m.SetMapIndex(k.Elem(), v.Elem())
+
+ return r, nerr.E
+ }
+}
+
+// makeUnmarshalOneof makes an unmarshaler for oneof fields.
+// for:
+// message Msg {
+// oneof F {
+// int64 X = 1;
+// float64 Y = 2;
+// }
+// }
+// typ is the type of the concrete entry for a oneof case (e.g. Msg_X).
+// ityp is the interface type of the oneof field (e.g. isMsg_F).
+// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64).
+// Note that this function will be called once for each case in the oneof.
+func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler {
+ sf := typ.Field(0)
+ field0 := toField(&sf)
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ // Allocate holder for value.
+ v := reflect.New(typ)
+
+ // Unmarshal data into holder.
+ // We unmarshal into the first field of the holder object.
+ var err error
+ var nerr nonFatal
+ b, err = unmarshal(b, valToPointer(v).offset(field0), w)
+ if !nerr.Merge(err) {
+ return nil, err
+ }
+
+ // Write pointer to holder into target field.
+ f.asPointerTo(ityp).Elem().Set(v)
+
+ return b, nerr.E
+ }
+}
+
+// Error used by decode internally.
+var errInternalBadWireType = errors.New("proto: internal error: bad wiretype")
+
+// skipField skips past a field of type wire and returns the remaining bytes.
+func skipField(b []byte, wire int) ([]byte, error) {
+ switch wire {
+ case WireVarint:
+ _, k := decodeVarint(b)
+ if k == 0 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[k:]
+ case WireFixed32:
+ if len(b) < 4 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[4:]
+ case WireFixed64:
+ if len(b) < 8 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[8:]
+ case WireBytes:
+ m, k := decodeVarint(b)
+ if k == 0 || uint64(len(b)-k) < m {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[uint64(k)+m:]
+ case WireStartGroup:
+ _, i := findEndGroup(b)
+ if i == -1 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[i:]
+ default:
+ return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire)
+ }
+ return b, nil
+}
+
+// findEndGroup finds the index of the next EndGroup tag.
+// Groups may be nested, so the "next" EndGroup tag is the first
+// unpaired EndGroup.
+// findEndGroup returns the indexes of the start and end of the EndGroup tag.
+// Returns (-1,-1) if it can't find one.
+func findEndGroup(b []byte) (int, int) {
+ depth := 1
+ i := 0
+ for {
+ x, n := decodeVarint(b[i:])
+ if n == 0 {
+ return -1, -1
+ }
+ j := i
+ i += n
+ switch x & 7 {
+ case WireVarint:
+ _, k := decodeVarint(b[i:])
+ if k == 0 {
+ return -1, -1
+ }
+ i += k
+ case WireFixed32:
+ if len(b)-4 < i {
+ return -1, -1
+ }
+ i += 4
+ case WireFixed64:
+ if len(b)-8 < i {
+ return -1, -1
+ }
+ i += 8
+ case WireBytes:
+ m, k := decodeVarint(b[i:])
+ if k == 0 {
+ return -1, -1
+ }
+ i += k
+ if uint64(len(b)-i) < m {
+ return -1, -1
+ }
+ i += int(m)
+ case WireStartGroup:
+ depth++
+ case WireEndGroup:
+ depth--
+ if depth == 0 {
+ return j, i
+ }
+ default:
+ return -1, -1
+ }
+ }
+}
+
+// encodeVarint appends a varint-encoded integer to b and returns the result.
+func encodeVarint(b []byte, x uint64) []byte {
+ for x >= 1<<7 {
+ b = append(b, byte(x&0x7f|0x80))
+ x >>= 7
+ }
+ return append(b, byte(x))
+}
+
+// decodeVarint reads a varint-encoded integer from b.
+// Returns the decoded integer and the number of bytes read.
+// If there is an error, it returns 0,0.
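+//
+// Illustrative example (an editorial note, not part of the upstream comment):
+// the varint encoding of 300 is the two bytes 0xAC 0x02 (low 7 bits with the
+// continuation bit set, then the remaining high bits), so
+// decodeVarint([]byte{0xAC, 0x02}) returns (300, 2) and
+// encodeVarint(nil, 300) produces []byte{0xAC, 0x02}.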
+func decodeVarint(b []byte) (uint64, int) {
+ var x, y uint64
+ if len(b) <= 0 {
+ goto bad
+ }
+ x = uint64(b[0])
+ if x < 0x80 {
+ return x, 1
+ }
+ x -= 0x80
+
+ if len(b) <= 1 {
+ goto bad
+ }
+ y = uint64(b[1])
+ x += y << 7
+ if y < 0x80 {
+ return x, 2
+ }
+ x -= 0x80 << 7
+
+ if len(b) <= 2 {
+ goto bad
+ }
+ y = uint64(b[2])
+ x += y << 14
+ if y < 0x80 {
+ return x, 3
+ }
+ x -= 0x80 << 14
+
+ if len(b) <= 3 {
+ goto bad
+ }
+ y = uint64(b[3])
+ x += y << 21
+ if y < 0x80 {
+ return x, 4
+ }
+ x -= 0x80 << 21
+
+ if len(b) <= 4 {
+ goto bad
+ }
+ y = uint64(b[4])
+ x += y << 28
+ if y < 0x80 {
+ return x, 5
+ }
+ x -= 0x80 << 28
+
+ if len(b) <= 5 {
+ goto bad
+ }
+ y = uint64(b[5])
+ x += y << 35
+ if y < 0x80 {
+ return x, 6
+ }
+ x -= 0x80 << 35
+
+ if len(b) <= 6 {
+ goto bad
+ }
+ y = uint64(b[6])
+ x += y << 42
+ if y < 0x80 {
+ return x, 7
+ }
+ x -= 0x80 << 42
+
+ if len(b) <= 7 {
+ goto bad
+ }
+ y = uint64(b[7])
+ x += y << 49
+ if y < 0x80 {
+ return x, 8
+ }
+ x -= 0x80 << 49
+
+ if len(b) <= 8 {
+ goto bad
+ }
+ y = uint64(b[8])
+ x += y << 56
+ if y < 0x80 {
+ return x, 9
+ }
+ x -= 0x80 << 56
+
+ if len(b) <= 9 {
+ goto bad
+ }
+ y = uint64(b[9])
+ x += y << 63
+ if y < 2 {
+ return x, 10
+ }
+
+bad:
+ return 0, 0
+}
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
new file mode 100644
index 00000000..1aaee725
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text.go
@@ -0,0 +1,843 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+ "bufio"
+ "bytes"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "math"
+ "reflect"
+ "sort"
+ "strings"
+)
+
+var (
+ newline = []byte("\n")
+ spaces = []byte("                                        ")
+ endBraceNewline = []byte("}\n")
+ backslashN = []byte{'\\', 'n'}
+ backslashR = []byte{'\\', 'r'}
+ backslashT = []byte{'\\', 't'}
+ backslashDQ = []byte{'\\', '"'}
+ backslashBS = []byte{'\\', '\\'}
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+type writer interface {
+ io.Writer
+ WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ ind int
+ complete bool // if the current position is a complete line
+ compact bool // whether to write out as a one-liner
+ w writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+ if !strings.Contains(s, "\n") {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.complete = false
+ return io.WriteString(w.w, s)
+ }
+ // WriteString is typically called without newlines, so this
+ // codepath and its copy are rare. We copy to avoid
+ // duplicating all of Write's logic here.
+ return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ n, err = w.w.Write(p)
+ w.complete = false
+ return n, err
+ }
+
+ frags := bytes.SplitN(p, newline, newlines+1)
+ if w.compact {
+ for i, frag := range frags {
+ if i > 0 {
+ if err := w.w.WriteByte(' '); err != nil {
+ return n, err
+ }
+ n++
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+ }
+
+ for i, frag := range frags {
+ if w.complete {
+ w.writeIndent()
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ if i+1 < len(frags) {
+ if err := w.w.WriteByte('\n'); err != nil {
+ return n, err
+ }
+ n++
+ }
+ }
+ w.complete = len(frags[len(frags)-1]) == 0
+ return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+ if w.compact && c == '\n' {
+ c = ' '
+ }
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ err := w.w.WriteByte(c)
+ w.complete = c == '\n'
+ return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+ if w.ind == 0 {
+ log.Print("proto: textWriter unindented too far")
+ return
+ }
+ w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+ if _, err := w.WriteString(props.OrigName); err != nil {
+ return err
+ }
+ if props.Wire != "group" {
+ return w.WriteByte(':')
+ }
+ return nil
+}
+
+func requiresQuotes(u string) bool {
+ // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
+ for _, ch := range u {
+ switch {
+ case ch == '.' || ch == '/' || ch == '_':
+ continue
+ case '0' <= ch && ch <= '9':
+ continue
+ case 'A' <= ch && ch <= 'Z':
+ continue
+ case 'a' <= ch && ch <= 'z':
+ continue
+ default:
+ return true
+ }
+ }
+ return false
+}
+
+// isAny reports whether sv is a google.protobuf.Any message
+func isAny(sv reflect.Value) bool {
+ type wkt interface {
+ XXX_WellKnownType() string
+ }
+ t, ok := sv.Addr().Interface().(wkt)
+ return ok && t.XXX_WellKnownType() == "Any"
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
+// required messages are not linked in).
+//
+// It returns (true, error) when sv was written in expanded format or an error
+// was encountered.
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
+ turl := sv.FieldByName("TypeUrl")
+ val := sv.FieldByName("Value")
+ if !turl.IsValid() || !val.IsValid() {
+ return true, errors.New("proto: invalid google.protobuf.Any message")
+ }
+
+ b, ok := val.Interface().([]byte)
+ if !ok {
+ return true, errors.New("proto: invalid google.protobuf.Any message")
+ }
+
+ parts := strings.Split(turl.String(), "/")
+ mt := MessageType(parts[len(parts)-1])
+ if mt == nil {
+ return false, nil
+ }
+ m := reflect.New(mt.Elem())
+ if err := Unmarshal(b, m.Interface().(Message)); err != nil {
+ return false, nil
+ }
+ w.Write([]byte("["))
+ u := turl.String()
+ if requiresQuotes(u) {
+ writeString(w, u)
+ } else {
+ w.Write([]byte(u))
+ }
+ if w.compact {
+ w.Write([]byte("]:<"))
+ } else {
+ w.Write([]byte("]: <\n"))
+ w.ind++
+ }
+ if err := tm.writeStruct(w, m.Elem()); err != nil {
+ return true, err
+ }
+ if w.compact {
+ w.Write([]byte("> "))
+ } else {
+ w.ind--
+ w.Write([]byte(">\n"))
+ }
+ return true, nil
+}
+
+func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
+ if tm.ExpandAny && isAny(sv) {
+ if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
+ return err
+ }
+ }
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < sv.NumField(); i++ {
+ fv := sv.Field(i)
+ props := sprops.Prop[i]
+ name := st.Field(i).Name
+
+ if name == "XXX_NoUnkeyedLiteral" {
+ continue
+ }
+
+ if strings.HasPrefix(name, "XXX_") {
+ // There are two XXX_ fields:
+ // XXX_unrecognized []byte
+ // XXX_extensions map[int32]proto.Extension
+ // The first is handled here;
+ // the second is handled at the bottom of this function.
+ if name == "XXX_unrecognized" && !fv.IsNil() {
+ if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Field not filled in. This could be an optional field or
+ // a required field that wasn't filled in. Either way, there
+ // isn't anything we can show for it.
+ continue
+ }
+ if fv.Kind() == reflect.Slice && fv.IsNil() {
+ // Repeated field that is empty, or a bytes field that is unused.
+ continue
+ }
+
+ if props.Repeated && fv.Kind() == reflect.Slice {
+ // Repeated field.
+ for j := 0; j < fv.Len(); j++ {
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ v := fv.Index(j)
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ // A nil message in a repeated field is not valid,
+ // but we can handle that more gracefully than panicking.
+ if _, err := w.Write([]byte("<nil>\n")); err != nil {
+ return err
+ }
+ continue
+ }
+ if err := tm.writeAny(w, v, props); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Map {
+ // Map fields are rendered as a repeated struct with key/value fields.
+ keys := fv.MapKeys()
+ sort.Sort(mapKeys(keys))
+ for _, key := range keys {
+ val := fv.MapIndex(key)
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ // open struct
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ // key
+ if _, err := w.WriteString("key:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ // nil values aren't legal, but we can avoid panicking because of them.
+ if val.Kind() != reflect.Ptr || !val.IsNil() {
+ // value
+ if _, err := w.WriteString("value:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, val, props.MapValProp); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ // close struct
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+ // empty bytes field
+ continue
+ }
+ if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+ // proto3 non-repeated scalar field; skip if zero value
+ if isProto3Zero(fv) {
+ continue
+ }
+ }
+
+ if fv.Kind() == reflect.Interface {
+ // Check if it is a oneof.
+ if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+ // fv is nil, or holds a pointer to generated struct.
+ // That generated struct has exactly one field,
+ // which has a protobuf struct tag.
+ if fv.IsNil() {
+ continue
+ }
+ inner := fv.Elem().Elem() // interface -> *T -> T
+ tag := inner.Type().Field(0).Tag.Get("protobuf")
+ props = new(Properties) // Overwrite the outer props var, but not its pointee.
+ props.Parse(tag)
+ // Write the value in the oneof, not the oneof itself.
+ fv = inner.Field(0)
+
+ // Special case to cope with malformed messages gracefully:
+ // If the value in the oneof is a nil pointer, don't panic
+ // in writeAny.
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Use errors.New so writeAny won't render quotes.
+ msg := errors.New("/* nil */")
+ fv = reflect.ValueOf(&msg).Elem()
+ }
+ }
+ }
+
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+
+ // Enums have a String method, so writeAny will work fine.
+ if err := tm.writeAny(w, fv, props); err != nil {
+ return err
+ }
+
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+
+ // Extensions (the XXX_extensions field).
+ pv := sv.Addr()
+ if _, err := extendable(pv.Interface()); err == nil {
+ if err := tm.writeExtensions(w, pv); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// writeAny writes an arbitrary field.
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+ v = reflect.Indirect(v)
+
+ // Floats have special cases.
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+ x := v.Float()
+ var b []byte
+ switch {
+ case math.IsInf(x, 1):
+ b = posInf
+ case math.IsInf(x, -1):
+ b = negInf
+ case math.IsNaN(x):
+ b = nan
+ }
+ if b != nil {
+ _, err := w.Write(b)
+ return err
+ }
+ // Other values are handled below.
+ }
+
+ // We don't attempt to serialise every possible value type; only those
+ // that can occur in protocol buffers.
+ switch v.Kind() {
+ case reflect.Slice:
+ // Should only be a []byte; repeated fields are handled in writeStruct.
+ if err := writeString(w, string(v.Bytes())); err != nil {
+ return err
+ }
+ case reflect.String:
+ if err := writeString(w, v.String()); err != nil {
+ return err
+ }
+ case reflect.Struct:
+ // Required/optional group/message.
+ var bra, ket byte = '<', '>'
+ if props != nil && props.Wire == "group" {
+ bra, ket = '{', '}'
+ }
+ if err := w.WriteByte(bra); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if v.CanAddr() {
+ // Calling v.Interface on a struct causes the reflect package to
+ // copy the entire struct. This is racy with the new Marshaler
+ // since we atomically update the XXX_sizecache.
+ //
+ // Thus, we retrieve a pointer to the struct if possible to avoid
+ // a race since v.Interface on the pointer doesn't copy the struct.
+ //
+ // If v is not addressable, then we are not worried about a race
+ // since it implies that the binary Marshaler cannot possibly be
+ // mutating this value.
+ v = v.Addr()
+ }
+ if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
+ text, err := etm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = w.Write(text); err != nil {
+ return err
+ }
+ } else {
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+ if err := tm.writeStruct(w, v); err != nil {
+ return err
+ }
+ }
+ w.unindent()
+ if err := w.WriteByte(ket); err != nil {
+ return err
+ }
+ default:
+ _, err := fmt.Fprint(w, v.Interface())
+ return err
+ }
+ return nil
+}
+
+// equivalent to C's isprint.
+func isprint(c byte) bool {
+ return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
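+//
+// Illustrative example (an editorial note, not from the upstream sources):
+// writeString(w, "a\tb\x01") emits the quoted form "a\tb\001" (including the
+// surrounding double quotes), escaping the tab as \t and the non-printable
+// byte 0x01 as the three-digit octal escape \001.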
+func writeString(w *textWriter, s string) error {
+ // use WriteByte here to get any needed indent
+ if err := w.WriteByte('"'); err != nil {
+ return err
+ }
+ // Loop over the bytes, not the runes.
+ for i := 0; i < len(s); i++ {
+ var err error
+ // Divergence from C++: we don't escape apostrophes.
+ // There's no need to escape them, and the C++ parser
+ // copes with a naked apostrophe.
+ switch c := s[i]; c {
+ case '\n':
+ _, err = w.w.Write(backslashN)
+ case '\r':
+ _, err = w.w.Write(backslashR)
+ case '\t':
+ _, err = w.w.Write(backslashT)
+ case '"':
+ _, err = w.w.Write(backslashDQ)
+ case '\\':
+ _, err = w.w.Write(backslashBS)
+ default:
+ if isprint(c) {
+ err = w.w.WriteByte(c)
+ } else {
+ _, err = fmt.Fprintf(w.w, "\\%03o", c)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return w.WriteByte('"')
+}
+
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+ if !w.compact {
+ if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+ return err
+ }
+ }
+ b := NewBuffer(data)
+ for b.index < len(b.buf) {
+ x, err := b.DecodeVarint()
+ if err != nil {
+ _, err := fmt.Fprintf(w, "/* %v */\n", err)
+ return err
+ }
+ wire, tag := x&7, x>>3
+ if wire == WireEndGroup {
+ w.unindent()
+ if _, err := w.Write(endBraceNewline); err != nil {
+ return err
+ }
+ continue
+ }
+ if _, err := fmt.Fprint(w, tag); err != nil {
+ return err
+ }
+ if wire != WireStartGroup {
+ if err := w.WriteByte(':'); err != nil {
+ return err
+ }
+ }
+ if !w.compact || wire == WireStartGroup {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ switch wire {
+ case WireBytes:
+ buf, e := b.DecodeRawBytes(false)
+ if e == nil {
+ _, err = fmt.Fprintf(w, "%q", buf)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", e)
+ }
+ case WireFixed32:
+ x, err = b.DecodeFixed32()
+ err = writeUnknownInt(w, x, err)
+ case WireFixed64:
+ x, err = b.DecodeFixed64()
+ err = writeUnknownInt(w, x, err)
+ case WireStartGroup:
+ err = w.WriteByte('{')
+ w.indent()
+ case WireVarint:
+ x, err = b.DecodeVarint()
+ err = writeUnknownInt(w, x, err)
+ default:
+ _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+ }
+ if err != nil {
+ return err
+ }
+ if err = w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+ if err == nil {
+ _, err = fmt.Fprint(w, x)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", err)
+ }
+ return err
+}
+
+type int32Slice []int32
+
+func (s int32Slice) Len() int { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
+ emap := extensionMaps[pv.Type().Elem()]
+ ep, _ := extendable(pv.Interface())
+
+ // Order the extensions by ID.
+ // This isn't strictly necessary, but it will give us
+ // canonical output, which will also make testing easier.
+ m, mu := ep.extensionsRead()
+ if m == nil {
+ return nil
+ }
+ mu.Lock()
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids))
+ mu.Unlock()
+
+ for _, extNum := range ids {
+ ext := m[extNum]
+ var desc *ExtensionDesc
+ if emap != nil {
+ desc = emap[extNum]
+ }
+ if desc == nil {
+ // Unknown extension.
+ if err := writeUnknownStruct(w, ext.enc); err != nil {
+ return err
+ }
+ continue
+ }
+
+ pb, err := GetExtension(ep, desc)
+ if err != nil {
+ return fmt.Errorf("failed getting extension: %v", err)
+ }
+
+ // Repeated extensions will appear as a slice.
+ if !desc.repeated() {
+ if err := tm.writeExtension(w, desc.Name, pb); err != nil {
+ return err
+ }
+ } else {
+ v := reflect.ValueOf(pb)
+ for i := 0; i < v.Len(); i++ {
+ if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
+ if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (w *textWriter) writeIndent() {
+ if !w.complete {
+ return
+ }
+ remain := w.ind * 2
+ for remain > 0 {
+ n := remain
+ if n > len(spaces) {
+ n = len(spaces)
+ }
+ w.w.Write(spaces[:n])
+ remain -= n
+ }
+ w.complete = false
+}
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+ Compact bool // use compact text format (one line).
+ ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
+ val := reflect.ValueOf(pb)
+ if pb == nil || val.IsNil() {
+ w.Write([]byte("<nil>"))
+ return nil
+ }
+ var bw *bufio.Writer
+ ww, ok := w.(writer)
+ if !ok {
+ bw = bufio.NewWriter(w)
+ ww = bw
+ }
+ aw := &textWriter{
+ w: ww,
+ complete: true,
+ compact: tm.Compact,
+ }
+
+ if etm, ok := pb.(encoding.TextMarshaler); ok {
+ text, err := etm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = aw.Write(text); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+ }
+ // Dereference the received pointer so we don't have outer < and >.
+ v := reflect.Indirect(val)
+ if err := tm.writeStruct(aw, v); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+}
+
+// Text is the same as Marshal, but returns the string directly.
+func (tm *TextMarshaler) Text(pb Message) string {
+ var buf bytes.Buffer
+ tm.Marshal(&buf, pb)
+ return buf.String()
+}
+
+var (
+ defaultTextMarshaler = TextMarshaler{}
+ compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// TODO: consider removing some of the Marshal functions below.
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
new file mode 100644
index 00000000..bb55a3af
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -0,0 +1,880 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the Text protocol buffer format.
+// TODO: message sets.
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+// Error string emitted when deserializing Any and fields are already set
+const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
+
+type ParseError struct {
+ Message string
+ Line int // 1-based line number
+ Offset int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+ if p.Line == 1 {
+ // show offset only for first line
+ return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+ }
+ return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
+
+type token struct {
+ value string
+ err *ParseError
+ line int // line number
+ offset int // byte number from start of input, not start of line
+ unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+ if t.err == nil {
+ return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+ }
+ return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+ s string // remaining input
+ done bool // whether the parsing is finished (success or error)
+ backed bool // whether back() was called
+ offset, line int
+ cur token
+}
+
+func newTextParser(s string) *textParser {
+ p := new(textParser)
+ p.s = s
+ p.line = 1
+ p.cur.line = 1
+ return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+ pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+ switch {
+ case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+ return true
+ case '0' <= c && c <= '9':
+ return true
+ }
+ switch c {
+ case '-', '+', '.', '_':
+ return true
+ }
+ return false
+}
+
+func isWhitespace(c byte) bool {
+ switch c {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+func isQuote(c byte) bool {
+ switch c {
+ case '"', '\'':
+ return true
+ }
+ return false
+}
+
+func (p *textParser) skipWhitespace() {
+ i := 0
+ for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+ if p.s[i] == '#' {
+ // comment; skip to end of line or input
+ for i < len(p.s) && p.s[i] != '\n' {
+ i++
+ }
+ if i == len(p.s) {
+ break
+ }
+ }
+ if p.s[i] == '\n' {
+ p.line++
+ }
+ i++
+ }
+ p.offset += i
+ p.s = p.s[i:len(p.s)]
+ if len(p.s) == 0 {
+ p.done = true
+ }
+}
+
+func (p *textParser) advance() {
+ // Skip whitespace
+ p.skipWhitespace()
+ if p.done {
+ return
+ }
+
+ // Start of non-whitespace
+ p.cur.err = nil
+ p.cur.offset, p.cur.line = p.offset, p.line
+ p.cur.unquoted = ""
+ switch p.s[0] {
+ case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+ // Single symbol
+ p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+ case '"', '\'':
+ // Quoted string
+ i := 1
+ for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+ if p.s[i] == '\\' && i+1 < len(p.s) {
+ // skip escaped char
+ i++
+ }
+ i++
+ }
+ if i >= len(p.s) || p.s[i] != p.s[0] {
+ p.errorf("unmatched quote")
+ return
+ }
+ unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+ if err != nil {
+ p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+ return
+ }
+ p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+ p.cur.unquoted = unq
+ default:
+ i := 0
+ for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+ i++
+ }
+ if i == 0 {
+ p.errorf("unexpected byte %#x", p.s[0])
+ return
+ }
+ p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+ }
+ p.offset += len(p.cur.value)
+}
+
+var (
+ errBadUTF8 = errors.New("proto: bad UTF-8")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+ // This is based on C++'s tokenizer.cc.
+ // Despite its name, this is *not* parsing C syntax.
+ // For instance, "\0" is an invalid quoted string.
+
+ // Avoid allocation in trivial cases.
+ simple := true
+ for _, r := range s {
+ if r == '\\' || r == quote {
+ simple = false
+ break
+ }
+ }
+ if simple {
+ return s, nil
+ }
+
+ buf := make([]byte, 0, 3*len(s)/2)
+ for len(s) > 0 {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", errBadUTF8
+ }
+ s = s[n:]
+ if r != '\\' {
+ if r < utf8.RuneSelf {
+ buf = append(buf, byte(r))
+ } else {
+ buf = append(buf, string(r)...)
+ }
+ continue
+ }
+
+ ch, tail, err := unescape(s)
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, ch...)
+ s = tail
+ }
+ return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", "", errBadUTF8
+ }
+ s = s[n:]
+ switch r {
+ case 'a':
+ return "\a", s, nil
+ case 'b':
+ return "\b", s, nil
+ case 'f':
+ return "\f", s, nil
+ case 'n':
+ return "\n", s, nil
+ case 'r':
+ return "\r", s, nil
+ case 't':
+ return "\t", s, nil
+ case 'v':
+ return "\v", s, nil
+ case '?':
+ return "?", s, nil // trigraph workaround
+ case '\'', '"', '\\':
+ return string(r), s, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ if len(s) < 2 {
+ return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+ }
+ ss := string(r) + s[:2]
+ s = s[2:]
+ i, err := strconv.ParseUint(ss, 8, 8)
+ if err != nil {
+ return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
+ }
+ return string([]byte{byte(i)}), s, nil
+ case 'x', 'X', 'u', 'U':
+ var n int
+ switch r {
+ case 'x', 'X':
+ n = 2
+ case 'u':
+ n = 4
+ case 'U':
+ n = 8
+ }
+ if len(s) < n {
+ return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
+ }
+ ss := s[:n]
+ s = s[n:]
+ i, err := strconv.ParseUint(ss, 16, 64)
+ if err != nil {
+ return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
+ }
+ if r == 'x' || r == 'X' {
+ return string([]byte{byte(i)}), s, nil
+ }
+ if i > utf8.MaxRune {
+ return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
+ }
+ return string(i), s, nil
+ }
+ return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+ if p.backed || p.done {
+ p.backed = false
+ return &p.cur
+ }
+ p.advance()
+ if p.done {
+ p.cur.value = ""
+ } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+ // Look for multiple quoted strings separated by whitespace,
+ // and concatenate them.
+ cat := p.cur
+ for {
+ p.skipWhitespace()
+ if p.done || !isQuote(p.s[0]) {
+ break
+ }
+ p.advance()
+ if p.cur.err != nil {
+ return &p.cur
+ }
+ cat.value += " " + p.cur.value
+ cat.unquoted += p.cur.unquoted
+ }
+ p.done = false // parser may have seen EOF, but we want to return cat
+ p.cur = cat
+ }
+ return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != s {
+ p.back()
+ return p.errorf("expected %q, found %q", s, tok.value)
+ }
+ return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < st.NumField(); i++ {
+ if !isNil(sv.Field(i)) {
+ continue
+ }
+
+ props := sprops.Prop[i]
+ if props.Required {
+ return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+ }
+ }
+ return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
+}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+ i, ok := sprops.decoderOrigNames[name]
+ if ok {
+ return i, sprops.Prop[i], true
+ }
+ return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ":" {
+ // Colon is optional when the field is a group or message.
+ needColon := true
+ switch props.Wire {
+ case "group":
+ needColon = false
+ case "bytes":
+ // A "bytes" field is either a message, a string, or a repeated field;
+ // those three become *T, *string and []T respectively, so we can check for
+ // this field being a pointer to a non-string.
+ if typ.Kind() == reflect.Ptr {
+ // *T or *string
+ if typ.Elem().Kind() == reflect.String {
+ break
+ }
+ } else if typ.Kind() == reflect.Slice {
+ // []T or []*T
+ if typ.Elem().Kind() != reflect.Ptr {
+ break
+ }
+ } else if typ.Kind() == reflect.String {
+ // The proto3 exception is for a string field,
+ // which requires a colon.
+ break
+ }
+ needColon = false
+ }
+ if needColon {
+ return p.errorf("expected ':', found %q", tok.value)
+ }
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ reqCount := sprops.reqCount
+ var reqFieldErr error
+ fieldSet := make(map[string]bool)
+ // A struct is a sequence of "name: value", terminated by one of
+ // '>' or '}', or the end of the input. A name may also be
+ // "[extension]" or "[type/url]".
+ //
+ // The whole struct can also be an expanded Any message, like:
+ // [type/url] < ... struct contents ... >
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ if tok.value == "[" {
+ // Looks like an extension or an Any.
+ //
+ // TODO: Check whether we need to handle
+ // namespace rooted names (e.g. ".something.Foo").
+ extName, err := p.consumeExtName()
+ if err != nil {
+ return err
+ }
+
+ if s := strings.LastIndex(extName, "/"); s >= 0 {
+ // If it contains a slash, it's an Any type URL.
+ messageName := extName[s+1:]
+ mt := MessageType(messageName)
+ if mt == nil {
+ return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
+ }
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ // consume an optional colon
+ if tok.value == ":" {
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ }
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ v := reflect.New(mt.Elem())
+ if pe := p.readStruct(v.Elem(), terminator); pe != nil {
+ return pe
+ }
+ b, err := Marshal(v.Interface().(Message))
+ if err != nil {
+ return p.errorf("failed to marshal message of type %q: %v", messageName, err)
+ }
+ if fieldSet["type_url"] {
+ return p.errorf(anyRepeatedlyUnpacked, "type_url")
+ }
+ if fieldSet["value"] {
+ return p.errorf(anyRepeatedlyUnpacked, "value")
+ }
+ sv.FieldByName("TypeUrl").SetString(extName)
+ sv.FieldByName("Value").SetBytes(b)
+ fieldSet["type_url"] = true
+ fieldSet["value"] = true
+ continue
+ }
+
+ var desc *ExtensionDesc
+ // This could be faster, but it's functional.
+ // TODO: Do something smarter than a linear scan.
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+ if d.Name == extName {
+ desc = d
+ break
+ }
+ }
+ if desc == nil {
+ return p.errorf("unrecognized extension %q", extName)
+ }
+
+ props := &Properties{}
+ props.Parse(desc.Tag)
+
+ typ := reflect.TypeOf(desc.ExtensionType)
+ if err := p.checkForColon(props, typ); err != nil {
+ return err
+ }
+
+ rep := desc.repeated()
+
+ // Read the extension structure, and set it in
+ // the value we're constructing.
+ var ext reflect.Value
+ if !rep {
+ ext = reflect.New(typ).Elem()
+ } else {
+ ext = reflect.New(typ.Elem()).Elem()
+ }
+ if err := p.readAny(ext, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ ep := sv.Addr().Interface().(Message)
+ if !rep {
+ SetExtension(ep, desc, ext.Interface())
+ } else {
+ old, err := GetExtension(ep, desc)
+ var sl reflect.Value
+ if err == nil {
+ sl = reflect.ValueOf(old) // existing slice
+ } else {
+ sl = reflect.MakeSlice(typ, 0, 1)
+ }
+ sl = reflect.Append(sl, ext)
+ SetExtension(ep, desc, sl.Interface())
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // This is a normal, non-extension field.
+ name := tok.value
+ var dst reflect.Value
+ fi, props, ok := structFieldByName(sprops, name)
+ if ok {
+ dst = sv.Field(fi)
+ } else if oop, ok := sprops.OneofTypes[name]; ok {
+ // It is a oneof.
+ props = oop.Prop
+ nv := reflect.New(oop.Type.Elem())
+ dst = nv.Elem().Field(0)
+ field := sv.Field(oop.Field)
+ if !field.IsNil() {
+ return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
+ }
+ field.Set(nv)
+ }
+ if !dst.IsValid() {
+ return p.errorf("unknown field name %q in %v", name, st)
+ }
+
+ if dst.Kind() == reflect.Map {
+ // Consume any colon.
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Construct the map if it doesn't already exist.
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+ key := reflect.New(dst.Type().Key()).Elem()
+ val := reflect.New(dst.Type().Elem()).Elem()
+
+ // The map entry should be this sequence of tokens:
+ // < key : KEY value : VALUE >
+ // However, implementations may omit key or value, and technically
+ // we should support them in any order. See b/28924776 for a time
+ // this went wrong.
+
+ tok := p.next()
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ switch tok.value {
+ case "key":
+ if err := p.consumeToken(":"); err != nil {
+ return err
+ }
+ if err := p.readAny(key, props.MapKeyProp); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ case "value":
+ if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
+ return err
+ }
+ if err := p.readAny(val, props.MapValProp); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ default:
+ p.back()
+ return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+ }
+ }
+
+ dst.SetMapIndex(key, val)
+ continue
+ }
+
+ // Check that it's not already set if it's not a repeated field.
+ if !props.Repeated && fieldSet[name] {
+ return p.errorf("non-repeated field %q was repeated", name)
+ }
+
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Parse into the field.
+ fieldSet[name] = true
+ if err := p.readAny(dst, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ if props.Required {
+ reqCount--
+ }
+
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+
+ }
+
+ if reqCount > 0 {
+ return p.missingRequiredFieldError(sv)
+ }
+ return reqFieldErr
+}
+
+// consumeExtName consumes extension name or expanded Any type URL and the
+// following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtName() (string, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return "", tok.err
+ }
+
+ // If extension name or type url is quoted, it's a single token.
+ if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+ name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+ if err != nil {
+ return "", err
+ }
+ return name, p.consumeToken("]")
+ }
+
+ // Consume everything up to "]"
+ var parts []string
+ for tok.value != "]" {
+ parts = append(parts, tok.value)
+ tok = p.next()
+ if tok.err != nil {
+ return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+ }
+ if p.done && tok.value != "]" {
+ return "", p.errorf("unclosed type_url or extension name")
+ }
+ }
+ return strings.Join(parts, ""), nil
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "" {
+ return p.errorf("unexpected EOF")
+ }
+
+ switch fv := v; fv.Kind() {
+ case reflect.Slice:
+ at := v.Type()
+ if at.Elem().Kind() == reflect.Uint8 {
+ // Special case for []byte
+ if tok.value[0] != '"' && tok.value[0] != '\'' {
+ // Deliberately written out here, as the error after
+ // this switch statement would write "invalid []byte: ...",
+ // which is not as user-friendly.
+ return p.errorf("invalid string: %v", tok.value)
+ }
+ bytes := []byte(tok.unquoted)
+ fv.Set(reflect.ValueOf(bytes))
+ return nil
+ }
+ // Repeated field.
+ if tok.value == "[" {
+ // Repeated field with list notation, like [1,2,3].
+ for {
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ err := p.readAny(fv.Index(fv.Len()-1), props)
+ if err != nil {
+ return err
+ }
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "]" {
+ break
+ }
+ if tok.value != "," {
+ return p.errorf("Expected ']' or ',' found %q", tok.value)
+ }
+ }
+ return nil
+ }
+ // One value of the repeated field.
+ p.back()
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ return p.readAny(fv.Index(fv.Len()-1), props)
+ case reflect.Bool:
+ // true/1/t/True or false/f/0/False.
+ switch tok.value {
+ case "true", "1", "t", "True":
+ fv.SetBool(true)
+ return nil
+ case "false", "0", "f", "False":
+ fv.SetBool(false)
+ return nil
+ }
+ case reflect.Float32, reflect.Float64:
+ v := tok.value
+ // Ignore 'f' for compatibility with output generated by C++, but don't
+ // remove 'f' when the value is "-inf" or "inf".
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+ v = v[:len(v)-1]
+ }
+ if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+ fv.SetFloat(f)
+ return nil
+ }
+ case reflect.Int32:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ if len(props.Enum) == 0 {
+ break
+ }
+ m, ok := enumValueMaps[props.Enum]
+ if !ok {
+ break
+ }
+ x, ok := m[tok.value]
+ if !ok {
+ break
+ }
+ fv.SetInt(int64(x))
+ return nil
+ case reflect.Int64:
+ if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ case reflect.Ptr:
+ // A basic field (indirected through pointer), or a repeated message/group
+ p.back()
+ fv.Set(reflect.New(fv.Type().Elem()))
+ return p.readAny(fv.Elem(), props)
+ case reflect.String:
+ if tok.value[0] == '"' || tok.value[0] == '\'' {
+ fv.SetString(tok.unquoted)
+ return nil
+ }
+ case reflect.Struct:
+ var terminator string
+ switch tok.value {
+ case "{":
+ terminator = "}"
+ case "<":
+ terminator = ">"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ // TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+ return p.readStruct(fv, terminator)
+ case reflect.Uint32:
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ fv.SetUint(uint64(x))
+ return nil
+ }
+ case reflect.Uint64:
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ fv.SetUint(x)
+ return nil
+ }
+ }
+ return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
+func UnmarshalText(s string, pb Message) error {
+ if um, ok := pb.(encoding.TextUnmarshaler); ok {
+ return um.UnmarshalText([]byte(s))
+ }
+ pb.Reset()
+ v := reflect.ValueOf(pb)
+ return newTextParser(s).readStruct(v.Elem(), "")
+}
diff --git a/vendor/github.com/shurcooL/githubv4/.travis.yml b/vendor/github.com/shurcooL/githubv4/.travis.yml
new file mode 100644
index 00000000..93b1fcdb
--- /dev/null
+++ b/vendor/github.com/shurcooL/githubv4/.travis.yml
@@ -0,0 +1,16 @@
+sudo: false
+language: go
+go:
+ - 1.x
+ - master
+matrix:
+ allow_failures:
+ - go: master
+ fast_finish: true
+install:
+ - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
+script:
+ - go get -t -v ./...
+ - diff -u <(echo -n) <(gofmt -d -s .)
+ - go tool vet .
+ - go test -v -race ./...
diff --git a/vendor/github.com/shurcooL/githubv4/LICENSE b/vendor/github.com/shurcooL/githubv4/LICENSE
new file mode 100644
index 00000000..ca4c7764
--- /dev/null
+++ b/vendor/github.com/shurcooL/githubv4/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2017 Dmitri Shuralyov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/shurcooL/githubv4/README.md b/vendor/github.com/shurcooL/githubv4/README.md
new file mode 100644
index 00000000..0d389ef0
--- /dev/null
+++ b/vendor/github.com/shurcooL/githubv4/README.md
@@ -0,0 +1,408 @@
+githubv4
+========
+
+[![Build Status](https://travis-ci.org/shurcooL/githubv4.svg?branch=master)](https://travis-ci.org/shurcooL/githubv4) [![GoDoc](https://godoc.org/github.com/shurcooL/githubv4?status.svg)](https://godoc.org/github.com/shurcooL/githubv4)
+
+Package `githubv4` is a client library for accessing GitHub GraphQL API v4 (https://developer.github.com/v4/).
+
+If you're looking for a client library for GitHub REST API v3, the recommended package is [`github.com/google/go-github/github`](https://godoc.org/github.com/google/go-github/github).
+
+**Status:** In research and development. The API will change when opportunities for improvement are discovered; it is not yet frozen.
+
+Focus
+-----
+
+- Friendly, simple and powerful API.
+- Correctness, high performance and efficiency.
+- Support all of GitHub GraphQL API v4 via code generation from schema.
+
+Installation
+------------
+
+`githubv4` requires Go version 1.8 or later.
+
+```bash
+go get -u github.com/shurcooL/githubv4
+```
+
+Usage
+-----
+
+### Authentication
+
+GitHub GraphQL API v4 [requires authentication](https://developer.github.com/v4/guides/forming-calls/#authenticating-with-graphql). The `githubv4` package does not directly handle authentication. Instead, when creating a new client, you're expected to pass an `http.Client` that performs authentication. The easiest and recommended way to do this is to use the [`golang.org/x/oauth2`](https://golang.org/x/oauth2) package. You'll need an OAuth token from GitHub (for example, a [personal API token](https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line/)) with the right scopes. Then:
+
+```Go
+import "golang.org/x/oauth2"
+
+func main() {
+ src := oauth2.StaticTokenSource(
+ &oauth2.Token{AccessToken: os.Getenv("GITHUB_TOKEN")},
+ )
+ httpClient := oauth2.NewClient(context.Background(), src)
+
+ client := githubv4.NewClient(httpClient)
+ // Use client...
+}
+```
+
+### Simple Query
+
+To make a query, you need to define a Go type that corresponds to the GitHub GraphQL schema, and contains the fields you're interested in querying. You can look up the GitHub GraphQL schema at https://developer.github.com/v4/reference/query/.
+
+For example, to make the following GraphQL query:
+
+```GraphQL
+query {
+ viewer {
+ login
+ createdAt
+ }
+}
+```
+
+You can define this variable:
+
+```Go
+var query struct {
+ Viewer struct {
+ Login githubv4.String
+ CreatedAt githubv4.DateTime
+ }
+}
+```
+
+Then call `client.Query`, passing a pointer to it:
+
+```Go
+err := client.Query(context.Background(), &query, nil)
+if err != nil {
+ // Handle error.
+}
+fmt.Println("    Login:", query.Viewer.Login)
+fmt.Println("CreatedAt:", query.Viewer.CreatedAt)
+
+// Output:
+//     Login: gopher
+// CreatedAt: 2017-05-26 21:17:14 +0000 UTC
+```
+
+### Scalar Types
+
+For each scalar in the GitHub GraphQL schema listed at https://developer.github.com/v4/reference/scalar/, there is a corresponding Go type in package `githubv4`.
+
+You can use these types when writing queries:
+
+```Go
+var query struct {
+ Viewer struct {
+ Login githubv4.String
+ CreatedAt githubv4.DateTime
+ IsBountyHunter githubv4.Boolean
+ BioHTML githubv4.HTML
+ WebsiteURL githubv4.URI
+ }
+}
+// Call client.Query() and use results in query...
+```
+
+However, depending on how you're planning to use the results of your query, it's often more convenient to use other Go types.
+
+The `encoding/json` rules are used for converting individual JSON-encoded fields from a GraphQL response into Go values. See https://godoc.org/encoding/json#Unmarshal for details. The [`json.Unmarshaler`](https://godoc.org/encoding/json#Unmarshaler) interface is respected.
+
+That means you can simplify the earlier query by using predeclared Go types:
+
+```Go
+// import "time"
+
+var query struct {
+ Viewer struct {
+ Login string // E.g., "gopher".
+ CreatedAt time.Time // E.g., time.Date(2017, 5, 26, 21, 17, 14, 0, time.UTC).
+ IsBountyHunter bool // E.g., true.
+ BioHTML string // E.g., `I am learning <a href="https://graphql.org">GraphQL</a>!`.
+ WebsiteURL string // E.g., "https://golang.org".
+ }
+}
+// Call client.Query() and use results in query...
+```
+
+The [`DateTime`](https://developer.github.com/v4/reference/scalar/datetime/) scalar is described as "an ISO-8601 encoded UTC date string". If you want to fetch it in that form without parsing it into a `time.Time`, you can use the `string` type, or any other type that `encoding/json` can unmarshal the value into. For example, this would work:
+
+```Go
+// import "html/template"
+
+type MyBoolean bool
+
+var query struct {
+ Viewer struct {
+ Login string // E.g., "gopher".
+ CreatedAt string // E.g., "2017-05-26T21:17:14Z".
+ IsBountyHunter MyBoolean // E.g., MyBoolean(true).
+ BioHTML template.HTML // E.g., template.HTML(`I am learning <a href="https://graphql.org">GraphQL</a>!`).
+ WebsiteURL template.URL // E.g., template.URL("https://golang.org").
+ }
+}
+// Call client.Query() and use results in query...
+```
+
+### Arguments and Variables
+
+Often, you'll want to specify arguments on some fields. You can use the `graphql` struct field tag for this.
+
+For example, to make the following GraphQL query:
+
+```GraphQL
+{
+ repository(owner: "octocat", name: "Hello-World") {
+ description
+ }
+}
+```
+
+You can define this variable:
+
+```Go
+var q struct {
+ Repository struct {
+ Description string
+ } `graphql:"repository(owner: \"octocat\", name: \"Hello-World\")"`
+}
+```
+
+Then call `client.Query`:
+
+```Go
+err := client.Query(context.Background(), &q, nil)
+if err != nil {
+ // Handle error.
+}
+fmt.Println(q.Repository.Description)
+
+// Output:
+// My first repository on GitHub!
+```
+
+However, that'll only work if the arguments are constant and known in advance. Otherwise, you will need to make use of variables. Replace the constants in the struct field tag with variable names:
+
+```Go
+// fetchRepoDescription fetches description of repo with owner and name.
+func fetchRepoDescription(ctx context.Context, owner, name string) (string, error) {
+ var q struct {
+ Repository struct {
+ Description string
+ } `graphql:"repository(owner: $owner, name: $name)"`
+ }
+```
+
+When sending variables to GraphQL, you need to use exact types that match GraphQL scalar types, otherwise the GraphQL server will return an error.
+
+So, define a `variables` map with their values that are converted to GraphQL scalar types:
+
+```Go
+ variables := map[string]interface{}{
+ "owner": githubv4.String(owner),
+ "name": githubv4.String(name),
+ }
+```
+
+Finally, call `client.Query` providing `variables`:
+
+```Go
+ err := client.Query(ctx, &q, variables)
+ return q.Repository.Description, err
+}
+```
+
+### Inline Fragments
+
+Some GraphQL queries contain inline fragments. You can use the `graphql` struct field tag to express them.
+
+For example, to make the following GraphQL query:
+
+```GraphQL
+{
+ repositoryOwner(login: "github") {
+ login
+ ... on Organization {
+ description
+ }
+ ... on User {
+ bio
+ }
+ }
+}
+```
+
+You can define this variable:
+
+```Go
+var q struct {
+ RepositoryOwner struct {
+ Login string
+ Organization struct {
+ Description string
+ } `graphql:"... on Organization"`
+ User struct {
+ Bio string
+ } `graphql:"... on User"`
+ } `graphql:"repositoryOwner(login: \"github\")"`
+}
+```
+
+Alternatively, you can define the struct types corresponding to inline fragments, and use them as embedded fields in your query:
+
+```Go
+type (
+ OrganizationFragment struct {
+ Description string
+ }
+ UserFragment struct {
+ Bio string
+ }
+)
+
+var q struct {
+ RepositoryOwner struct {
+ Login string
+ OrganizationFragment `graphql:"... on Organization"`
+ UserFragment `graphql:"... on User"`
+ } `graphql:"repositoryOwner(login: \"github\")"`
+}
+```
+
+Then call `client.Query`:
+
+```Go
+err := client.Query(context.Background(), &q, nil)
+if err != nil {
+ // Handle error.
+}
+fmt.Println(q.RepositoryOwner.Login)
+fmt.Println(q.RepositoryOwner.Description)
+fmt.Println(q.RepositoryOwner.Bio)
+
+// Output:
+// github
+// How people build software.
+//
+```
+
+### Pagination
+
+Imagine you wanted to get a complete list of comments in an issue, and not just the first 10 or so. To do that, you'll need to perform multiple queries and use pagination information. For example:
+
+```Go
+type comment struct {
+ Body string
+ Author struct {
+ Login string
+ AvatarURL string `graphql:"avatarUrl(size: 72)"`
+ }
+ ViewerCanReact bool
+}
+var q struct {
+ Repository struct {
+ Issue struct {
+ Comments struct {
+ Nodes []comment
+ PageInfo struct {
+ EndCursor githubv4.String
+ HasNextPage bool
+ }
+ } `graphql:"comments(first: 100, after: $commentsCursor)"` // 100 per page.
+ } `graphql:"issue(number: $issueNumber)"`
+ } `graphql:"repository(owner: $repositoryOwner, name: $repositoryName)"`
+}
+variables := map[string]interface{}{
+ "repositoryOwner": githubv4.String(owner),
+ "repositoryName": githubv4.String(name),
+ "issueNumber": githubv4.Int(issue),
+ "commentsCursor": (*githubv4.String)(nil), // Null after argument to get first page.
+}
+
+// Get comments from all pages.
+var allComments []comment
+for {
+ err := client.Query(ctx, &q, variables)
+ if err != nil {
+ return err
+ }
+ allComments = append(allComments, q.Repository.Issue.Comments.Nodes...)
+ if !q.Repository.Issue.Comments.PageInfo.HasNextPage {
+ break
+ }
+ variables["commentsCursor"] = githubv4.NewString(q.Repository.Issue.Comments.PageInfo.EndCursor)
+}
+```
+
+There is more than one way to perform pagination. Consider the additional fields inside the [`PageInfo`](https://developer.github.com/v4/reference/object/pageinfo/) object; one alternative is sketched below.
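+
+For instance, `PageInfo` also exposes `startCursor` and `hasPreviousPage`, so a connection can be walked backwards with `last`/`before` instead of `first`/`after`. A minimal sketch, reusing the `comment` type and query variables from the example above:
+
+```Go
+var q struct {
+	Repository struct {
+		Issue struct {
+			Comments struct {
+				Nodes    []comment
+				PageInfo struct {
+					StartCursor     githubv4.String
+					HasPreviousPage bool
+				}
+			} `graphql:"comments(last: 100, before: $commentsCursor)"` // Walk from the newest comment backwards.
+		} `graphql:"issue(number: $issueNumber)"`
+	} `graphql:"repository(owner: $repositoryOwner, name: $repositoryName)"`
+}
+// Start with a nil $commentsCursor, then set it to PageInfo.StartCursor after
+// each page and stop when HasPreviousPage is false.
+```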
+
+### Mutations
+
+Mutations often require information that you can only find out by performing a query first. Let's suppose you've already done that.
+
+For example, to make the following GraphQL mutation:
+
+```GraphQL
+mutation($input: AddReactionInput!) {
+ addReaction(input: $input) {
+ reaction {
+ content
+ }
+ subject {
+ id
+ }
+ }
+}
+variables {
+ "input": {
+ "subjectId": "MDU6SXNzdWUyMTc5NTQ0OTc=",
+ "content": "HOORAY"
+ }
+}
+```
+
+You can define:
+
+```Go
+var m struct {
+ AddReaction struct {
+ Reaction struct {
+ Content githubv4.ReactionContent
+ }
+ Subject struct {
+ ID githubv4.ID
+ }
+ } `graphql:"addReaction(input: $input)"`
+}
+input := githubv4.AddReactionInput{
+ SubjectID: targetIssue.ID, // ID of the target issue from a previous query.
+ Content: githubv4.ReactionContentHooray,
+}
+```
+
+Then call `client.Mutate`:
+
+```Go
+err := client.Mutate(context.Background(), &m, input, nil)
+if err != nil {
+ // Handle error.
+}
+fmt.Printf("Added a %v reaction to subject with ID %#v!\n", m.AddReaction.Reaction.Content, m.AddReaction.Subject.ID)
+
+// Output:
+// Added a HOORAY reaction to subject with ID "MDU6SXNzdWUyMTc5NTQ0OTc="!
+```
+
+Directories
+-----------
+
+| Path | Synopsis |
+|-------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------|
+| [example/githubv4dev](https://godoc.org/github.com/shurcooL/githubv4/example/githubv4dev) | githubv4dev is a test program currently being used for developing githubv4 package. |
+
+License
+-------
+
+- [MIT License](LICENSE)
diff --git a/vendor/github.com/shurcooL/githubv4/doc.go b/vendor/github.com/shurcooL/githubv4/doc.go
new file mode 100644
index 00000000..0ce1a508
--- /dev/null
+++ b/vendor/github.com/shurcooL/githubv4/doc.go
@@ -0,0 +1,13 @@
+// Package githubv4 is a client library for accessing GitHub
+// GraphQL API v4 (https://developer.github.com/v4/).
+//
+// If you're looking for a client library for GitHub REST API v3,
+// the recommended package is github.com/google/go-github/github.
+//
+// Status: In active early research and development. The API will change when
+// opportunities for improvement are discovered; it is not yet frozen.
+//
+// For now, see README for more details.
+package githubv4 // import "github.com/shurcooL/githubv4"
+
+//go:generate go run gen.go
diff --git a/vendor/github.com/shurcooL/githubv4/enum.go b/vendor/github.com/shurcooL/githubv4/enum.go
new file mode 100644
index 00000000..8293a23b
--- /dev/null
+++ b/vendor/github.com/shurcooL/githubv4/enum.go
@@ -0,0 +1,529 @@
+// Code generated by gen.go; DO NOT EDIT.
+
+package githubv4
+
+// CollaboratorAffiliation represents collaborators affiliation level with a subject.
+type CollaboratorAffiliation string
+
+// Collaborators affiliation level with a subject.
+const (
+ CollaboratorAffiliationOutside CollaboratorAffiliation = "OUTSIDE" // All outside collaborators of an organization-owned subject.
+ CollaboratorAffiliationDirect CollaboratorAffiliation = "DIRECT" // All collaborators with permissions to an organization-owned subject, regardless of organization membership status.
+ CollaboratorAffiliationAll CollaboratorAffiliation = "ALL" // All collaborators the authenticated user can see.
+)
+
+// CommentAuthorAssociation represents a comment author association with repository.
+type CommentAuthorAssociation string
+
+// A comment author association with repository.
+const (
+ CommentAuthorAssociationMember CommentAuthorAssociation = "MEMBER" // Author is a member of the organization that owns the repository.
+ CommentAuthorAssociationOwner CommentAuthorAssociation = "OWNER" // Author is the owner of the repository.
+ CommentAuthorAssociationCollaborator CommentAuthorAssociation = "COLLABORATOR" // Author has been invited to collaborate on the repository.
+ CommentAuthorAssociationContributor CommentAuthorAssociation = "CONTRIBUTOR" // Author has previously committed to the repository.
+ CommentAuthorAssociationFirstTimeContributor CommentAuthorAssociation = "FIRST_TIME_CONTRIBUTOR" // Author has not previously committed to the repository.
+ CommentAuthorAssociationFirstTimer CommentAuthorAssociation = "FIRST_TIMER" // Author has not previously committed to GitHub.
+ CommentAuthorAssociationNone CommentAuthorAssociation = "NONE" // Author has no association with the repository.
+)
+
+// CommentCannotUpdateReason represents the possible errors that will prevent a user from updating a comment.
+type CommentCannotUpdateReason string
+
+// The possible errors that will prevent a user from updating a comment.
+const (
+ CommentCannotUpdateReasonInsufficientAccess CommentCannotUpdateReason = "INSUFFICIENT_ACCESS" // You must be the author or have write access to this repository to update this comment.
+ CommentCannotUpdateReasonLocked CommentCannotUpdateReason = "LOCKED" // Unable to create comment because issue is locked.
+ CommentCannotUpdateReasonLoginRequired CommentCannotUpdateReason = "LOGIN_REQUIRED" // You must be logged in to update this comment.
+ CommentCannotUpdateReasonMaintenance CommentCannotUpdateReason = "MAINTENANCE" // Repository is under maintenance.
+ CommentCannotUpdateReasonVerifiedEmailRequired CommentCannotUpdateReason = "VERIFIED_EMAIL_REQUIRED" // At least one email address must be verified to update this comment.
+)
+
+// DefaultRepositoryPermissionField represents the possible default permissions for repositories.
+type DefaultRepositoryPermissionField string
+
+// The possible default permissions for repositories.
+const (
+ DefaultRepositoryPermissionFieldNone DefaultRepositoryPermissionField = "NONE" // No access.
+ DefaultRepositoryPermissionFieldRead DefaultRepositoryPermissionField = "READ" // Can read repos by default.
+ DefaultRepositoryPermissionFieldWrite DefaultRepositoryPermissionField = "WRITE" // Can read and write repos by default.
+ DefaultRepositoryPermissionFieldAdmin DefaultRepositoryPermissionField = "ADMIN" // Can read, write, and administrate repos by default.
+)
+
+// DeploymentState represents the possible states in which a deployment can be.
+type DeploymentState string
+
+// The possible states in which a deployment can be.
+const (
+ DeploymentStateAbandoned DeploymentState = "ABANDONED" // The pending deployment was not updated after 30 minutes.
+ DeploymentStateActive DeploymentState = "ACTIVE" // The deployment is currently active.
+ DeploymentStateDestroyed DeploymentState = "DESTROYED" // An inactive transient deployment.
+ DeploymentStateError DeploymentState = "ERROR" // The deployment experienced an error.
+ DeploymentStateFailure DeploymentState = "FAILURE" // The deployment has failed.
+ DeploymentStateInactive DeploymentState = "INACTIVE" // The deployment is inactive.
+ DeploymentStatePending DeploymentState = "PENDING" // The deployment is pending.
+)
+
+// DeploymentStatusState represents the possible states for a deployment status.
+type DeploymentStatusState string
+
+// The possible states for a deployment status.
+const (
+ DeploymentStatusStatePending DeploymentStatusState = "PENDING" // The deployment is pending.
+ DeploymentStatusStateSuccess DeploymentStatusState = "SUCCESS" // The deployment was successful.
+ DeploymentStatusStateFailure DeploymentStatusState = "FAILURE" // The deployment has failed.
+ DeploymentStatusStateInactive DeploymentStatusState = "INACTIVE" // The deployment is inactive.
+ DeploymentStatusStateError DeploymentStatusState = "ERROR" // The deployment experienced an error.
+)
+
+// GistOrderField represents properties by which gist connections can be ordered.
+type GistOrderField string
+
+// Properties by which gist connections can be ordered.
+const (
+ GistOrderFieldCreatedAt GistOrderField = "CREATED_AT" // Order gists by creation time.
+ GistOrderFieldUpdatedAt GistOrderField = "UPDATED_AT" // Order gists by update time.
+ GistOrderFieldPushedAt GistOrderField = "PUSHED_AT" // Order gists by push time.
+)
+
+// GistPrivacy represents the privacy of a Gist.
+type GistPrivacy string
+
+// The privacy of a Gist.
+const (
+ GistPrivacyPublic GistPrivacy = "PUBLIC" // Public.
+ GistPrivacySecret GistPrivacy = "SECRET" // Secret.
+ GistPrivacyAll GistPrivacy = "ALL" // Gists that are public and secret.
+)
+
+// GitSignatureState represents the state of a Git signature.
+type GitSignatureState string
+
+// The state of a Git signature.
+const (
+ GitSignatureStateValid GitSignatureState = "VALID" // Valid signature and verified by GitHub.
+ GitSignatureStateInvalid GitSignatureState = "INVALID" // Invalid signature.
+ GitSignatureStateMalformedSig GitSignatureState = "MALFORMED_SIG" // Malformed signature.
+ GitSignatureStateUnknownKey GitSignatureState = "UNKNOWN_KEY" // Key used for signing not known to GitHub.
+ GitSignatureStateBadEmail GitSignatureState = "BAD_EMAIL" // Invalid email used for signing.
+ GitSignatureStateUnverifiedEmail GitSignatureState = "UNVERIFIED_EMAIL" // Email used for signing unverified on GitHub.
+ GitSignatureStateNoUser GitSignatureState = "NO_USER" // Email used for signing not known to GitHub.
+ GitSignatureStateUnknownSigType GitSignatureState = "UNKNOWN_SIG_TYPE" // Unknown signature type.
+ GitSignatureStateUnsigned GitSignatureState = "UNSIGNED" // Unsigned.
+ GitSignatureStateGpgverifyUnavailable GitSignatureState = "GPGVERIFY_UNAVAILABLE" // Internal error - the GPG verification service is unavailable at the moment.
+ GitSignatureStateGpgverifyError GitSignatureState = "GPGVERIFY_ERROR" // Internal error - the GPG verification service misbehaved.
+ GitSignatureStateNotSigningKey GitSignatureState = "NOT_SIGNING_KEY" // The usage flags for the key that signed this don't allow signing.
+ GitSignatureStateExpiredKey GitSignatureState = "EXPIRED_KEY" // Signing key expired.
+ GitSignatureStateOcspPending GitSignatureState = "OCSP_PENDING" // Valid signature, pending certificate revocation checking.
+ GitSignatureStateOcspError GitSignatureState = "OCSP_ERROR" // Valid signature, though certificate revocation check failed.
+ GitSignatureStateOcspRevoked GitSignatureState = "OCSP_REVOKED" // One or more certificates in chain has been revoked.
+)
+
+// IssueOrderField represents properties by which issue connections can be ordered.
+type IssueOrderField string
+
+// Properties by which issue connections can be ordered.
+const (
+ IssueOrderFieldCreatedAt IssueOrderField = "CREATED_AT" // Order issues by creation time.
+ IssueOrderFieldUpdatedAt IssueOrderField = "UPDATED_AT" // Order issues by update time.
+ IssueOrderFieldComments IssueOrderField = "COMMENTS" // Order issues by comment count.
+)
+
+// IssuePubSubTopic represents the possible PubSub channels for an issue.
+type IssuePubSubTopic string
+
+// The possible PubSub channels for an issue.
+const (
+ IssuePubSubTopicUpdated IssuePubSubTopic = "UPDATED" // The channel ID for observing issue updates.
+ IssuePubSubTopicMarkasread IssuePubSubTopic = "MARKASREAD" // The channel ID for marking an issue as read.
+ IssuePubSubTopicTimeline IssuePubSubTopic = "TIMELINE" // The channel ID for updating items on the issue timeline.
+)
+
+// IssueState represents the possible states of an issue.
+type IssueState string
+
+// The possible states of an issue.
+const (
+ IssueStateOpen IssueState = "OPEN" // An issue that is still open.
+ IssueStateClosed IssueState = "CLOSED" // An issue that has been closed.
+)
+
+// LanguageOrderField represents properties by which language connections can be ordered.
+type LanguageOrderField string
+
+// Properties by which language connections can be ordered.
+const (
+ LanguageOrderFieldSize LanguageOrderField = "SIZE" // Order languages by the size of all files containing the language.
+)
+
+// LockReason represents the possible reasons that an issue or pull request was locked.
+type LockReason string
+
+// The possible reasons that an issue or pull request was locked.
+const (
+ LockReasonOffTopic LockReason = "OFF_TOPIC" // The issue or pull request was locked because the conversation was off-topic.
+ LockReasonTooHeated LockReason = "TOO_HEATED" // The issue or pull request was locked because the conversation was too heated.
+ LockReasonResolved LockReason = "RESOLVED" // The issue or pull request was locked because the conversation was resolved.
+ LockReasonSpam LockReason = "SPAM" // The issue or pull request was locked because the conversation was spam.
+)
+
+// MergeableState represents whether or not a PullRequest can be merged.
+type MergeableState string
+
+// Whether or not a PullRequest can be merged.
+const (
+ MergeableStateMergeable MergeableState = "MERGEABLE" // The pull request can be merged.
+ MergeableStateConflicting MergeableState = "CONFLICTING" // The pull request cannot be merged due to merge conflicts.
+ MergeableStateUnknown MergeableState = "UNKNOWN" // The mergeability of the pull request is still being calculated.
+)
+
+// MilestoneOrderField represents properties by which milestone connections can be ordered.
+type MilestoneOrderField string
+
+// Properties by which milestone connections can be ordered.
+const (
+ MilestoneOrderFieldDueDate MilestoneOrderField = "DUE_DATE" // Order milestones by when they are due.
+ MilestoneOrderFieldCreatedAt MilestoneOrderField = "CREATED_AT" // Order milestones by when they were created.
+ MilestoneOrderFieldUpdatedAt MilestoneOrderField = "UPDATED_AT" // Order milestones by when they were last updated.
+ MilestoneOrderFieldNumber MilestoneOrderField = "NUMBER" // Order milestones by their number.
+)
+
+// MilestoneState represents the possible states of a milestone.
+type MilestoneState string
+
+// The possible states of a milestone.
+const (
+ MilestoneStateOpen MilestoneState = "OPEN" // A milestone that is still open.
+ MilestoneStateClosed MilestoneState = "CLOSED" // A milestone that has been closed.
+)
+
+// OrderDirection represents possible directions in which to order a list of items when provided an `orderBy` argument.
+type OrderDirection string
+
+// Possible directions in which to order a list of items when provided an `orderBy` argument.
+const (
+ OrderDirectionAsc OrderDirection = "ASC" // Specifies an ascending order for a given `orderBy` argument.
+ OrderDirectionDesc OrderDirection = "DESC" // Specifies a descending order for a given `orderBy` argument.
+)
+
+// OrganizationInvitationRole represents the possible organization invitation roles.
+type OrganizationInvitationRole string
+
+// The possible organization invitation roles.
+const (
+ OrganizationInvitationRoleDirectMember OrganizationInvitationRole = "DIRECT_MEMBER" // The user is invited to be a direct member of the organization.
+ OrganizationInvitationRoleAdmin OrganizationInvitationRole = "ADMIN" // The user is invited to be an admin of the organization.
+ OrganizationInvitationRoleBillingManager OrganizationInvitationRole = "BILLING_MANAGER" // The user is invited to be a billing manager of the organization.
+ OrganizationInvitationRoleReinstate OrganizationInvitationRole = "REINSTATE" // The user's previous role will be reinstated.
+)
+
+// OrganizationInvitationType represents the possible organization invitation types.
+type OrganizationInvitationType string
+
+// The possible organization invitation types.
+const (
+ OrganizationInvitationTypeUser OrganizationInvitationType = "USER" // The invitation was to an existing user.
+ OrganizationInvitationTypeEmail OrganizationInvitationType = "EMAIL" // The invitation was to an email address.
+)
+
+// ProjectCardState represents various content states of a ProjectCard.
+type ProjectCardState string
+
+// Various content states of a ProjectCard.
+const (
+ ProjectCardStateContentOnly ProjectCardState = "CONTENT_ONLY" // The card has content only.
+ ProjectCardStateNoteOnly ProjectCardState = "NOTE_ONLY" // The card has a note only.
+ ProjectCardStateRedacted ProjectCardState = "REDACTED" // The card is redacted.
+)
+
+// ProjectOrderField represents properties by which project connections can be ordered.
+type ProjectOrderField string
+
+// Properties by which project connections can be ordered.
+const (
+ ProjectOrderFieldCreatedAt ProjectOrderField = "CREATED_AT" // Order projects by creation time.
+ ProjectOrderFieldUpdatedAt ProjectOrderField = "UPDATED_AT" // Order projects by update time.
+ ProjectOrderFieldName ProjectOrderField = "NAME" // Order projects by name.
+)
+
+// ProjectState represents state of the project; either 'open' or 'closed'.
+type ProjectState string
+
+// State of the project; either 'open' or 'closed'.
+const (
+ ProjectStateOpen ProjectState = "OPEN" // The project is open.
+ ProjectStateClosed ProjectState = "CLOSED" // The project is closed.
+)
+
+// PullRequestPubSubTopic represents the possible PubSub channels for a pull request.
+type PullRequestPubSubTopic string
+
+// The possible PubSub channels for a pull request.
+const (
+ PullRequestPubSubTopicUpdated PullRequestPubSubTopic = "UPDATED" // The channel ID for observing pull request updates.
+ PullRequestPubSubTopicMarkasread PullRequestPubSubTopic = "MARKASREAD" // The channel ID for marking a pull request as read.
+ PullRequestPubSubTopicHeadRef PullRequestPubSubTopic = "HEAD_REF" // The channel ID for observing head ref updates.
+ PullRequestPubSubTopicTimeline PullRequestPubSubTopic = "TIMELINE" // The channel ID for updating items on the pull request timeline.
+)
+
+// PullRequestReviewEvent represents the possible events to perform on a pull request review.
+type PullRequestReviewEvent string
+
+// The possible events to perform on a pull request review.
+const (
+ PullRequestReviewEventComment PullRequestReviewEvent = "COMMENT" // Submit general feedback without explicit approval.
+ PullRequestReviewEventApprove PullRequestReviewEvent = "APPROVE" // Submit feedback and approve merging these changes.
+ PullRequestReviewEventRequestChanges PullRequestReviewEvent = "REQUEST_CHANGES" // Submit feedback that must be addressed before merging.
+ PullRequestReviewEventDismiss PullRequestReviewEvent = "DISMISS" // Dismiss review so it no longer affects merging.
+)
+
+// PullRequestReviewState represents the possible states of a pull request review.
+type PullRequestReviewState string
+
+// The possible states of a pull request review.
+const (
+ PullRequestReviewStatePending PullRequestReviewState = "PENDING" // A review that has not yet been submitted.
+ PullRequestReviewStateCommented PullRequestReviewState = "COMMENTED" // An informational review.
+ PullRequestReviewStateApproved PullRequestReviewState = "APPROVED" // A review allowing the pull request to merge.
+ PullRequestReviewStateChangesRequested PullRequestReviewState = "CHANGES_REQUESTED" // A review blocking the pull request from merging.
+ PullRequestReviewStateDismissed PullRequestReviewState = "DISMISSED" // A review that has been dismissed.
+)
+
+// PullRequestState represents the possible states of a pull request.
+type PullRequestState string
+
+// The possible states of a pull request.
+const (
+ PullRequestStateOpen PullRequestState = "OPEN" // A pull request that is still open.
+ PullRequestStateClosed PullRequestState = "CLOSED" // A pull request that has been closed without being merged.
+ PullRequestStateMerged PullRequestState = "MERGED" // A pull request that has been closed by being merged.
+)
+
+// ReactionContent represents emojis that can be attached to Issues, Pull Requests and Comments.
+type ReactionContent string
+
+// Emojis that can be attached to Issues, Pull Requests and Comments.
+const (
+ ReactionContentThumbsUp ReactionContent = "THUMBS_UP" // Represents the 👍 emoji.
+ ReactionContentThumbsDown ReactionContent = "THUMBS_DOWN" // Represents the 👎 emoji.
+ ReactionContentLaugh ReactionContent = "LAUGH" // Represents the 😄 emoji.
+ ReactionContentHooray ReactionContent = "HOORAY" // Represents the 🎉 emoji.
+ ReactionContentConfused ReactionContent = "CONFUSED" // Represents the 😕 emoji.
+ ReactionContentHeart ReactionContent = "HEART" // Represents the ❤️ emoji.
+)
+
+// ReactionOrderField represents a list of fields that reactions can be ordered by.
+type ReactionOrderField string
+
+// A list of fields that reactions can be ordered by.
+const (
+ ReactionOrderFieldCreatedAt ReactionOrderField = "CREATED_AT" // Allows ordering a list of reactions by when they were created.
+)
+
+// RefOrderField represents properties by which ref connections can be ordered.
+type RefOrderField string
+
+// Properties by which ref connections can be ordered.
+const (
+ RefOrderFieldTagCommitDate RefOrderField = "TAG_COMMIT_DATE" // Order refs by underlying commit date if the ref prefix is refs/tags/.
+ RefOrderFieldAlphabetical RefOrderField = "ALPHABETICAL" // Order refs by their alphanumeric name.
+)
+
+// ReleaseOrderField represents properties by which release connections can be ordered.
+type ReleaseOrderField string
+
+// Properties by which release connections can be ordered.
+const (
+ ReleaseOrderFieldCreatedAt ReleaseOrderField = "CREATED_AT" // Order releases by creation time.
+ ReleaseOrderFieldName ReleaseOrderField = "NAME" // Order releases alphabetically by name.
+)
+
+// RepositoryAffiliation represents the affiliation of a user to a repository.
+type RepositoryAffiliation string
+
+// The affiliation of a user to a repository.
+const (
+ RepositoryAffiliationOwner RepositoryAffiliation = "OWNER" // Repositories that are owned by the authenticated user.
+ RepositoryAffiliationCollaborator RepositoryAffiliation = "COLLABORATOR" // Repositories that the user has been added to as a collaborator.
+ RepositoryAffiliationOrganizationMember RepositoryAffiliation = "ORGANIZATION_MEMBER" // Repositories that the user has access to through being a member of an organization. This includes every repository on every team that the user is on.
+)
+
+// RepositoryCollaboratorAffiliation represents the affiliation type between collaborator and repository.
+type RepositoryCollaboratorAffiliation string
+
+// The affiliation type between collaborator and repository.
+const (
+ RepositoryCollaboratorAffiliationAll RepositoryCollaboratorAffiliation = "ALL" // All collaborators of the repository.
+ RepositoryCollaboratorAffiliationOutside RepositoryCollaboratorAffiliation = "OUTSIDE" // All outside collaborators of an organization-owned repository.
+)
+
+// RepositoryContributionType represents the reason a repository is listed as 'contributed'.
+type RepositoryContributionType string
+
+// The reason a repository is listed as 'contributed'.
+const (
+ RepositoryContributionTypeCommit RepositoryContributionType = "COMMIT" // Created a commit.
+ RepositoryContributionTypeIssue RepositoryContributionType = "ISSUE" // Created an issue.
+ RepositoryContributionTypePullRequest RepositoryContributionType = "PULL_REQUEST" // Created a pull request.
+ RepositoryContributionTypeRepository RepositoryContributionType = "REPOSITORY" // Created the repository.
+ RepositoryContributionTypePullRequestReview RepositoryContributionType = "PULL_REQUEST_REVIEW" // Reviewed a pull request.
+)
+
+// RepositoryLockReason represents the possible reasons a given repository could be in a locked state.
+type RepositoryLockReason string
+
+// The possible reasons a given repository could be in a locked state.
+const (
+ RepositoryLockReasonMoving RepositoryLockReason = "MOVING" // The repository is locked due to a move.
+ RepositoryLockReasonBilling RepositoryLockReason = "BILLING" // The repository is locked due to a billing related reason.
+ RepositoryLockReasonRename RepositoryLockReason = "RENAME" // The repository is locked due to a rename.
+ RepositoryLockReasonMigrating RepositoryLockReason = "MIGRATING" // The repository is locked due to a migration.
+)
+
+// RepositoryOrderField represents properties by which repository connections can be ordered.
+type RepositoryOrderField string
+
+// Properties by which repository connections can be ordered.
+const (
+ RepositoryOrderFieldCreatedAt RepositoryOrderField = "CREATED_AT" // Order repositories by creation time.
+ RepositoryOrderFieldUpdatedAt RepositoryOrderField = "UPDATED_AT" // Order repositories by update time.
+ RepositoryOrderFieldPushedAt RepositoryOrderField = "PUSHED_AT" // Order repositories by push time.
+ RepositoryOrderFieldName RepositoryOrderField = "NAME" // Order repositories by name.
+ RepositoryOrderFieldStargazers RepositoryOrderField = "STARGAZERS" // Order repositories by number of stargazers.
+)
+
+// RepositoryPermission represents the access level to a repository.
+type RepositoryPermission string
+
+// The access level to a repository.
+const (
+ RepositoryPermissionAdmin RepositoryPermission = "ADMIN" // Can read, clone, push, and add collaborators.
+ RepositoryPermissionWrite RepositoryPermission = "WRITE" // Can read, clone and push.
+ RepositoryPermissionRead RepositoryPermission = "READ" // Can read and clone.
+)
+
+// RepositoryPrivacy represents the privacy of a repository.
+type RepositoryPrivacy string
+
+// The privacy of a repository.
+const (
+ RepositoryPrivacyPublic RepositoryPrivacy = "PUBLIC" // Public.
+ RepositoryPrivacyPrivate RepositoryPrivacy = "PRIVATE" // Private.
+)
+
+// SearchType represents the individual results of a search.
+type SearchType string
+
+// Represents the individual results of a search.
+const (
+ SearchTypeIssue SearchType = "ISSUE" // Returns results matching issues in repositories.
+ SearchTypeRepository SearchType = "REPOSITORY" // Returns results matching repositories.
+ SearchTypeUser SearchType = "USER" // Returns results matching users and organizations on GitHub.
+)
+
+// StarOrderField represents properties by which star connections can be ordered.
+type StarOrderField string
+
+// Properties by which star connections can be ordered.
+const (
+ StarOrderFieldStarredAt StarOrderField = "STARRED_AT" // Allows ordering a list of stars by when they were created.
+)
+
+// StatusState represents the possible commit status states.
+type StatusState string
+
+// The possible commit status states.
+const (
+ StatusStateExpected StatusState = "EXPECTED" // Status is expected.
+ StatusStateError StatusState = "ERROR" // Status is errored.
+ StatusStateFailure StatusState = "FAILURE" // Status is failing.
+ StatusStatePending StatusState = "PENDING" // Status is pending.
+ StatusStateSuccess StatusState = "SUCCESS" // Status is successful.
+)
+
+// SubscriptionState represents the possible states of a subscription.
+type SubscriptionState string
+
+// The possible states of a subscription.
+const (
+ SubscriptionStateUnsubscribed SubscriptionState = "UNSUBSCRIBED" // The User is only notified when participating or @mentioned.
+ SubscriptionStateSubscribed SubscriptionState = "SUBSCRIBED" // The User is notified of all conversations.
+ SubscriptionStateIgnored SubscriptionState = "IGNORED" // The User is never notified.
+)
+
+// TeamMemberOrderField represents properties by which team member connections can be ordered.
+type TeamMemberOrderField string
+
+// Properties by which team member connections can be ordered.
+const (
+ TeamMemberOrderFieldLogin TeamMemberOrderField = "LOGIN" // Order team members by login.
+ TeamMemberOrderFieldCreatedAt TeamMemberOrderField = "CREATED_AT" // Order team members by creation time.
+)
+
+// TeamMemberRole represents the possible team member roles; either 'maintainer' or 'member'.
+type TeamMemberRole string
+
+// The possible team member roles; either 'maintainer' or 'member'.
+const (
+ TeamMemberRoleMaintainer TeamMemberRole = "MAINTAINER" // A team maintainer has permission to add and remove team members.
+ TeamMemberRoleMember TeamMemberRole = "MEMBER" // A team member has no administrative permissions on the team.
+)
+
+// TeamMembershipType defines which types of team members are included in the returned list. Can be one of IMMEDIATE, CHILD_TEAM or ALL.
+type TeamMembershipType string
+
+// Defines which types of team members are included in the returned list. Can be one of IMMEDIATE, CHILD_TEAM or ALL.
+const (
+ TeamMembershipTypeImmediate TeamMembershipType = "IMMEDIATE" // Includes only immediate members of the team.
+ TeamMembershipTypeChildTeam TeamMembershipType = "CHILD_TEAM" // Includes only child team members for the team.
+ TeamMembershipTypeAll TeamMembershipType = "ALL" // Includes immediate and child team members for the team.
+)
+
+// TeamOrderField represents properties by which team connections can be ordered.
+type TeamOrderField string
+
+// Properties by which team connections can be ordered.
+const (
+ TeamOrderFieldName TeamOrderField = "NAME" // Allows ordering a list of teams by name.
+)
+
+// TeamPrivacy represents the possible team privacy values.
+type TeamPrivacy string
+
+// The possible team privacy values.
+const (
+ TeamPrivacySecret TeamPrivacy = "SECRET" // A secret team can only be seen by its members.
+ TeamPrivacyVisible TeamPrivacy = "VISIBLE" // A visible team can be seen and @mentioned by every member of the organization.
+)
+
+// TeamRepositoryOrderField represents properties by which team repository connections can be ordered.
+type TeamRepositoryOrderField string
+
+// Properties by which team repository connections can be ordered.
+const (
+ TeamRepositoryOrderFieldCreatedAt TeamRepositoryOrderField = "CREATED_AT" // Order repositories by creation time.
+ TeamRepositoryOrderFieldUpdatedAt TeamRepositoryOrderField = "UPDATED_AT" // Order repositories by update time.
+ TeamRepositoryOrderFieldPushedAt TeamRepositoryOrderField = "PUSHED_AT" // Order repositories by push time.
+ TeamRepositoryOrderFieldName TeamRepositoryOrderField = "NAME" // Order repositories by name.
+ TeamRepositoryOrderFieldPermission TeamRepositoryOrderField = "PERMISSION" // Order repositories by permission.
+ TeamRepositoryOrderFieldStargazers TeamRepositoryOrderField = "STARGAZERS" // Order repositories by number of stargazers.
+)
+
+// TeamRole represents the role of a user on a team.
+type TeamRole string
+
+// The role of a user on a team.
+const (
+ TeamRoleAdmin TeamRole = "ADMIN" // User has admin rights on the team.
+ TeamRoleMember TeamRole = "MEMBER" // User is a member of the team.
+)
+
+// TopicSuggestionDeclineReason represents reason that the suggested topic is declined.
+type TopicSuggestionDeclineReason string
+
+// Reason that the suggested topic is declined.
+const (
+ TopicSuggestionDeclineReasonNotRelevant TopicSuggestionDeclineReason = "NOT_RELEVANT" // The suggested topic is not relevant to the repository.
+ TopicSuggestionDeclineReasonTooSpecific TopicSuggestionDeclineReason = "TOO_SPECIFIC" // The suggested topic is too specific for the repository (e.g. #ruby-on-rails-version-4-2-1).
+ TopicSuggestionDeclineReasonPersonalPreference TopicSuggestionDeclineReason = "PERSONAL_PREFERENCE" // The viewer does not like the suggested topic.
+ TopicSuggestionDeclineReasonTooGeneral TopicSuggestionDeclineReason = "TOO_GENERAL" // The suggested topic is too general for the repository.
+)
diff --git a/vendor/github.com/shurcooL/githubv4/gen.go b/vendor/github.com/shurcooL/githubv4/gen.go
new file mode 100644
index 00000000..43ad1e35
--- /dev/null
+++ b/vendor/github.com/shurcooL/githubv4/gen.go
@@ -0,0 +1,192 @@
+// +build ignore
+
+package main
+
+import (
+ "bytes"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "go/format"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+ "text/template"
+
+ "github.com/shurcooL/graphql/ident"
+)
+
+func main() {
+ flag.Parse()
+
+ err := run()
+ if err != nil {
+ log.Fatalln(err)
+ }
+}
+
+func run() error {
+ githubToken, ok := os.LookupEnv("GITHUB_TOKEN")
+ if !ok {
+ return fmt.Errorf("GITHUB_TOKEN environment variable not set")
+ }
+ schema, err := loadSchema(githubToken)
+ if err != nil {
+ return err
+ }
+
+ for filename, t := range templates {
+ var buf bytes.Buffer
+ err := t.Execute(&buf, schema)
+ if err != nil {
+ return err
+ }
+ out, err := format.Source(buf.Bytes())
+ if err != nil {
+ log.Println(err)
+ out = []byte("// gofmt error: " + err.Error() + "\n\n" + buf.String())
+ }
+ fmt.Println("writing", filename)
+ err = ioutil.WriteFile(filename, out, 0644)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func loadSchema(githubToken string) (schema interface{}, err error) {
+ req, err := http.NewRequest("GET", "https://api.github.com/graphql", nil)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Authorization", "bearer "+githubToken)
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ err = json.NewDecoder(resp.Body).Decode(&schema)
+ return schema, err
+}
+
+// Filename -> Template.
+var templates = map[string]*template.Template{
+ "enum.go": t(`// Code generated by gen.go; DO NOT EDIT.
+
+package githubv4
+{{range .data.__schema.types | sortByName}}{{if and (eq .kind "ENUM") (not (internal .name))}}
+{{template "enum" .}}
+{{end}}{{end}}
+
+
+{{- define "enum" -}}
+// {{.name}} {{.description | endSentence}}
+type {{.name}} string
+
+// {{.description | fullSentence}}
+const ({{range .enumValues}}
+ {{$.name}}{{.name | enumIdentifier}} {{$.name}} = {{.name | quote}} // {{.description | fullSentence}}{{end}}
+)
+{{- end -}}
+`),
+
+ "input.go": t(`// Code generated by gen.go; DO NOT EDIT.
+
+package githubv4
+
+// Input represents one of the Input structs:
+//
+// {{join (inputObjects .data.__schema.types) ", "}}.
+type Input interface{}
+{{range .data.__schema.types | sortByName}}{{if eq .kind "INPUT_OBJECT"}}
+{{template "inputObject" .}}
+{{end}}{{end}}
+
+
+{{- define "inputObject" -}}
+// {{.name}} {{.description | endSentence}}
+type {{.name}} struct {{"{"}}{{range .inputFields}}{{if eq .type.kind "NON_NULL"}}
+ // {{.description | fullSentence}} (Required.)
+ {{.name | identifier}} {{.type | type}} ` + "`" + `json:"{{.name}}"` + "`" + `{{end}}{{end}}
+{{range .inputFields}}{{if ne .type.kind "NON_NULL"}}
+ // {{.description | fullSentence}} (Optional.)
+ {{.name | identifier}} {{.type | type}} ` + "`" + `json:"{{.name}},omitempty"` + "`" + `{{end}}{{end}}
+}
+{{- end -}}
+`),
+}
+
+func t(text string) *template.Template {
+ // typeString returns a string representation of GraphQL type t.
+ var typeString func(t map[string]interface{}) string
+ typeString = func(t map[string]interface{}) string {
+ switch t["kind"] {
+ case "NON_NULL":
+ s := typeString(t["ofType"].(map[string]interface{}))
+ if !strings.HasPrefix(s, "*") {
+ panic(fmt.Errorf("nullable type %q doesn't begin with '*'", s))
+ }
+ return s[1:] // Strip star from nullable type to make it non-null.
+ case "LIST":
+ return "*[]" + typeString(t["ofType"].(map[string]interface{}))
+ default:
+ return "*" + t["name"].(string)
+ }
+ }
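+	// For example (illustrative, not part of the upstream source): with this
+	// mapping the GraphQL type String becomes *String, String! becomes String,
+	// and [String!]! becomes []String.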
+
+ return template.Must(template.New("").Funcs(template.FuncMap{
+ "internal": func(s string) bool { return strings.HasPrefix(s, "__") },
+ "quote": strconv.Quote,
+ "join": strings.Join,
+ "sortByName": func(types []interface{}) []interface{} {
+ sort.Slice(types, func(i, j int) bool {
+ ni := types[i].(map[string]interface{})["name"].(string)
+ nj := types[j].(map[string]interface{})["name"].(string)
+ return ni < nj
+ })
+ return types
+ },
+ "inputObjects": func(types []interface{}) []string {
+ var names []string
+ for _, t := range types {
+ t := t.(map[string]interface{})
+ if t["kind"].(string) != "INPUT_OBJECT" {
+ continue
+ }
+ names = append(names, t["name"].(string))
+ }
+ sort.Strings(names)
+ return names
+ },
+ "identifier": func(name string) string { return ident.ParseLowerCamelCase(name).ToMixedCaps() },
+ "enumIdentifier": func(name string) string { return ident.ParseScreamingSnakeCase(name).ToMixedCaps() },
+ "type": typeString,
+ "endSentence": func(s string) string {
+ s = strings.ToLower(s[0:1]) + s[1:]
+ switch {
+ default:
+ s = "represents " + s
+ case strings.HasPrefix(s, "autogenerated "):
+ s = "is an " + s
+ case strings.HasPrefix(s, "specifies "):
+ // Do nothing.
+ }
+ if !strings.HasSuffix(s, ".") {
+ s += "."
+ }
+ return s
+ },
+ "fullSentence": func(s string) string {
+ if !strings.HasSuffix(s, ".") {
+ s += "."
+ }
+ return s
+ },
+ }).Parse(text))
+}
diff --git a/vendor/github.com/shurcooL/githubv4/githubv4.go b/vendor/github.com/shurcooL/githubv4/githubv4.go
new file mode 100644
index 00000000..3a544bf2
--- /dev/null
+++ b/vendor/github.com/shurcooL/githubv4/githubv4.go
@@ -0,0 +1,56 @@
+package githubv4
+
+import (
+ "context"
+ "net/http"
+
+ "github.com/shurcooL/graphql"
+)
+
+// Client is a GitHub GraphQL API v4 client.
+type Client struct {
+ client *graphql.Client
+}
+
+// NewClient creates a new GitHub GraphQL API v4 client with the provided http.Client.
+// If httpClient is nil, then http.DefaultClient is used.
+//
+// Note that GitHub GraphQL API v4 requires authentication, so
+// the provided http.Client is expected to take care of that.
+func NewClient(httpClient *http.Client) *Client {
+ return &Client{
+ client: graphql.NewClient("https://api.github.com/graphql", httpClient),
+ }
+}
+
+// NewEnterpriseClient creates a new GitHub GraphQL API v4 client for the GitHub Enterprise
+// instance with the specified GraphQL endpoint URL, using the provided http.Client.
+// If httpClient is nil, then http.DefaultClient is used.
+//
+// Note that GitHub GraphQL API v4 requires authentication, so
+// the provided http.Client is expected to take care of that.
+func NewEnterpriseClient(url string, httpClient *http.Client) *Client {
+ return &Client{
+ client: graphql.NewClient(url, httpClient),
+ }
+}
+
+// Query executes a single GraphQL query request,
+// with a query derived from q, populating the response into it.
+// q should be a pointer to struct that corresponds to the GitHub GraphQL schema.
+func (c *Client) Query(ctx context.Context, q interface{}, variables map[string]interface{}) error {
+ return c.client.Query(ctx, q, variables)
+}
+
+// Mutate executes a single GraphQL mutation request,
+// with a mutation derived from m, populating the response into it.
+// m should be a pointer to struct that corresponds to the GitHub GraphQL schema.
+// Provided input will be set as a variable named "input".
+func (c *Client) Mutate(ctx context.Context, m interface{}, input Input, variables map[string]interface{}) error {
+ if variables == nil {
+ variables = map[string]interface{}{"input": input}
+ } else {
+ variables["input"] = input
+ }
+ return c.client.Mutate(ctx, m, variables)
+}
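+
+// A minimal usage sketch (illustrative; ctx, client, m and id come from the
+// caller): because Mutate sets the "input" variable itself, the variables
+// argument is usually nil unless the mutation document references additional variables.
+//
+//	input := AddReactionInput{SubjectID: id, Content: ReactionContentHooray}
+//	err := client.Mutate(ctx, &m, input, nil)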
diff --git a/vendor/github.com/shurcooL/githubv4/input.go b/vendor/github.com/shurcooL/githubv4/input.go
new file mode 100644
index 00000000..ce1976f8
--- /dev/null
+++ b/vendor/github.com/shurcooL/githubv4/input.go
@@ -0,0 +1,483 @@
+// Code generated by gen.go; DO NOT EDIT.
+
+package githubv4
+
+// Input represents one of the Input structs:
+//
+// AcceptTopicSuggestionInput, AddCommentInput, AddProjectCardInput, AddProjectColumnInput, AddPullRequestReviewCommentInput, AddPullRequestReviewInput, AddReactionInput, AddStarInput, CommitAuthor, CreateProjectInput, DeclineTopicSuggestionInput, DeleteProjectCardInput, DeleteProjectColumnInput, DeleteProjectInput, DeletePullRequestReviewInput, DismissPullRequestReviewInput, DraftPullRequestReviewComment, GistOrder, IssueOrder, LanguageOrder, LockLockableInput, MilestoneOrder, MoveProjectCardInput, MoveProjectColumnInput, ProjectOrder, ReactionOrder, RefOrder, ReleaseOrder, RemoveOutsideCollaboratorInput, RemoveReactionInput, RemoveStarInput, RepositoryOrder, RequestReviewsInput, StarOrder, SubmitPullRequestReviewInput, TeamMemberOrder, TeamOrder, TeamRepositoryOrder, UpdateProjectCardInput, UpdateProjectColumnInput, UpdateProjectInput, UpdatePullRequestReviewCommentInput, UpdatePullRequestReviewInput, UpdateSubscriptionInput, UpdateTopicsInput.
+type Input interface{}
+
+// AcceptTopicSuggestionInput is an autogenerated input type of AcceptTopicSuggestion.
+type AcceptTopicSuggestionInput struct {
+ // The Node ID of the repository. (Required.)
+ RepositoryID ID `json:"repositoryId"`
+ // The name of the suggested topic. (Required.)
+ Name String `json:"name"`
+
+ // A unique identifier for the client performing the mutation. (Optional.)
+ ClientMutationID *String `json:"clientMutationId,omitempty"`
+}
+
+// AddCommentInput is an autogenerated input type of AddComment.
+type AddCommentInput struct {
+ // The Node ID of the subject to modify. (Required.)
+ SubjectID ID `json:"subjectId"`
+ // The contents of the comment. (Required.)
+ Body String `json:"body"`
+
+ // A unique identifier for the client performing the mutation. (Optional.)
+ ClientMutationID *String `json:"clientMutationId,omitempty"`
+}
+
+// AddProjectCardInput is an autogenerated input type of AddProjectCard.
+type AddProjectCardInput struct {
+ // The Node ID of the ProjectColumn. (Required.)
+ ProjectColumnID ID `json:"projectColumnId"`
+
+ // The content of the card. Must be a member of the ProjectCardItem union. (Optional.)
+ ContentID *ID `json:"contentId,omitempty"`
+ // The note on the card. (Optional.)
+ Note *String `json:"note,omitempty"`
+ // A unique identifier for the client performing the mutation. (Optional.)
+ ClientMutationID *String `json:"clientMutationId,omitempty"`
+}
+
+// AddProjectColumnInput is an autogenerated input type of AddProjectColumn.
+type AddProjectColumnInput struct {
+ // The Node ID of the project. (Required.)
+ ProjectID ID `json:"projectId"`
+ // The name of the column. (Required.)
+ Name String `json:"name"`
+
+ // A unique identifier for the client performing the mutation. (Optional.)
+ ClientMutationID *String `json:"clientMutationId,omitempty"`
+}
+
+// AddPullRequestReviewCommentInput is an autogenerated input type of AddPullRequestReviewComment.
+type AddPullRequestReviewCommentInput struct {
+ // The Node ID of the review to modify. (Required.)
+ PullRequestReviewID ID `json:"pullRequestReviewId"`
+ // The text of the comment. (Required.)
+ Body String `json:"body"`
+
+ // The SHA of the commit to comment on. (Optional.)
+ CommitOID *GitObjectID `json:"commitOID,omitempty"`
+ // The relative path of the file to comment on. (Optional.)
+ Path *String `json:"path,omitempty"`
+ // The line index in the diff to comment on. (Optional.)
+ Position *Int `json:"position,omitempty"`
+ // The comment id to reply to. (Optional.)
+ InReplyTo *ID `json:"inReplyTo,omitempty"`
+ // A unique identifier for the client performing the mutation. (Optional.)
+ ClientMutationID *String `json:"clientMutationId,omitempty"`
+}
+
+// AddPullRequestReviewInput is an autogenerated input type of AddPullRequestReview.
+type AddPullRequestReviewInput struct {
+ // The Node ID of the pull request to modify. (Required.)
+ PullRequestID ID `json:"pullRequestId"`
+
+ // The commit OID the review pertains to. (Optional.)
+ CommitOID *GitObjectID `json:"commitOID,omitempty"`
+ // The contents of the review body comment. (Optional.)
+ Body *String `json:"body,omitempty"`
+ // The event to perform on the pull request review. (Optional.)
+ Event *PullRequestReviewEvent `json:"event,omitempty"`
+ // The review line comments. (Optional.)
+ Comments *[]*DraftPullRequestReviewComment `json:"comments,omitempty"`
+ // A unique identifier for the client performing the mutation. (Optional.)
+ ClientMutationID *String `json:"clientMutationId,omitempty"`
+}
+
+// AddReactionInput is an autogenerated input type of AddReaction.
+type AddReactionInput struct {
+ // The Node ID of the subject to modify. (Required.)
+ SubjectID ID `json:"subjectId"`
+ // The name of the emoji to react with. (Required.)
+ Content ReactionContent `json:"content"`
+
+ // A unique identifier for the client performing the mutation. (Optional.)
+ ClientMutationID *String `json:"clientMutationId,omitempty"`
+}
+
+// AddStarInput is an autogenerated input type of AddStar.
+type AddStarInput struct {
+ // The Starrable ID to star. (Required.)
+ StarrableID ID `json:"starrableId"`
+
+ // A unique identifier for the client performing the mutation. (Optional.)
+ ClientMutationID *String `json:"clientMutationId,omitempty"`
+}
+
+// CommitAuthor specifies an author for filtering Git commits.
+type CommitAuthor struct {
+
+ // ID of a User to filter by. If non-null, only commits authored by this user will be returned. This field takes precedence over emails. (Optional.)
+ ID *ID `json:"id,omitempty"`
+ // Email addresses to filter by. Commits authored by any of the specified email addresses will be returned. (Optional.)
+ Emails *[]String `json:"emails,omitempty"`
+}
+
+// CreateProjectInput is an autogenerated input type of CreateProject.
+type CreateProjectInput struct {
+ // The owner ID to create the project under. (Required.)
+ OwnerID ID `json:"ownerId"`
+ // The name of project. (Required.)
+ Name String `json:"name"`
+
+ // The description of project. (Optional.)
+ Body *String `json:"body,omitempty"`
+ // A unique identifier for the client performing the mutation. (Optional.)
+ ClientMutationID *String `json:"clientMutationId,omitempty"`
+}
+
+// DeclineTopicSuggestionInput is an autogenerated input type of DeclineTopicSuggestion.
+type DeclineTopicSuggestionInput struct {
+ // The Node ID of the repository. (Required.)
+ RepositoryID ID `json:"repositoryId"`
+ // The name of the suggested topic. (Required.)
+ Name String `json:"name"`
+ // The reason why the suggested topic is declined. (Required.)
+ Reason TopicSuggestionDeclineReason `json:"reason"`
+
+ // A unique identifier for the client performing the mutation. (Optional.)
+ ClientMutationID *String `json:"clientMutationId,omitempty"`
+}
+
+// DeleteProjectCardInput is an autogenerated input type of DeleteProjectCard.
+type DeleteProjectCardInput struct {
+ // The id of the card to delete. (Required.)
+ CardID ID `json:"cardId"`
+
+ // A unique identifier for the client performing the mutation. (Optional.)
+ ClientMutationID *String `json:"clientMutationId,omitempty"`
+}
+
+// DeleteProjectColumnInput is an autogenerated input type of DeleteProjectColumn.
+type DeleteProjectColumnInput struct {
+ // The id of the column to delete. (Required.)
+ ColumnID ID `json:"columnId"`
+
+ // A unique identifier for the client performing the mutation. (Optional.)
+ ClientMutationID *String `json:"clientMutationId,omitempty"`
+}
+
+// DeleteProjectInput is an autogenerated input type of DeleteProject.
+type DeleteProjectInput struct {
+ // The Project ID to update. (Required.)
+ ProjectID ID `json:"projectId"`
+
+ // A unique identifier for the client performing the mutation. (Optional.)
+ ClientMutationID *String `json:"clientMutationId,omitempty"`
+}
+
+// DeletePullRequestReviewInput is an autogenerated input type of DeletePullRequestReview.
+type DeletePullRequestReviewInput struct {
+ // The Node ID of the pull request review to delete. (Required.)
+ PullRequestReviewID ID `json:"pullRequestReviewId"`
+
+ // A unique identifier for the client performing the mutation. (Optional.)
+ ClientMutationID *String `json:"clientMutationId,omitempty"`
+}
+
+// DismissPullRequestReviewInput is an autogenerated input type of DismissPullRequestReview.
+type DismissPullRequestReviewInput struct {
+ // The Node ID of the pull request review to modify. (Required.)
+ PullRequestReviewID ID `json:"pullRequestReviewId"`
+ // The contents of the pull request review dismissal message. (Required.)
+ Message String `json:"message"`
+
+ // A unique identifier for the client performing the mutation. (Optional.)
+ ClientMutationID *String `json:"clientMutationId,omitempty"`
+}
+
+// DraftPullRequestReviewComment specifies a review comment to be left with a Pull Request Review.
+type DraftPullRequestReviewComment struct {
+ // Path to the file being commented on. (Required.)
+ Path String `json:"path"`
+ // Position in the file to leave a comment on. (Required.)
+ Position Int `json:"position"`
+ // Body of the comment to leave. (Required.)
+ Body String `json:"body"`
+}
+
+// GistOrder represents ordering options for gist connections.
+type GistOrder struct {
+ // The field to order repositories by. (Required.)
+ Field GistOrderField `json:"field"`
+ // The ordering direction. (Required.)
+ Direction OrderDirection `json:"direction"`
+}
+
+// IssueOrder represents ways in which lists of issues can be ordered upon return.
+type IssueOrder struct {
+ // The field in which to order issues by. (Required.)
+ Field IssueOrderField `json:"field"`
+ // The direction in which to order issues by the specified field. (Required.)
+ Direction OrderDirection `json:"direction"`
+}
+
+// LanguageOrder represents ordering options for language connections.
+type LanguageOrder struct {
+ // The field to order languages by. (Required.)
+ Field LanguageOrderField `json:"field"`
+ // The ordering direction. (Required.)
+ Direction OrderDirection `json:"direction"`
+}
+
+// LockLockableInput is an autogenerated input type of LockLockable.
+type LockLockableInput struct {
+ // ID of the issue or pull request to be locked. (Required.)
+ LockableID ID `json:"lockableId"`
+
+ // A reason for why the issue or pull request will be locked. (Optional.)
+ LockReason *LockReason `json:"lockReason,omitempty"`
+ // A unique identifier for the client performing the mutation. (Optional.)
+ ClientMutationID *String `json:"clientMutationId,omitempty"`
+}
+
+// MilestoneOrder represents ordering options for milestone connections.
+type MilestoneOrder struct {
+ // The field to order milestones by. (Required.)
+ Field MilestoneOrderField `json:"field"`
+ // The ordering direction. (Required.)
+ Direction OrderDirection `json:"direction"`
+}
+
+// MoveProjectCardInput is an autogenerated input type of MoveProjectCard.
+type MoveProjectCardInput struct {
+ // The id of the card to move. (Required.)
+ CardID ID `json:"cardId"`
+ // The id of the column to move it into. (Required.)
+ ColumnID ID `json:"columnId"`
+
+ // Place the new card after the card with this id. Pass null to place it at the top. (Optional.)
+ AfterCardID *ID `json:"afterCardId,omitempty"`
+ // A unique identifier for the client performing the mutation. (Optional.)
+ ClientMutationID *String `json:"clientMutationId,omitempty"`
+}
+
+// MoveProjectColumnInput is an autogenerated input type of MoveProjectColumn.
+type MoveProjectColumnInput struct {
+ // The id of the column to move. (Required.)
+ ColumnID ID `json:"columnId"`
+
+ // Place the new column after the column with this id. Pass null to place it at the front. (Optional.)
+ AfterColumnID *ID `json:"afterColumnId,omitempty"`
+ // A unique identifier for the client performing the mutation. (Optional.)
+ ClientMutationID *String `json:"clientMutationId,omitempty"`
+}
+
+// ProjectOrder represents ways in which lists of projects can be ordered upon return.
+type ProjectOrder struct {
+ // The field in which to order projects by. (Required.)
+ Field ProjectOrderField `json:"field"`
+ // The direction in which to order projects by the specified field. (Required.)
+ Direction OrderDirection `json:"direction"`
+}
+
+// ReactionOrder represents ways in which lists of reactions can be ordered upon return.
+type ReactionOrder struct {
+ // The field in which to order reactions by. (Required.)
+ Field ReactionOrderField `json:"field"`
+ // The direction in which to order reactions by the specified field. (Required.)
+ Direction OrderDirection `json:"direction"`
+}
+
+// RefOrder represents ways in which lists of git refs can be ordered upon return.
+type RefOrder struct {
+ // The field in which to order refs by. (Required.)
+ Field RefOrderField `json:"field"`
+ // The direction in which to order refs by the specified field. (Required.)
+ Direction OrderDirection `json:"direction"`
+}
+
+// ReleaseOrder represents ways in which lists of releases can be ordered upon return.
+type ReleaseOrder struct {
+ // The field in which to order releases by. (Required.)
+ Field ReleaseOrderField `json:"field"`
+ // The direction in which to order releases by the specified field. (Required.)
+ Direction OrderDirection `json:"direction"`
+}
+
+// RemoveOutsideCollaboratorInput is an autogenerated input type of RemoveOutsideCollaborator.
+type RemoveOutsideCollaboratorInput struct {
+ // The ID of the outside collaborator to remove. (Required.)
+ UserID ID `json:"userId"`
+ // The ID of the organization to remove the outside collaborator from. (Required.)
+ OrganizationID ID `json:"organizationId"`
+
+ // A unique identifier for the client performing the mutation. (Optional.)
+ ClientMutationID *String `json:"clientMutationId,omitempty"`
+}
+
+// RemoveReactionInput is an autogenerated input type of RemoveReaction.
+type RemoveReactionInput struct {
+ // The Node ID of the subject to modify. (Required.)
+ SubjectID ID `json:"subjectId"`
+ // The name of the emoji reaction to remove. (Required.)
+ Content ReactionContent `json:"content"`
+
+ // A unique identifier for the client performing the mutation. (Optional.)
+ ClientMutationID *String `json:"clientMutationId,omitempty"`
+}
+
+// RemoveStarInput is an autogenerated input type of RemoveStar.
+type RemoveStarInput struct {
+ // The Starrable ID to unstar. (Required.)
+ StarrableID ID `json:"starrableId"`
+
+ // A unique identifier for the client performing the mutation. (Optional.)
+ ClientMutationID *String `json:"clientMutationId,omitempty"`
+}
+
+// RepositoryOrder represents ordering options for repository connections.
+type RepositoryOrder struct {
+ // The field to order repositories by. (Required.)
+ Field RepositoryOrderField `json:"field"`
+ // The ordering direction. (Required.)
+ Direction OrderDirection `json:"direction"`
+}
+
+// RequestReviewsInput is an autogenerated input type of RequestReviews.
+type RequestReviewsInput struct {
+ // The Node ID of the pull request to modify. (Required.)
+ PullRequestID ID `json:"pullRequestId"`
+
+ // The Node IDs of the user to request. (Optional.)
+ UserIDs *[]ID `json:"userIds,omitempty"`
+ // The Node IDs of the team to request. (Optional.)
+ TeamIDs *[]ID `json:"teamIds,omitempty"`
+ // Add users to the set rather than replace. (Optional.)
+ Union *Boolean `json:"union,omitempty"`
+ // A unique identifier for the client performing the mutation. (Optional.)
+ ClientMutationID *String `json:"clientMutationId,omitempty"`
+}
+
+// StarOrder represents ways in which star connections can be ordered.
+type StarOrder struct {
+ // The field in which to order nodes by. (Required.)
+ Field StarOrderField `json:"field"`
+ // The direction in which to order nodes. (Required.)
+ Direction OrderDirection `json:"direction"`
+}
+
+// SubmitPullRequestReviewInput is an autogenerated input type of SubmitPullRequestReview.
+type SubmitPullRequestReviewInput struct {
+ // The Pull Request Review ID to submit. (Required.)
+ PullRequestReviewID ID `json:"pullRequestReviewId"`
+ // The event to send to the Pull Request Review. (Required.)
+ Event PullRequestReviewEvent `json:"event"`
+
+ // The text field to set on the Pull Request Review. (Optional.)
+ Body *String `json:"body,omitempty"`
+ // A unique identifier for the client performing the mutation. (Optional.)
+ ClientMutationID *String `json:"clientMutationId,omitempty"`
+}
+
+// TeamMemberOrder represents ordering options for team member connections.
+type TeamMemberOrder struct {
+ // The field to order team members by. (Required.)
+ Field TeamMemberOrderField `json:"field"`
+ // The ordering direction. (Required.)
+ Direction OrderDirection `json:"direction"`
+}
+
+// TeamOrder represents ways in which team connections can be ordered.
+type TeamOrder struct {
+ // The field in which to order nodes by. (Required.)
+ Field TeamOrderField `json:"field"`
+ // The direction in which to order nodes. (Required.)
+ Direction OrderDirection `json:"direction"`
+}
+
+// TeamRepositoryOrder represents ordering options for team repository connections.
+type TeamRepositoryOrder struct {
+ // The field to order repositories by. (Required.)
+ Field TeamRepositoryOrderField `json:"field"`
+ // The ordering direction. (Required.)
+ Direction OrderDirection `json:"direction"`
+}
+
+// UpdateProjectCardInput is an autogenerated input type of UpdateProjectCard.
+type UpdateProjectCardInput struct {
+ // The ProjectCard ID to update. (Required.)
+ ProjectCardID ID `json:"projectCardId"`
+ // The note of ProjectCard. (Required.)
+ Note String `json:"note"`
+
+ // A unique identifier for the client performing the mutation. (Optional.)
+ ClientMutationID *String `json:"clientMutationId,omitempty"`
+}
+
+// UpdateProjectColumnInput is an autogenerated input type of UpdateProjectColumn.
+type UpdateProjectColumnInput struct {
+ // The ProjectColumn ID to update. (Required.)
+ ProjectColumnID ID `json:"projectColumnId"`
+ // The name of project column. (Required.)
+ Name String `json:"name"`
+
+ // A unique identifier for the client performing the mutation. (Optional.)
+ ClientMutationID *String `json:"clientMutationId,omitempty"`
+}
+
+// UpdateProjectInput is an autogenerated input type of UpdateProject.
+type UpdateProjectInput struct {
+ // The Project ID to update. (Required.)
+ ProjectID ID `json:"projectId"`
+
+ // The name of project. (Optional.)
+ Name *String `json:"name,omitempty"`
+ // The description of project. (Optional.)
+ Body *String `json:"body,omitempty"`
+ // Whether the project is open or closed. (Optional.)
+ State *ProjectState `json:"state,omitempty"`
+ // Whether the project is public or not. (Optional.)
+ Public *Boolean `json:"public,omitempty"`
+ // A unique identifier for the client performing the mutation. (Optional.)
+ ClientMutationID *String `json:"clientMutationId,omitempty"`
+}
+
+// UpdatePullRequestReviewCommentInput is an autogenerated input type of UpdatePullRequestReviewComment.
+type UpdatePullRequestReviewCommentInput struct {
+ // The Node ID of the comment to modify. (Required.)
+ PullRequestReviewCommentID ID `json:"pullRequestReviewCommentId"`
+ // The text of the comment. (Required.)
+ Body String `json:"body"`
+
+ // A unique identifier for the client performing the mutation. (Optional.)
+ ClientMutationID *String `json:"clientMutationId,omitempty"`
+}
+
+// UpdatePullRequestReviewInput is an autogenerated input type of UpdatePullRequestReview.
+type UpdatePullRequestReviewInput struct {
+ // The Node ID of the pull request review to modify. (Required.)
+ PullRequestReviewID ID `json:"pullRequestReviewId"`
+ // The contents of the pull request review body. (Required.)
+ Body String `json:"body"`
+
+ // A unique identifier for the client performing the mutation. (Optional.)
+ ClientMutationID *String `json:"clientMutationId,omitempty"`
+}
+
+// UpdateSubscriptionInput is an autogenerated input type of UpdateSubscription.
+type UpdateSubscriptionInput struct {
+ // The Node ID of the subscribable object to modify. (Required.)
+ SubscribableID ID `json:"subscribableId"`
+ // The new state of the subscription. (Required.)
+ State SubscriptionState `json:"state"`
+
+ // A unique identifier for the client performing the mutation. (Optional.)
+ ClientMutationID *String `json:"clientMutationId,omitempty"`
+}
+
+// UpdateTopicsInput is an autogenerated input type of UpdateTopics.
+type UpdateTopicsInput struct {
+ // The Node ID of the repository. (Required.)
+ RepositoryID ID `json:"repositoryId"`
+ // An array of topic names. (Required.)
+ TopicNames []String `json:"topicNames"`
+
+ // A unique identifier for the client performing the mutation. (Optional.)
+ ClientMutationID *String `json:"clientMutationId,omitempty"`
+}
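
These input structs are sent as GraphQL variables; the `(Optional.)` fields are pointers so that unset values drop out via `omitempty`. As a rough sketch of how one of them could be fed to the lower-level `graphql` client vendored later in this change — the endpoint, the nil HTTP client, and the node ID are placeholder assumptions, not part of these files:

```Go
package main

import (
	"context"
	"fmt"

	"github.com/shurcooL/githubv4"
	"github.com/shurcooL/graphql"
)

func main() {
	// Placeholder client; a real call needs an authenticated *http.Client.
	client := graphql.NewClient("https://api.github.com/graphql", nil)

	var m struct {
		AddStar struct {
			Starrable struct {
				ID githubv4.ID
			}
		} `graphql:"addStar(input: $input)"`
	}
	variables := map[string]interface{}{
		// The $input variable is typed as AddStarInput! from the struct's type name.
		"input": githubv4.AddStarInput{
			StarrableID: githubv4.ID("MDEwOlJlcG9zaXRvcnkx"), // hypothetical node ID
		},
	}
	if err := client.Mutate(context.Background(), &m, variables); err != nil {
		fmt.Println("mutation failed:", err)
		return
	}
	fmt.Println("starred:", m.AddStar.Starrable.ID)
}
```
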
diff --git a/vendor/github.com/shurcooL/githubv4/scalar.go b/vendor/github.com/shurcooL/githubv4/scalar.go
new file mode 100644
index 00000000..56a39530
--- /dev/null
+++ b/vendor/github.com/shurcooL/githubv4/scalar.go
@@ -0,0 +1,139 @@
+package githubv4
+
+import (
+ "crypto/x509"
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "time"
+
+ "github.com/shurcooL/graphql"
+)
+
+// Note: These custom types are meant to be used in queries for now.
+// But the plan is to switch to using native Go types (string, int, bool, time.Time, etc.).
+// See https://github.com/shurcooL/githubv4/issues/9 for details.
+//
+// These custom types currently provide documentation, and their use
+// is required for sending outbound queries. However, native Go types
+// can be used for unmarshaling. Once https://github.com/shurcooL/githubv4/issues/9
+// is resolved, native Go types can completely replace these.
+
+type (
+ // Boolean represents true or false values.
+ Boolean graphql.Boolean
+
+ // Date is an ISO-8601 encoded date.
+ Date struct{ time.Time }
+
+ // DateTime is an ISO-8601 encoded UTC date.
+ DateTime struct{ time.Time }
+
+ // Float represents signed double-precision fractional values as
+ // specified by IEEE 754.
+ Float graphql.Float
+
+ // GitObjectID is a Git object ID. For example,
+ // "912ec1990bd09f8fc128c3fa6b59105085aabc03".
+ GitObjectID string
+
+ // GitTimestamp is an ISO-8601 encoded date.
+ // Unlike the DateTime type, GitTimestamp is not converted to UTC.
+ GitTimestamp struct{ time.Time }
+
+ // HTML is a string containing HTML code.
+ HTML string
+
+ // ID represents a unique identifier that is Base64 obfuscated. It
+ // is often used to refetch an object or as key for a cache. The ID
+ // type appears in a JSON response as a String; however, it is not
+ // intended to be human-readable. When expected as an input type,
+ // any string (such as "VXNlci0xMA==") or integer (such as 4) input
+ // value will be accepted as an ID.
+ ID graphql.ID
+
+ // Int represents non-fractional signed whole numeric values.
+ // Int can represent values between -(2^31) and 2^31 - 1.
+ Int graphql.Int
+
+ // String represents textual data as UTF-8 character sequences.
+ // This type is most often used by GraphQL to represent free-form
+ // human-readable text.
+ String graphql.String
+
+ // URI is an RFC 3986, RFC 3987, and RFC 6570 (level 4) compliant URI.
+ URI struct{ *url.URL }
+
+ // X509Certificate is a valid x509 certificate.
+ X509Certificate struct{ *x509.Certificate }
+)
+
+// MarshalJSON implements the json.Marshaler interface.
+// The URI is a quoted string.
+func (u URI) MarshalJSON() ([]byte, error) {
+ return json.Marshal(u.String())
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+// The URI is expected to be a quoted string.
+func (u *URI) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+ var s string
+ err := json.Unmarshal(data, &s)
+ if err != nil {
+ return err
+ }
+ u.URL, err = url.Parse(s)
+ return err
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x X509Certificate) MarshalJSON() ([]byte, error) {
+ // TODO: Implement.
+ return nil, fmt.Errorf("X509Certificate.MarshalJSON: not implemented")
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *X509Certificate) UnmarshalJSON(data []byte) error {
+ // TODO: Implement.
+ return fmt.Errorf("X509Certificate.UnmarshalJSON: not implemented")
+}
+
+// NewBoolean is a helper to make a new *Boolean.
+func NewBoolean(v Boolean) *Boolean { return &v }
+
+// NewDate is a helper to make a new *Date.
+func NewDate(v Date) *Date { return &v }
+
+// NewDateTime is a helper to make a new *DateTime.
+func NewDateTime(v DateTime) *DateTime { return &v }
+
+// NewFloat is a helper to make a new *Float.
+func NewFloat(v Float) *Float { return &v }
+
+// NewGitObjectID is a helper to make a new *GitObjectID.
+func NewGitObjectID(v GitObjectID) *GitObjectID { return &v }
+
+// NewGitTimestamp is a helper to make a new *GitTimestamp.
+func NewGitTimestamp(v GitTimestamp) *GitTimestamp { return &v }
+
+// NewHTML is a helper to make a new *HTML.
+func NewHTML(v HTML) *HTML { return &v }
+
+// NewID is a helper to make a new *ID.
+func NewID(v ID) *ID { return &v }
+
+// NewInt is a helper to make a new *Int.
+func NewInt(v Int) *Int { return &v }
+
+// NewString is a helper to make a new *String.
+func NewString(v String) *String { return &v }
+
+// NewURI is a helper to make a new *URI.
+func NewURI(v URI) *URI { return &v }
+
+// NewX509Certificate is a helper to make a new *X509Certificate.
+func NewX509Certificate(v X509Certificate) *X509Certificate { return &v }
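
Because the autogenerated input types use pointer fields for their optional arguments, these New* helpers are the usual way to populate them. A minimal sketch — the project name and node ID below are made up:

```Go
package main

import (
	"fmt"

	"github.com/shurcooL/githubv4"
)

func main() {
	// Optional fields are pointers, so the New* helpers handle the address-of step.
	input := githubv4.UpdateProjectInput{
		ProjectID: githubv4.ID("UHJvamVjdDox"), // hypothetical node ID
		Name:      githubv4.NewString("Roadmap"),
		Public:    githubv4.NewBoolean(false),
	}
	fmt.Println(*input.Name, *input.Public) // Roadmap false
}
```
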
diff --git a/vendor/github.com/shurcooL/go/browser/LICENSE b/vendor/github.com/shurcooL/go/browser/LICENSE
new file mode 100644
index 00000000..6a66aea5
--- /dev/null
+++ b/vendor/github.com/shurcooL/go/browser/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/shurcooL/go/ctxhttp/ctxhttp.go b/vendor/github.com/shurcooL/go/ctxhttp/ctxhttp.go
new file mode 100644
index 00000000..3cf40a26
--- /dev/null
+++ b/vendor/github.com/shurcooL/go/ctxhttp/ctxhttp.go
@@ -0,0 +1,78 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
+//
+// It's a copy of "golang.org/x/net/context/ctxhttp" with pre-1.7 support dropped,
+// and "golang.org/x/net/context" import replaced with "context".
+// It exists temporarily until "golang.org/x/net/context/ctxhttp" is updated,
+// which will happen "in a couple releases" according to https://golang.org/cl/24620.
+package ctxhttp
+
+import (
+ "context"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+// Do sends an HTTP request with the provided http.Client and returns
+// an HTTP response.
+//
+// If the client is nil, http.DefaultClient is used.
+//
+// The provided ctx must be non-nil. If it is canceled or times out,
+// ctx.Err() will be returned.
+func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
+ if client == nil {
+ client = http.DefaultClient
+ }
+ resp, err := client.Do(req.WithContext(ctx))
+ // If we got an error, and the context has been canceled,
+ // the context's error is probably more useful.
+ if err != nil {
+ select {
+ case <-ctx.Done():
+ err = ctx.Err()
+ default:
+ }
+ }
+ return resp, err
+}
+
+// Get issues a GET request via the Do function.
+func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ return Do(ctx, client, req)
+}
+
+// Head issues a HEAD request via the Do function.
+func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+ req, err := http.NewRequest("HEAD", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ return Do(ctx, client, req)
+}
+
+// Post issues a POST request via the Do function.
+func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
+ req, err := http.NewRequest("POST", url, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", bodyType)
+ return Do(ctx, client, req)
+}
+
+// PostForm issues a POST request via the Do function.
+func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
+ return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
+}
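
A short sketch of how these helpers are typically called with a deadline-carrying context; the URL is a placeholder, and the nil client falls back to http.DefaultClient as documented above:

```Go
package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"time"

	"github.com/shurcooL/go/ctxhttp"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// The request is canceled if ctx expires before the response arrives.
	resp, err := ctxhttp.Get(ctx, nil, "https://example.com/")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Println(resp.Status, len(body), "bytes")
}
```
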
diff --git a/vendor/github.com/shurcooL/graphql/.travis.yml b/vendor/github.com/shurcooL/graphql/.travis.yml
new file mode 100644
index 00000000..93b1fcdb
--- /dev/null
+++ b/vendor/github.com/shurcooL/graphql/.travis.yml
@@ -0,0 +1,16 @@
+sudo: false
+language: go
+go:
+ - 1.x
+ - master
+matrix:
+ allow_failures:
+ - go: master
+ fast_finish: true
+install:
+ - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
+script:
+ - go get -t -v ./...
+ - diff -u <(echo -n) <(gofmt -d -s .)
+ - go tool vet .
+ - go test -v -race ./...
diff --git a/vendor/github.com/shurcooL/graphql/LICENSE b/vendor/github.com/shurcooL/graphql/LICENSE
new file mode 100644
index 00000000..ca4c7764
--- /dev/null
+++ b/vendor/github.com/shurcooL/graphql/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2017 Dmitri Shuralyov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/shurcooL/graphql/README.md b/vendor/github.com/shurcooL/graphql/README.md
new file mode 100644
index 00000000..6fa03bd4
--- /dev/null
+++ b/vendor/github.com/shurcooL/graphql/README.md
@@ -0,0 +1,293 @@
+graphql
+=======
+
+[![Build Status](https://travis-ci.org/shurcooL/graphql.svg?branch=master)](https://travis-ci.org/shurcooL/graphql) [![GoDoc](https://godoc.org/github.com/shurcooL/graphql?status.svg)](https://godoc.org/github.com/shurcooL/graphql)
+
+Package `graphql` provides a GraphQL client implementation.
+
+For more information, see package [`github.com/shurcooL/githubv4`](https://github.com/shurcooL/githubv4), which is a specialized version targeting GitHub GraphQL API v4. That package is driving the feature development.
+
+**Status:** In active early research and development. The API will change when opportunities for improvement are discovered; it is not yet frozen.
+
+Installation
+------------
+
+`graphql` requires Go version 1.8 or later.
+
+```bash
+go get -u github.com/shurcooL/graphql
+```
+
+Usage
+-----
+
+Construct a GraphQL client, specifying the GraphQL server URL. Then, you can use it to make GraphQL queries and mutations.
+
+```Go
+client := graphql.NewClient("https://example.com/graphql", nil)
+// Use client...
+```
+
+### Authentication
+
+Some GraphQL servers may require authentication. The `graphql` package does not directly handle authentication. Instead, when creating a new client, you're expected to pass an `http.Client` that performs authentication. The easiest and recommended way to do this is to use the [`golang.org/x/oauth2`](https://golang.org/x/oauth2) package. You'll need an OAuth token with the right scopes. Then:
+
+```Go
+import (
+	"context"
+	"os"
+
+	"golang.org/x/oauth2"
+)
+
+func main() {
+	src := oauth2.StaticTokenSource(
+		&oauth2.Token{AccessToken: os.Getenv("GRAPHQL_TOKEN")},
+	)
+	httpClient := oauth2.NewClient(context.Background(), src)
+
+	client := graphql.NewClient("https://example.com/graphql", httpClient)
+	// Use client...
+}
+```
+
+### Simple Query
+
+To make a GraphQL query, you need to define a corresponding Go type.
+
+For example, to make the following GraphQL query:
+
+```GraphQL
+query {
+ me {
+ name
+ }
+}
+```
+
+You can define this variable:
+
+```Go
+var query struct {
+ Me struct {
+ Name graphql.String
+ }
+}
+```
+
+Then call `client.Query`, passing a pointer to it:
+
+```Go
+err := client.Query(context.Background(), &query, nil)
+if err != nil {
+ // Handle error.
+}
+fmt.Println(query.Me.Name)
+
+// Output: Luke Skywalker
+```
+
+### Arguments and Variables
+
+Often, you'll want to specify arguments on some fields. You can use the `graphql` struct field tag for this.
+
+For example, to make the following GraphQL query:
+
+```GraphQL
+{
+ human(id: "1000") {
+ name
+ height(unit: METER)
+ }
+}
+```
+
+You can define this variable:
+
+```Go
+var q struct {
+ Human struct {
+ Name graphql.String
+ Height graphql.Float `graphql:"height(unit: METER)"`
+ } `graphql:"human(id: \"1000\")"`
+}
+```
+
+Then call `client.Query`:
+
+```Go
+err := client.Query(context.Background(), &q, nil)
+if err != nil {
+ // Handle error.
+}
+fmt.Println(q.Human.Name)
+fmt.Println(q.Human.Height)
+
+// Output:
+// Luke Skywalker
+// 1.72
+```
+
+However, that'll only work if the arguments are constant and known in advance. Otherwise, you will need to make use of variables. Replace the constants in the struct field tag with variable names:
+
+```Go
+var q struct {
+ Human struct {
+ Name graphql.String
+ Height graphql.Float `graphql:"height(unit: $unit)"`
+ } `graphql:"human(id: $id)"`
+}
+```
+
+Then, define a `variables` map with their values:
+
+```Go
+variables := map[string]interface{}{
+ "id": graphql.ID(id),
+ "unit": starwars.LengthUnit("METER"),
+}
+```
+
+Finally, call `client.Query` providing `variables`:
+
+```Go
+err := client.Query(context.Background(), &q, variables)
+if err != nil {
+ // Handle error.
+}
+```
+
+### Inline Fragments
+
+Some GraphQL queries contain inline fragments. You can use the `graphql` struct field tag to express them.
+
+For example, to make the following GraphQL query:
+
+```GraphQL
+{
+ hero(episode: "JEDI") {
+ name
+ ... on Droid {
+ primaryFunction
+ }
+ ... on Human {
+ height
+ }
+ }
+}
+```
+
+You can define this variable:
+
+```Go
+var q struct {
+ Hero struct {
+ Name graphql.String
+ Droid struct {
+ PrimaryFunction graphql.String
+ } `graphql:"... on Droid"`
+ Human struct {
+ Height graphql.Float
+ } `graphql:"... on Human"`
+ } `graphql:"hero(episode: \"JEDI\")"`
+}
+```
+
+Alternatively, you can define the struct types corresponding to inline fragments, and use them as embedded fields in your query:
+
+```Go
+type (
+ DroidFragment struct {
+ PrimaryFunction graphql.String
+ }
+ HumanFragment struct {
+ Height graphql.Float
+ }
+)
+
+var q struct {
+ Hero struct {
+ Name graphql.String
+ DroidFragment `graphql:"... on Droid"`
+ HumanFragment `graphql:"... on Human"`
+ } `graphql:"hero(episode: \"JEDI\")"`
+}
+```
+
+Then call `client.Query`:
+
+```Go
+err := client.Query(context.Background(), &q, nil)
+if err != nil {
+ // Handle error.
+}
+fmt.Println(q.Hero.Name)
+fmt.Println(q.Hero.PrimaryFunction)
+fmt.Println(q.Hero.Height)
+
+// Output:
+// R2-D2
+// Astromech
+// 0
+```
+
+### Mutations
+
+Mutations often require information that you can only find out by performing a query first. Let's suppose you've already done that.
+
+For example, to make the following GraphQL mutation:
+
+```GraphQL
+mutation($ep: Episode!, $review: ReviewInput!) {
+ createReview(episode: $ep, review: $review) {
+ stars
+ commentary
+ }
+}
+variables {
+ "ep": "JEDI",
+ "review": {
+ "stars": 5,
+ "commentary": "This is a great movie!"
+ }
+}
+```
+
+You can define:
+
+```Go
+var m struct {
+ CreateReview struct {
+ Stars graphql.Int
+ Commentary graphql.String
+ } `graphql:"createReview(episode: $ep, review: $review)"`
+}
+variables := map[string]interface{}{
+ "ep": starwars.Episode("JEDI"),
+ "review": starwars.ReviewInput{
+ Stars: graphql.Int(5),
+ Commentary: graphql.String("This is a great movie!"),
+ },
+}
+```
+
+Then call `client.Mutate`:
+
+```Go
+err := client.Mutate(context.Background(), &m, variables)
+if err != nil {
+ // Handle error.
+}
+fmt.Printf("Created a %v star review: %v\n", m.CreateReview.Stars, m.CreateReview.Commentary)
+
+// Output:
+// Created a 5 star review: This is a great movie!
+```
+
+Directories
+-----------
+
+| Path | Synopsis |
+|----------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------|
+| [example/graphqldev](https://godoc.org/github.com/shurcooL/graphql/example/graphqldev) | graphqldev is a test program currently being used for developing graphql package. |
+| [ident](https://godoc.org/github.com/shurcooL/graphql/ident) | Package ident provides functions for parsing and converting identifier names between various naming conventions. |
+| [internal/jsonutil](https://godoc.org/github.com/shurcooL/graphql/internal/jsonutil) | Package jsonutil provides a function for decoding JSON into a GraphQL query data structure. |
+
+License
+-------
+
+- [MIT License](LICENSE)
diff --git a/vendor/github.com/shurcooL/graphql/doc.go b/vendor/github.com/shurcooL/graphql/doc.go
new file mode 100644
index 00000000..69ec4e03
--- /dev/null
+++ b/vendor/github.com/shurcooL/graphql/doc.go
@@ -0,0 +1,11 @@
+// Package graphql provides a GraphQL client implementation.
+//
+// For more information, see package github.com/shurcooL/githubv4,
+// which is a specialized version targeting GitHub GraphQL API v4.
+// That package is driving the feature development.
+//
+// Status: In active early research and development. The API will change when
+// opportunities for improvement are discovered; it is not yet frozen.
+//
+// For now, see README for more details.
+package graphql // import "github.com/shurcooL/graphql"
diff --git a/vendor/github.com/shurcooL/graphql/graphql.go b/vendor/github.com/shurcooL/graphql/graphql.go
new file mode 100644
index 00000000..f2c5b54f
--- /dev/null
+++ b/vendor/github.com/shurcooL/graphql/graphql.go
@@ -0,0 +1,119 @@
+package graphql
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+
+ "github.com/shurcooL/go/ctxhttp"
+ "github.com/shurcooL/graphql/internal/jsonutil"
+)
+
+// Client is a GraphQL client.
+type Client struct {
+ url string // GraphQL server URL.
+ httpClient *http.Client
+}
+
+// NewClient creates a GraphQL client targeting the specified GraphQL server URL.
+// If httpClient is nil, then http.DefaultClient is used.
+func NewClient(url string, httpClient *http.Client) *Client {
+ if httpClient == nil {
+ httpClient = http.DefaultClient
+ }
+ return &Client{
+ url: url,
+ httpClient: httpClient,
+ }
+}
+
+// Query executes a single GraphQL query request,
+// with a query derived from q, populating the response into it.
+// q should be a pointer to struct that corresponds to the GraphQL schema.
+func (c *Client) Query(ctx context.Context, q interface{}, variables map[string]interface{}) error {
+ return c.do(ctx, queryOperation, q, variables)
+}
+
+// Mutate executes a single GraphQL mutation request,
+// with a mutation derived from m, populating the response into it.
+// m should be a pointer to struct that corresponds to the GraphQL schema.
+func (c *Client) Mutate(ctx context.Context, m interface{}, variables map[string]interface{}) error {
+ return c.do(ctx, mutationOperation, m, variables)
+}
+
+// do executes a single GraphQL operation.
+func (c *Client) do(ctx context.Context, op operationType, v interface{}, variables map[string]interface{}) error {
+ var query string
+ switch op {
+ case queryOperation:
+ query = constructQuery(v, variables)
+ case mutationOperation:
+ query = constructMutation(v, variables)
+ }
+ in := struct {
+ Query string `json:"query"`
+ Variables map[string]interface{} `json:"variables,omitempty"`
+ }{
+ Query: query,
+ Variables: variables,
+ }
+ var buf bytes.Buffer
+ err := json.NewEncoder(&buf).Encode(in)
+ if err != nil {
+ return err
+ }
+ resp, err := ctxhttp.Post(ctx, c.httpClient, c.url, "application/json", &buf)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("unexpected status: %v", resp.Status)
+ }
+ var out struct {
+ Data *json.RawMessage
+ Errors errors
+ //Extensions interface{} // Unused.
+ }
+ err = json.NewDecoder(resp.Body).Decode(&out)
+ if err != nil {
+ return err
+ }
+ if out.Data != nil {
+ err := jsonutil.UnmarshalGraphQL(*out.Data, v)
+ if err != nil {
+ return err
+ }
+ }
+ if len(out.Errors) > 0 {
+ return out.Errors
+ }
+ return nil
+}
+
+// errors represents the "errors" array in a response from a GraphQL server.
+// If returned via error interface, the slice is expected to contain at least 1 element.
+//
+// Specification: https://facebook.github.io/graphql/#sec-Errors.
+type errors []struct {
+ Message string
+ Locations []struct {
+ Line int
+ Column int
+ }
+}
+
+// Error implements error interface.
+func (e errors) Error() string {
+ return e[0].Message
+}
+
+type operationType uint8
+
+const (
+ queryOperation operationType = iota
+ mutationOperation
+ //subscriptionOperation // Unused.
+)
diff --git a/vendor/github.com/shurcooL/graphql/ident/ident.go b/vendor/github.com/shurcooL/graphql/ident/ident.go
new file mode 100644
index 00000000..29e498ed
--- /dev/null
+++ b/vendor/github.com/shurcooL/graphql/ident/ident.go
@@ -0,0 +1,240 @@
+// Package ident provides functions for parsing and converting identifier names
+// between various naming conventions. It has support for MixedCaps, lowerCamelCase,
+// and SCREAMING_SNAKE_CASE naming conventions.
+package ident
+
+import (
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// ParseMixedCaps parses a MixedCaps identifier name.
+//
+// E.g., "ClientMutationID" -> {"Client", "Mutation", "ID"}.
+func ParseMixedCaps(name string) Name {
+ var words Name
+
+ // Split name at any lower -> Upper or Upper -> Upper,lower transitions.
+ // Check each word for initialisms.
+ runes := []rune(name)
+ w, i := 0, 0 // Index of start of word, scan.
+ for i+1 <= len(runes) {
+ eow := false // Whether we hit the end of a word.
+ if i+1 == len(runes) {
+ eow = true
+ } else if unicode.IsLower(runes[i]) && unicode.IsUpper(runes[i+1]) {
+ // lower -> Upper.
+ eow = true
+ } else if i+2 < len(runes) && unicode.IsUpper(runes[i]) && unicode.IsUpper(runes[i+1]) && unicode.IsLower(runes[i+2]) {
+ // Upper -> Upper,lower. End of acronym, followed by a word.
+ eow = true
+
+ if string(runes[i:i+3]) == "IDs" { // Special case, plural form of ID initialism.
+ eow = false
+ }
+ }
+ i++
+ if !eow {
+ continue
+ }
+
+ // [w, i) is a word.
+ word := string(runes[w:i])
+ if initialism, ok := isInitialism(word); ok {
+ words = append(words, initialism)
+ } else if i1, i2, ok := isTwoInitialisms(word); ok {
+ words = append(words, i1, i2)
+ } else {
+ words = append(words, word)
+ }
+ w = i
+ }
+ return words
+}
+
+// ParseLowerCamelCase parses a lowerCamelCase identifier name.
+//
+// E.g., "clientMutationId" -> {"client", "Mutation", "Id"}.
+func ParseLowerCamelCase(name string) Name {
+ var words Name
+
+ // Split name at any Upper letters.
+ runes := []rune(name)
+ w, i := 0, 0 // Index of start of word, scan.
+ for i+1 <= len(runes) {
+ eow := false // Whether we hit the end of a word.
+ if i+1 == len(runes) {
+ eow = true
+ } else if unicode.IsUpper(runes[i+1]) {
+ // Upper letter.
+ eow = true
+ }
+ i++
+ if !eow {
+ continue
+ }
+
+ // [w, i) is a word.
+ words = append(words, string(runes[w:i]))
+ w = i
+ }
+ return words
+}
+
+// ParseScreamingSnakeCase parses a SCREAMING_SNAKE_CASE identifier name.
+//
+// E.g., "CLIENT_MUTATION_ID" -> {"CLIENT", "MUTATION", "ID"}.
+func ParseScreamingSnakeCase(name string) Name {
+ var words Name
+
+ // Split name at '_' characters.
+ runes := []rune(name)
+ w, i := 0, 0 // Index of start of word, scan.
+ for i+1 <= len(runes) {
+ eow := false // Whether we hit the end of a word.
+ if i+1 == len(runes) {
+ eow = true
+ } else if runes[i+1] == '_' {
+ // Underscore.
+ eow = true
+ }
+ i++
+ if !eow {
+ continue
+ }
+
+ // [w, i) is a word.
+ words = append(words, string(runes[w:i]))
+ if i < len(runes) && runes[i] == '_' {
+ // Skip underscore.
+ i++
+ }
+ w = i
+ }
+ return words
+}
+
+// Name is an identifier name, broken up into individual words.
+type Name []string
+
+// ToMixedCaps expresses identifier name in MixedCaps naming convention.
+//
+// E.g., "ClientMutationID".
+func (n Name) ToMixedCaps() string {
+ for i, word := range n {
+ if strings.EqualFold(word, "IDs") { // Special case, plural form of ID initialism.
+ n[i] = "IDs"
+ continue
+ }
+ if initialism, ok := isInitialism(word); ok {
+ n[i] = initialism
+ continue
+ }
+ if brand, ok := isBrand(word); ok {
+ n[i] = brand
+ continue
+ }
+ r, size := utf8.DecodeRuneInString(word)
+ n[i] = string(unicode.ToUpper(r)) + strings.ToLower(word[size:])
+ }
+ return strings.Join(n, "")
+}
+
+// ToLowerCamelCase expresses identifier name in lowerCamelCase naming convention.
+//
+// E.g., "clientMutationId".
+func (n Name) ToLowerCamelCase() string {
+ for i, word := range n {
+ if i == 0 {
+ n[i] = strings.ToLower(word)
+ continue
+ }
+ r, size := utf8.DecodeRuneInString(word)
+ n[i] = string(unicode.ToUpper(r)) + strings.ToLower(word[size:])
+ }
+ return strings.Join(n, "")
+}
+
+// isInitialism reports whether word is an initialism.
+func isInitialism(word string) (string, bool) {
+ initialism := strings.ToUpper(word)
+ _, ok := initialisms[initialism]
+ return initialism, ok
+}
+
+// isTwoInitialisms reports whether word is two initialisms.
+func isTwoInitialisms(word string) (string, string, bool) {
+ word = strings.ToUpper(word)
+ for i := 2; i <= len(word)-2; i++ { // Shortest initialism is 2 characters long.
+ _, ok1 := initialisms[word[:i]]
+ _, ok2 := initialisms[word[i:]]
+ if ok1 && ok2 {
+ return word[:i], word[i:], true
+ }
+ }
+ return "", "", false
+}
+
+// initialisms is the set of initialisms in the MixedCaps naming convention.
+// Only add entries that are highly unlikely to be non-initialisms.
+// For instance, "ID" is fine (Freudian code is rare), but "AND" is not.
+var initialisms = map[string]struct{}{
+ // These are the common initialisms from golint. Keep them in sync
+ // with https://gotools.org/github.com/golang/lint#commonInitialisms.
+ "ACL": {},
+ "API": {},
+ "ASCII": {},
+ "CPU": {},
+ "CSS": {},
+ "DNS": {},
+ "EOF": {},
+ "GUID": {},
+ "HTML": {},
+ "HTTP": {},
+ "HTTPS": {},
+ "ID": {},
+ "IP": {},
+ "JSON": {},
+ "LHS": {},
+ "QPS": {},
+ "RAM": {},
+ "RHS": {},
+ "RPC": {},
+ "SLA": {},
+ "SMTP": {},
+ "SQL": {},
+ "SSH": {},
+ "TCP": {},
+ "TLS": {},
+ "TTL": {},
+ "UDP": {},
+ "UI": {},
+ "UID": {},
+ "UUID": {},
+ "URI": {},
+ "URL": {},
+ "UTF8": {},
+ "VM": {},
+ "XML": {},
+ "XMPP": {},
+ "XSRF": {},
+ "XSS": {},
+
+ // Additional common initialisms.
+ "RSS": {},
+}
+
+// isBrand reports whether word is a brand.
+func isBrand(word string) (string, bool) {
+ brand, ok := brands[strings.ToLower(word)]
+ return brand, ok
+}
+
+// brands is the map of brands in the MixedCaps naming convention;
+// see https://dmitri.shuralyov.com/idiomatic-go#for-brands-or-words-with-more-than-1-capital-letter-lowercase-all-letters.
+// Key is the lower case version of the brand, value is the canonical brand spelling.
+// Only add entries that are highly unlikely to be non-brands.
+var brands = map[string]string{
+ "github": "GitHub",
+}
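
A small sketch of the conversions these parsers and formatters are used for; the expected outputs mirror the E.g. lines in the doc comments above:

```Go
package main

import (
	"fmt"

	"github.com/shurcooL/graphql/ident"
)

func main() {
	// MixedCaps -> lowerCamelCase, as used when deriving GraphQL field names from Go field names.
	fmt.Println(ident.ParseMixedCaps("ClientMutationID").ToLowerCamelCase()) // clientMutationId

	// SCREAMING_SNAKE_CASE -> MixedCaps, with initialisms such as "ID" kept upper case.
	fmt.Println(ident.ParseScreamingSnakeCase("CLIENT_MUTATION_ID").ToMixedCaps()) // ClientMutationID
}
```
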
diff --git a/vendor/github.com/shurcooL/graphql/internal/jsonutil/graphql.go b/vendor/github.com/shurcooL/graphql/internal/jsonutil/graphql.go
new file mode 100644
index 00000000..9e75791d
--- /dev/null
+++ b/vendor/github.com/shurcooL/graphql/internal/jsonutil/graphql.go
@@ -0,0 +1,308 @@
+// Package jsonutil provides a function for decoding JSON
+// into a GraphQL query data structure.
+package jsonutil
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+)
+
+// UnmarshalGraphQL parses the JSON-encoded GraphQL response data and stores
+// the result in the GraphQL query data structure pointed to by v.
+//
+// The implementation is created on top of the JSON tokenizer available
+// in "encoding/json".Decoder.
+func UnmarshalGraphQL(data []byte, v interface{}) error {
+ dec := json.NewDecoder(bytes.NewReader(data))
+ dec.UseNumber()
+ err := (&decoder{tokenizer: dec}).Decode(v)
+ if err != nil {
+ return err
+ }
+ tok, err := dec.Token()
+ switch err {
+ case io.EOF:
+ // Expect to get io.EOF. There shouldn't be any more
+ // tokens left after we've decoded v successfully.
+ return nil
+ case nil:
+ return fmt.Errorf("invalid token '%v' after top-level value", tok)
+ default:
+ return err
+ }
+}
+
+// decoder is a JSON decoder that performs custom unmarshaling behavior
+// for GraphQL query data structures. It's implemented on top of a JSON tokenizer.
+type decoder struct {
+ tokenizer interface {
+ Token() (json.Token, error)
+ }
+
+ // Stack of what part of input JSON we're in the middle of - objects, arrays.
+ parseState []json.Delim
+
+ // Stacks of values where to unmarshal.
+ // The top of each stack is the reflect.Value where to unmarshal next JSON value.
+ //
+ // The reason there's more than one stack is because we might be unmarshaling
+ // a single JSON value into multiple GraphQL fragments or embedded structs, so
+ // we keep track of them all.
+ vs [][]reflect.Value
+}
+
+// Decode decodes a single JSON value from d.tokenizer into v.
+func (d *decoder) Decode(v interface{}) error {
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Ptr {
+ return fmt.Errorf("cannot decode into non-pointer %T", v)
+ }
+ d.vs = [][]reflect.Value{{rv.Elem()}}
+ return d.decode()
+}
+
+// decode decodes a single JSON value from d.tokenizer into d.vs.
+func (d *decoder) decode() error {
+ // The loop invariant is that the top of each d.vs stack
+ // is where we try to unmarshal the next JSON value we see.
+ for len(d.vs) > 0 {
+ tok, err := d.tokenizer.Token()
+ if err == io.EOF {
+ return errors.New("unexpected end of JSON input")
+ } else if err != nil {
+ return err
+ }
+
+ switch {
+
+ // Are we inside an object and seeing next key (rather than end of object)?
+ case d.state() == '{' && tok != json.Delim('}'):
+ key, ok := tok.(string)
+ if !ok {
+ return errors.New("unexpected non-key in JSON input")
+ }
+ someFieldExist := false
+ for i := range d.vs {
+ v := d.vs[i][len(d.vs[i])-1]
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+ var f reflect.Value
+ if v.Kind() == reflect.Struct {
+ f = fieldByGraphQLName(v, key)
+ if f.IsValid() {
+ someFieldExist = true
+ }
+ }
+ d.vs[i] = append(d.vs[i], f)
+ }
+ if !someFieldExist {
+ return fmt.Errorf("struct field for %s doesn't exist in any of %v places to unmarshal", key, len(d.vs))
+ }
+
+ // We've just consumed the current token, which was the key.
+ // Read the next token, which should be the value, and let the rest of code process it.
+ tok, err = d.tokenizer.Token()
+ if err == io.EOF {
+ return errors.New("unexpected end of JSON input")
+ } else if err != nil {
+ return err
+ }
+
+ // Are we inside an array and seeing next value (rather than end of array)?
+ case d.state() == '[' && tok != json.Delim(']'):
+ someSliceExist := false
+ for i := range d.vs {
+ v := d.vs[i][len(d.vs[i])-1]
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+ var f reflect.Value
+ if v.Kind() == reflect.Slice {
+ v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) // v = append(v, T).
+ f = v.Index(v.Len() - 1)
+ someSliceExist = true
+ }
+ d.vs[i] = append(d.vs[i], f)
+ }
+ if !someSliceExist {
+ return fmt.Errorf("slice doesn't exist in any of %v places to unmarshal", len(d.vs))
+ }
+ }
+
+ switch tok := tok.(type) {
+ case string, json.Number, bool, nil:
+ // Value.
+
+ for i := range d.vs {
+ v := d.vs[i][len(d.vs[i])-1]
+ if !v.IsValid() {
+ continue
+ }
+ err := unmarshalValue(tok, v)
+ if err != nil {
+ return err
+ }
+ }
+ d.popAllVs()
+
+ case json.Delim:
+ switch tok {
+ case '{':
+ // Start of object.
+
+ d.pushState(tok)
+
+ frontier := make([]reflect.Value, len(d.vs)) // Places to look for GraphQL fragments/embedded structs.
+ for i := range d.vs {
+ v := d.vs[i][len(d.vs[i])-1]
+ frontier[i] = v
+ // TODO: Do this recursively or not? Add a test case if needed.
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem())) // v = new(T).
+ }
+ }
+ // Find GraphQL fragments/embedded structs recursively, adding to frontier
+ // as new ones are discovered and exploring them further.
+ for len(frontier) > 0 {
+ v := frontier[0]
+ frontier = frontier[1:]
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+ if v.Kind() != reflect.Struct {
+ continue
+ }
+ for i := 0; i < v.NumField(); i++ {
+ if isGraphQLFragment(v.Type().Field(i)) || v.Type().Field(i).Anonymous {
+ // Add GraphQL fragment or embedded struct.
+ d.vs = append(d.vs, []reflect.Value{v.Field(i)})
+ frontier = append(frontier, v.Field(i))
+ }
+ }
+ }
+ case '[':
+ // Start of array.
+
+ d.pushState(tok)
+
+ for i := range d.vs {
+ v := d.vs[i][len(d.vs[i])-1]
+ // TODO: Confirm this is needed, write a test case.
+ //if v.Kind() == reflect.Ptr && v.IsNil() {
+ // v.Set(reflect.New(v.Type().Elem())) // v = new(T).
+ //}
+
+ // Reset slice to empty (in case it had non-zero initial value).
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+ if v.Kind() != reflect.Slice {
+ continue
+ }
+ v.Set(reflect.MakeSlice(v.Type(), 0, 0)) // v = make(T, 0, 0).
+ }
+ case '}', ']':
+ // End of object or array.
+ d.popAllVs()
+ d.popState()
+ default:
+ return errors.New("unexpected delimiter in JSON input")
+ }
+ default:
+ return errors.New("unexpected token in JSON input")
+ }
+ }
+ return nil
+}
+
+// pushState pushes a new parse state s onto the stack.
+func (d *decoder) pushState(s json.Delim) {
+ d.parseState = append(d.parseState, s)
+}
+
+// popState pops a parse state (already obtained) off the stack.
+// The stack must be non-empty.
+func (d *decoder) popState() {
+ d.parseState = d.parseState[:len(d.parseState)-1]
+}
+
+// state reports the parse state on top of stack, or 0 if empty.
+func (d *decoder) state() json.Delim {
+ if len(d.parseState) == 0 {
+ return 0
+ }
+ return d.parseState[len(d.parseState)-1]
+}
+
+// popAllVs pops from all d.vs stacks, keeping only non-empty ones.
+func (d *decoder) popAllVs() {
+ var nonEmpty [][]reflect.Value
+ for i := range d.vs {
+ d.vs[i] = d.vs[i][:len(d.vs[i])-1]
+ if len(d.vs[i]) > 0 {
+ nonEmpty = append(nonEmpty, d.vs[i])
+ }
+ }
+ d.vs = nonEmpty
+}
+
+// fieldByGraphQLName returns a struct field of struct v that matches GraphQL name,
+// or invalid reflect.Value if none found.
+func fieldByGraphQLName(v reflect.Value, name string) reflect.Value {
+ for i := 0; i < v.NumField(); i++ {
+ if hasGraphQLName(v.Type().Field(i), name) {
+ return v.Field(i)
+ }
+ }
+ return reflect.Value{}
+}
+
+// hasGraphQLName reports whether struct field f has GraphQL name.
+func hasGraphQLName(f reflect.StructField, name string) bool {
+ value, ok := f.Tag.Lookup("graphql")
+ if !ok {
+ // TODO: caseconv package is relatively slow. Optimize it, then consider using it here.
+ //return caseconv.MixedCapsToLowerCamelCase(f.Name) == name
+ return strings.EqualFold(f.Name, name)
+ }
+ value = strings.TrimSpace(value) // TODO: Parse better.
+ if strings.HasPrefix(value, "...") {
+ // GraphQL fragment. It doesn't have a name.
+ return false
+ }
+ if i := strings.Index(value, "("); i != -1 {
+ value = value[:i]
+ }
+ if i := strings.Index(value, ":"); i != -1 {
+ value = value[:i]
+ }
+ return strings.TrimSpace(value) == name
+}
+
+// isGraphQLFragment reports whether struct field f is a GraphQL fragment.
+func isGraphQLFragment(f reflect.StructField) bool {
+ value, ok := f.Tag.Lookup("graphql")
+ if !ok {
+ return false
+ }
+ value = strings.TrimSpace(value) // TODO: Parse better.
+ return strings.HasPrefix(value, "...")
+}
+
+// unmarshalValue unmarshals JSON value into v.
+func unmarshalValue(value json.Token, v reflect.Value) error {
+ b, err := json.Marshal(value) // TODO: Short-circuit (if profiling says it's worth it).
+ if err != nil {
+ return err
+ }
+ if !v.CanAddr() {
+ return fmt.Errorf("value %v is not addressable", v)
+ }
+ return json.Unmarshal(b, v.Addr().Interface())
+}
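
A test-style sketch of the decoder; since the package is internal, such a file would have to live under the graphql module (for example alongside the graphql package itself). The JSON payload and the fragment type are invented for illustration:

```Go
package graphql_test

import (
	"testing"

	"github.com/shurcooL/graphql/internal/jsonutil"
)

// DroidFragment stands in for a GraphQL inline fragment.
type DroidFragment struct {
	PrimaryFunction string
}

func TestUnmarshalGraphQLSketch(t *testing.T) {
	var q struct {
		Hero struct {
			Name          string
			DroidFragment `graphql:"... on Droid"`
		} `graphql:"hero"`
	}
	// Matching keys are written into both the outer struct and the embedded fragment struct.
	data := []byte(`{"hero": {"name": "R2-D2", "primaryFunction": "Astromech"}}`)
	if err := jsonutil.UnmarshalGraphQL(data, &q); err != nil {
		t.Fatal(err)
	}
	if q.Hero.Name != "R2-D2" || q.Hero.PrimaryFunction != "Astromech" {
		t.Errorf("unexpected result: %q, %q", q.Hero.Name, q.Hero.PrimaryFunction)
	}
}
```
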
diff --git a/vendor/github.com/shurcooL/graphql/query.go b/vendor/github.com/shurcooL/graphql/query.go
new file mode 100644
index 00000000..e10b7718
--- /dev/null
+++ b/vendor/github.com/shurcooL/graphql/query.go
@@ -0,0 +1,131 @@
+package graphql
+
+import (
+ "bytes"
+ "encoding/json"
+ "io"
+ "reflect"
+ "sort"
+
+ "github.com/shurcooL/graphql/ident"
+)
+
+func constructQuery(v interface{}, variables map[string]interface{}) string {
+ query := query(v)
+ if len(variables) > 0 {
+ return "query(" + queryArguments(variables) + ")" + query
+ }
+ return query
+}
+
+func constructMutation(v interface{}, variables map[string]interface{}) string {
+ query := query(v)
+ if len(variables) > 0 {
+ return "mutation(" + queryArguments(variables) + ")" + query
+ }
+ return "mutation" + query
+}
+
+// queryArguments constructs a minified arguments string for variables.
+//
+// E.g., map[string]interface{}{"a": Int(123), "b": NewBoolean(true)} -> "$a:Int!$b:Boolean".
+func queryArguments(variables map[string]interface{}) string {
+ // Sort keys in order to produce deterministic output for testing purposes.
+ // TODO: If tests can be made to work with non-deterministic output, then no need to sort.
+ keys := make([]string, 0, len(variables))
+ for k := range variables {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ var buf bytes.Buffer
+ for _, k := range keys {
+ io.WriteString(&buf, "$")
+ io.WriteString(&buf, k)
+ io.WriteString(&buf, ":")
+ writeArgumentType(&buf, reflect.TypeOf(variables[k]), true)
+ // Don't insert a comma here.
+ // Commas in GraphQL are insignificant, and we want minified output.
+ // See https://facebook.github.io/graphql/October2016/#sec-Insignificant-Commas.
+ }
+ return buf.String()
+}
+
+// writeArgumentType writes a minified GraphQL type for t to w.
+// value indicates whether t is a value (required) type or pointer (optional) type.
+// If value is true, then "!" is written at the end of t.
+func writeArgumentType(w io.Writer, t reflect.Type, value bool) {
+ if t.Kind() == reflect.Ptr {
+ // Pointer is an optional type, so no "!" at the end of the pointer's underlying type.
+ writeArgumentType(w, t.Elem(), false)
+ return
+ }
+
+ switch t.Kind() {
+ case reflect.Slice, reflect.Array:
+ // List. E.g., "[Int]".
+ io.WriteString(w, "[")
+ writeArgumentType(w, t.Elem(), true)
+ io.WriteString(w, "]")
+ default:
+ // Named type. E.g., "Int".
+ name := t.Name()
+ if name == "string" { // HACK: Workaround for https://github.com/shurcooL/githubv4/issues/12.
+ name = "ID"
+ }
+ io.WriteString(w, name)
+ }
+
+ if value {
+ // Value is a required type, so add "!" to the end.
+ io.WriteString(w, "!")
+ }
+}
+
+// query uses writeQuery to recursively construct
+// a minified query string from the provided struct v.
+//
+// E.g., struct{Foo Int, BarBaz *Boolean} -> "{foo,barBaz}".
+func query(v interface{}) string {
+ var buf bytes.Buffer
+ writeQuery(&buf, reflect.TypeOf(v), false)
+ return buf.String()
+}
+
+// writeQuery writes a minified query for t to w.
+// If inline is true, the struct fields of t are inlined into parent struct.
+func writeQuery(w io.Writer, t reflect.Type, inline bool) {
+ switch t.Kind() {
+ case reflect.Ptr, reflect.Slice:
+ writeQuery(w, t.Elem(), false)
+ case reflect.Struct:
+ // If the type implements json.Unmarshaler, it's a scalar. Don't expand it.
+ if reflect.PtrTo(t).Implements(jsonUnmarshaler) {
+ return
+ }
+ if !inline {
+ io.WriteString(w, "{")
+ }
+ for i := 0; i < t.NumField(); i++ {
+ if i != 0 {
+ io.WriteString(w, ",")
+ }
+ f := t.Field(i)
+ value, ok := f.Tag.Lookup("graphql")
+ inlineField := f.Anonymous && !ok
+ if !inlineField {
+ if ok {
+ io.WriteString(w, value)
+ } else {
+ io.WriteString(w, ident.ParseMixedCaps(f.Name).ToLowerCamelCase())
+ }
+ }
+ writeQuery(w, f.Type, inlineField)
+ }
+ if !inline {
+ io.WriteString(w, "}")
+ }
+ }
+}
+
+var jsonUnmarshaler = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem()
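
Because these helpers are unexported, the quickest way to see what they produce is a test in the same package; a sketch whose expected strings mirror the doc-comment examples above:

```Go
package graphql

import "testing"

func TestConstructQuerySketch(t *testing.T) {
	// struct{Foo Int; BarBaz *Boolean} minifies to "{foo,barBaz}".
	var q struct {
		Foo    Int
		BarBaz *Boolean
	}
	if got, want := constructQuery(&q, nil), "{foo,barBaz}"; got != want {
		t.Errorf("constructQuery: got %q, want %q", got, want)
	}

	// Pointer variables map to optional GraphQL types (no trailing "!").
	variables := map[string]interface{}{
		"a": Int(123),
		"b": NewBoolean(true),
	}
	if got, want := queryArguments(variables), "$a:Int!$b:Boolean"; got != want {
		t.Errorf("queryArguments: got %q, want %q", got, want)
	}
}
```
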
diff --git a/vendor/github.com/shurcooL/graphql/scalar.go b/vendor/github.com/shurcooL/graphql/scalar.go
new file mode 100644
index 00000000..0f7ceea5
--- /dev/null
+++ b/vendor/github.com/shurcooL/graphql/scalar.go
@@ -0,0 +1,51 @@
+package graphql
+
+// Note: These custom types are meant to be used in queries for now.
+// But the plan is to switch to using native Go types (string, int, bool, time.Time, etc.).
+// See https://github.com/shurcooL/githubv4/issues/9 for details.
+//
+// These custom types currently provide documentation, and their use
+// is required for sending outbound queries. However, native Go types
+// can be used for unmarshaling. Once https://github.com/shurcooL/githubv4/issues/9
+// is resolved, native Go types can completely replace these.
+
+type (
+ // Boolean represents true or false values.
+ Boolean bool
+
+ // Float represents signed double-precision fractional values as
+ // specified by IEEE 754.
+ Float float64
+
+ // ID represents a unique identifier that is Base64 obfuscated. It
+ // is often used to refetch an object or as key for a cache. The ID
+ // type appears in a JSON response as a String; however, it is not
+ // intended to be human-readable. When expected as an input type,
+ // any string (such as "VXNlci0xMA==") or integer (such as 4) input
+ // value will be accepted as an ID.
+ ID interface{}
+
+ // Int represents non-fractional signed whole numeric values.
+ // Int can represent values between -(2^31) and 2^31 - 1.
+ Int int32
+
+ // String represents textual data as UTF-8 character sequences.
+ // This type is most often used by GraphQL to represent free-form
+ // human-readable text.
+ String string
+)
+
+// NewBoolean is a helper to make a new *Boolean.
+func NewBoolean(v Boolean) *Boolean { return &v }
+
+// NewFloat is a helper to make a new *Float.
+func NewFloat(v Float) *Float { return &v }
+
+// NewID is a helper to make a new *ID.
+func NewID(v ID) *ID { return &v }
+
+// NewInt is a helper to make a new *Int.
+func NewInt(v Int) *Int { return &v }
+
+// NewString is a helper to make a new *String.
+func NewString(v String) *String { return &v }
diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS
new file mode 100644
index 00000000..15167cd7
--- /dev/null
+++ b/vendor/golang.org/x/net/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS
new file mode 100644
index 00000000..1c4577e9
--- /dev/null
+++ b/vendor/golang.org/x/net/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE
new file mode 100644
index 00000000..6a66aea5
--- /dev/null
+++ b/vendor/golang.org/x/net/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS
new file mode 100644
index 00000000..73309904
--- /dev/null
+++ b/vendor/golang.org/x/net/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go
new file mode 100644
index 00000000..a3c021d3
--- /dev/null
+++ b/vendor/golang.org/x/net/context/context.go
@@ -0,0 +1,56 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package context defines the Context type, which carries deadlines,
+// cancelation signals, and other request-scoped values across API boundaries
+// and between processes.
+// As of Go 1.7 this package is available in the standard library under the
+// name context. https://golang.org/pkg/context.
+//
+// Incoming requests to a server should create a Context, and outgoing calls to
+// servers should accept a Context. The chain of function calls between must
+// propagate the Context, optionally replacing it with a modified copy created
+// using WithDeadline, WithTimeout, WithCancel, or WithValue.
+//
+// Programs that use Contexts should follow these rules to keep interfaces
+// consistent across packages and enable static analysis tools to check context
+// propagation:
+//
+// Do not store Contexts inside a struct type; instead, pass a Context
+// explicitly to each function that needs it. The Context should be the first
+// parameter, typically named ctx:
+//
+// func DoSomething(ctx context.Context, arg Arg) error {
+// // ... use ctx ...
+// }
+//
+// Do not pass a nil Context, even if a function permits it. Pass context.TODO
+// if you are unsure about which Context to use.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+//
+// The same Context may be passed to functions running in different goroutines;
+// Contexts are safe for simultaneous use by multiple goroutines.
+//
+// See http://blog.golang.org/context for example code for a server that uses
+// Contexts.
+package context // import "golang.org/x/net/context"
+
+// Background returns a non-nil, empty Context. It is never canceled, has no
+// values, and has no deadline. It is typically used by the main function,
+// initialization, and tests, and as the top-level Context for incoming
+// requests.
+func Background() Context {
+ return background
+}
+
+// TODO returns a non-nil, empty Context. Code should use context.TODO when
+// it's unclear which Context to use or it is not yet available (because the
+// surrounding function has not yet been extended to accept a Context
+// parameter). TODO is recognized by static analysis tools that determine
+// whether Contexts are propagated correctly in a program.
+func TODO() Context {
+ return todo
+}
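A minimal sketch of the conventions described in the package comment above (Context as the first parameter, named ctx; Background as the top-level Context). The function and argument names are illustrative only.

```go
package main

import (
	"fmt"

	"golang.org/x/net/context"
)

// doSomething takes the Context as its first parameter, per the package rules.
func doSomething(ctx context.Context, arg string) error {
	select {
	case <-ctx.Done():
		return ctx.Err() // give up if the caller canceled or timed out
	default:
		fmt.Println("working on", arg)
		return nil
	}
}

func main() {
	// Background is the top-level Context; TODO is the placeholder while
	// code is still being migrated to accept a Context.
	ctx := context.Background()
	if err := doSomething(ctx, "example"); err != nil {
		fmt.Println("error:", err)
	}
}
```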
diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
new file mode 100644
index 00000000..606cf1f9
--- /dev/null
+++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
@@ -0,0 +1,74 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
+package ctxhttp // import "golang.org/x/net/context/ctxhttp"
+
+import (
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "golang.org/x/net/context"
+)
+
+// Do sends an HTTP request with the provided http.Client and returns
+// an HTTP response.
+//
+// If the client is nil, http.DefaultClient is used.
+//
+// The provided ctx must be non-nil. If it is canceled or times out,
+// ctx.Err() will be returned.
+func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
+ if client == nil {
+ client = http.DefaultClient
+ }
+ resp, err := client.Do(req.WithContext(ctx))
+ // If we got an error, and the context has been canceled,
+ // the context's error is probably more useful.
+ if err != nil {
+ select {
+ case <-ctx.Done():
+ err = ctx.Err()
+ default:
+ }
+ }
+ return resp, err
+}
+
+// Get issues a GET request via the Do function.
+func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ return Do(ctx, client, req)
+}
+
+// Head issues a HEAD request via the Do function.
+func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+ req, err := http.NewRequest("HEAD", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ return Do(ctx, client, req)
+}
+
+// Post issues a POST request via the Do function.
+func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
+ req, err := http.NewRequest("POST", url, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", bodyType)
+ return Do(ctx, client, req)
+}
+
+// PostForm issues a POST request via the Do function.
+func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
+ return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
+}
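A short sketch of the helpers above in use, assuming a placeholder URL: the request is issued with a deadline, and ctx.Err() is surfaced if the context expires before the response arrives.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"time"

	"golang.org/x/net/context"
	"golang.org/x/net/context/ctxhttp"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// A nil client means http.DefaultClient is used.
	resp, err := ctxhttp.Get(ctx, nil, "https://example.com/")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println("got", len(body), "bytes")
}
```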
diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go
new file mode 100644
index 00000000..926870cc
--- /dev/null
+++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go
@@ -0,0 +1,147 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.7
+
+package ctxhttp // import "golang.org/x/net/context/ctxhttp"
+
+import (
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "golang.org/x/net/context"
+)
+
+func nop() {}
+
+var (
+ testHookContextDoneBeforeHeaders = nop
+ testHookDoReturned = nop
+ testHookDidBodyClose = nop
+)
+
+// Do sends an HTTP request with the provided http.Client and returns an HTTP response.
+// If the client is nil, http.DefaultClient is used.
+// If the context is canceled or times out, ctx.Err() will be returned.
+func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
+ if client == nil {
+ client = http.DefaultClient
+ }
+
+ // TODO(djd): Respect any existing value of req.Cancel.
+ cancel := make(chan struct{})
+ req.Cancel = cancel
+
+ type responseAndError struct {
+ resp *http.Response
+ err error
+ }
+ result := make(chan responseAndError, 1)
+
+ // Make local copies of test hooks closed over by goroutines below.
+ // Prevents data races in tests.
+ testHookDoReturned := testHookDoReturned
+ testHookDidBodyClose := testHookDidBodyClose
+
+ go func() {
+ resp, err := client.Do(req)
+ testHookDoReturned()
+ result <- responseAndError{resp, err}
+ }()
+
+ var resp *http.Response
+
+ select {
+ case <-ctx.Done():
+ testHookContextDoneBeforeHeaders()
+ close(cancel)
+ // Clean up after the goroutine calling client.Do:
+ go func() {
+ if r := <-result; r.resp != nil {
+ testHookDidBodyClose()
+ r.resp.Body.Close()
+ }
+ }()
+ return nil, ctx.Err()
+ case r := <-result:
+ var err error
+ resp, err = r.resp, r.err
+ if err != nil {
+ return resp, err
+ }
+ }
+
+ c := make(chan struct{})
+ go func() {
+ select {
+ case <-ctx.Done():
+ close(cancel)
+ case <-c:
+ // The response's Body is closed.
+ }
+ }()
+ resp.Body = &notifyingReader{resp.Body, c}
+
+ return resp, nil
+}
+
+// Get issues a GET request via the Do function.
+func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ return Do(ctx, client, req)
+}
+
+// Head issues a HEAD request via the Do function.
+func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+ req, err := http.NewRequest("HEAD", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ return Do(ctx, client, req)
+}
+
+// Post issues a POST request via the Do function.
+func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
+ req, err := http.NewRequest("POST", url, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", bodyType)
+ return Do(ctx, client, req)
+}
+
+// PostForm issues a POST request via the Do function.
+func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
+ return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
+}
+
+// notifyingReader is an io.ReadCloser that closes the notify channel after
+// Close is called or a Read fails on the underlying ReadCloser.
+type notifyingReader struct {
+ io.ReadCloser
+ notify chan<- struct{}
+}
+
+func (r *notifyingReader) Read(p []byte) (int, error) {
+ n, err := r.ReadCloser.Read(p)
+ if err != nil && r.notify != nil {
+ close(r.notify)
+ r.notify = nil
+ }
+ return n, err
+}
+
+func (r *notifyingReader) Close() error {
+ err := r.ReadCloser.Close()
+ if r.notify != nil {
+ close(r.notify)
+ r.notify = nil
+ }
+ return err
+}
diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go
new file mode 100644
index 00000000..d20f52b7
--- /dev/null
+++ b/vendor/golang.org/x/net/context/go17.go
@@ -0,0 +1,72 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+package context
+
+import (
+ "context" // standard library's context, as of Go 1.7
+ "time"
+)
+
+var (
+ todo = context.TODO()
+ background = context.Background()
+)
+
+// Canceled is the error returned by Context.Err when the context is canceled.
+var Canceled = context.Canceled
+
+// DeadlineExceeded is the error returned by Context.Err when the context's
+// deadline passes.
+var DeadlineExceeded = context.DeadlineExceeded
+
+// WithCancel returns a copy of parent with a new Done channel. The returned
+// context's Done channel is closed when the returned cancel function is called
+// or when the parent context's Done channel is closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
+ ctx, f := context.WithCancel(parent)
+ return ctx, CancelFunc(f)
+}
+
+// WithDeadline returns a copy of the parent context with the deadline adjusted
+// to be no later than d. If the parent's deadline is already earlier than d,
+// WithDeadline(parent, d) is semantically equivalent to parent. The returned
+// context's Done channel is closed when the deadline expires, when the returned
+// cancel function is called, or when the parent context's Done channel is
+// closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
+ ctx, f := context.WithDeadline(parent, deadline)
+ return ctx, CancelFunc(f)
+}
+
+// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete:
+//
+// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
+// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
+// defer cancel() // releases resources if slowOperation completes before timeout elapses
+// return slowOperation(ctx)
+// }
+func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
+ return WithDeadline(parent, time.Now().Add(timeout))
+}
+
+// WithValue returns a copy of parent in which the value associated with key is
+// val.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+func WithValue(parent Context, key interface{}, val interface{}) Context {
+ return context.WithValue(parent, key, val)
+}
diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go
new file mode 100644
index 00000000..d88bd1db
--- /dev/null
+++ b/vendor/golang.org/x/net/context/go19.go
@@ -0,0 +1,20 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.9
+
+package context
+
+import "context" // standard library's context, as of Go 1.7
+
+// A Context carries a deadline, a cancelation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+type Context = context.Context
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc = context.CancelFunc
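A small sketch (assuming Go 1.9+) of what the type aliases above buy: a standard library context satisfies golang.org/x/net/context.Context and vice versa, so the two packages can be mixed freely.

```go
package main

import (
	stdcontext "context"
	"fmt"

	netcontext "golang.org/x/net/context"
)

func useNet(ctx netcontext.Context) { fmt.Println(ctx.Err()) }
func useStd(ctx stdcontext.Context) { fmt.Println(ctx.Err()) }

func main() {
	useNet(stdcontext.Background()) // std context where x/net/context is expected
	useStd(netcontext.Background()) // and the other way around
}
```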
diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go
new file mode 100644
index 00000000..0f35592d
--- /dev/null
+++ b/vendor/golang.org/x/net/context/pre_go17.go
@@ -0,0 +1,300 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.7
+
+package context
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+ "time"
+)
+
+// An emptyCtx is never canceled, has no values, and has no deadline. It is not
+// struct{}, since vars of this type must have distinct addresses.
+type emptyCtx int
+
+func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
+ return
+}
+
+func (*emptyCtx) Done() <-chan struct{} {
+ return nil
+}
+
+func (*emptyCtx) Err() error {
+ return nil
+}
+
+func (*emptyCtx) Value(key interface{}) interface{} {
+ return nil
+}
+
+func (e *emptyCtx) String() string {
+ switch e {
+ case background:
+ return "context.Background"
+ case todo:
+ return "context.TODO"
+ }
+ return "unknown empty Context"
+}
+
+var (
+ background = new(emptyCtx)
+ todo = new(emptyCtx)
+)
+
+// Canceled is the error returned by Context.Err when the context is canceled.
+var Canceled = errors.New("context canceled")
+
+// DeadlineExceeded is the error returned by Context.Err when the context's
+// deadline passes.
+var DeadlineExceeded = errors.New("context deadline exceeded")
+
+// WithCancel returns a copy of parent with a new Done channel. The returned
+// context's Done channel is closed when the returned cancel function is called
+// or when the parent context's Done channel is closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
+ c := newCancelCtx(parent)
+ propagateCancel(parent, c)
+ return c, func() { c.cancel(true, Canceled) }
+}
+
+// newCancelCtx returns an initialized cancelCtx.
+func newCancelCtx(parent Context) *cancelCtx {
+ return &cancelCtx{
+ Context: parent,
+ done: make(chan struct{}),
+ }
+}
+
+// propagateCancel arranges for child to be canceled when parent is.
+func propagateCancel(parent Context, child canceler) {
+ if parent.Done() == nil {
+ return // parent is never canceled
+ }
+ if p, ok := parentCancelCtx(parent); ok {
+ p.mu.Lock()
+ if p.err != nil {
+ // parent has already been canceled
+ child.cancel(false, p.err)
+ } else {
+ if p.children == nil {
+ p.children = make(map[canceler]bool)
+ }
+ p.children[child] = true
+ }
+ p.mu.Unlock()
+ } else {
+ go func() {
+ select {
+ case <-parent.Done():
+ child.cancel(false, parent.Err())
+ case <-child.Done():
+ }
+ }()
+ }
+}
+
+// parentCancelCtx follows a chain of parent references until it finds a
+// *cancelCtx. This function understands how each of the concrete types in this
+// package represents its parent.
+func parentCancelCtx(parent Context) (*cancelCtx, bool) {
+ for {
+ switch c := parent.(type) {
+ case *cancelCtx:
+ return c, true
+ case *timerCtx:
+ return c.cancelCtx, true
+ case *valueCtx:
+ parent = c.Context
+ default:
+ return nil, false
+ }
+ }
+}
+
+// removeChild removes a context from its parent.
+func removeChild(parent Context, child canceler) {
+ p, ok := parentCancelCtx(parent)
+ if !ok {
+ return
+ }
+ p.mu.Lock()
+ if p.children != nil {
+ delete(p.children, child)
+ }
+ p.mu.Unlock()
+}
+
+// A canceler is a context type that can be canceled directly. The
+// implementations are *cancelCtx and *timerCtx.
+type canceler interface {
+ cancel(removeFromParent bool, err error)
+ Done() <-chan struct{}
+}
+
+// A cancelCtx can be canceled. When canceled, it also cancels any children
+// that implement canceler.
+type cancelCtx struct {
+ Context
+
+ done chan struct{} // closed by the first cancel call.
+
+ mu sync.Mutex
+ children map[canceler]bool // set to nil by the first cancel call
+ err error // set to non-nil by the first cancel call
+}
+
+func (c *cancelCtx) Done() <-chan struct{} {
+ return c.done
+}
+
+func (c *cancelCtx) Err() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.err
+}
+
+func (c *cancelCtx) String() string {
+ return fmt.Sprintf("%v.WithCancel", c.Context)
+}
+
+// cancel closes c.done, cancels each of c's children, and, if
+// removeFromParent is true, removes c from its parent's children.
+func (c *cancelCtx) cancel(removeFromParent bool, err error) {
+ if err == nil {
+ panic("context: internal error: missing cancel error")
+ }
+ c.mu.Lock()
+ if c.err != nil {
+ c.mu.Unlock()
+ return // already canceled
+ }
+ c.err = err
+ close(c.done)
+ for child := range c.children {
+ // NOTE: acquiring the child's lock while holding parent's lock.
+ child.cancel(false, err)
+ }
+ c.children = nil
+ c.mu.Unlock()
+
+ if removeFromParent {
+ removeChild(c.Context, c)
+ }
+}
+
+// WithDeadline returns a copy of the parent context with the deadline adjusted
+// to be no later than d. If the parent's deadline is already earlier than d,
+// WithDeadline(parent, d) is semantically equivalent to parent. The returned
+// context's Done channel is closed when the deadline expires, when the returned
+// cancel function is called, or when the parent context's Done channel is
+// closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
+ if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
+ // The current deadline is already sooner than the new one.
+ return WithCancel(parent)
+ }
+ c := &timerCtx{
+ cancelCtx: newCancelCtx(parent),
+ deadline: deadline,
+ }
+ propagateCancel(parent, c)
+ d := deadline.Sub(time.Now())
+ if d <= 0 {
+ c.cancel(true, DeadlineExceeded) // deadline has already passed
+ return c, func() { c.cancel(true, Canceled) }
+ }
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.err == nil {
+ c.timer = time.AfterFunc(d, func() {
+ c.cancel(true, DeadlineExceeded)
+ })
+ }
+ return c, func() { c.cancel(true, Canceled) }
+}
+
+// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
+// implement Done and Err. It implements cancel by stopping its timer then
+// delegating to cancelCtx.cancel.
+type timerCtx struct {
+ *cancelCtx
+ timer *time.Timer // Under cancelCtx.mu.
+
+ deadline time.Time
+}
+
+func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
+ return c.deadline, true
+}
+
+func (c *timerCtx) String() string {
+ return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
+}
+
+func (c *timerCtx) cancel(removeFromParent bool, err error) {
+ c.cancelCtx.cancel(false, err)
+ if removeFromParent {
+ // Remove this timerCtx from its parent cancelCtx's children.
+ removeChild(c.cancelCtx.Context, c)
+ }
+ c.mu.Lock()
+ if c.timer != nil {
+ c.timer.Stop()
+ c.timer = nil
+ }
+ c.mu.Unlock()
+}
+
+// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete:
+//
+// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
+// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
+// defer cancel() // releases resources if slowOperation completes before timeout elapses
+// return slowOperation(ctx)
+// }
+func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
+ return WithDeadline(parent, time.Now().Add(timeout))
+}
+
+// WithValue returns a copy of parent in which the value associated with key is
+// val.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+func WithValue(parent Context, key interface{}, val interface{}) Context {
+ return &valueCtx{parent, key, val}
+}
+
+// A valueCtx carries a key-value pair. It implements Value for that key and
+// delegates all other calls to the embedded Context.
+type valueCtx struct {
+ Context
+ key, val interface{}
+}
+
+func (c *valueCtx) String() string {
+ return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
+}
+
+func (c *valueCtx) Value(key interface{}) interface{} {
+ if c.key == key {
+ return c.val
+ }
+ return c.Context.Value(key)
+}
diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go
new file mode 100644
index 00000000..b105f80b
--- /dev/null
+++ b/vendor/golang.org/x/net/context/pre_go19.go
@@ -0,0 +1,109 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.9
+
+package context
+
+import "time"
+
+// A Context carries a deadline, a cancelation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+type Context interface {
+ // Deadline returns the time when work done on behalf of this context
+ // should be canceled. Deadline returns ok==false when no deadline is
+ // set. Successive calls to Deadline return the same results.
+ Deadline() (deadline time.Time, ok bool)
+
+ // Done returns a channel that's closed when work done on behalf of this
+ // context should be canceled. Done may return nil if this context can
+ // never be canceled. Successive calls to Done return the same value.
+ //
+ // WithCancel arranges for Done to be closed when cancel is called;
+ // WithDeadline arranges for Done to be closed when the deadline
+ // expires; WithTimeout arranges for Done to be closed when the timeout
+ // elapses.
+ //
+ // Done is provided for use in select statements:
+ //
+ // // Stream generates values with DoSomething and sends them to out
+ // // until DoSomething returns an error or ctx.Done is closed.
+ // func Stream(ctx context.Context, out chan<- Value) error {
+ // for {
+ // v, err := DoSomething(ctx)
+ // if err != nil {
+ // return err
+ // }
+ // select {
+ // case <-ctx.Done():
+ // return ctx.Err()
+ // case out <- v:
+ // }
+ // }
+ // }
+ //
+ // See http://blog.golang.org/pipelines for more examples of how to use
+ // a Done channel for cancelation.
+ Done() <-chan struct{}
+
+ // Err returns a non-nil error value after Done is closed. Err returns
+ // Canceled if the context was canceled or DeadlineExceeded if the
+ // context's deadline passed. No other values for Err are defined.
+ // After Done is closed, successive calls to Err return the same value.
+ Err() error
+
+ // Value returns the value associated with this context for key, or nil
+ // if no value is associated with key. Successive calls to Value with
+ // the same key returns the same result.
+ //
+ // Use context values only for request-scoped data that transits
+ // processes and API boundaries, not for passing optional parameters to
+ // functions.
+ //
+ // A key identifies a specific value in a Context. Functions that wish
+ // to store values in Context typically allocate a key in a global
+ // variable then use that key as the argument to context.WithValue and
+ // Context.Value. A key can be any type that supports equality;
+ // packages should define keys as an unexported type to avoid
+ // collisions.
+ //
+ // Packages that define a Context key should provide type-safe accessors
+	// for the values stored using that key:
+ //
+ // // Package user defines a User type that's stored in Contexts.
+ // package user
+ //
+ // import "golang.org/x/net/context"
+ //
+ // // User is the type of value stored in the Contexts.
+ // type User struct {...}
+ //
+ // // key is an unexported type for keys defined in this package.
+ // // This prevents collisions with keys defined in other packages.
+ // type key int
+ //
+ // // userKey is the key for user.User values in Contexts. It is
+ // // unexported; clients use user.NewContext and user.FromContext
+ // // instead of using this key directly.
+ // var userKey key = 0
+ //
+ // // NewContext returns a new Context that carries value u.
+ // func NewContext(ctx context.Context, u *User) context.Context {
+ // return context.WithValue(ctx, userKey, u)
+ // }
+ //
+ // // FromContext returns the User value stored in ctx, if any.
+ // func FromContext(ctx context.Context) (*User, bool) {
+ // u, ok := ctx.Value(userKey).(*User)
+ // return u, ok
+ // }
+ Value(key interface{}) interface{}
+}
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc func()
diff --git a/vendor/golang.org/x/oauth2/.travis.yml b/vendor/golang.org/x/oauth2/.travis.yml
new file mode 100644
index 00000000..fa139db2
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/.travis.yml
@@ -0,0 +1,13 @@
+language: go
+
+go:
+ - tip
+
+install:
+ - export GOPATH="$HOME/gopath"
+ - mkdir -p "$GOPATH/src/golang.org/x"
+ - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2"
+ - go get -v -t -d golang.org/x/oauth2/...
+
+script:
+ - go test -v golang.org/x/oauth2/...
diff --git a/vendor/golang.org/x/oauth2/AUTHORS b/vendor/golang.org/x/oauth2/AUTHORS
new file mode 100644
index 00000000..15167cd7
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/golang.org/x/oauth2/CONTRIBUTING.md
new file mode 100644
index 00000000..dfbed62c
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/CONTRIBUTING.md
@@ -0,0 +1,26 @@
+# Contributing to Go
+
+Go is an open source project.
+
+It is the work of hundreds of contributors. We appreciate your help!
+
+## Filing issues
+
+When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions:
+
+1. What version of Go are you using (`go version`)?
+2. What operating system and processor architecture are you using?
+3. What did you do?
+4. What did you expect to see?
+5. What did you see instead?
+
+General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
+The gophers there will answer or ask you to file an issue if you've tripped over a bug.
+
+## Contributing code
+
+Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
+before sending patches.
+
+Unless otherwise noted, the Go source files are distributed under
+the BSD-style license found in the LICENSE file.
diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/golang.org/x/oauth2/CONTRIBUTORS
new file mode 100644
index 00000000..1c4577e9
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE
new file mode 100644
index 00000000..6a66aea5
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md
new file mode 100644
index 00000000..eb8dcee1
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/README.md
@@ -0,0 +1,77 @@
+# OAuth2 for Go
+
+[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2)
+[![GoDoc](https://godoc.org/golang.org/x/oauth2?status.svg)](https://godoc.org/golang.org/x/oauth2)
+
+The oauth2 package contains a client implementation for the OAuth 2.0 spec.
+
+## Installation
+
+~~~~
+go get golang.org/x/oauth2
+~~~~
+
+Or you can manually git clone the repository to
+`$(go env GOPATH)/src/golang.org/x/oauth2`.
+
+See godoc for further documentation and examples.
+
+* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2)
+* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google)
+
+
+## App Engine
+
+In change 96e89be (March 2015), we removed the `oauth2.Context2` type in favor
+of the [`context.Context`](https://golang.org/x/net/context#Context) type from
+the `golang.org/x/net/context` package.
+
+This means it's no longer possible to use the "Classic App Engine"
+`appengine.Context` type with the `oauth2` package. (You're using
+Classic App Engine if you import the package `"appengine"`.)
+
+To work around this, you may use the new `"google.golang.org/appengine"`
+package. This package has almost the same API as the `"appengine"` package,
+but it can be fetched with `go get` and used on "Managed VMs" as well as
+Classic App Engine.
+
+See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app)
+for information on updating your app.
+
+If you don't want to update your entire app to use the new App Engine packages,
+you may use both sets of packages in parallel, using only the new packages
+with the `oauth2` package.
+
+```go
+import (
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/google"
+ newappengine "google.golang.org/appengine"
+ newurlfetch "google.golang.org/appengine/urlfetch"
+
+ "appengine"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+ var c appengine.Context = appengine.NewContext(r)
+ c.Infof("Logging a message with the old package")
+
+ var ctx context.Context = newappengine.NewContext(r)
+ client := &http.Client{
+ Transport: &oauth2.Transport{
+ Source: google.AppEngineTokenSource(ctx, "scope"),
+ Base: &newurlfetch.Transport{Context: ctx},
+ },
+ }
+ client.Get("...")
+}
+```
+
+## Report Issues / Send Patches
+
+This repository uses Gerrit for code changes. To learn how to submit changes to
+this repository, see https://golang.org/doc/contribute.html.
+
+The main issue tracker for the oauth2 repository is located at
+https://github.com/golang/oauth2/issues.
diff --git a/vendor/golang.org/x/oauth2/internal/client_appengine.go b/vendor/golang.org/x/oauth2/internal/client_appengine.go
new file mode 100644
index 00000000..74348718
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/client_appengine.go
@@ -0,0 +1,13 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import "google.golang.org/appengine/urlfetch"
+
+func init() {
+ appengineClientHook = urlfetch.Client
+}
diff --git a/vendor/golang.org/x/oauth2/internal/doc.go b/vendor/golang.org/x/oauth2/internal/doc.go
new file mode 100644
index 00000000..03265e88
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/doc.go
@@ -0,0 +1,6 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for the oauth2 package.
+package internal
diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go
new file mode 100644
index 00000000..fc63fcab
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/oauth2.go
@@ -0,0 +1,37 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "fmt"
+)
+
+// ParseKey converts the binary contents of a private key file
+// to an *rsa.PrivateKey. It detects whether the private key is in a
+// PEM container or not. If so, it extracts the private key
+// from the PEM container before conversion. It only supports PEM
+// containers with no passphrase.
+func ParseKey(key []byte) (*rsa.PrivateKey, error) {
+ block, _ := pem.Decode(key)
+ if block != nil {
+ key = block.Bytes
+ }
+ parsedKey, err := x509.ParsePKCS8PrivateKey(key)
+ if err != nil {
+ parsedKey, err = x509.ParsePKCS1PrivateKey(key)
+ if err != nil {
+			return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: %v", err)
+ }
+ }
+ parsed, ok := parsedKey.(*rsa.PrivateKey)
+ if !ok {
+ return nil, errors.New("private key is invalid")
+ }
+ return parsed, nil
+}
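A standalone sketch of the fallback logic ParseKey implements (the internal package itself cannot be imported from outside golang.org/x/oauth2): generate a key, PEM-encode it as PKCS#1, then parse it back by stripping the PEM container and trying PKCS#8 before PKCS#1.

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"
)

func parseKey(key []byte) (*rsa.PrivateKey, error) {
	if block, _ := pem.Decode(key); block != nil {
		key = block.Bytes // strip the PEM container, keep the DER bytes
	}
	if k, err := x509.ParsePKCS8PrivateKey(key); err == nil {
		if rsaKey, ok := k.(*rsa.PrivateKey); ok {
			return rsaKey, nil
		}
		return nil, fmt.Errorf("not an RSA key")
	}
	return x509.ParsePKCS1PrivateKey(key) // PKCS#8 failed; try PKCS#1
}

func main() {
	k, _ := rsa.GenerateKey(rand.Reader, 2048)
	pemBytes := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(k),
	})
	parsed, err := parseKey(pemBytes)
	fmt.Println(err == nil && parsed.N.Cmp(k.N) == 0) // true if the round trip worked
}
```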
diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go
new file mode 100644
index 00000000..5c5451ad
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/token.go
@@ -0,0 +1,272 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/net/context/ctxhttp"
+)
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
+//
+// This type is a mirror of oauth2.Token and exists to break
+// an otherwise-circular dependency. Other internal packages
+// should convert this Token into an oauth2.Token before use.
+type Token struct {
+ // AccessToken is the token that authorizes and authenticates
+ // the requests.
+ AccessToken string
+
+ // TokenType is the type of token.
+ // The Type method returns either this or "Bearer", the default.
+ TokenType string
+
+ // RefreshToken is a token that's used by the application
+ // (as opposed to the user) to refresh the access token
+ // if it expires.
+ RefreshToken string
+
+ // Expiry is the optional expiration time of the access token.
+ //
+ // If zero, TokenSource implementations will reuse the same
+ // token forever and RefreshToken or equivalent
+ // mechanisms for that TokenSource will not be used.
+ Expiry time.Time
+
+ // Raw optionally contains extra metadata from the server
+ // when updating a token.
+ Raw interface{}
+}
+
+// tokenJSON is the struct representing the HTTP response from OAuth2
+// providers returning a token in JSON form.
+type tokenJSON struct {
+ AccessToken string `json:"access_token"`
+ TokenType string `json:"token_type"`
+ RefreshToken string `json:"refresh_token"`
+ ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number
+ Expires expirationTime `json:"expires"` // broken Facebook spelling of expires_in
+}
+
+func (e *tokenJSON) expiry() (t time.Time) {
+ if v := e.ExpiresIn; v != 0 {
+ return time.Now().Add(time.Duration(v) * time.Second)
+ }
+ if v := e.Expires; v != 0 {
+ return time.Now().Add(time.Duration(v) * time.Second)
+ }
+ return
+}
+
+type expirationTime int32
+
+func (e *expirationTime) UnmarshalJSON(b []byte) error {
+ var n json.Number
+ err := json.Unmarshal(b, &n)
+ if err != nil {
+ return err
+ }
+ i, err := n.Int64()
+ if err != nil {
+ return err
+ }
+ *e = expirationTime(i)
+ return nil
+}
+
+var brokenAuthHeaderProviders = []string{
+ "https://accounts.google.com/",
+ "https://api.codeswholesale.com/oauth/token",
+ "https://api.dropbox.com/",
+ "https://api.dropboxapi.com/",
+ "https://api.instagram.com/",
+ "https://api.netatmo.net/",
+ "https://api.odnoklassniki.ru/",
+ "https://api.pushbullet.com/",
+ "https://api.soundcloud.com/",
+ "https://api.twitch.tv/",
+ "https://id.twitch.tv/",
+ "https://app.box.com/",
+ "https://api.box.com/",
+ "https://connect.stripe.com/",
+ "https://login.mailchimp.com/",
+ "https://login.microsoftonline.com/",
+ "https://login.salesforce.com/",
+ "https://login.windows.net",
+ "https://login.live.com/",
+ "https://oauth.sandbox.trainingpeaks.com/",
+ "https://oauth.trainingpeaks.com/",
+ "https://oauth.vk.com/",
+ "https://openapi.baidu.com/",
+ "https://slack.com/",
+ "https://test-sandbox.auth.corp.google.com",
+ "https://test.salesforce.com/",
+ "https://user.gini.net/",
+ "https://www.douban.com/",
+ "https://www.googleapis.com/",
+ "https://www.linkedin.com/",
+ "https://www.strava.com/oauth/",
+ "https://www.wunderlist.com/oauth/",
+ "https://api.patreon.com/",
+ "https://sandbox.codeswholesale.com/oauth/token",
+ "https://api.sipgate.com/v1/authorization/oauth",
+ "https://api.medium.com/v1/tokens",
+ "https://log.finalsurge.com/oauth/token",
+ "https://multisport.todaysplan.com.au/rest/oauth/access_token",
+ "https://whats.todaysplan.com.au/rest/oauth/access_token",
+ "https://stackoverflow.com/oauth/access_token",
+ "https://account.health.nokia.com",
+}
+
+// brokenAuthHeaderDomains lists broken providers that issue dynamic endpoints.
+var brokenAuthHeaderDomains = []string{
+ ".auth0.com",
+ ".force.com",
+ ".myshopify.com",
+ ".okta.com",
+ ".oktapreview.com",
+}
+
+func RegisterBrokenAuthHeaderProvider(tokenURL string) {
+ brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL)
+}
+
+// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL
+// implements the OAuth2 spec correctly.
+// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
+// In summary:
+// - Reddit only accepts client secret in the Authorization header
+// - Dropbox accepts it in either the URL param or the Auth header, but not both.
+// - Google only accepts URL param (not spec compliant?), not Auth header
+// - Stripe only accepts client secret in Auth header with Bearer method, not Basic
+func providerAuthHeaderWorks(tokenURL string) bool {
+ for _, s := range brokenAuthHeaderProviders {
+ if strings.HasPrefix(tokenURL, s) {
+ // Some sites fail to implement the OAuth2 spec fully.
+ return false
+ }
+ }
+
+ if u, err := url.Parse(tokenURL); err == nil {
+ for _, s := range brokenAuthHeaderDomains {
+ if strings.HasSuffix(u.Host, s) {
+ return false
+ }
+ }
+ }
+
+ // Assume the provider implements the spec properly
+ // otherwise. We can add more exceptions as they're
+ // discovered. We will _not_ be adding configurable hooks
+ // to this package to let users select server bugs.
+ return true
+}
+
+func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values) (*Token, error) {
+ bustedAuth := !providerAuthHeaderWorks(tokenURL)
+ if bustedAuth {
+ if clientID != "" {
+ v.Set("client_id", clientID)
+ }
+ if clientSecret != "" {
+ v.Set("client_secret", clientSecret)
+ }
+ }
+ req, err := http.NewRequest("POST", tokenURL, strings.NewReader(v.Encode()))
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ if !bustedAuth {
+ req.SetBasicAuth(url.QueryEscape(clientID), url.QueryEscape(clientSecret))
+ }
+ r, err := ctxhttp.Do(ctx, ContextClient(ctx), req)
+ if err != nil {
+ return nil, err
+ }
+ defer r.Body.Close()
+ body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))
+ if err != nil {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+ }
+ if code := r.StatusCode; code < 200 || code > 299 {
+ return nil, &RetrieveError{
+ Response: r,
+ Body: body,
+ }
+ }
+
+ var token *Token
+ content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
+ switch content {
+ case "application/x-www-form-urlencoded", "text/plain":
+ vals, err := url.ParseQuery(string(body))
+ if err != nil {
+ return nil, err
+ }
+ token = &Token{
+ AccessToken: vals.Get("access_token"),
+ TokenType: vals.Get("token_type"),
+ RefreshToken: vals.Get("refresh_token"),
+ Raw: vals,
+ }
+ e := vals.Get("expires_in")
+ if e == "" {
+ // TODO(jbd): Facebook's OAuth2 implementation is broken and
+ // returns expires_in field in expires. Remove the fallback to expires,
+ // when Facebook fixes their implementation.
+ e = vals.Get("expires")
+ }
+ expires, _ := strconv.Atoi(e)
+ if expires != 0 {
+ token.Expiry = time.Now().Add(time.Duration(expires) * time.Second)
+ }
+ default:
+ var tj tokenJSON
+ if err = json.Unmarshal(body, &tj); err != nil {
+ return nil, err
+ }
+ token = &Token{
+ AccessToken: tj.AccessToken,
+ TokenType: tj.TokenType,
+ RefreshToken: tj.RefreshToken,
+ Expiry: tj.expiry(),
+ Raw: make(map[string]interface{}),
+ }
+ json.Unmarshal(body, &token.Raw) // no error checks for optional fields
+ }
+ // Don't overwrite `RefreshToken` with an empty value
+ // if this was a token refreshing request.
+ if token.RefreshToken == "" {
+ token.RefreshToken = v.Get("refresh_token")
+ }
+ if token.AccessToken == "" {
+ return token, errors.New("oauth2: server response missing access_token")
+ }
+ return token, nil
+}
+
+type RetrieveError struct {
+ Response *http.Response
+ Body []byte
+}
+
+func (r *RetrieveError) Error() string {
+ return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body)
+}
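A brief sketch of how callers opt a non-compliant provider into the query-parameter fallback described above, via the public wrapper in the oauth2 package (shown further down in this diff). The endpoint URL is a placeholder.

```go
package main

import "golang.org/x/oauth2"

func main() {
	// After this call, token requests to this endpoint send client_id and
	// client_secret as form values instead of an Authorization: Basic header.
	oauth2.RegisterBrokenAuthHeaderProvider("https://auth.example.com/oauth/token")
}
```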
diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go
new file mode 100644
index 00000000..d16f9ae1
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/transport.go
@@ -0,0 +1,34 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "net/http"
+
+ "golang.org/x/net/context"
+)
+
+// HTTPClient is the context key to use with golang.org/x/net/context's
+// WithValue function to associate an *http.Client value with a context.
+var HTTPClient ContextKey
+
+// ContextKey is just an empty struct. It exists so HTTPClient can be
+// an immutable public variable with a unique type. It's immutable
+// because nobody else can create a ContextKey, being unexported.
+type ContextKey struct{}
+
+var appengineClientHook func(context.Context) *http.Client
+
+func ContextClient(ctx context.Context) *http.Client {
+ if ctx != nil {
+ if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {
+ return hc
+ }
+ }
+ if appengineClientHook != nil {
+ return appengineClientHook(ctx)
+ }
+ return http.DefaultClient
+}
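A minimal sketch of how a caller supplies its own *http.Client through the context, which ContextClient above then picks up; oauth2.HTTPClient (defined in oauth2.go below) is the public alias of this key.

```go
package main

import (
	"net/http"
	"time"

	"golang.org/x/net/context"
	"golang.org/x/oauth2"
)

func main() {
	custom := &http.Client{Timeout: 10 * time.Second}

	// Token requests and oauth2.NewClient use `custom` instead of
	// http.DefaultClient because it is attached under the HTTPClient key.
	ctx := context.WithValue(context.Background(), oauth2.HTTPClient, custom)
	_ = ctx // pass this ctx to Config.Exchange, Config.Client, etc.
}
```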
diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go
new file mode 100644
index 00000000..16775d08
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/oauth2.go
@@ -0,0 +1,362 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package oauth2 provides support for making
+// OAuth2 authorized and authenticated HTTP requests,
+// as specified in RFC 6749.
+// It can additionally grant authorization with Bearer JWT.
+package oauth2 // import "golang.org/x/oauth2"
+
+import (
+ "bytes"
+ "errors"
+ "net/http"
+ "net/url"
+ "strings"
+ "sync"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2/internal"
+)
+
+// NoContext is the default context you should supply if not using
+// your own context.Context (see https://golang.org/x/net/context).
+//
+// Deprecated: Use context.Background() or context.TODO() instead.
+var NoContext = context.TODO()
+
+// RegisterBrokenAuthHeaderProvider registers an OAuth2 server
+// identified by the tokenURL prefix as an OAuth2 implementation
+// which doesn't support the HTTP Basic authentication
+// scheme to authenticate with the authorization server.
+// Once a server is registered, credentials (client_id and client_secret)
+// will be passed as query parameters rather than being present
+// in the Authorization header.
+// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
+func RegisterBrokenAuthHeaderProvider(tokenURL string) {
+ internal.RegisterBrokenAuthHeaderProvider(tokenURL)
+}
+
+// Config describes a typical 3-legged OAuth2 flow, with both the
+// client application information and the server's endpoint URLs.
+// For the client credentials 2-legged OAuth2 flow, see the clientcredentials
+// package (https://golang.org/x/oauth2/clientcredentials).
+type Config struct {
+ // ClientID is the application's ID.
+ ClientID string
+
+ // ClientSecret is the application's secret.
+ ClientSecret string
+
+ // Endpoint contains the resource server's token endpoint
+ // URLs. These are constants specific to each server and are
+ // often available via site-specific packages, such as
+ // google.Endpoint or github.Endpoint.
+ Endpoint Endpoint
+
+ // RedirectURL is the URL to redirect users going through
+ // the OAuth flow, after the resource owner's URLs.
+ RedirectURL string
+
+	// Scopes specifies the optional requested permissions.
+ Scopes []string
+}
+
+// A TokenSource is anything that can return a token.
+type TokenSource interface {
+ // Token returns a token or an error.
+ // Token must be safe for concurrent use by multiple goroutines.
+ // The returned Token must not be modified.
+ Token() (*Token, error)
+}
+
+// Endpoint contains the OAuth 2.0 provider's authorization and token
+// endpoint URLs.
+type Endpoint struct {
+ AuthURL string
+ TokenURL string
+}
+
+var (
+ // AccessTypeOnline and AccessTypeOffline are options passed
+ // to the Options.AuthCodeURL method. They modify the
+ // "access_type" field that gets sent in the URL returned by
+ // AuthCodeURL.
+ //
+ // Online is the default if neither is specified. If your
+ // application needs to refresh access tokens when the user
+ // is not present at the browser, then use offline. This will
+ // result in your application obtaining a refresh token the
+ // first time your application exchanges an authorization
+ // code for a user.
+ AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online")
+ AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline")
+
+ // ApprovalForce forces the users to view the consent dialog
+ // and confirm the permissions request at the URL returned
+ // from AuthCodeURL, even if they've already done so.
+ ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force")
+)
+
+// An AuthCodeOption is passed to Config.AuthCodeURL.
+type AuthCodeOption interface {
+ setValue(url.Values)
+}
+
+type setParam struct{ k, v string }
+
+func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) }
+
+// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters
+// to a provider's authorization endpoint.
+func SetAuthURLParam(key, value string) AuthCodeOption {
+ return setParam{key, value}
+}
+
+// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page
+// that asks for permissions for the required scopes explicitly.
+//
+// State is a token to protect the user from CSRF attacks. You must
+// always provide a non-empty string and validate that it matches
+// the state query parameter on your redirect callback.
+// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
+//
+// Opts may include AccessTypeOnline or AccessTypeOffline, as well
+// as ApprovalForce.
+// It can also be used to pass the PKCE challenge.
+// See https://www.oauth.com/oauth2-servers/pkce/ for more info.
+func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
+ var buf bytes.Buffer
+ buf.WriteString(c.Endpoint.AuthURL)
+ v := url.Values{
+ "response_type": {"code"},
+ "client_id": {c.ClientID},
+ }
+ if c.RedirectURL != "" {
+ v.Set("redirect_uri", c.RedirectURL)
+ }
+ if len(c.Scopes) > 0 {
+ v.Set("scope", strings.Join(c.Scopes, " "))
+ }
+ if state != "" {
+ // TODO(light): Docs say never to omit state; don't allow empty.
+ v.Set("state", state)
+ }
+ for _, opt := range opts {
+ opt.setValue(v)
+ }
+ if strings.Contains(c.Endpoint.AuthURL, "?") {
+ buf.WriteByte('&')
+ } else {
+ buf.WriteByte('?')
+ }
+ buf.WriteString(v.Encode())
+ return buf.String()
+}
+
+// PasswordCredentialsToken converts a resource owner username and password
+// pair into a token.
+//
+// Per the RFC, this grant type should only be used "when there is a high
+// degree of trust between the resource owner and the client (e.g., the client
+// is part of the device operating system or a highly privileged application),
+// and when other authorization grant types are not available."
+// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info.
+//
+// The HTTP client to use is derived from the context.
+// If nil, http.DefaultClient is used.
+func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) {
+ v := url.Values{
+ "grant_type": {"password"},
+ "username": {username},
+ "password": {password},
+ }
+ if len(c.Scopes) > 0 {
+ v.Set("scope", strings.Join(c.Scopes, " "))
+ }
+ return retrieveToken(ctx, c, v)
+}
+
+// Exchange converts an authorization code into a token.
+//
+// It is used after a resource provider redirects the user back
+// to the Redirect URI (the URL obtained from AuthCodeURL).
+//
+// The HTTP client to use is derived from the context.
+// If a client is not provided via the context, http.DefaultClient is used.
+//
+// The code will be in the *http.Request.FormValue("code"). Before
+// calling Exchange, be sure to validate FormValue("state").
+//
+// Opts may include the PKCE verifier code if previously used in AuthCodeURL.
+// See https://www.oauth.com/oauth2-servers/pkce/ for more info.
+func (c *Config) Exchange(ctx context.Context, code string, opts ...AuthCodeOption) (*Token, error) {
+ v := url.Values{
+ "grant_type": {"authorization_code"},
+ "code": {code},
+ }
+ if c.RedirectURL != "" {
+ v.Set("redirect_uri", c.RedirectURL)
+ }
+ for _, opt := range opts {
+ opt.setValue(v)
+ }
+ return retrieveToken(ctx, c, v)
+}
+
+// Client returns an HTTP client using the provided token.
+// The token will auto-refresh as necessary. The underlying
+// HTTP transport will be obtained using the provided context.
+// The returned client and its Transport should not be modified.
+func (c *Config) Client(ctx context.Context, t *Token) *http.Client {
+ return NewClient(ctx, c.TokenSource(ctx, t))
+}
+
+// TokenSource returns a TokenSource that returns t until t expires,
+// automatically refreshing it as necessary using the provided context.
+//
+// Most users will use Config.Client instead.
+func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource {
+ tkr := &tokenRefresher{
+ ctx: ctx,
+ conf: c,
+ }
+ if t != nil {
+ tkr.refreshToken = t.RefreshToken
+ }
+ return &reuseTokenSource{
+ t: t,
+ new: tkr,
+ }
+}
+
+// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token"
+// HTTP requests to renew a token using a RefreshToken.
+type tokenRefresher struct {
+ ctx context.Context // used to get HTTP requests
+ conf *Config
+ refreshToken string
+}
+
+// WARNING: Token is not safe for concurrent access, as it
+// updates the tokenRefresher's refreshToken field.
+// Within this package, it is used by reuseTokenSource which
+// synchronizes calls to this method with its own mutex.
+func (tf *tokenRefresher) Token() (*Token, error) {
+ if tf.refreshToken == "" {
+ return nil, errors.New("oauth2: token expired and refresh token is not set")
+ }
+
+ tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{
+ "grant_type": {"refresh_token"},
+ "refresh_token": {tf.refreshToken},
+ })
+
+ if err != nil {
+ return nil, err
+ }
+ if tf.refreshToken != tk.RefreshToken {
+ tf.refreshToken = tk.RefreshToken
+ }
+ return tk, err
+}
+
+// reuseTokenSource is a TokenSource that holds a single token in memory
+// and validates its expiry before each call to retrieve it with
+// Token. If it's expired, it will be auto-refreshed using the
+// new TokenSource.
+type reuseTokenSource struct {
+ new TokenSource // called when t is expired.
+
+ mu sync.Mutex // guards t
+ t *Token
+}
+
+// Token returns the current token if it's still valid, else it will
+// refresh the current token (using the wrapped TokenSource for HTTP
+// client information) and return the new one.
+func (s *reuseTokenSource) Token() (*Token, error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.t.Valid() {
+ return s.t, nil
+ }
+ t, err := s.new.Token()
+ if err != nil {
+ return nil, err
+ }
+ s.t = t
+ return t, nil
+}
+
+// StaticTokenSource returns a TokenSource that always returns the same token.
+// Because the provided token t is never refreshed, StaticTokenSource is only
+// useful for tokens that never expire.
+func StaticTokenSource(t *Token) TokenSource {
+ return staticTokenSource{t}
+}
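+
+// Editor's illustrative sketch, not part of the upstream package: a static
+// source is the usual way to use a long-lived credential such as a personal
+// access token together with NewClient.
+func exampleStaticClient(ctx context.Context, accessToken string) *http.Client {
+ return NewClient(ctx, StaticTokenSource(&Token{AccessToken: accessToken}))
+}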
+
+// staticTokenSource is a TokenSource that always returns the same Token.
+type staticTokenSource struct {
+ t *Token
+}
+
+func (s staticTokenSource) Token() (*Token, error) {
+ return s.t, nil
+}
+
+// HTTPClient is the context key to use with golang.org/x/net/context's
+// WithValue function to associate an *http.Client value with a context.
+var HTTPClient internal.ContextKey
+
+// NewClient creates an *http.Client from a Context and TokenSource.
+// The returned client is not valid beyond the lifetime of the context.
+//
+// Note that if a custom *http.Client is provided via the Context it
+// is used only for token acquisition and is not used to configure the
+// *http.Client returned from NewClient.
+//
+// As a special case, if src is nil, a non-OAuth2 client is returned
+// using the provided context. This exists to support related OAuth2
+// packages.
+func NewClient(ctx context.Context, src TokenSource) *http.Client {
+ if src == nil {
+ return internal.ContextClient(ctx)
+ }
+ return &http.Client{
+ Transport: &Transport{
+ Base: internal.ContextClient(ctx).Transport,
+ Source: ReuseTokenSource(nil, src),
+ },
+ }
+}
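+
+// Editor's illustrative sketch, not part of the upstream package: the
+// HTTPClient context key above lets callers substitute the *http.Client used
+// for the token-endpoint request itself (for example, one with a timeout).
+func exampleCustomTokenClient(conf *Config, hc *http.Client, code string) (*Token, error) {
+ ctx := context.WithValue(context.Background(), HTTPClient, hc)
+ return conf.Exchange(ctx, code)
+}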
+
+// ReuseTokenSource returns a TokenSource which repeatedly returns the
+// same token as long as it's valid, starting with t.
+// When its cached token is invalid, a new token is obtained from src.
+//
+// ReuseTokenSource is typically used to reuse tokens from a cache
+// (such as a file on disk) between runs of a program, rather than
+// obtaining new tokens unnecessarily.
+//
+// The initial token t may be nil, in which case the TokenSource is
+// wrapped in a caching version if it isn't one already. This also
+// means it's always safe to wrap ReuseTokenSource around any other
+// TokenSource without adverse effects.
+func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
+ // Don't wrap a reuseTokenSource in itself. That would work,
+ // but cause an unnecessary number of mutex operations.
+ // Just build the equivalent one.
+ if rt, ok := src.(*reuseTokenSource); ok {
+ if t == nil {
+ // Just use it directly.
+ return rt
+ }
+ src = rt.new
+ }
+ return &reuseTokenSource{
+ t: t,
+ new: src,
+ }
+}
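+
+// Editor's illustrative sketch, not part of the upstream package: wrap a
+// token loaded from a cache (for example a file on disk) so it is reused
+// until it expires and only then refreshed via the config's source.
+func exampleCachedClient(ctx context.Context, conf *Config, cached *Token) *http.Client {
+ src := ReuseTokenSource(cached, conf.TokenSource(ctx, cached))
+ return NewClient(ctx, src)
+}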
diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go
new file mode 100644
index 00000000..34db8cdc
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/token.go
@@ -0,0 +1,175 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2/internal"
+)
+
+// expiryDelta determines how much earlier a token should be considered
+// expired than its actual expiration time. It is used to avoid late
+// expirations due to client-server time mismatches.
+const expiryDelta = 10 * time.Second
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
+//
+// Most users of this package should not access fields of Token
+// directly. They're exported mostly for use by related packages
+// implementing derivative OAuth2 flows.
+type Token struct {
+ // AccessToken is the token that authorizes and authenticates
+ // the requests.
+ AccessToken string `json:"access_token"`
+
+ // TokenType is the type of token.
+ // The Type method returns either this or "Bearer", the default.
+ TokenType string `json:"token_type,omitempty"`
+
+ // RefreshToken is a token that's used by the application
+ // (as opposed to the user) to refresh the access token
+ // if it expires.
+ RefreshToken string `json:"refresh_token,omitempty"`
+
+ // Expiry is the optional expiration time of the access token.
+ //
+ // If zero, TokenSource implementations will reuse the same
+ // token forever and RefreshToken or equivalent
+ // mechanisms for that TokenSource will not be used.
+ Expiry time.Time `json:"expiry,omitempty"`
+
+ // raw optionally contains extra metadata from the server
+ // when updating a token.
+ raw interface{}
+}
+
+// Type returns t.TokenType if non-empty, else "Bearer".
+func (t *Token) Type() string {
+ if strings.EqualFold(t.TokenType, "bearer") {
+ return "Bearer"
+ }
+ if strings.EqualFold(t.TokenType, "mac") {
+ return "MAC"
+ }
+ if strings.EqualFold(t.TokenType, "basic") {
+ return "Basic"
+ }
+ if t.TokenType != "" {
+ return t.TokenType
+ }
+ return "Bearer"
+}
+
+// SetAuthHeader sets the Authorization header to r using the access
+// token in t.
+//
+// This method is unnecessary when using Transport or an HTTP Client
+// returned by this package.
+func (t *Token) SetAuthHeader(r *http.Request) {
+ r.Header.Set("Authorization", t.Type()+" "+t.AccessToken)
+}
+
+// WithExtra returns a new Token that's a clone of t, but using the
+// provided raw extra map. This is only intended for use by packages
+// implementing derivative OAuth2 flows.
+func (t *Token) WithExtra(extra interface{}) *Token {
+ t2 := new(Token)
+ *t2 = *t
+ t2.raw = extra
+ return t2
+}
+
+// Extra returns an extra field.
+// Extra fields are key-value pairs returned by the server as a
+// part of the token retrieval response.
+func (t *Token) Extra(key string) interface{} {
+ if raw, ok := t.raw.(map[string]interface{}); ok {
+ return raw[key]
+ }
+
+ vals, ok := t.raw.(url.Values)
+ if !ok {
+ return nil
+ }
+
+ v := vals.Get(key)
+ switch s := strings.TrimSpace(v); strings.Count(s, ".") {
+ case 0: // Contains no "."; try to parse as int
+ if i, err := strconv.ParseInt(s, 10, 64); err == nil {
+ return i
+ }
+ case 1: // Contains a single "."; try to parse as float
+ if f, err := strconv.ParseFloat(s, 64); err == nil {
+ return f
+ }
+ }
+
+ return v
+}
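+
+// Editor's illustrative sketch, not part of the upstream package: reading a
+// provider-specific field from the token response; "id_token" is only an
+// example key, returned by OpenID Connect providers among others.
+func exampleIDToken(tok *Token) (string, bool) {
+ s, ok := tok.Extra("id_token").(string)
+ return s, ok
+}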
+
+// expired reports whether the token is expired.
+// t must be non-nil.
+func (t *Token) expired() bool {
+ if t.Expiry.IsZero() {
+ return false
+ }
+ return t.Expiry.Round(0).Add(-expiryDelta).Before(time.Now())
+}
+
+// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
+func (t *Token) Valid() bool {
+ return t != nil && t.AccessToken != "" && !t.expired()
+}
+
+// tokenFromInternal maps an *internal.Token struct into
+// a *Token struct.
+func tokenFromInternal(t *internal.Token) *Token {
+ if t == nil {
+ return nil
+ }
+ return &Token{
+ AccessToken: t.AccessToken,
+ TokenType: t.TokenType,
+ RefreshToken: t.RefreshToken,
+ Expiry: t.Expiry,
+ raw: t.Raw,
+ }
+}
+
+// retrieveToken takes a *Config and uses that to retrieve an *internal.Token.
+// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along
+// with an error.
+func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
+ tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v)
+ if err != nil {
+ if rErr, ok := err.(*internal.RetrieveError); ok {
+ return nil, (*RetrieveError)(rErr)
+ }
+ return nil, err
+ }
+ return tokenFromInternal(tk), nil
+}
+
+// RetrieveError is the error returned when the token endpoint returns a
+// non-2XX HTTP status code.
+type RetrieveError struct {
+ Response *http.Response
+ // Body is the body that was consumed by reading Response.Body.
+ // It may be truncated.
+ Body []byte
+}
+
+func (r *RetrieveError) Error() string {
+ return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body)
+}
diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go
new file mode 100644
index 00000000..aa0d34f1
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/transport.go
@@ -0,0 +1,144 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+ "errors"
+ "io"
+ "net/http"
+ "sync"
+)
+
+// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,
+// wrapping a base RoundTripper and adding an Authorization header
+// with a token from the supplied Source.
+//
+// Transport is a low-level mechanism. Most code will use the
+// higher-level Config.Client method instead.
+type Transport struct {
+ // Source supplies the token to add to outgoing requests'
+ // Authorization headers.
+ Source TokenSource
+
+ // Base is the base RoundTripper used to make HTTP requests.
+ // If nil, http.DefaultTransport is used.
+ Base http.RoundTripper
+
+ mu sync.Mutex // guards modReq
+ modReq map[*http.Request]*http.Request // original -> modified
+}
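+
+// Editor's illustrative sketch, not part of the upstream package:
+// constructing a Transport directly instead of going through Config.Client,
+// for example to wrap an existing RoundTripper; base may be nil to fall back
+// to http.DefaultTransport.
+func exampleTransportClient(src TokenSource, base http.RoundTripper) *http.Client {
+ return &http.Client{Transport: &Transport{Source: src, Base: base}}
+}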
+
+// RoundTrip authorizes and authenticates the request with an
+// access token from Transport's Source.
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+ reqBodyClosed := false
+ if req.Body != nil {
+ defer func() {
+ if !reqBodyClosed {
+ req.Body.Close()
+ }
+ }()
+ }
+
+ if t.Source == nil {
+ return nil, errors.New("oauth2: Transport's Source is nil")
+ }
+ token, err := t.Source.Token()
+ if err != nil {
+ return nil, err
+ }
+
+ req2 := cloneRequest(req) // per RoundTripper contract
+ token.SetAuthHeader(req2)
+ t.setModReq(req, req2)
+ res, err := t.base().RoundTrip(req2)
+
+ // req.Body is assumed to have been closed by the base RoundTripper.
+ reqBodyClosed = true
+
+ if err != nil {
+ t.setModReq(req, nil)
+ return nil, err
+ }
+ res.Body = &onEOFReader{
+ rc: res.Body,
+ fn: func() { t.setModReq(req, nil) },
+ }
+ return res, nil
+}
+
+// CancelRequest cancels an in-flight request by closing its connection.
+func (t *Transport) CancelRequest(req *http.Request) {
+ type canceler interface {
+ CancelRequest(*http.Request)
+ }
+ if cr, ok := t.base().(canceler); ok {
+ t.mu.Lock()
+ modReq := t.modReq[req]
+ delete(t.modReq, req)
+ t.mu.Unlock()
+ cr.CancelRequest(modReq)
+ }
+}
+
+func (t *Transport) base() http.RoundTripper {
+ if t.Base != nil {
+ return t.Base
+ }
+ return http.DefaultTransport
+}
+
+func (t *Transport) setModReq(orig, mod *http.Request) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.modReq == nil {
+ t.modReq = make(map[*http.Request]*http.Request)
+ }
+ if mod == nil {
+ delete(t.modReq, orig)
+ } else {
+ t.modReq[orig] = mod
+ }
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+ // shallow copy of the struct
+ r2 := new(http.Request)
+ *r2 = *r
+ // deep copy of the Header
+ r2.Header = make(http.Header, len(r.Header))
+ for k, s := range r.Header {
+ r2.Header[k] = append([]string(nil), s...)
+ }
+ return r2
+}
+
+type onEOFReader struct {
+ rc io.ReadCloser
+ fn func()
+}
+
+func (r *onEOFReader) Read(p []byte) (n int, err error) {
+ n, err = r.rc.Read(p)
+ if err == io.EOF {
+ r.runFunc()
+ }
+ return
+}
+
+func (r *onEOFReader) Close() error {
+ err := r.rc.Close()
+ r.runFunc()
+ return err
+}
+
+func (r *onEOFReader) runFunc() {
+ if fn := r.fn; fn != nil {
+ fn()
+ r.fn = nil
+ }
+}
diff --git a/vendor/google.golang.org/appengine/LICENSE b/vendor/google.golang.org/appengine/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/google.golang.org/appengine/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go
new file mode 100644
index 00000000..16f87c5d
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api.go
@@ -0,0 +1,660 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build go1.7
+
+package internal
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+
+ basepb "google.golang.org/appengine/internal/base"
+ logpb "google.golang.org/appengine/internal/log"
+ remotepb "google.golang.org/appengine/internal/remote_api"
+)
+
+const (
+ apiPath = "/rpc_http"
+ defaultTicketSuffix = "/default.20150612t184001.0"
+)
+
+var (
+ // Incoming headers.
+ ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket")
+ dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo")
+ traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context")
+ curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace")
+ userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP")
+ remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr")
+
+ // Outgoing headers.
+ apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint")
+ apiEndpointHeaderValue = []string{"app-engine-apis"}
+ apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method")
+ apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"}
+ apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline")
+ apiContentType = http.CanonicalHeaderKey("Content-Type")
+ apiContentTypeValue = []string{"application/octet-stream"}
+ logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count")
+
+ apiHTTPClient = &http.Client{
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: limitDial,
+ },
+ }
+
+ defaultTicketOnce sync.Once
+ defaultTicket string
+ backgroundContextOnce sync.Once
+ backgroundContext netcontext.Context
+)
+
+func apiURL() *url.URL {
+ host, port := "appengine.googleapis.internal", "10001"
+ if h := os.Getenv("API_HOST"); h != "" {
+ host = h
+ }
+ if p := os.Getenv("API_PORT"); p != "" {
+ port = p
+ }
+ return &url.URL{
+ Scheme: "http",
+ Host: host + ":" + port,
+ Path: apiPath,
+ }
+}
+
+func handleHTTP(w http.ResponseWriter, r *http.Request) {
+ c := &context{
+ req: r,
+ outHeader: w.Header(),
+ apiURL: apiURL(),
+ }
+ r = r.WithContext(withContext(r.Context(), c))
+ c.req = r
+
+ stopFlushing := make(chan int)
+
+ // Patch up RemoteAddr so it looks reasonable.
+ if addr := r.Header.Get(userIPHeader); addr != "" {
+ r.RemoteAddr = addr
+ } else if addr = r.Header.Get(remoteAddrHeader); addr != "" {
+ r.RemoteAddr = addr
+ } else {
+ // Should not normally reach here, but pick a sensible default anyway.
+ r.RemoteAddr = "127.0.0.1"
+ }
+ // The address in the headers will most likely be of these forms:
+ // 123.123.123.123
+ // 2001:db8::1
+ // net/http.Request.RemoteAddr is specified to be in "IP:port" form.
+ if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil {
+ // Assume the remote address is only a host; add a default port.
+ r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80")
+ }
+
+ // Start goroutine responsible for flushing app logs.
+ // This is done after the request context is set up (and stopped before the request finishes)
+ // because flushing logs requires making an API call.
+ go c.logFlusher(stopFlushing)
+
+ executeRequestSafely(c, r)
+ c.outHeader = nil // make sure header changes aren't respected any more
+
+ stopFlushing <- 1 // any logging beyond this point will be dropped
+
+ // Flush any pending logs asynchronously.
+ c.pendingLogs.Lock()
+ flushes := c.pendingLogs.flushes
+ if len(c.pendingLogs.lines) > 0 {
+ flushes++
+ }
+ c.pendingLogs.Unlock()
+ go c.flushLog(false)
+ w.Header().Set(logFlushHeader, strconv.Itoa(flushes))
+
+ // Avoid nil Write call if c.Write is never called.
+ if c.outCode != 0 {
+ w.WriteHeader(c.outCode)
+ }
+ if c.outBody != nil {
+ w.Write(c.outBody)
+ }
+}
+
+func executeRequestSafely(c *context, r *http.Request) {
+ defer func() {
+ if x := recover(); x != nil {
+ logf(c, 4, "%s", renderPanic(x)) // 4 == critical
+ c.outCode = 500
+ }
+ }()
+
+ http.DefaultServeMux.ServeHTTP(c, r)
+}
+
+func renderPanic(x interface{}) string {
+ buf := make([]byte, 16<<10) // 16 KB should be plenty
+ buf = buf[:runtime.Stack(buf, false)]
+
+ // Remove the first few stack frames:
+ // this func
+ // the recover closure in the caller
+ // That will root the stack trace at the site of the panic.
+ const (
+ skipStart = "internal.renderPanic"
+ skipFrames = 2
+ )
+ start := bytes.Index(buf, []byte(skipStart))
+ p := start
+ for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ {
+ p = bytes.IndexByte(buf[p+1:], '\n') + p + 1
+ if p < 0 {
+ break
+ }
+ }
+ if p >= 0 {
+ // buf[start:p+1] is the block to remove.
+ // Copy buf[p+1:] over buf[start:] and shrink buf.
+ copy(buf[start:], buf[p+1:])
+ buf = buf[:len(buf)-(p+1-start)]
+ }
+
+ // Add panic heading.
+ head := fmt.Sprintf("panic: %v\n\n", x)
+ if len(head) > len(buf) {
+ // Extremely unlikely to happen.
+ return head
+ }
+ copy(buf[len(head):], buf)
+ copy(buf, head)
+
+ return string(buf)
+}
+
+// context represents the context of an in-flight HTTP request.
+// It implements the appengine.Context and http.ResponseWriter interfaces.
+type context struct {
+ req *http.Request
+
+ outCode int
+ outHeader http.Header
+ outBody []byte
+
+ pendingLogs struct {
+ sync.Mutex
+ lines []*logpb.UserAppLogLine
+ flushes int
+ }
+
+ apiURL *url.URL
+}
+
+var contextKey = "holds a *context"
+
+// jointContext joins two contexts in a superficial way.
+// It takes values and timeouts from a base context, and only values from another context.
+type jointContext struct {
+ base netcontext.Context
+ valuesOnly netcontext.Context
+}
+
+func (c jointContext) Deadline() (time.Time, bool) {
+ return c.base.Deadline()
+}
+
+func (c jointContext) Done() <-chan struct{} {
+ return c.base.Done()
+}
+
+func (c jointContext) Err() error {
+ return c.base.Err()
+}
+
+func (c jointContext) Value(key interface{}) interface{} {
+ if val := c.base.Value(key); val != nil {
+ return val
+ }
+ return c.valuesOnly.Value(key)
+}
+
+// fromContext returns the App Engine context or nil if ctx is not
+// derived from an App Engine context.
+func fromContext(ctx netcontext.Context) *context {
+ c, _ := ctx.Value(&contextKey).(*context)
+ return c
+}
+
+func withContext(parent netcontext.Context, c *context) netcontext.Context {
+ ctx := netcontext.WithValue(parent, &contextKey, c)
+ if ns := c.req.Header.Get(curNamespaceHeader); ns != "" {
+ ctx = withNamespace(ctx, ns)
+ }
+ return ctx
+}
+
+func toContext(c *context) netcontext.Context {
+ return withContext(netcontext.Background(), c)
+}
+
+func IncomingHeaders(ctx netcontext.Context) http.Header {
+ if c := fromContext(ctx); c != nil {
+ return c.req.Header
+ }
+ return nil
+}
+
+func ReqContext(req *http.Request) netcontext.Context {
+ return req.Context()
+}
+
+func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
+ return jointContext{
+ base: parent,
+ valuesOnly: req.Context(),
+ }
+}
+
+// DefaultTicket returns a ticket used for background context or dev_appserver.
+func DefaultTicket() string {
+ defaultTicketOnce.Do(func() {
+ if IsDevAppServer() {
+ defaultTicket = "testapp" + defaultTicketSuffix
+ return
+ }
+ appID := partitionlessAppID()
+ escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1)
+ majVersion := VersionID(nil)
+ if i := strings.Index(majVersion, "."); i > 0 {
+ majVersion = majVersion[:i]
+ }
+ defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID())
+ })
+ return defaultTicket
+}
+
+func BackgroundContext() netcontext.Context {
+ backgroundContextOnce.Do(func() {
+ // Compute background security ticket.
+ ticket := DefaultTicket()
+
+ c := &context{
+ req: &http.Request{
+ Header: http.Header{
+ ticketHeader: []string{ticket},
+ },
+ },
+ apiURL: apiURL(),
+ }
+ backgroundContext = toContext(c)
+
+ // TODO(dsymonds): Wire up the shutdown handler to do a final flush.
+ go c.logFlusher(make(chan int))
+ })
+
+ return backgroundContext
+}
+
+// RegisterTestRequest registers the HTTP request req for testing, such that
+// any API calls are sent to the provided URL. It returns a closure to delete
+// the registration.
+// It should only be used by the aetest package.
+func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) {
+ c := &context{
+ req: req,
+ apiURL: apiURL,
+ }
+ ctx := withContext(decorate(req.Context()), c)
+ req = req.WithContext(ctx)
+ c.req = req
+ return req, func() {}
+}
+
+var errTimeout = &CallError{
+ Detail: "Deadline exceeded",
+ Code: int32(remotepb.RpcError_CANCELLED),
+ Timeout: true,
+}
+
+func (c *context) Header() http.Header { return c.outHeader }
+
+// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status
+// codes do not permit a response body (nor response entity headers such as
+// Content-Length, Content-Type, etc).
+func bodyAllowedForStatus(status int) bool {
+ switch {
+ case status >= 100 && status <= 199:
+ return false
+ case status == 204:
+ return false
+ case status == 304:
+ return false
+ }
+ return true
+}
+
+func (c *context) Write(b []byte) (int, error) {
+ if c.outCode == 0 {
+ c.WriteHeader(http.StatusOK)
+ }
+ if len(b) > 0 && !bodyAllowedForStatus(c.outCode) {
+ return 0, http.ErrBodyNotAllowed
+ }
+ c.outBody = append(c.outBody, b...)
+ return len(b), nil
+}
+
+func (c *context) WriteHeader(code int) {
+ if c.outCode != 0 {
+ logf(c, 3, "WriteHeader called multiple times on request.") // error level
+ return
+ }
+ c.outCode = code
+}
+
+func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) {
+ hreq := &http.Request{
+ Method: "POST",
+ URL: c.apiURL,
+ Header: http.Header{
+ apiEndpointHeader: apiEndpointHeaderValue,
+ apiMethodHeader: apiMethodHeaderValue,
+ apiContentType: apiContentTypeValue,
+ apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)},
+ },
+ Body: ioutil.NopCloser(bytes.NewReader(body)),
+ ContentLength: int64(len(body)),
+ Host: c.apiURL.Host,
+ }
+ if info := c.req.Header.Get(dapperHeader); info != "" {
+ hreq.Header.Set(dapperHeader, info)
+ }
+ if info := c.req.Header.Get(traceHeader); info != "" {
+ hreq.Header.Set(traceHeader, info)
+ }
+
+ tr := apiHTTPClient.Transport.(*http.Transport)
+
+ var timedOut int32 // atomic; set to 1 if timed out
+ t := time.AfterFunc(timeout, func() {
+ atomic.StoreInt32(&timedOut, 1)
+ tr.CancelRequest(hreq)
+ })
+ defer t.Stop()
+ defer func() {
+ // Check if timeout was exceeded.
+ if atomic.LoadInt32(&timedOut) != 0 {
+ err = errTimeout
+ }
+ }()
+
+ hresp, err := apiHTTPClient.Do(hreq)
+ if err != nil {
+ return nil, &CallError{
+ Detail: fmt.Sprintf("service bridge HTTP failed: %v", err),
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ defer hresp.Body.Close()
+ hrespBody, err := ioutil.ReadAll(hresp.Body)
+ if hresp.StatusCode != 200 {
+ return nil, &CallError{
+ Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody),
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ if err != nil {
+ return nil, &CallError{
+ Detail: fmt.Sprintf("service bridge response bad: %v", err),
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ return hrespBody, nil
+}
+
+func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
+ if ns := NamespaceFromContext(ctx); ns != "" {
+ if fn, ok := NamespaceMods[service]; ok {
+ fn(in, ns)
+ }
+ }
+
+ if f, ctx, ok := callOverrideFromContext(ctx); ok {
+ return f(ctx, service, method, in, out)
+ }
+
+ // Handle already-done contexts quickly.
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ c := fromContext(ctx)
+ if c == nil {
+ // Give a good error message rather than a panic lower down.
+ return errNotAppEngineContext
+ }
+
+ // Apply transaction modifications if we're in a transaction.
+ if t := transactionFromContext(ctx); t != nil {
+ if t.finished {
+ return errors.New("transaction context has expired")
+ }
+ applyTransaction(in, &t.transaction)
+ }
+
+ // Default RPC timeout is 60s.
+ timeout := 60 * time.Second
+ if deadline, ok := ctx.Deadline(); ok {
+ timeout = deadline.Sub(time.Now())
+ }
+
+ data, err := proto.Marshal(in)
+ if err != nil {
+ return err
+ }
+
+ ticket := c.req.Header.Get(ticketHeader)
+ // Use a test ticket in test environments.
+ if ticket == "" {
+ if appid := ctx.Value(&appIDOverrideKey); appid != nil {
+ ticket = appid.(string) + defaultTicketSuffix
+ }
+ }
+ // Fall back to the background ticket when the request ticket is not available in Flex or dev_appserver.
+ if ticket == "" {
+ ticket = DefaultTicket()
+ }
+ req := &remotepb.Request{
+ ServiceName: &service,
+ Method: &method,
+ Request: data,
+ RequestId: &ticket,
+ }
+ hreqBody, err := proto.Marshal(req)
+ if err != nil {
+ return err
+ }
+
+ hrespBody, err := c.post(hreqBody, timeout)
+ if err != nil {
+ return err
+ }
+
+ res := &remotepb.Response{}
+ if err := proto.Unmarshal(hrespBody, res); err != nil {
+ return err
+ }
+ if res.RpcError != nil {
+ ce := &CallError{
+ Detail: res.RpcError.GetDetail(),
+ Code: *res.RpcError.Code,
+ }
+ switch remotepb.RpcError_ErrorCode(ce.Code) {
+ case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED:
+ ce.Timeout = true
+ }
+ return ce
+ }
+ if res.ApplicationError != nil {
+ return &APIError{
+ Service: *req.ServiceName,
+ Detail: res.ApplicationError.GetDetail(),
+ Code: *res.ApplicationError.Code,
+ }
+ }
+ if res.Exception != nil || res.JavaException != nil {
+ // This shouldn't happen, but let's be defensive.
+ return &CallError{
+ Detail: "service bridge returned exception",
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ return proto.Unmarshal(res.Response, out)
+}
+
+func (c *context) Request() *http.Request {
+ return c.req
+}
+
+func (c *context) addLogLine(ll *logpb.UserAppLogLine) {
+ // Truncate long log lines.
+ // TODO(dsymonds): Check if this is still necessary.
+ const lim = 8 << 10
+ if len(*ll.Message) > lim {
+ suffix := fmt.Sprintf("...(length %d)", len(*ll.Message))
+ ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix)
+ }
+
+ c.pendingLogs.Lock()
+ c.pendingLogs.lines = append(c.pendingLogs.lines, ll)
+ c.pendingLogs.Unlock()
+}
+
+var logLevelName = map[int64]string{
+ 0: "DEBUG",
+ 1: "INFO",
+ 2: "WARNING",
+ 3: "ERROR",
+ 4: "CRITICAL",
+}
+
+func logf(c *context, level int64, format string, args ...interface{}) {
+ if c == nil {
+ panic("not an App Engine context")
+ }
+ s := fmt.Sprintf(format, args...)
+ s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
+ c.addLogLine(&logpb.UserAppLogLine{
+ TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3),
+ Level: &level,
+ Message: &s,
+ })
+ log.Print(logLevelName[level] + ": " + s)
+}
+
+// flushLog attempts to flush any pending logs to the appserver.
+// It should not be called concurrently.
+func (c *context) flushLog(force bool) (flushed bool) {
+ c.pendingLogs.Lock()
+ // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious.
+ n, rem := 0, 30<<20
+ for ; n < len(c.pendingLogs.lines); n++ {
+ ll := c.pendingLogs.lines[n]
+ // Each log line will require about 3 bytes of overhead.
+ nb := proto.Size(ll) + 3
+ if nb > rem {
+ break
+ }
+ rem -= nb
+ }
+ lines := c.pendingLogs.lines[:n]
+ c.pendingLogs.lines = c.pendingLogs.lines[n:]
+ c.pendingLogs.Unlock()
+
+ if len(lines) == 0 && !force {
+ // Nothing to flush.
+ return false
+ }
+
+ rescueLogs := false
+ defer func() {
+ if rescueLogs {
+ c.pendingLogs.Lock()
+ c.pendingLogs.lines = append(lines, c.pendingLogs.lines...)
+ c.pendingLogs.Unlock()
+ }
+ }()
+
+ buf, err := proto.Marshal(&logpb.UserAppLogGroup{
+ LogLine: lines,
+ })
+ if err != nil {
+ log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err)
+ rescueLogs = true
+ return false
+ }
+
+ req := &logpb.FlushRequest{
+ Logs: buf,
+ }
+ res := &basepb.VoidProto{}
+ c.pendingLogs.Lock()
+ c.pendingLogs.flushes++
+ c.pendingLogs.Unlock()
+ if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil {
+ log.Printf("internal.flushLog: Flush RPC: %v", err)
+ rescueLogs = true
+ return false
+ }
+ return true
+}
+
+const (
+ // Log flushing parameters.
+ flushInterval = 1 * time.Second
+ forceFlushInterval = 60 * time.Second
+)
+
+func (c *context) logFlusher(stop <-chan int) {
+ lastFlush := time.Now()
+ tick := time.NewTicker(flushInterval)
+ for {
+ select {
+ case <-stop:
+ // Request finished.
+ tick.Stop()
+ return
+ case <-tick.C:
+ force := time.Now().Sub(lastFlush) > forceFlushInterval
+ if c.flushLog(force) {
+ lastFlush = time.Now()
+ }
+ }
+ }
+}
+
+func ContextForTesting(req *http.Request) netcontext.Context {
+ return toContext(&context{req: req})
+}
diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go
new file mode 100644
index 00000000..f0f40b2e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api_classic.go
@@ -0,0 +1,169 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "time"
+
+ "appengine"
+ "appengine_internal"
+ basepb "appengine_internal/base"
+
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+)
+
+var contextKey = "holds an appengine.Context"
+
+// fromContext returns the App Engine context or nil if ctx is not
+// derived from an App Engine context.
+func fromContext(ctx netcontext.Context) appengine.Context {
+ c, _ := ctx.Value(&contextKey).(appengine.Context)
+ return c
+}
+
+// This is only for classic App Engine adapters.
+func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) {
+ c := fromContext(ctx)
+ if c == nil {
+ return nil, errNotAppEngineContext
+ }
+ return c, nil
+}
+
+func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context {
+ ctx := netcontext.WithValue(parent, &contextKey, c)
+
+ s := &basepb.StringProto{}
+ c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil)
+ if ns := s.GetValue(); ns != "" {
+ ctx = NamespacedContext(ctx, ns)
+ }
+
+ return ctx
+}
+
+func IncomingHeaders(ctx netcontext.Context) http.Header {
+ if c := fromContext(ctx); c != nil {
+ if req, ok := c.Request().(*http.Request); ok {
+ return req.Header
+ }
+ }
+ return nil
+}
+
+func ReqContext(req *http.Request) netcontext.Context {
+ return WithContext(netcontext.Background(), req)
+}
+
+func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
+ c := appengine.NewContext(req)
+ return withContext(parent, c)
+}
+
+type testingContext struct {
+ appengine.Context
+
+ req *http.Request
+}
+
+func (t *testingContext) FullyQualifiedAppID() string { return "dev~testcontext" }
+func (t *testingContext) Call(service, method string, _, _ appengine_internal.ProtoMessage, _ *appengine_internal.CallOptions) error {
+ if service == "__go__" && method == "GetNamespace" {
+ return nil
+ }
+ return fmt.Errorf("testingContext: unsupported Call")
+}
+func (t *testingContext) Request() interface{} { return t.req }
+
+func ContextForTesting(req *http.Request) netcontext.Context {
+ return withContext(netcontext.Background(), &testingContext{req: req})
+}
+
+func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
+ if ns := NamespaceFromContext(ctx); ns != "" {
+ if fn, ok := NamespaceMods[service]; ok {
+ fn(in, ns)
+ }
+ }
+
+ if f, ctx, ok := callOverrideFromContext(ctx); ok {
+ return f(ctx, service, method, in, out)
+ }
+
+ // Handle already-done contexts quickly.
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ c := fromContext(ctx)
+ if c == nil {
+ // Give a good error message rather than a panic lower down.
+ return errNotAppEngineContext
+ }
+
+ // Apply transaction modifications if we're in a transaction.
+ if t := transactionFromContext(ctx); t != nil {
+ if t.finished {
+ return errors.New("transaction context has expired")
+ }
+ applyTransaction(in, &t.transaction)
+ }
+
+ var opts *appengine_internal.CallOptions
+ if d, ok := ctx.Deadline(); ok {
+ opts = &appengine_internal.CallOptions{
+ Timeout: d.Sub(time.Now()),
+ }
+ }
+
+ err := c.Call(service, method, in, out, opts)
+ switch v := err.(type) {
+ case *appengine_internal.APIError:
+ return &APIError{
+ Service: v.Service,
+ Detail: v.Detail,
+ Code: v.Code,
+ }
+ case *appengine_internal.CallError:
+ return &CallError{
+ Detail: v.Detail,
+ Code: v.Code,
+ Timeout: v.Timeout,
+ }
+ }
+ return err
+}
+
+func handleHTTP(w http.ResponseWriter, r *http.Request) {
+ panic("handleHTTP called; this should be impossible")
+}
+
+func logf(c appengine.Context, level int64, format string, args ...interface{}) {
+ var fn func(format string, args ...interface{})
+ switch level {
+ case 0:
+ fn = c.Debugf
+ case 1:
+ fn = c.Infof
+ case 2:
+ fn = c.Warningf
+ case 3:
+ fn = c.Errorf
+ case 4:
+ fn = c.Criticalf
+ default:
+ // This shouldn't happen.
+ fn = c.Criticalf
+ }
+ fn(format, args...)
+}
diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go
new file mode 100644
index 00000000..e0c0b214
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api_common.go
@@ -0,0 +1,123 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "errors"
+ "os"
+
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+)
+
+var errNotAppEngineContext = errors.New("not an App Engine context")
+
+type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error
+
+var callOverrideKey = "holds []CallOverrideFunc"
+
+func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context {
+ // We avoid appending to any existing call override
+ // so we don't risk overwriting a popped stack below.
+ var cofs []CallOverrideFunc
+ if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok {
+ cofs = append(cofs, uf...)
+ }
+ cofs = append(cofs, f)
+ return netcontext.WithValue(ctx, &callOverrideKey, cofs)
+}
+
+func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) {
+ cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc)
+ if len(cofs) == 0 {
+ return nil, nil, false
+ }
+ // We found a list of overrides; grab the last, and reconstitute a
+ // context that will hide it.
+ f := cofs[len(cofs)-1]
+ ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1])
+ return f, ctx, true
+}
+
+type logOverrideFunc func(level int64, format string, args ...interface{})
+
+var logOverrideKey = "holds a logOverrideFunc"
+
+func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context {
+ return netcontext.WithValue(ctx, &logOverrideKey, f)
+}
+
+var appIDOverrideKey = "holds a string, being the full app ID"
+
+func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context {
+ return netcontext.WithValue(ctx, &appIDOverrideKey, appID)
+}
+
+var namespaceKey = "holds the namespace string"
+
+func withNamespace(ctx netcontext.Context, ns string) netcontext.Context {
+ return netcontext.WithValue(ctx, &namespaceKey, ns)
+}
+
+func NamespaceFromContext(ctx netcontext.Context) string {
+ // If there's no namespace, return the empty string.
+ ns, _ := ctx.Value(&namespaceKey).(string)
+ return ns
+}
+
+// FullyQualifiedAppID returns the fully-qualified application ID.
+// This may contain a partition prefix (e.g. "s~" for High Replication apps),
+// or a domain prefix (e.g. "example.com:").
+func FullyQualifiedAppID(ctx netcontext.Context) string {
+ if id, ok := ctx.Value(&appIDOverrideKey).(string); ok {
+ return id
+ }
+ return fullyQualifiedAppID(ctx)
+}
+
+func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) {
+ if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok {
+ f(level, format, args...)
+ return
+ }
+ c := fromContext(ctx)
+ if c == nil {
+ panic(errNotAppEngineContext)
+ }
+ logf(c, level, format, args...)
+}
+
+// NamespacedContext wraps a Context to support namespaces.
+func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context {
+ return withNamespace(ctx, namespace)
+}
+
+// SetTestEnv sets the env variables for testing background ticket in Flex.
+func SetTestEnv() func() {
+ var environ = []struct {
+ key, value string
+ }{
+ {"GAE_LONG_APP_ID", "my-app-id"},
+ {"GAE_MINOR_VERSION", "067924799508853122"},
+ {"GAE_MODULE_INSTANCE", "0"},
+ {"GAE_MODULE_NAME", "default"},
+ {"GAE_MODULE_VERSION", "20150612t184001"},
+ }
+
+ for _, v := range environ {
+ old := os.Getenv(v.key)
+ os.Setenv(v.key, v.value)
+ v.value = old
+ }
+ return func() { // Restore old environment after the test completes.
+ for _, v := range environ {
+ if v.value == "" {
+ os.Unsetenv(v.key)
+ continue
+ }
+ os.Setenv(v.key, v.value)
+ }
+ }
+}
diff --git a/vendor/google.golang.org/appengine/internal/api_pre17.go b/vendor/google.golang.org/appengine/internal/api_pre17.go
new file mode 100644
index 00000000..028b4f05
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api_pre17.go
@@ -0,0 +1,682 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build !go1.7
+
+package internal
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+
+ basepb "google.golang.org/appengine/internal/base"
+ logpb "google.golang.org/appengine/internal/log"
+ remotepb "google.golang.org/appengine/internal/remote_api"
+)
+
+const (
+ apiPath = "/rpc_http"
+ defaultTicketSuffix = "/default.20150612t184001.0"
+)
+
+var (
+ // Incoming headers.
+ ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket")
+ dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo")
+ traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context")
+ curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace")
+ userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP")
+ remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr")
+
+ // Outgoing headers.
+ apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint")
+ apiEndpointHeaderValue = []string{"app-engine-apis"}
+ apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method")
+ apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"}
+ apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline")
+ apiContentType = http.CanonicalHeaderKey("Content-Type")
+ apiContentTypeValue = []string{"application/octet-stream"}
+ logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count")
+
+ apiHTTPClient = &http.Client{
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: limitDial,
+ },
+ }
+
+ defaultTicketOnce sync.Once
+ defaultTicket string
+)
+
+func apiURL() *url.URL {
+ host, port := "appengine.googleapis.internal", "10001"
+ if h := os.Getenv("API_HOST"); h != "" {
+ host = h
+ }
+ if p := os.Getenv("API_PORT"); p != "" {
+ port = p
+ }
+ return &url.URL{
+ Scheme: "http",
+ Host: host + ":" + port,
+ Path: apiPath,
+ }
+}
+
+func handleHTTP(w http.ResponseWriter, r *http.Request) {
+ c := &context{
+ req: r,
+ outHeader: w.Header(),
+ apiURL: apiURL(),
+ }
+ stopFlushing := make(chan int)
+
+ ctxs.Lock()
+ ctxs.m[r] = c
+ ctxs.Unlock()
+ defer func() {
+ ctxs.Lock()
+ delete(ctxs.m, r)
+ ctxs.Unlock()
+ }()
+
+ // Patch up RemoteAddr so it looks reasonable.
+ if addr := r.Header.Get(userIPHeader); addr != "" {
+ r.RemoteAddr = addr
+ } else if addr = r.Header.Get(remoteAddrHeader); addr != "" {
+ r.RemoteAddr = addr
+ } else {
+ // Should not normally reach here, but pick a sensible default anyway.
+ r.RemoteAddr = "127.0.0.1"
+ }
+ // The address in the headers will most likely be of these forms:
+ // 123.123.123.123
+ // 2001:db8::1
+ // net/http.Request.RemoteAddr is specified to be in "IP:port" form.
+ if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil {
+ // Assume the remote address is only a host; add a default port.
+ r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80")
+ }
+
+ // Start goroutine responsible for flushing app logs.
+ // This is done after adding c to ctxs.m (and stopped before removing it)
+ // because flushing logs requires making an API call.
+ go c.logFlusher(stopFlushing)
+
+ executeRequestSafely(c, r)
+ c.outHeader = nil // make sure header changes aren't respected any more
+
+ stopFlushing <- 1 // any logging beyond this point will be dropped
+
+ // Flush any pending logs asynchronously.
+ c.pendingLogs.Lock()
+ flushes := c.pendingLogs.flushes
+ if len(c.pendingLogs.lines) > 0 {
+ flushes++
+ }
+ c.pendingLogs.Unlock()
+ go c.flushLog(false)
+ w.Header().Set(logFlushHeader, strconv.Itoa(flushes))
+
+ // Avoid nil Write call if c.Write is never called.
+ if c.outCode != 0 {
+ w.WriteHeader(c.outCode)
+ }
+ if c.outBody != nil {
+ w.Write(c.outBody)
+ }
+}
+
+func executeRequestSafely(c *context, r *http.Request) {
+ defer func() {
+ if x := recover(); x != nil {
+ logf(c, 4, "%s", renderPanic(x)) // 4 == critical
+ c.outCode = 500
+ }
+ }()
+
+ http.DefaultServeMux.ServeHTTP(c, r)
+}
+
+func renderPanic(x interface{}) string {
+ buf := make([]byte, 16<<10) // 16 KB should be plenty
+ buf = buf[:runtime.Stack(buf, false)]
+
+ // Remove the first few stack frames:
+ // this func
+ // the recover closure in the caller
+ // That will root the stack trace at the site of the panic.
+ const (
+ skipStart = "internal.renderPanic"
+ skipFrames = 2
+ )
+ start := bytes.Index(buf, []byte(skipStart))
+ p := start
+ for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ {
+ p = bytes.IndexByte(buf[p+1:], '\n') + p + 1
+ if p < 0 {
+ break
+ }
+ }
+ if p >= 0 {
+ // buf[start:p+1] is the block to remove.
+ // Copy buf[p+1:] over buf[start:] and shrink buf.
+ copy(buf[start:], buf[p+1:])
+ buf = buf[:len(buf)-(p+1-start)]
+ }
+
+ // Add panic heading.
+ head := fmt.Sprintf("panic: %v\n\n", x)
+ if len(head) > len(buf) {
+ // Extremely unlikely to happen.
+ return head
+ }
+ copy(buf[len(head):], buf)
+ copy(buf, head)
+
+ return string(buf)
+}
+
+var ctxs = struct {
+ sync.Mutex
+ m map[*http.Request]*context
+ bg *context // background context, lazily initialized
+ // dec is used by tests to decorate the netcontext.Context returned
+ // for a given request. This allows tests to add overrides (such as
+ // WithAppIDOverride) to the context. The map is nil outside tests.
+ dec map[*http.Request]func(netcontext.Context) netcontext.Context
+}{
+ m: make(map[*http.Request]*context),
+}
+
+// context represents the context of an in-flight HTTP request.
+// It implements the appengine.Context and http.ResponseWriter interfaces.
+type context struct {
+ req *http.Request
+
+ outCode int
+ outHeader http.Header
+ outBody []byte
+
+ pendingLogs struct {
+ sync.Mutex
+ lines []*logpb.UserAppLogLine
+ flushes int
+ }
+
+ apiURL *url.URL
+}
+
+var contextKey = "holds a *context"
+
+// fromContext returns the App Engine context or nil if ctx is not
+// derived from an App Engine context.
+func fromContext(ctx netcontext.Context) *context {
+ c, _ := ctx.Value(&contextKey).(*context)
+ return c
+}
+
+func withContext(parent netcontext.Context, c *context) netcontext.Context {
+ ctx := netcontext.WithValue(parent, &contextKey, c)
+ if ns := c.req.Header.Get(curNamespaceHeader); ns != "" {
+ ctx = withNamespace(ctx, ns)
+ }
+ return ctx
+}
+
+func toContext(c *context) netcontext.Context {
+ return withContext(netcontext.Background(), c)
+}
+
+func IncomingHeaders(ctx netcontext.Context) http.Header {
+ if c := fromContext(ctx); c != nil {
+ return c.req.Header
+ }
+ return nil
+}
+
+func ReqContext(req *http.Request) netcontext.Context {
+ return WithContext(netcontext.Background(), req)
+}
+
+func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
+ ctxs.Lock()
+ c := ctxs.m[req]
+ d := ctxs.dec[req]
+ ctxs.Unlock()
+
+ if d != nil {
+ parent = d(parent)
+ }
+
+ if c == nil {
+ // Someone passed in an http.Request that is not in-flight.
+ // We panic here rather than panicking at a later point
+ // so that stack traces will be more sensible.
+ log.Panic("appengine: NewContext passed an unknown http.Request")
+ }
+ return withContext(parent, c)
+}
+
+// DefaultTicket returns a ticket used for background context or dev_appserver.
+func DefaultTicket() string {
+ defaultTicketOnce.Do(func() {
+ if IsDevAppServer() {
+ defaultTicket = "testapp" + defaultTicketSuffix
+ return
+ }
+ appID := partitionlessAppID()
+ escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1)
+ majVersion := VersionID(nil)
+ if i := strings.Index(majVersion, "."); i > 0 {
+ majVersion = majVersion[:i]
+ }
+ defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID())
+ })
+ return defaultTicket
+}
+
+func BackgroundContext() netcontext.Context {
+ ctxs.Lock()
+ defer ctxs.Unlock()
+
+ if ctxs.bg != nil {
+ return toContext(ctxs.bg)
+ }
+
+ // Compute background security ticket.
+ ticket := DefaultTicket()
+
+ ctxs.bg = &context{
+ req: &http.Request{
+ Header: http.Header{
+ ticketHeader: []string{ticket},
+ },
+ },
+ apiURL: apiURL(),
+ }
+
+ // TODO(dsymonds): Wire up the shutdown handler to do a final flush.
+ go ctxs.bg.logFlusher(make(chan int))
+
+ return toContext(ctxs.bg)
+}
+
+// RegisterTestRequest registers the HTTP request req for testing, such that
+// any API calls are sent to the provided URL.
+// It returns the request and a closure to delete the registration.
+// It should only be used by the aetest package.
+func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) {
+ c := &context{
+ req: req,
+ apiURL: apiURL,
+ }
+ ctxs.Lock()
+ defer ctxs.Unlock()
+ if _, ok := ctxs.m[req]; ok {
+ log.Panic("req already associated with context")
+ }
+ if _, ok := ctxs.dec[req]; ok {
+ log.Panic("req already associated with context")
+ }
+ if ctxs.dec == nil {
+ ctxs.dec = make(map[*http.Request]func(netcontext.Context) netcontext.Context)
+ }
+ ctxs.m[req] = c
+ ctxs.dec[req] = decorate
+
+ return req, func() {
+ ctxs.Lock()
+ delete(ctxs.m, req)
+ delete(ctxs.dec, req)
+ ctxs.Unlock()
+ }
+}
+
+var errTimeout = &CallError{
+ Detail: "Deadline exceeded",
+ Code: int32(remotepb.RpcError_CANCELLED),
+ Timeout: true,
+}
+
+func (c *context) Header() http.Header { return c.outHeader }
+
+// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status
+// codes do not permit a response body (nor response entity headers such as
+// Content-Length, Content-Type, etc).
+func bodyAllowedForStatus(status int) bool {
+ switch {
+ case status >= 100 && status <= 199:
+ return false
+ case status == 204:
+ return false
+ case status == 304:
+ return false
+ }
+ return true
+}
+
+func (c *context) Write(b []byte) (int, error) {
+ if c.outCode == 0 {
+ c.WriteHeader(http.StatusOK)
+ }
+ if len(b) > 0 && !bodyAllowedForStatus(c.outCode) {
+ return 0, http.ErrBodyNotAllowed
+ }
+ c.outBody = append(c.outBody, b...)
+ return len(b), nil
+}
+
+func (c *context) WriteHeader(code int) {
+ if c.outCode != 0 {
+ logf(c, 3, "WriteHeader called multiple times on request.") // error level
+ return
+ }
+ c.outCode = code
+}
+
+func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) {
+ hreq := &http.Request{
+ Method: "POST",
+ URL: c.apiURL,
+ Header: http.Header{
+ apiEndpointHeader: apiEndpointHeaderValue,
+ apiMethodHeader: apiMethodHeaderValue,
+ apiContentType: apiContentTypeValue,
+ apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)},
+ },
+ Body: ioutil.NopCloser(bytes.NewReader(body)),
+ ContentLength: int64(len(body)),
+ Host: c.apiURL.Host,
+ }
+ if info := c.req.Header.Get(dapperHeader); info != "" {
+ hreq.Header.Set(dapperHeader, info)
+ }
+ if info := c.req.Header.Get(traceHeader); info != "" {
+ hreq.Header.Set(traceHeader, info)
+ }
+
+ tr := apiHTTPClient.Transport.(*http.Transport)
+
+ var timedOut int32 // atomic; set to 1 if timed out
+ t := time.AfterFunc(timeout, func() {
+ atomic.StoreInt32(&timedOut, 1)
+ tr.CancelRequest(hreq)
+ })
+ defer t.Stop()
+ defer func() {
+ // Check if timeout was exceeded.
+ if atomic.LoadInt32(&timedOut) != 0 {
+ err = errTimeout
+ }
+ }()
+
+ hresp, err := apiHTTPClient.Do(hreq)
+ if err != nil {
+ return nil, &CallError{
+ Detail: fmt.Sprintf("service bridge HTTP failed: %v", err),
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ defer hresp.Body.Close()
+ hrespBody, err := ioutil.ReadAll(hresp.Body)
+ if hresp.StatusCode != 200 {
+ return nil, &CallError{
+ Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody),
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ if err != nil {
+ return nil, &CallError{
+ Detail: fmt.Sprintf("service bridge response bad: %v", err),
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ return hrespBody, nil
+}
+
+func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
+ if ns := NamespaceFromContext(ctx); ns != "" {
+ if fn, ok := NamespaceMods[service]; ok {
+ fn(in, ns)
+ }
+ }
+
+ if f, ctx, ok := callOverrideFromContext(ctx); ok {
+ return f(ctx, service, method, in, out)
+ }
+
+ // Handle already-done contexts quickly.
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ c := fromContext(ctx)
+ if c == nil {
+ // Give a good error message rather than a panic lower down.
+ return errNotAppEngineContext
+ }
+
+ // Apply transaction modifications if we're in a transaction.
+ if t := transactionFromContext(ctx); t != nil {
+ if t.finished {
+ return errors.New("transaction context has expired")
+ }
+ applyTransaction(in, &t.transaction)
+ }
+
+ // Default RPC timeout is 60s.
+ timeout := 60 * time.Second
+ if deadline, ok := ctx.Deadline(); ok {
+ timeout = deadline.Sub(time.Now())
+ }
+
+ data, err := proto.Marshal(in)
+ if err != nil {
+ return err
+ }
+
+ ticket := c.req.Header.Get(ticketHeader)
+ // Use a test ticket when running in a test environment.
+ if ticket == "" {
+ if appid := ctx.Value(&appIDOverrideKey); appid != nil {
+ ticket = appid.(string) + defaultTicketSuffix
+ }
+ }
+ // Fall back to using the background ticket when the request ticket is not available, e.g. in Flex or dev_appserver.
+ if ticket == "" {
+ ticket = DefaultTicket()
+ }
+ req := &remotepb.Request{
+ ServiceName: &service,
+ Method: &method,
+ Request: data,
+ RequestId: &ticket,
+ }
+ hreqBody, err := proto.Marshal(req)
+ if err != nil {
+ return err
+ }
+
+ hrespBody, err := c.post(hreqBody, timeout)
+ if err != nil {
+ return err
+ }
+
+ res := &remotepb.Response{}
+ if err := proto.Unmarshal(hrespBody, res); err != nil {
+ return err
+ }
+ if res.RpcError != nil {
+ ce := &CallError{
+ Detail: res.RpcError.GetDetail(),
+ Code: *res.RpcError.Code,
+ }
+ switch remotepb.RpcError_ErrorCode(ce.Code) {
+ case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED:
+ ce.Timeout = true
+ }
+ return ce
+ }
+ if res.ApplicationError != nil {
+ return &APIError{
+ Service: *req.ServiceName,
+ Detail: res.ApplicationError.GetDetail(),
+ Code: *res.ApplicationError.Code,
+ }
+ }
+ if res.Exception != nil || res.JavaException != nil {
+ // This shouldn't happen, but let's be defensive.
+ return &CallError{
+ Detail: "service bridge returned exception",
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ return proto.Unmarshal(res.Response, out)
+}
+
+func (c *context) Request() *http.Request {
+ return c.req
+}
+
+func (c *context) addLogLine(ll *logpb.UserAppLogLine) {
+ // Truncate long log lines.
+ // TODO(dsymonds): Check if this is still necessary.
+ const lim = 8 << 10
+ if len(*ll.Message) > lim {
+ suffix := fmt.Sprintf("...(length %d)", len(*ll.Message))
+ ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix)
+ }
+
+ c.pendingLogs.Lock()
+ c.pendingLogs.lines = append(c.pendingLogs.lines, ll)
+ c.pendingLogs.Unlock()
+}
+
+var logLevelName = map[int64]string{
+ 0: "DEBUG",
+ 1: "INFO",
+ 2: "WARNING",
+ 3: "ERROR",
+ 4: "CRITICAL",
+}
+
+func logf(c *context, level int64, format string, args ...interface{}) {
+ if c == nil {
+ panic("not an App Engine context")
+ }
+ s := fmt.Sprintf(format, args...)
+ s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
+ c.addLogLine(&logpb.UserAppLogLine{
+ TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3),
+ Level: &level,
+ Message: &s,
+ })
+ log.Print(logLevelName[level] + ": " + s)
+}
+
+// flushLog attempts to flush any pending logs to the appserver.
+// It should not be called concurrently.
+func (c *context) flushLog(force bool) (flushed bool) {
+ c.pendingLogs.Lock()
+ // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious.
+ n, rem := 0, 30<<20
+ for ; n < len(c.pendingLogs.lines); n++ {
+ ll := c.pendingLogs.lines[n]
+ // Each log line will require about 3 bytes of overhead.
+ nb := proto.Size(ll) + 3
+ if nb > rem {
+ break
+ }
+ rem -= nb
+ }
+ lines := c.pendingLogs.lines[:n]
+ c.pendingLogs.lines = c.pendingLogs.lines[n:]
+ c.pendingLogs.Unlock()
+
+ if len(lines) == 0 && !force {
+ // Nothing to flush.
+ return false
+ }
+
+ rescueLogs := false
+ defer func() {
+ if rescueLogs {
+ c.pendingLogs.Lock()
+ c.pendingLogs.lines = append(lines, c.pendingLogs.lines...)
+ c.pendingLogs.Unlock()
+ }
+ }()
+
+ buf, err := proto.Marshal(&logpb.UserAppLogGroup{
+ LogLine: lines,
+ })
+ if err != nil {
+ log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err)
+ rescueLogs = true
+ return false
+ }
+
+ req := &logpb.FlushRequest{
+ Logs: buf,
+ }
+ res := &basepb.VoidProto{}
+ c.pendingLogs.Lock()
+ c.pendingLogs.flushes++
+ c.pendingLogs.Unlock()
+ if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil {
+ log.Printf("internal.flushLog: Flush RPC: %v", err)
+ rescueLogs = true
+ return false
+ }
+ return true
+}
+
+const (
+ // Log flushing parameters.
+ flushInterval = 1 * time.Second
+ forceFlushInterval = 60 * time.Second
+)
+
+func (c *context) logFlusher(stop <-chan int) {
+ lastFlush := time.Now()
+ tick := time.NewTicker(flushInterval)
+ for {
+ select {
+ case <-stop:
+ // Request finished.
+ tick.Stop()
+ return
+ case <-tick.C:
+ force := time.Now().Sub(lastFlush) > forceFlushInterval
+ if c.flushLog(force) {
+ lastFlush = time.Now()
+ }
+ }
+ }
+}
+
+func ContextForTesting(req *http.Request) netcontext.Context {
+ return toContext(&context{req: req})
+}
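Note on the file above: WithContext associates an in-flight *http.Request with its *context, and Call marshals the request proto, posts it to the API service bridge via (*context).post, and unmarshals the bridge's reply. As a rough, illustrative sketch only (not part of the vendored file; the service name, method name, and helper are hypothetical), a caller inside this package would drive it much like the logservice Flush call in flushLog:

	// Hypothetical helper; "someservice"/"Echo" are placeholder names.
	func echo(ctx netcontext.Context, s string) (string, error) {
		req := &basepb.StringProto{Value: proto.String(s)}
		res := &basepb.StringProto{}
		// Call marshals req into a remotepb.Request carrying the request
		// ticket, posts it to the service bridge, and unmarshals the
		// response payload into res.
		if err := Call(ctx, "someservice", "Echo", req, res); err != nil {
			return "", err
		}
		return res.GetValue(), nil
	}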
diff --git a/vendor/google.golang.org/appengine/internal/app_id.go b/vendor/google.golang.org/appengine/internal/app_id.go
new file mode 100644
index 00000000..11df8c07
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/app_id.go
@@ -0,0 +1,28 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "strings"
+)
+
+func parseFullAppID(appid string) (partition, domain, displayID string) {
+ if i := strings.Index(appid, "~"); i != -1 {
+ partition, appid = appid[:i], appid[i+1:]
+ }
+ if i := strings.Index(appid, ":"); i != -1 {
+ domain, appid = appid[:i], appid[i+1:]
+ }
+ return partition, domain, appid
+}
+
+// appID returns "appid" or "domain.com:appid".
+func appID(fullAppID string) string {
+ _, dom, dis := parseFullAppID(fullAppID)
+ if dom != "" {
+ return dom + ":" + dis
+ }
+ return dis
+}
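For reference, the behavior implied by the two functions above (derived directly from the parsing code; shown here only as an illustration, not part of the vendored file):

	// parseFullAppID("s~example.com:myapp") -> ("s", "example.com", "myapp")
	// parseFullAppID("myapp")               -> ("", "", "myapp")
	// appID("s~example.com:myapp")          -> "example.com:myapp"
	// appID("s~myapp")                      -> "myapp"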
diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
new file mode 100644
index 00000000..db4777e6
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
@@ -0,0 +1,308 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google.golang.org/appengine/internal/base/api_base.proto
+
+package base
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type StringProto struct {
+ Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *StringProto) Reset() { *m = StringProto{} }
+func (m *StringProto) String() string { return proto.CompactTextString(m) }
+func (*StringProto) ProtoMessage() {}
+func (*StringProto) Descriptor() ([]byte, []int) {
+ return fileDescriptor_api_base_9d49f8792e0c1140, []int{0}
+}
+func (m *StringProto) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_StringProto.Unmarshal(m, b)
+}
+func (m *StringProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_StringProto.Marshal(b, m, deterministic)
+}
+func (dst *StringProto) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StringProto.Merge(dst, src)
+}
+func (m *StringProto) XXX_Size() int {
+ return xxx_messageInfo_StringProto.Size(m)
+}
+func (m *StringProto) XXX_DiscardUnknown() {
+ xxx_messageInfo_StringProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StringProto proto.InternalMessageInfo
+
+func (m *StringProto) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type Integer32Proto struct {
+ Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Integer32Proto) Reset() { *m = Integer32Proto{} }
+func (m *Integer32Proto) String() string { return proto.CompactTextString(m) }
+func (*Integer32Proto) ProtoMessage() {}
+func (*Integer32Proto) Descriptor() ([]byte, []int) {
+ return fileDescriptor_api_base_9d49f8792e0c1140, []int{1}
+}
+func (m *Integer32Proto) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Integer32Proto.Unmarshal(m, b)
+}
+func (m *Integer32Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Integer32Proto.Marshal(b, m, deterministic)
+}
+func (dst *Integer32Proto) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Integer32Proto.Merge(dst, src)
+}
+func (m *Integer32Proto) XXX_Size() int {
+ return xxx_messageInfo_Integer32Proto.Size(m)
+}
+func (m *Integer32Proto) XXX_DiscardUnknown() {
+ xxx_messageInfo_Integer32Proto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Integer32Proto proto.InternalMessageInfo
+
+func (m *Integer32Proto) GetValue() int32 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Integer64Proto struct {
+ Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Integer64Proto) Reset() { *m = Integer64Proto{} }
+func (m *Integer64Proto) String() string { return proto.CompactTextString(m) }
+func (*Integer64Proto) ProtoMessage() {}
+func (*Integer64Proto) Descriptor() ([]byte, []int) {
+ return fileDescriptor_api_base_9d49f8792e0c1140, []int{2}
+}
+func (m *Integer64Proto) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Integer64Proto.Unmarshal(m, b)
+}
+func (m *Integer64Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Integer64Proto.Marshal(b, m, deterministic)
+}
+func (dst *Integer64Proto) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Integer64Proto.Merge(dst, src)
+}
+func (m *Integer64Proto) XXX_Size() int {
+ return xxx_messageInfo_Integer64Proto.Size(m)
+}
+func (m *Integer64Proto) XXX_DiscardUnknown() {
+ xxx_messageInfo_Integer64Proto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Integer64Proto proto.InternalMessageInfo
+
+func (m *Integer64Proto) GetValue() int64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type BoolProto struct {
+ Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *BoolProto) Reset() { *m = BoolProto{} }
+func (m *BoolProto) String() string { return proto.CompactTextString(m) }
+func (*BoolProto) ProtoMessage() {}
+func (*BoolProto) Descriptor() ([]byte, []int) {
+ return fileDescriptor_api_base_9d49f8792e0c1140, []int{3}
+}
+func (m *BoolProto) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_BoolProto.Unmarshal(m, b)
+}
+func (m *BoolProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_BoolProto.Marshal(b, m, deterministic)
+}
+func (dst *BoolProto) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BoolProto.Merge(dst, src)
+}
+func (m *BoolProto) XXX_Size() int {
+ return xxx_messageInfo_BoolProto.Size(m)
+}
+func (m *BoolProto) XXX_DiscardUnknown() {
+ xxx_messageInfo_BoolProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BoolProto proto.InternalMessageInfo
+
+func (m *BoolProto) GetValue() bool {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return false
+}
+
+type DoubleProto struct {
+ Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DoubleProto) Reset() { *m = DoubleProto{} }
+func (m *DoubleProto) String() string { return proto.CompactTextString(m) }
+func (*DoubleProto) ProtoMessage() {}
+func (*DoubleProto) Descriptor() ([]byte, []int) {
+ return fileDescriptor_api_base_9d49f8792e0c1140, []int{4}
+}
+func (m *DoubleProto) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DoubleProto.Unmarshal(m, b)
+}
+func (m *DoubleProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DoubleProto.Marshal(b, m, deterministic)
+}
+func (dst *DoubleProto) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DoubleProto.Merge(dst, src)
+}
+func (m *DoubleProto) XXX_Size() int {
+ return xxx_messageInfo_DoubleProto.Size(m)
+}
+func (m *DoubleProto) XXX_DiscardUnknown() {
+ xxx_messageInfo_DoubleProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DoubleProto proto.InternalMessageInfo
+
+func (m *DoubleProto) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type BytesProto struct {
+ Value []byte `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *BytesProto) Reset() { *m = BytesProto{} }
+func (m *BytesProto) String() string { return proto.CompactTextString(m) }
+func (*BytesProto) ProtoMessage() {}
+func (*BytesProto) Descriptor() ([]byte, []int) {
+ return fileDescriptor_api_base_9d49f8792e0c1140, []int{5}
+}
+func (m *BytesProto) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_BytesProto.Unmarshal(m, b)
+}
+func (m *BytesProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_BytesProto.Marshal(b, m, deterministic)
+}
+func (dst *BytesProto) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BytesProto.Merge(dst, src)
+}
+func (m *BytesProto) XXX_Size() int {
+ return xxx_messageInfo_BytesProto.Size(m)
+}
+func (m *BytesProto) XXX_DiscardUnknown() {
+ xxx_messageInfo_BytesProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BytesProto proto.InternalMessageInfo
+
+func (m *BytesProto) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type VoidProto struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *VoidProto) Reset() { *m = VoidProto{} }
+func (m *VoidProto) String() string { return proto.CompactTextString(m) }
+func (*VoidProto) ProtoMessage() {}
+func (*VoidProto) Descriptor() ([]byte, []int) {
+ return fileDescriptor_api_base_9d49f8792e0c1140, []int{6}
+}
+func (m *VoidProto) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_VoidProto.Unmarshal(m, b)
+}
+func (m *VoidProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_VoidProto.Marshal(b, m, deterministic)
+}
+func (dst *VoidProto) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_VoidProto.Merge(dst, src)
+}
+func (m *VoidProto) XXX_Size() int {
+ return xxx_messageInfo_VoidProto.Size(m)
+}
+func (m *VoidProto) XXX_DiscardUnknown() {
+ xxx_messageInfo_VoidProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VoidProto proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*StringProto)(nil), "appengine.base.StringProto")
+ proto.RegisterType((*Integer32Proto)(nil), "appengine.base.Integer32Proto")
+ proto.RegisterType((*Integer64Proto)(nil), "appengine.base.Integer64Proto")
+ proto.RegisterType((*BoolProto)(nil), "appengine.base.BoolProto")
+ proto.RegisterType((*DoubleProto)(nil), "appengine.base.DoubleProto")
+ proto.RegisterType((*BytesProto)(nil), "appengine.base.BytesProto")
+ proto.RegisterType((*VoidProto)(nil), "appengine.base.VoidProto")
+}
+
+func init() {
+ proto.RegisterFile("google.golang.org/appengine/internal/base/api_base.proto", fileDescriptor_api_base_9d49f8792e0c1140)
+}
+
+var fileDescriptor_api_base_9d49f8792e0c1140 = []byte{
+ // 199 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0xcf, 0x3f, 0x4b, 0xc6, 0x30,
+ 0x10, 0x06, 0x70, 0x5a, 0xad, 0xb4, 0x57, 0xe9, 0x20, 0x0e, 0x1d, 0xb5, 0x05, 0x71, 0x4a, 0x40,
+ 0x45, 0x9c, 0x83, 0x8b, 0x9b, 0x28, 0x38, 0xb8, 0x48, 0x8a, 0xc7, 0x11, 0x08, 0xb9, 0x90, 0xa6,
+ 0x82, 0xdf, 0x5e, 0xda, 0xd2, 0xfa, 0xc2, 0x9b, 0xed, 0xfe, 0xfc, 0xe0, 0xe1, 0x81, 0x27, 0x62,
+ 0x26, 0x8b, 0x82, 0xd8, 0x6a, 0x47, 0x82, 0x03, 0x49, 0xed, 0x3d, 0x3a, 0x32, 0x0e, 0xa5, 0x71,
+ 0x11, 0x83, 0xd3, 0x56, 0x0e, 0x7a, 0x44, 0xa9, 0xbd, 0xf9, 0x9a, 0x07, 0xe1, 0x03, 0x47, 0xbe,
+ 0x68, 0x76, 0x27, 0xe6, 0x6b, 0xd7, 0x43, 0xfd, 0x1e, 0x83, 0x71, 0xf4, 0xba, 0xbc, 0x2f, 0xa1,
+ 0xf8, 0xd1, 0x76, 0xc2, 0x36, 0xbb, 0xca, 0x6f, 0xab, 0xb7, 0x75, 0xe9, 0x6e, 0xa0, 0x79, 0x71,
+ 0x11, 0x09, 0xc3, 0xfd, 0x5d, 0xc2, 0x15, 0xc7, 0xee, 0xf1, 0x21, 0xe1, 0x4e, 0x36, 0x77, 0x0d,
+ 0x95, 0x62, 0xb6, 0x09, 0x52, 0x6e, 0xa4, 0x87, 0xfa, 0x99, 0xa7, 0xc1, 0x62, 0x02, 0x65, 0xff,
+ 0x79, 0xa0, 0x7e, 0x23, 0x8e, 0xab, 0x69, 0x0f, 0xcd, 0xb9, 0xca, 0xcb, 0xdd, 0xd5, 0x50, 0x7d,
+ 0xb0, 0xf9, 0x5e, 0x98, 0x3a, 0xfb, 0x3c, 0x9d, 0x9b, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xba,
+ 0x37, 0x25, 0xea, 0x44, 0x01, 0x00, 0x00,
+}
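These generated wrappers are the simple request/response payloads used by the service bridge (flushLog above, for example, uses VoidProto as its Flush response). A minimal, illustrative sketch of building and reading one of them with the accessors generated in this file (not part of the vendored code):

	// Construct, marshal, and re-read a StringProto using the generated API.
	msg := &base.StringProto{Value: proto.String("hello")}
	data, err := proto.Marshal(msg) // wire form carried in remotepb.Request/Response
	if err != nil {
		// handle the marshal error
	}
	out := &base.StringProto{}
	if err := proto.Unmarshal(data, out); err != nil {
		// handle the unmarshal error
	}
	_ = out.GetValue() // "hello"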
diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.proto b/vendor/google.golang.org/appengine/internal/base/api_base.proto
new file mode 100644
index 00000000..56cd7a3c
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/base/api_base.proto
@@ -0,0 +1,33 @@
+// Built-in base types for API calls. Primarily useful as return types.
+
+syntax = "proto2";
+option go_package = "base";
+
+package appengine.base;
+
+message StringProto {
+ required string value = 1;
+}
+
+message Integer32Proto {
+ required int32 value = 1;
+}
+
+message Integer64Proto {
+ required int64 value = 1;
+}
+
+message BoolProto {
+ required bool value = 1;
+}
+
+message DoubleProto {
+ required double value = 1;
+}
+
+message BytesProto {
+ required bytes value = 1 [ctype=CORD];
+}
+
+message VoidProto {
+}
diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
new file mode 100644
index 00000000..2fb74828
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
@@ -0,0 +1,4367 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto
+
+package datastore
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type Property_Meaning int32
+
+const (
+ Property_NO_MEANING Property_Meaning = 0
+ Property_BLOB Property_Meaning = 14
+ Property_TEXT Property_Meaning = 15
+ Property_BYTESTRING Property_Meaning = 16
+ Property_ATOM_CATEGORY Property_Meaning = 1
+ Property_ATOM_LINK Property_Meaning = 2
+ Property_ATOM_TITLE Property_Meaning = 3
+ Property_ATOM_CONTENT Property_Meaning = 4
+ Property_ATOM_SUMMARY Property_Meaning = 5
+ Property_ATOM_AUTHOR Property_Meaning = 6
+ Property_GD_WHEN Property_Meaning = 7
+ Property_GD_EMAIL Property_Meaning = 8
+ Property_GEORSS_POINT Property_Meaning = 9
+ Property_GD_IM Property_Meaning = 10
+ Property_GD_PHONENUMBER Property_Meaning = 11
+ Property_GD_POSTALADDRESS Property_Meaning = 12
+ Property_GD_RATING Property_Meaning = 13
+ Property_BLOBKEY Property_Meaning = 17
+ Property_ENTITY_PROTO Property_Meaning = 19
+ Property_INDEX_VALUE Property_Meaning = 18
+)
+
+var Property_Meaning_name = map[int32]string{
+ 0: "NO_MEANING",
+ 14: "BLOB",
+ 15: "TEXT",
+ 16: "BYTESTRING",
+ 1: "ATOM_CATEGORY",
+ 2: "ATOM_LINK",
+ 3: "ATOM_TITLE",
+ 4: "ATOM_CONTENT",
+ 5: "ATOM_SUMMARY",
+ 6: "ATOM_AUTHOR",
+ 7: "GD_WHEN",
+ 8: "GD_EMAIL",
+ 9: "GEORSS_POINT",
+ 10: "GD_IM",
+ 11: "GD_PHONENUMBER",
+ 12: "GD_POSTALADDRESS",
+ 13: "GD_RATING",
+ 17: "BLOBKEY",
+ 19: "ENTITY_PROTO",
+ 18: "INDEX_VALUE",
+}
+var Property_Meaning_value = map[string]int32{
+ "NO_MEANING": 0,
+ "BLOB": 14,
+ "TEXT": 15,
+ "BYTESTRING": 16,
+ "ATOM_CATEGORY": 1,
+ "ATOM_LINK": 2,
+ "ATOM_TITLE": 3,
+ "ATOM_CONTENT": 4,
+ "ATOM_SUMMARY": 5,
+ "ATOM_AUTHOR": 6,
+ "GD_WHEN": 7,
+ "GD_EMAIL": 8,
+ "GEORSS_POINT": 9,
+ "GD_IM": 10,
+ "GD_PHONENUMBER": 11,
+ "GD_POSTALADDRESS": 12,
+ "GD_RATING": 13,
+ "BLOBKEY": 17,
+ "ENTITY_PROTO": 19,
+ "INDEX_VALUE": 18,
+}
+
+func (x Property_Meaning) Enum() *Property_Meaning {
+ p := new(Property_Meaning)
+ *p = x
+ return p
+}
+func (x Property_Meaning) String() string {
+ return proto.EnumName(Property_Meaning_name, int32(x))
+}
+func (x *Property_Meaning) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning")
+ if err != nil {
+ return err
+ }
+ *x = Property_Meaning(value)
+ return nil
+}
+func (Property_Meaning) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2, 0}
+}
+
+type Property_FtsTokenizationOption int32
+
+const (
+ Property_HTML Property_FtsTokenizationOption = 1
+ Property_ATOM Property_FtsTokenizationOption = 2
+)
+
+var Property_FtsTokenizationOption_name = map[int32]string{
+ 1: "HTML",
+ 2: "ATOM",
+}
+var Property_FtsTokenizationOption_value = map[string]int32{
+ "HTML": 1,
+ "ATOM": 2,
+}
+
+func (x Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption {
+ p := new(Property_FtsTokenizationOption)
+ *p = x
+ return p
+}
+func (x Property_FtsTokenizationOption) String() string {
+ return proto.EnumName(Property_FtsTokenizationOption_name, int32(x))
+}
+func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, "Property_FtsTokenizationOption")
+ if err != nil {
+ return err
+ }
+ *x = Property_FtsTokenizationOption(value)
+ return nil
+}
+func (Property_FtsTokenizationOption) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2, 1}
+}
+
+type EntityProto_Kind int32
+
+const (
+ EntityProto_GD_CONTACT EntityProto_Kind = 1
+ EntityProto_GD_EVENT EntityProto_Kind = 2
+ EntityProto_GD_MESSAGE EntityProto_Kind = 3
+)
+
+var EntityProto_Kind_name = map[int32]string{
+ 1: "GD_CONTACT",
+ 2: "GD_EVENT",
+ 3: "GD_MESSAGE",
+}
+var EntityProto_Kind_value = map[string]int32{
+ "GD_CONTACT": 1,
+ "GD_EVENT": 2,
+ "GD_MESSAGE": 3,
+}
+
+func (x EntityProto_Kind) Enum() *EntityProto_Kind {
+ p := new(EntityProto_Kind)
+ *p = x
+ return p
+}
+func (x EntityProto_Kind) String() string {
+ return proto.EnumName(EntityProto_Kind_name, int32(x))
+}
+func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind")
+ if err != nil {
+ return err
+ }
+ *x = EntityProto_Kind(value)
+ return nil
+}
+func (EntityProto_Kind) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{6, 0}
+}
+
+type Index_Property_Direction int32
+
+const (
+ Index_Property_ASCENDING Index_Property_Direction = 1
+ Index_Property_DESCENDING Index_Property_Direction = 2
+)
+
+var Index_Property_Direction_name = map[int32]string{
+ 1: "ASCENDING",
+ 2: "DESCENDING",
+}
+var Index_Property_Direction_value = map[string]int32{
+ "ASCENDING": 1,
+ "DESCENDING": 2,
+}
+
+func (x Index_Property_Direction) Enum() *Index_Property_Direction {
+ p := new(Index_Property_Direction)
+ *p = x
+ return p
+}
+func (x Index_Property_Direction) String() string {
+ return proto.EnumName(Index_Property_Direction_name, int32(x))
+}
+func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction")
+ if err != nil {
+ return err
+ }
+ *x = Index_Property_Direction(value)
+ return nil
+}
+func (Index_Property_Direction) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8, 0, 0}
+}
+
+type CompositeIndex_State int32
+
+const (
+ CompositeIndex_WRITE_ONLY CompositeIndex_State = 1
+ CompositeIndex_READ_WRITE CompositeIndex_State = 2
+ CompositeIndex_DELETED CompositeIndex_State = 3
+ CompositeIndex_ERROR CompositeIndex_State = 4
+)
+
+var CompositeIndex_State_name = map[int32]string{
+ 1: "WRITE_ONLY",
+ 2: "READ_WRITE",
+ 3: "DELETED",
+ 4: "ERROR",
+}
+var CompositeIndex_State_value = map[string]int32{
+ "WRITE_ONLY": 1,
+ "READ_WRITE": 2,
+ "DELETED": 3,
+ "ERROR": 4,
+}
+
+func (x CompositeIndex_State) Enum() *CompositeIndex_State {
+ p := new(CompositeIndex_State)
+ *p = x
+ return p
+}
+func (x CompositeIndex_State) String() string {
+ return proto.EnumName(CompositeIndex_State_name, int32(x))
+}
+func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State")
+ if err != nil {
+ return err
+ }
+ *x = CompositeIndex_State(value)
+ return nil
+}
+func (CompositeIndex_State) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{9, 0}
+}
+
+type Snapshot_Status int32
+
+const (
+ Snapshot_INACTIVE Snapshot_Status = 0
+ Snapshot_ACTIVE Snapshot_Status = 1
+)
+
+var Snapshot_Status_name = map[int32]string{
+ 0: "INACTIVE",
+ 1: "ACTIVE",
+}
+var Snapshot_Status_value = map[string]int32{
+ "INACTIVE": 0,
+ "ACTIVE": 1,
+}
+
+func (x Snapshot_Status) Enum() *Snapshot_Status {
+ p := new(Snapshot_Status)
+ *p = x
+ return p
+}
+func (x Snapshot_Status) String() string {
+ return proto.EnumName(Snapshot_Status_name, int32(x))
+}
+func (x *Snapshot_Status) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status")
+ if err != nil {
+ return err
+ }
+ *x = Snapshot_Status(value)
+ return nil
+}
+func (Snapshot_Status) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{12, 0}
+}
+
+type Query_Hint int32
+
+const (
+ Query_ORDER_FIRST Query_Hint = 1
+ Query_ANCESTOR_FIRST Query_Hint = 2
+ Query_FILTER_FIRST Query_Hint = 3
+)
+
+var Query_Hint_name = map[int32]string{
+ 1: "ORDER_FIRST",
+ 2: "ANCESTOR_FIRST",
+ 3: "FILTER_FIRST",
+}
+var Query_Hint_value = map[string]int32{
+ "ORDER_FIRST": 1,
+ "ANCESTOR_FIRST": 2,
+ "FILTER_FIRST": 3,
+}
+
+func (x Query_Hint) Enum() *Query_Hint {
+ p := new(Query_Hint)
+ *p = x
+ return p
+}
+func (x Query_Hint) String() string {
+ return proto.EnumName(Query_Hint_name, int32(x))
+}
+func (x *Query_Hint) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint")
+ if err != nil {
+ return err
+ }
+ *x = Query_Hint(value)
+ return nil
+}
+func (Query_Hint) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0}
+}
+
+type Query_Filter_Operator int32
+
+const (
+ Query_Filter_LESS_THAN Query_Filter_Operator = 1
+ Query_Filter_LESS_THAN_OR_EQUAL Query_Filter_Operator = 2
+ Query_Filter_GREATER_THAN Query_Filter_Operator = 3
+ Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4
+ Query_Filter_EQUAL Query_Filter_Operator = 5
+ Query_Filter_IN Query_Filter_Operator = 6
+ Query_Filter_EXISTS Query_Filter_Operator = 7
+)
+
+var Query_Filter_Operator_name = map[int32]string{
+ 1: "LESS_THAN",
+ 2: "LESS_THAN_OR_EQUAL",
+ 3: "GREATER_THAN",
+ 4: "GREATER_THAN_OR_EQUAL",
+ 5: "EQUAL",
+ 6: "IN",
+ 7: "EXISTS",
+}
+var Query_Filter_Operator_value = map[string]int32{
+ "LESS_THAN": 1,
+ "LESS_THAN_OR_EQUAL": 2,
+ "GREATER_THAN": 3,
+ "GREATER_THAN_OR_EQUAL": 4,
+ "EQUAL": 5,
+ "IN": 6,
+ "EXISTS": 7,
+}
+
+func (x Query_Filter_Operator) Enum() *Query_Filter_Operator {
+ p := new(Query_Filter_Operator)
+ *p = x
+ return p
+}
+func (x Query_Filter_Operator) String() string {
+ return proto.EnumName(Query_Filter_Operator_name, int32(x))
+}
+func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator")
+ if err != nil {
+ return err
+ }
+ *x = Query_Filter_Operator(value)
+ return nil
+}
+func (Query_Filter_Operator) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0, 0}
+}
+
+type Query_Order_Direction int32
+
+const (
+ Query_Order_ASCENDING Query_Order_Direction = 1
+ Query_Order_DESCENDING Query_Order_Direction = 2
+)
+
+var Query_Order_Direction_name = map[int32]string{
+ 1: "ASCENDING",
+ 2: "DESCENDING",
+}
+var Query_Order_Direction_value = map[string]int32{
+ "ASCENDING": 1,
+ "DESCENDING": 2,
+}
+
+func (x Query_Order_Direction) Enum() *Query_Order_Direction {
+ p := new(Query_Order_Direction)
+ *p = x
+ return p
+}
+func (x Query_Order_Direction) String() string {
+ return proto.EnumName(Query_Order_Direction_name, int32(x))
+}
+func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction")
+ if err != nil {
+ return err
+ }
+ *x = Query_Order_Direction(value)
+ return nil
+}
+func (Query_Order_Direction) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 1, 0}
+}
+
+type Error_ErrorCode int32
+
+const (
+ Error_BAD_REQUEST Error_ErrorCode = 1
+ Error_CONCURRENT_TRANSACTION Error_ErrorCode = 2
+ Error_INTERNAL_ERROR Error_ErrorCode = 3
+ Error_NEED_INDEX Error_ErrorCode = 4
+ Error_TIMEOUT Error_ErrorCode = 5
+ Error_PERMISSION_DENIED Error_ErrorCode = 6
+ Error_BIGTABLE_ERROR Error_ErrorCode = 7
+ Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8
+ Error_CAPABILITY_DISABLED Error_ErrorCode = 9
+ Error_TRY_ALTERNATE_BACKEND Error_ErrorCode = 10
+ Error_SAFE_TIME_TOO_OLD Error_ErrorCode = 11
+)
+
+var Error_ErrorCode_name = map[int32]string{
+ 1: "BAD_REQUEST",
+ 2: "CONCURRENT_TRANSACTION",
+ 3: "INTERNAL_ERROR",
+ 4: "NEED_INDEX",
+ 5: "TIMEOUT",
+ 6: "PERMISSION_DENIED",
+ 7: "BIGTABLE_ERROR",
+ 8: "COMMITTED_BUT_STILL_APPLYING",
+ 9: "CAPABILITY_DISABLED",
+ 10: "TRY_ALTERNATE_BACKEND",
+ 11: "SAFE_TIME_TOO_OLD",
+}
+var Error_ErrorCode_value = map[string]int32{
+ "BAD_REQUEST": 1,
+ "CONCURRENT_TRANSACTION": 2,
+ "INTERNAL_ERROR": 3,
+ "NEED_INDEX": 4,
+ "TIMEOUT": 5,
+ "PERMISSION_DENIED": 6,
+ "BIGTABLE_ERROR": 7,
+ "COMMITTED_BUT_STILL_APPLYING": 8,
+ "CAPABILITY_DISABLED": 9,
+ "TRY_ALTERNATE_BACKEND": 10,
+ "SAFE_TIME_TOO_OLD": 11,
+}
+
+func (x Error_ErrorCode) Enum() *Error_ErrorCode {
+ p := new(Error_ErrorCode)
+ *p = x
+ return p
+}
+func (x Error_ErrorCode) String() string {
+ return proto.EnumName(Error_ErrorCode_name, int32(x))
+}
+func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, "Error_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = Error_ErrorCode(value)
+ return nil
+}
+func (Error_ErrorCode) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{19, 0}
+}
+
+type PutRequest_AutoIdPolicy int32
+
+const (
+ PutRequest_CURRENT PutRequest_AutoIdPolicy = 0
+ PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1
+)
+
+var PutRequest_AutoIdPolicy_name = map[int32]string{
+ 0: "CURRENT",
+ 1: "SEQUENTIAL",
+}
+var PutRequest_AutoIdPolicy_value = map[string]int32{
+ "CURRENT": 0,
+ "SEQUENTIAL": 1,
+}
+
+func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy {
+ p := new(PutRequest_AutoIdPolicy)
+ *p = x
+ return p
+}
+func (x PutRequest_AutoIdPolicy) String() string {
+ return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x))
+}
+func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy")
+ if err != nil {
+ return err
+ }
+ *x = PutRequest_AutoIdPolicy(value)
+ return nil
+}
+func (PutRequest_AutoIdPolicy) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{23, 0}
+}
+
+type BeginTransactionRequest_TransactionMode int32
+
+const (
+ BeginTransactionRequest_UNKNOWN BeginTransactionRequest_TransactionMode = 0
+ BeginTransactionRequest_READ_ONLY BeginTransactionRequest_TransactionMode = 1
+ BeginTransactionRequest_READ_WRITE BeginTransactionRequest_TransactionMode = 2
+)
+
+var BeginTransactionRequest_TransactionMode_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "READ_ONLY",
+ 2: "READ_WRITE",
+}
+var BeginTransactionRequest_TransactionMode_value = map[string]int32{
+ "UNKNOWN": 0,
+ "READ_ONLY": 1,
+ "READ_WRITE": 2,
+}
+
+func (x BeginTransactionRequest_TransactionMode) Enum() *BeginTransactionRequest_TransactionMode {
+ p := new(BeginTransactionRequest_TransactionMode)
+ *p = x
+ return p
+}
+func (x BeginTransactionRequest_TransactionMode) String() string {
+ return proto.EnumName(BeginTransactionRequest_TransactionMode_name, int32(x))
+}
+func (x *BeginTransactionRequest_TransactionMode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(BeginTransactionRequest_TransactionMode_value, data, "BeginTransactionRequest_TransactionMode")
+ if err != nil {
+ return err
+ }
+ *x = BeginTransactionRequest_TransactionMode(value)
+ return nil
+}
+func (BeginTransactionRequest_TransactionMode) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{36, 0}
+}
+
+type Action struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Action) Reset() { *m = Action{} }
+func (m *Action) String() string { return proto.CompactTextString(m) }
+func (*Action) ProtoMessage() {}
+func (*Action) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{0}
+}
+func (m *Action) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Action.Unmarshal(m, b)
+}
+func (m *Action) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Action.Marshal(b, m, deterministic)
+}
+func (dst *Action) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Action.Merge(dst, src)
+}
+func (m *Action) XXX_Size() int {
+ return xxx_messageInfo_Action.Size(m)
+}
+func (m *Action) XXX_DiscardUnknown() {
+ xxx_messageInfo_Action.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Action proto.InternalMessageInfo
+
+type PropertyValue struct {
+ Int64Value *int64 `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"`
+ BooleanValue *bool `protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"`
+ StringValue *string `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"`
+ DoubleValue *float64 `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"`
+ Pointvalue *PropertyValue_PointValue `protobuf:"group,5,opt,name=PointValue,json=pointvalue" json:"pointvalue,omitempty"`
+ Uservalue *PropertyValue_UserValue `protobuf:"group,8,opt,name=UserValue,json=uservalue" json:"uservalue,omitempty"`
+ Referencevalue *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue,json=referencevalue" json:"referencevalue,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *PropertyValue) Reset() { *m = PropertyValue{} }
+func (m *PropertyValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue) ProtoMessage() {}
+func (*PropertyValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1}
+}
+func (m *PropertyValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_PropertyValue.Unmarshal(m, b)
+}
+func (m *PropertyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_PropertyValue.Marshal(b, m, deterministic)
+}
+func (dst *PropertyValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PropertyValue.Merge(dst, src)
+}
+func (m *PropertyValue) XXX_Size() int {
+ return xxx_messageInfo_PropertyValue.Size(m)
+}
+func (m *PropertyValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_PropertyValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PropertyValue proto.InternalMessageInfo
+
+func (m *PropertyValue) GetInt64Value() int64 {
+ if m != nil && m.Int64Value != nil {
+ return *m.Int64Value
+ }
+ return 0
+}
+
+func (m *PropertyValue) GetBooleanValue() bool {
+ if m != nil && m.BooleanValue != nil {
+ return *m.BooleanValue
+ }
+ return false
+}
+
+func (m *PropertyValue) GetStringValue() string {
+ if m != nil && m.StringValue != nil {
+ return *m.StringValue
+ }
+ return ""
+}
+
+func (m *PropertyValue) GetDoubleValue() float64 {
+ if m != nil && m.DoubleValue != nil {
+ return *m.DoubleValue
+ }
+ return 0
+}
+
+func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue {
+ if m != nil {
+ return m.Pointvalue
+ }
+ return nil
+}
+
+func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue {
+ if m != nil {
+ return m.Uservalue
+ }
+ return nil
+}
+
+func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue {
+ if m != nil {
+ return m.Referencevalue
+ }
+ return nil
+}
+
+type PropertyValue_PointValue struct {
+ X *float64 `protobuf:"fixed64,6,req,name=x" json:"x,omitempty"`
+ Y *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *PropertyValue_PointValue) Reset() { *m = PropertyValue_PointValue{} }
+func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_PointValue) ProtoMessage() {}
+func (*PropertyValue_PointValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 0}
+}
+func (m *PropertyValue_PointValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_PropertyValue_PointValue.Unmarshal(m, b)
+}
+func (m *PropertyValue_PointValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_PropertyValue_PointValue.Marshal(b, m, deterministic)
+}
+func (dst *PropertyValue_PointValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PropertyValue_PointValue.Merge(dst, src)
+}
+func (m *PropertyValue_PointValue) XXX_Size() int {
+ return xxx_messageInfo_PropertyValue_PointValue.Size(m)
+}
+func (m *PropertyValue_PointValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_PropertyValue_PointValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PropertyValue_PointValue proto.InternalMessageInfo
+
+func (m *PropertyValue_PointValue) GetX() float64 {
+ if m != nil && m.X != nil {
+ return *m.X
+ }
+ return 0
+}
+
+func (m *PropertyValue_PointValue) GetY() float64 {
+ if m != nil && m.Y != nil {
+ return *m.Y
+ }
+ return 0
+}
+
+type PropertyValue_UserValue struct {
+ Email *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"`
+ AuthDomain *string `protobuf:"bytes,10,req,name=auth_domain,json=authDomain" json:"auth_domain,omitempty"`
+ Nickname *string `protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"`
+ FederatedIdentity *string `protobuf:"bytes,21,opt,name=federated_identity,json=federatedIdentity" json:"federated_identity,omitempty"`
+ FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider,json=federatedProvider" json:"federated_provider,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *PropertyValue_UserValue) Reset() { *m = PropertyValue_UserValue{} }
+func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_UserValue) ProtoMessage() {}
+func (*PropertyValue_UserValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 1}
+}
+func (m *PropertyValue_UserValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_PropertyValue_UserValue.Unmarshal(m, b)
+}
+func (m *PropertyValue_UserValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_PropertyValue_UserValue.Marshal(b, m, deterministic)
+}
+func (dst *PropertyValue_UserValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PropertyValue_UserValue.Merge(dst, src)
+}
+func (m *PropertyValue_UserValue) XXX_Size() int {
+ return xxx_messageInfo_PropertyValue_UserValue.Size(m)
+}
+func (m *PropertyValue_UserValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_PropertyValue_UserValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PropertyValue_UserValue proto.InternalMessageInfo
+
+func (m *PropertyValue_UserValue) GetEmail() string {
+ if m != nil && m.Email != nil {
+ return *m.Email
+ }
+ return ""
+}
+
+func (m *PropertyValue_UserValue) GetAuthDomain() string {
+ if m != nil && m.AuthDomain != nil {
+ return *m.AuthDomain
+ }
+ return ""
+}
+
+func (m *PropertyValue_UserValue) GetNickname() string {
+ if m != nil && m.Nickname != nil {
+ return *m.Nickname
+ }
+ return ""
+}
+
+func (m *PropertyValue_UserValue) GetFederatedIdentity() string {
+ if m != nil && m.FederatedIdentity != nil {
+ return *m.FederatedIdentity
+ }
+ return ""
+}
+
+func (m *PropertyValue_UserValue) GetFederatedProvider() string {
+ if m != nil && m.FederatedProvider != nil {
+ return *m.FederatedProvider
+ }
+ return ""
+}
+
+type PropertyValue_ReferenceValue struct {
+ App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
+ NameSpace *string `protobuf:"bytes,20,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"`
+ Pathelement []*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement,json=pathelement" json:"pathelement,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *PropertyValue_ReferenceValue) Reset() { *m = PropertyValue_ReferenceValue{} }
+func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_ReferenceValue) ProtoMessage() {}
+func (*PropertyValue_ReferenceValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 2}
+}
+func (m *PropertyValue_ReferenceValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_PropertyValue_ReferenceValue.Unmarshal(m, b)
+}
+func (m *PropertyValue_ReferenceValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_PropertyValue_ReferenceValue.Marshal(b, m, deterministic)
+}
+func (dst *PropertyValue_ReferenceValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PropertyValue_ReferenceValue.Merge(dst, src)
+}
+func (m *PropertyValue_ReferenceValue) XXX_Size() int {
+ return xxx_messageInfo_PropertyValue_ReferenceValue.Size(m)
+}
+func (m *PropertyValue_ReferenceValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_PropertyValue_ReferenceValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PropertyValue_ReferenceValue proto.InternalMessageInfo
+
+func (m *PropertyValue_ReferenceValue) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *PropertyValue_ReferenceValue) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement {
+ if m != nil {
+ return m.Pathelement
+ }
+ return nil
+}
+
+type PropertyValue_ReferenceValue_PathElement struct {
+ Type *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"`
+ Id *int64 `protobuf:"varint,16,opt,name=id" json:"id,omitempty"`
+ Name *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *PropertyValue_ReferenceValue_PathElement) Reset() {
+ *m = PropertyValue_ReferenceValue_PathElement{}
+}
+func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage() {}
+func (*PropertyValue_ReferenceValue_PathElement) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 2, 0}
+}
+func (m *PropertyValue_ReferenceValue_PathElement) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Unmarshal(m, b)
+}
+func (m *PropertyValue_ReferenceValue_PathElement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Marshal(b, m, deterministic)
+}
+func (dst *PropertyValue_ReferenceValue_PathElement) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Merge(dst, src)
+}
+func (m *PropertyValue_ReferenceValue_PathElement) XXX_Size() int {
+ return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Size(m)
+}
+func (m *PropertyValue_ReferenceValue_PathElement) XXX_DiscardUnknown() {
+ xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PropertyValue_ReferenceValue_PathElement proto.InternalMessageInfo
+
+func (m *PropertyValue_ReferenceValue_PathElement) GetType() string {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return ""
+}
+
+func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 {
+ if m != nil && m.Id != nil {
+ return *m.Id
+ }
+ return 0
+}
+
+func (m *PropertyValue_ReferenceValue_PathElement) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+type Property struct {
+ Meaning *Property_Meaning `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"`
+ MeaningUri *string `protobuf:"bytes,2,opt,name=meaning_uri,json=meaningUri" json:"meaning_uri,omitempty"`
+ Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
+ Value *PropertyValue `protobuf:"bytes,5,req,name=value" json:"value,omitempty"`
+ Multiple *bool `protobuf:"varint,4,req,name=multiple" json:"multiple,omitempty"`
+ Searchable *bool `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"`
+ FtsTokenizationOption *Property_FtsTokenizationOption `protobuf:"varint,8,opt,name=fts_tokenization_option,json=ftsTokenizationOption,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"`
+ Locale *string `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Property) Reset() { *m = Property{} }
+func (m *Property) String() string { return proto.CompactTextString(m) }
+func (*Property) ProtoMessage() {}
+func (*Property) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2}
+}
+func (m *Property) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Property.Unmarshal(m, b)
+}
+func (m *Property) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Property.Marshal(b, m, deterministic)
+}
+func (dst *Property) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Property.Merge(dst, src)
+}
+func (m *Property) XXX_Size() int {
+ return xxx_messageInfo_Property.Size(m)
+}
+func (m *Property) XXX_DiscardUnknown() {
+ xxx_messageInfo_Property.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Property proto.InternalMessageInfo
+
+const Default_Property_Meaning Property_Meaning = Property_NO_MEANING
+const Default_Property_Searchable bool = false
+const Default_Property_Locale string = "en"
+
+func (m *Property) GetMeaning() Property_Meaning {
+ if m != nil && m.Meaning != nil {
+ return *m.Meaning
+ }
+ return Default_Property_Meaning
+}
+
+func (m *Property) GetMeaningUri() string {
+ if m != nil && m.MeaningUri != nil {
+ return *m.MeaningUri
+ }
+ return ""
+}
+
+func (m *Property) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *Property) GetValue() *PropertyValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *Property) GetMultiple() bool {
+ if m != nil && m.Multiple != nil {
+ return *m.Multiple
+ }
+ return false
+}
+
+func (m *Property) GetSearchable() bool {
+ if m != nil && m.Searchable != nil {
+ return *m.Searchable
+ }
+ return Default_Property_Searchable
+}
+
+func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption {
+ if m != nil && m.FtsTokenizationOption != nil {
+ return *m.FtsTokenizationOption
+ }
+ return Property_HTML
+}
+
+func (m *Property) GetLocale() string {
+ if m != nil && m.Locale != nil {
+ return *m.Locale
+ }
+ return Default_Property_Locale
+}
+
+type Path struct {
+ Element []*Path_Element `protobuf:"group,1,rep,name=Element,json=element" json:"element,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Path) Reset() { *m = Path{} }
+func (m *Path) String() string { return proto.CompactTextString(m) }
+func (*Path) ProtoMessage() {}
+func (*Path) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{3}
+}
+func (m *Path) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Path.Unmarshal(m, b)
+}
+func (m *Path) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Path.Marshal(b, m, deterministic)
+}
+func (dst *Path) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Path.Merge(dst, src)
+}
+func (m *Path) XXX_Size() int {
+ return xxx_messageInfo_Path.Size(m)
+}
+func (m *Path) XXX_DiscardUnknown() {
+ xxx_messageInfo_Path.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Path proto.InternalMessageInfo
+
+func (m *Path) GetElement() []*Path_Element {
+ if m != nil {
+ return m.Element
+ }
+ return nil
+}
+
+type Path_Element struct {
+ Type *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"`
+ Id *int64 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"`
+ Name *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Path_Element) Reset() { *m = Path_Element{} }
+func (m *Path_Element) String() string { return proto.CompactTextString(m) }
+func (*Path_Element) ProtoMessage() {}
+func (*Path_Element) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{3, 0}
+}
+func (m *Path_Element) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Path_Element.Unmarshal(m, b)
+}
+func (m *Path_Element) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Path_Element.Marshal(b, m, deterministic)
+}
+func (dst *Path_Element) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Path_Element.Merge(dst, src)
+}
+func (m *Path_Element) XXX_Size() int {
+ return xxx_messageInfo_Path_Element.Size(m)
+}
+func (m *Path_Element) XXX_DiscardUnknown() {
+ xxx_messageInfo_Path_Element.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Path_Element proto.InternalMessageInfo
+
+func (m *Path_Element) GetType() string {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return ""
+}
+
+func (m *Path_Element) GetId() int64 {
+ if m != nil && m.Id != nil {
+ return *m.Id
+ }
+ return 0
+}
+
+func (m *Path_Element) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+type Reference struct {
+ App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
+ NameSpace *string `protobuf:"bytes,20,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"`
+ Path *Path `protobuf:"bytes,14,req,name=path" json:"path,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Reference) Reset() { *m = Reference{} }
+func (m *Reference) String() string { return proto.CompactTextString(m) }
+func (*Reference) ProtoMessage() {}
+func (*Reference) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{4}
+}
+func (m *Reference) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Reference.Unmarshal(m, b)
+}
+func (m *Reference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Reference.Marshal(b, m, deterministic)
+}
+func (dst *Reference) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Reference.Merge(dst, src)
+}
+func (m *Reference) XXX_Size() int {
+ return xxx_messageInfo_Reference.Size(m)
+}
+func (m *Reference) XXX_DiscardUnknown() {
+ xxx_messageInfo_Reference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Reference proto.InternalMessageInfo
+
+func (m *Reference) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *Reference) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *Reference) GetPath() *Path {
+ if m != nil {
+ return m.Path
+ }
+ return nil
+}
+
+type User struct {
+ Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"`
+ AuthDomain *string `protobuf:"bytes,2,req,name=auth_domain,json=authDomain" json:"auth_domain,omitempty"`
+ Nickname *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"`
+ FederatedIdentity *string `protobuf:"bytes,6,opt,name=federated_identity,json=federatedIdentity" json:"federated_identity,omitempty"`
+ FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider,json=federatedProvider" json:"federated_provider,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *User) Reset() { *m = User{} }
+func (m *User) String() string { return proto.CompactTextString(m) }
+func (*User) ProtoMessage() {}
+func (*User) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{5}
+}
+func (m *User) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_User.Unmarshal(m, b)
+}
+func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_User.Marshal(b, m, deterministic)
+}
+func (dst *User) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_User.Merge(dst, src)
+}
+func (m *User) XXX_Size() int {
+ return xxx_messageInfo_User.Size(m)
+}
+func (m *User) XXX_DiscardUnknown() {
+ xxx_messageInfo_User.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_User proto.InternalMessageInfo
+
+func (m *User) GetEmail() string {
+ if m != nil && m.Email != nil {
+ return *m.Email
+ }
+ return ""
+}
+
+func (m *User) GetAuthDomain() string {
+ if m != nil && m.AuthDomain != nil {
+ return *m.AuthDomain
+ }
+ return ""
+}
+
+func (m *User) GetNickname() string {
+ if m != nil && m.Nickname != nil {
+ return *m.Nickname
+ }
+ return ""
+}
+
+func (m *User) GetFederatedIdentity() string {
+ if m != nil && m.FederatedIdentity != nil {
+ return *m.FederatedIdentity
+ }
+ return ""
+}
+
+func (m *User) GetFederatedProvider() string {
+ if m != nil && m.FederatedProvider != nil {
+ return *m.FederatedProvider
+ }
+ return ""
+}
+
+type EntityProto struct {
+ Key *Reference `protobuf:"bytes,13,req,name=key" json:"key,omitempty"`
+ EntityGroup *Path `protobuf:"bytes,16,req,name=entity_group,json=entityGroup" json:"entity_group,omitempty"`
+ Owner *User `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"`
+ Kind *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"`
+ KindUri *string `protobuf:"bytes,5,opt,name=kind_uri,json=kindUri" json:"kind_uri,omitempty"`
+ Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
+ RawProperty []*Property `protobuf:"bytes,15,rep,name=raw_property,json=rawProperty" json:"raw_property,omitempty"`
+ Rank *int32 `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *EntityProto) Reset() { *m = EntityProto{} }
+func (m *EntityProto) String() string { return proto.CompactTextString(m) }
+func (*EntityProto) ProtoMessage() {}
+func (*EntityProto) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{6}
+}
+func (m *EntityProto) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_EntityProto.Unmarshal(m, b)
+}
+func (m *EntityProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_EntityProto.Marshal(b, m, deterministic)
+}
+func (dst *EntityProto) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EntityProto.Merge(dst, src)
+}
+func (m *EntityProto) XXX_Size() int {
+ return xxx_messageInfo_EntityProto.Size(m)
+}
+func (m *EntityProto) XXX_DiscardUnknown() {
+ xxx_messageInfo_EntityProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EntityProto proto.InternalMessageInfo
+
+func (m *EntityProto) GetKey() *Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *EntityProto) GetEntityGroup() *Path {
+ if m != nil {
+ return m.EntityGroup
+ }
+ return nil
+}
+
+func (m *EntityProto) GetOwner() *User {
+ if m != nil {
+ return m.Owner
+ }
+ return nil
+}
+
+func (m *EntityProto) GetKind() EntityProto_Kind {
+ if m != nil && m.Kind != nil {
+ return *m.Kind
+ }
+ return EntityProto_GD_CONTACT
+}
+
+func (m *EntityProto) GetKindUri() string {
+ if m != nil && m.KindUri != nil {
+ return *m.KindUri
+ }
+ return ""
+}
+
+func (m *EntityProto) GetProperty() []*Property {
+ if m != nil {
+ return m.Property
+ }
+ return nil
+}
+
+func (m *EntityProto) GetRawProperty() []*Property {
+ if m != nil {
+ return m.RawProperty
+ }
+ return nil
+}
+
+func (m *EntityProto) GetRank() int32 {
+ if m != nil && m.Rank != nil {
+ return *m.Rank
+ }
+ return 0
+}
+
+type CompositeProperty struct {
+ IndexId *int64 `protobuf:"varint,1,req,name=index_id,json=indexId" json:"index_id,omitempty"`
+ Value []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CompositeProperty) Reset() { *m = CompositeProperty{} }
+func (m *CompositeProperty) String() string { return proto.CompactTextString(m) }
+func (*CompositeProperty) ProtoMessage() {}
+func (*CompositeProperty) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{7}
+}
+func (m *CompositeProperty) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CompositeProperty.Unmarshal(m, b)
+}
+func (m *CompositeProperty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CompositeProperty.Marshal(b, m, deterministic)
+}
+func (dst *CompositeProperty) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompositeProperty.Merge(dst, src)
+}
+func (m *CompositeProperty) XXX_Size() int {
+ return xxx_messageInfo_CompositeProperty.Size(m)
+}
+func (m *CompositeProperty) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompositeProperty.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompositeProperty proto.InternalMessageInfo
+
+func (m *CompositeProperty) GetIndexId() int64 {
+ if m != nil && m.IndexId != nil {
+ return *m.IndexId
+ }
+ return 0
+}
+
+func (m *CompositeProperty) GetValue() []string {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type Index struct {
+ EntityType *string `protobuf:"bytes,1,req,name=entity_type,json=entityType" json:"entity_type,omitempty"`
+ Ancestor *bool `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"`
+ Property []*Index_Property `protobuf:"group,2,rep,name=Property,json=property" json:"property,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Index) Reset() { *m = Index{} }
+func (m *Index) String() string { return proto.CompactTextString(m) }
+func (*Index) ProtoMessage() {}
+func (*Index) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8}
+}
+func (m *Index) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Index.Unmarshal(m, b)
+}
+func (m *Index) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Index.Marshal(b, m, deterministic)
+}
+func (dst *Index) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Index.Merge(dst, src)
+}
+func (m *Index) XXX_Size() int {
+ return xxx_messageInfo_Index.Size(m)
+}
+func (m *Index) XXX_DiscardUnknown() {
+ xxx_messageInfo_Index.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Index proto.InternalMessageInfo
+
+func (m *Index) GetEntityType() string {
+ if m != nil && m.EntityType != nil {
+ return *m.EntityType
+ }
+ return ""
+}
+
+func (m *Index) GetAncestor() bool {
+ if m != nil && m.Ancestor != nil {
+ return *m.Ancestor
+ }
+ return false
+}
+
+func (m *Index) GetProperty() []*Index_Property {
+ if m != nil {
+ return m.Property
+ }
+ return nil
+}
+
+type Index_Property struct {
+ Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
+ Direction *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Index_Property) Reset() { *m = Index_Property{} }
+func (m *Index_Property) String() string { return proto.CompactTextString(m) }
+func (*Index_Property) ProtoMessage() {}
+func (*Index_Property) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8, 0}
+}
+func (m *Index_Property) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Index_Property.Unmarshal(m, b)
+}
+func (m *Index_Property) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Index_Property.Marshal(b, m, deterministic)
+}
+func (dst *Index_Property) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Index_Property.Merge(dst, src)
+}
+func (m *Index_Property) XXX_Size() int {
+ return xxx_messageInfo_Index_Property.Size(m)
+}
+func (m *Index_Property) XXX_DiscardUnknown() {
+ xxx_messageInfo_Index_Property.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Index_Property proto.InternalMessageInfo
+
+const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING
+
+func (m *Index_Property) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *Index_Property) GetDirection() Index_Property_Direction {
+ if m != nil && m.Direction != nil {
+ return *m.Direction
+ }
+ return Default_Index_Property_Direction
+}
+
+type CompositeIndex struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
+ Id *int64 `protobuf:"varint,2,req,name=id" json:"id,omitempty"`
+ Definition *Index `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"`
+ State *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"`
+ OnlyUseIfRequired *bool `protobuf:"varint,6,opt,name=only_use_if_required,json=onlyUseIfRequired,def=0" json:"only_use_if_required,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CompositeIndex) Reset() { *m = CompositeIndex{} }
+func (m *CompositeIndex) String() string { return proto.CompactTextString(m) }
+func (*CompositeIndex) ProtoMessage() {}
+func (*CompositeIndex) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{9}
+}
+func (m *CompositeIndex) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CompositeIndex.Unmarshal(m, b)
+}
+func (m *CompositeIndex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CompositeIndex.Marshal(b, m, deterministic)
+}
+func (dst *CompositeIndex) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompositeIndex.Merge(dst, src)
+}
+func (m *CompositeIndex) XXX_Size() int {
+ return xxx_messageInfo_CompositeIndex.Size(m)
+}
+func (m *CompositeIndex) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompositeIndex.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompositeIndex proto.InternalMessageInfo
+
+const Default_CompositeIndex_OnlyUseIfRequired bool = false
+
+func (m *CompositeIndex) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *CompositeIndex) GetId() int64 {
+ if m != nil && m.Id != nil {
+ return *m.Id
+ }
+ return 0
+}
+
+func (m *CompositeIndex) GetDefinition() *Index {
+ if m != nil {
+ return m.Definition
+ }
+ return nil
+}
+
+func (m *CompositeIndex) GetState() CompositeIndex_State {
+ if m != nil && m.State != nil {
+ return *m.State
+ }
+ return CompositeIndex_WRITE_ONLY
+}
+
+func (m *CompositeIndex) GetOnlyUseIfRequired() bool {
+ if m != nil && m.OnlyUseIfRequired != nil {
+ return *m.OnlyUseIfRequired
+ }
+ return Default_CompositeIndex_OnlyUseIfRequired
+}
+
+type IndexPostfix struct {
+ IndexValue []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value,json=indexValue" json:"index_value,omitempty"`
+ Key *Reference `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"`
+ Before *bool `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *IndexPostfix) Reset() { *m = IndexPostfix{} }
+func (m *IndexPostfix) String() string { return proto.CompactTextString(m) }
+func (*IndexPostfix) ProtoMessage() {}
+func (*IndexPostfix) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{10}
+}
+func (m *IndexPostfix) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_IndexPostfix.Unmarshal(m, b)
+}
+func (m *IndexPostfix) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_IndexPostfix.Marshal(b, m, deterministic)
+}
+func (dst *IndexPostfix) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IndexPostfix.Merge(dst, src)
+}
+func (m *IndexPostfix) XXX_Size() int {
+ return xxx_messageInfo_IndexPostfix.Size(m)
+}
+func (m *IndexPostfix) XXX_DiscardUnknown() {
+ xxx_messageInfo_IndexPostfix.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IndexPostfix proto.InternalMessageInfo
+
+const Default_IndexPostfix_Before bool = true
+
+func (m *IndexPostfix) GetIndexValue() []*IndexPostfix_IndexValue {
+ if m != nil {
+ return m.IndexValue
+ }
+ return nil
+}
+
+func (m *IndexPostfix) GetKey() *Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *IndexPostfix) GetBefore() bool {
+ if m != nil && m.Before != nil {
+ return *m.Before
+ }
+ return Default_IndexPostfix_Before
+}
+
+type IndexPostfix_IndexValue struct {
+ PropertyName *string `protobuf:"bytes,1,req,name=property_name,json=propertyName" json:"property_name,omitempty"`
+ Value *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *IndexPostfix_IndexValue) Reset() { *m = IndexPostfix_IndexValue{} }
+func (m *IndexPostfix_IndexValue) String() string { return proto.CompactTextString(m) }
+func (*IndexPostfix_IndexValue) ProtoMessage() {}
+func (*IndexPostfix_IndexValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{10, 0}
+}
+func (m *IndexPostfix_IndexValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_IndexPostfix_IndexValue.Unmarshal(m, b)
+}
+func (m *IndexPostfix_IndexValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_IndexPostfix_IndexValue.Marshal(b, m, deterministic)
+}
+func (dst *IndexPostfix_IndexValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IndexPostfix_IndexValue.Merge(dst, src)
+}
+func (m *IndexPostfix_IndexValue) XXX_Size() int {
+ return xxx_messageInfo_IndexPostfix_IndexValue.Size(m)
+}
+func (m *IndexPostfix_IndexValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_IndexPostfix_IndexValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IndexPostfix_IndexValue proto.InternalMessageInfo
+
+func (m *IndexPostfix_IndexValue) GetPropertyName() string {
+ if m != nil && m.PropertyName != nil {
+ return *m.PropertyName
+ }
+ return ""
+}
+
+func (m *IndexPostfix_IndexValue) GetValue() *PropertyValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type IndexPosition struct {
+ Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"`
+ Before *bool `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *IndexPosition) Reset() { *m = IndexPosition{} }
+func (m *IndexPosition) String() string { return proto.CompactTextString(m) }
+func (*IndexPosition) ProtoMessage() {}
+func (*IndexPosition) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{11}
+}
+func (m *IndexPosition) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_IndexPosition.Unmarshal(m, b)
+}
+func (m *IndexPosition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_IndexPosition.Marshal(b, m, deterministic)
+}
+func (dst *IndexPosition) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IndexPosition.Merge(dst, src)
+}
+func (m *IndexPosition) XXX_Size() int {
+ return xxx_messageInfo_IndexPosition.Size(m)
+}
+func (m *IndexPosition) XXX_DiscardUnknown() {
+ xxx_messageInfo_IndexPosition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IndexPosition proto.InternalMessageInfo
+
+const Default_IndexPosition_Before bool = true
+
+func (m *IndexPosition) GetKey() string {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return ""
+}
+
+func (m *IndexPosition) GetBefore() bool {
+ if m != nil && m.Before != nil {
+ return *m.Before
+ }
+ return Default_IndexPosition_Before
+}
+
+type Snapshot struct {
+ Ts *int64 `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Snapshot) Reset() { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage() {}
+func (*Snapshot) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{12}
+}
+func (m *Snapshot) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Snapshot.Unmarshal(m, b)
+}
+func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic)
+}
+func (dst *Snapshot) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Snapshot.Merge(dst, src)
+}
+func (m *Snapshot) XXX_Size() int {
+ return xxx_messageInfo_Snapshot.Size(m)
+}
+func (m *Snapshot) XXX_DiscardUnknown() {
+ xxx_messageInfo_Snapshot.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Snapshot proto.InternalMessageInfo
+
+func (m *Snapshot) GetTs() int64 {
+ if m != nil && m.Ts != nil {
+ return *m.Ts
+ }
+ return 0
+}
+
+type InternalHeader struct {
+ Qos *string `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *InternalHeader) Reset() { *m = InternalHeader{} }
+func (m *InternalHeader) String() string { return proto.CompactTextString(m) }
+func (*InternalHeader) ProtoMessage() {}
+func (*InternalHeader) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{13}
+}
+func (m *InternalHeader) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_InternalHeader.Unmarshal(m, b)
+}
+func (m *InternalHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_InternalHeader.Marshal(b, m, deterministic)
+}
+func (dst *InternalHeader) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_InternalHeader.Merge(dst, src)
+}
+func (m *InternalHeader) XXX_Size() int {
+ return xxx_messageInfo_InternalHeader.Size(m)
+}
+func (m *InternalHeader) XXX_DiscardUnknown() {
+ xxx_messageInfo_InternalHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_InternalHeader proto.InternalMessageInfo
+
+func (m *InternalHeader) GetQos() string {
+ if m != nil && m.Qos != nil {
+ return *m.Qos
+ }
+ return ""
+}
+
+type Transaction struct {
+ Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
+ Handle *uint64 `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"`
+ App *string `protobuf:"bytes,2,req,name=app" json:"app,omitempty"`
+ MarkChanges *bool `protobuf:"varint,3,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Transaction) Reset() { *m = Transaction{} }
+func (m *Transaction) String() string { return proto.CompactTextString(m) }
+func (*Transaction) ProtoMessage() {}
+func (*Transaction) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{14}
+}
+func (m *Transaction) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Transaction.Unmarshal(m, b)
+}
+func (m *Transaction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Transaction.Marshal(b, m, deterministic)
+}
+func (dst *Transaction) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Transaction.Merge(dst, src)
+}
+func (m *Transaction) XXX_Size() int {
+ return xxx_messageInfo_Transaction.Size(m)
+}
+func (m *Transaction) XXX_DiscardUnknown() {
+ xxx_messageInfo_Transaction.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Transaction proto.InternalMessageInfo
+
+const Default_Transaction_MarkChanges bool = false
+
+func (m *Transaction) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *Transaction) GetHandle() uint64 {
+ if m != nil && m.Handle != nil {
+ return *m.Handle
+ }
+ return 0
+}
+
+func (m *Transaction) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *Transaction) GetMarkChanges() bool {
+ if m != nil && m.MarkChanges != nil {
+ return *m.MarkChanges
+ }
+ return Default_Transaction_MarkChanges
+}
+
+type Query struct {
+ Header *InternalHeader `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"`
+ App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
+ NameSpace *string `protobuf:"bytes,29,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"`
+ Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"`
+ Ancestor *Reference `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"`
+ Filter []*Query_Filter `protobuf:"group,4,rep,name=Filter,json=filter" json:"filter,omitempty"`
+ SearchQuery *string `protobuf:"bytes,8,opt,name=search_query,json=searchQuery" json:"search_query,omitempty"`
+ Order []*Query_Order `protobuf:"group,9,rep,name=Order,json=order" json:"order,omitempty"`
+ Hint *Query_Hint `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"`
+ Count *int32 `protobuf:"varint,23,opt,name=count" json:"count,omitempty"`
+ Offset *int32 `protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"`
+ Limit *int32 `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"`
+ CompiledCursor *CompiledCursor `protobuf:"bytes,30,opt,name=compiled_cursor,json=compiledCursor" json:"compiled_cursor,omitempty"`
+ EndCompiledCursor *CompiledCursor `protobuf:"bytes,31,opt,name=end_compiled_cursor,json=endCompiledCursor" json:"end_compiled_cursor,omitempty"`
+ CompositeIndex []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"`
+ RequirePerfectPlan *bool `protobuf:"varint,20,opt,name=require_perfect_plan,json=requirePerfectPlan,def=0" json:"require_perfect_plan,omitempty"`
+ KeysOnly *bool `protobuf:"varint,21,opt,name=keys_only,json=keysOnly,def=0" json:"keys_only,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"`
+ Compile *bool `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"`
+ FailoverMs *int64 `protobuf:"varint,26,opt,name=failover_ms,json=failoverMs" json:"failover_ms,omitempty"`
+ Strong *bool `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"`
+ PropertyName []string `protobuf:"bytes,33,rep,name=property_name,json=propertyName" json:"property_name,omitempty"`
+ GroupByPropertyName []string `protobuf:"bytes,34,rep,name=group_by_property_name,json=groupByPropertyName" json:"group_by_property_name,omitempty"`
+ Distinct *bool `protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"`
+ MinSafeTimeSeconds *int64 `protobuf:"varint,35,opt,name=min_safe_time_seconds,json=minSafeTimeSeconds" json:"min_safe_time_seconds,omitempty"`
+ SafeReplicaName []string `protobuf:"bytes,36,rep,name=safe_replica_name,json=safeReplicaName" json:"safe_replica_name,omitempty"`
+ PersistOffset *bool `protobuf:"varint,37,opt,name=persist_offset,json=persistOffset,def=0" json:"persist_offset,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Query) Reset() { *m = Query{} }
+func (m *Query) String() string { return proto.CompactTextString(m) }
+func (*Query) ProtoMessage() {}
+func (*Query) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15}
+}
+func (m *Query) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Query.Unmarshal(m, b)
+}
+func (m *Query) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Query.Marshal(b, m, deterministic)
+}
+func (dst *Query) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Query.Merge(dst, src)
+}
+func (m *Query) XXX_Size() int {
+ return xxx_messageInfo_Query.Size(m)
+}
+func (m *Query) XXX_DiscardUnknown() {
+ xxx_messageInfo_Query.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Query proto.InternalMessageInfo
+
+const Default_Query_Offset int32 = 0
+const Default_Query_RequirePerfectPlan bool = false
+const Default_Query_KeysOnly bool = false
+const Default_Query_Compile bool = false
+const Default_Query_PersistOffset bool = false
+
+func (m *Query) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *Query) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *Query) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *Query) GetKind() string {
+ if m != nil && m.Kind != nil {
+ return *m.Kind
+ }
+ return ""
+}
+
+func (m *Query) GetAncestor() *Reference {
+ if m != nil {
+ return m.Ancestor
+ }
+ return nil
+}
+
+func (m *Query) GetFilter() []*Query_Filter {
+ if m != nil {
+ return m.Filter
+ }
+ return nil
+}
+
+func (m *Query) GetSearchQuery() string {
+ if m != nil && m.SearchQuery != nil {
+ return *m.SearchQuery
+ }
+ return ""
+}
+
+func (m *Query) GetOrder() []*Query_Order {
+ if m != nil {
+ return m.Order
+ }
+ return nil
+}
+
+func (m *Query) GetHint() Query_Hint {
+ if m != nil && m.Hint != nil {
+ return *m.Hint
+ }
+ return Query_ORDER_FIRST
+}
+
+func (m *Query) GetCount() int32 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *Query) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return Default_Query_Offset
+}
+
+func (m *Query) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return 0
+}
+
+func (m *Query) GetCompiledCursor() *CompiledCursor {
+ if m != nil {
+ return m.CompiledCursor
+ }
+ return nil
+}
+
+func (m *Query) GetEndCompiledCursor() *CompiledCursor {
+ if m != nil {
+ return m.EndCompiledCursor
+ }
+ return nil
+}
+
+func (m *Query) GetCompositeIndex() []*CompositeIndex {
+ if m != nil {
+ return m.CompositeIndex
+ }
+ return nil
+}
+
+func (m *Query) GetRequirePerfectPlan() bool {
+ if m != nil && m.RequirePerfectPlan != nil {
+ return *m.RequirePerfectPlan
+ }
+ return Default_Query_RequirePerfectPlan
+}
+
+func (m *Query) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return Default_Query_KeysOnly
+}
+
+func (m *Query) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *Query) GetCompile() bool {
+ if m != nil && m.Compile != nil {
+ return *m.Compile
+ }
+ return Default_Query_Compile
+}
+
+func (m *Query) GetFailoverMs() int64 {
+ if m != nil && m.FailoverMs != nil {
+ return *m.FailoverMs
+ }
+ return 0
+}
+
+func (m *Query) GetStrong() bool {
+ if m != nil && m.Strong != nil {
+ return *m.Strong
+ }
+ return false
+}
+
+func (m *Query) GetPropertyName() []string {
+ if m != nil {
+ return m.PropertyName
+ }
+ return nil
+}
+
+func (m *Query) GetGroupByPropertyName() []string {
+ if m != nil {
+ return m.GroupByPropertyName
+ }
+ return nil
+}
+
+func (m *Query) GetDistinct() bool {
+ if m != nil && m.Distinct != nil {
+ return *m.Distinct
+ }
+ return false
+}
+
+func (m *Query) GetMinSafeTimeSeconds() int64 {
+ if m != nil && m.MinSafeTimeSeconds != nil {
+ return *m.MinSafeTimeSeconds
+ }
+ return 0
+}
+
+func (m *Query) GetSafeReplicaName() []string {
+ if m != nil {
+ return m.SafeReplicaName
+ }
+ return nil
+}
+
+func (m *Query) GetPersistOffset() bool {
+ if m != nil && m.PersistOffset != nil {
+ return *m.PersistOffset
+ }
+ return Default_Query_PersistOffset
+}
+
+type Query_Filter struct {
+ Op *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"`
+ Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Query_Filter) Reset() { *m = Query_Filter{} }
+func (m *Query_Filter) String() string { return proto.CompactTextString(m) }
+func (*Query_Filter) ProtoMessage() {}
+func (*Query_Filter) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0}
+}
+func (m *Query_Filter) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Query_Filter.Unmarshal(m, b)
+}
+func (m *Query_Filter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Query_Filter.Marshal(b, m, deterministic)
+}
+func (dst *Query_Filter) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Query_Filter.Merge(dst, src)
+}
+func (m *Query_Filter) XXX_Size() int {
+ return xxx_messageInfo_Query_Filter.Size(m)
+}
+func (m *Query_Filter) XXX_DiscardUnknown() {
+ xxx_messageInfo_Query_Filter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Query_Filter proto.InternalMessageInfo
+
+func (m *Query_Filter) GetOp() Query_Filter_Operator {
+ if m != nil && m.Op != nil {
+ return *m.Op
+ }
+ return Query_Filter_LESS_THAN
+}
+
+func (m *Query_Filter) GetProperty() []*Property {
+ if m != nil {
+ return m.Property
+ }
+ return nil
+}
+
+type Query_Order struct {
+ Property *string `protobuf:"bytes,10,req,name=property" json:"property,omitempty"`
+ Direction *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Query_Order) Reset() { *m = Query_Order{} }
+func (m *Query_Order) String() string { return proto.CompactTextString(m) }
+func (*Query_Order) ProtoMessage() {}
+func (*Query_Order) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 1}
+}
+func (m *Query_Order) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Query_Order.Unmarshal(m, b)
+}
+func (m *Query_Order) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Query_Order.Marshal(b, m, deterministic)
+}
+func (dst *Query_Order) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Query_Order.Merge(dst, src)
+}
+func (m *Query_Order) XXX_Size() int {
+ return xxx_messageInfo_Query_Order.Size(m)
+}
+func (m *Query_Order) XXX_DiscardUnknown() {
+ xxx_messageInfo_Query_Order.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Query_Order proto.InternalMessageInfo
+
+const Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING
+
+func (m *Query_Order) GetProperty() string {
+ if m != nil && m.Property != nil {
+ return *m.Property
+ }
+ return ""
+}
+
+func (m *Query_Order) GetDirection() Query_Order_Direction {
+ if m != nil && m.Direction != nil {
+ return *m.Direction
+ }
+ return Default_Query_Order_Direction
+}
+
+type CompiledQuery struct {
+ Primaryscan *CompiledQuery_PrimaryScan `protobuf:"group,1,req,name=PrimaryScan,json=primaryscan" json:"primaryscan,omitempty"`
+ Mergejoinscan []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan,json=mergejoinscan" json:"mergejoinscan,omitempty"`
+ IndexDef *Index `protobuf:"bytes,21,opt,name=index_def,json=indexDef" json:"index_def,omitempty"`
+ Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"`
+ Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"`
+ KeysOnly *bool `protobuf:"varint,12,req,name=keys_only,json=keysOnly" json:"keys_only,omitempty"`
+ PropertyName []string `protobuf:"bytes,24,rep,name=property_name,json=propertyName" json:"property_name,omitempty"`
+ DistinctInfixSize *int32 `protobuf:"varint,25,opt,name=distinct_infix_size,json=distinctInfixSize" json:"distinct_infix_size,omitempty"`
+ Entityfilter *CompiledQuery_EntityFilter `protobuf:"group,13,opt,name=EntityFilter,json=entityfilter" json:"entityfilter,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CompiledQuery) Reset() { *m = CompiledQuery{} }
+func (m *CompiledQuery) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery) ProtoMessage() {}
+func (*CompiledQuery) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16}
+}
+func (m *CompiledQuery) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CompiledQuery.Unmarshal(m, b)
+}
+func (m *CompiledQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CompiledQuery.Marshal(b, m, deterministic)
+}
+func (dst *CompiledQuery) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompiledQuery.Merge(dst, src)
+}
+func (m *CompiledQuery) XXX_Size() int {
+ return xxx_messageInfo_CompiledQuery.Size(m)
+}
+func (m *CompiledQuery) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompiledQuery.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompiledQuery proto.InternalMessageInfo
+
+const Default_CompiledQuery_Offset int32 = 0
+
+func (m *CompiledQuery) GetPrimaryscan() *CompiledQuery_PrimaryScan {
+ if m != nil {
+ return m.Primaryscan
+ }
+ return nil
+}
+
+func (m *CompiledQuery) GetMergejoinscan() []*CompiledQuery_MergeJoinScan {
+ if m != nil {
+ return m.Mergejoinscan
+ }
+ return nil
+}
+
+func (m *CompiledQuery) GetIndexDef() *Index {
+ if m != nil {
+ return m.IndexDef
+ }
+ return nil
+}
+
+func (m *CompiledQuery) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return Default_CompiledQuery_Offset
+}
+
+func (m *CompiledQuery) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return 0
+}
+
+func (m *CompiledQuery) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return false
+}
+
+func (m *CompiledQuery) GetPropertyName() []string {
+ if m != nil {
+ return m.PropertyName
+ }
+ return nil
+}
+
+func (m *CompiledQuery) GetDistinctInfixSize() int32 {
+ if m != nil && m.DistinctInfixSize != nil {
+ return *m.DistinctInfixSize
+ }
+ return 0
+}
+
+func (m *CompiledQuery) GetEntityfilter() *CompiledQuery_EntityFilter {
+ if m != nil {
+ return m.Entityfilter
+ }
+ return nil
+}
+
+type CompiledQuery_PrimaryScan struct {
+ IndexName *string `protobuf:"bytes,2,opt,name=index_name,json=indexName" json:"index_name,omitempty"`
+ StartKey *string `protobuf:"bytes,3,opt,name=start_key,json=startKey" json:"start_key,omitempty"`
+ StartInclusive *bool `protobuf:"varint,4,opt,name=start_inclusive,json=startInclusive" json:"start_inclusive,omitempty"`
+ EndKey *string `protobuf:"bytes,5,opt,name=end_key,json=endKey" json:"end_key,omitempty"`
+ EndInclusive *bool `protobuf:"varint,6,opt,name=end_inclusive,json=endInclusive" json:"end_inclusive,omitempty"`
+ StartPostfixValue []string `protobuf:"bytes,22,rep,name=start_postfix_value,json=startPostfixValue" json:"start_postfix_value,omitempty"`
+ EndPostfixValue []string `protobuf:"bytes,23,rep,name=end_postfix_value,json=endPostfixValue" json:"end_postfix_value,omitempty"`
+ EndUnappliedLogTimestampUs *int64 `protobuf:"varint,19,opt,name=end_unapplied_log_timestamp_us,json=endUnappliedLogTimestampUs" json:"end_unapplied_log_timestamp_us,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CompiledQuery_PrimaryScan) Reset() { *m = CompiledQuery_PrimaryScan{} }
+func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery_PrimaryScan) ProtoMessage() {}
+func (*CompiledQuery_PrimaryScan) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 0}
+}
+func (m *CompiledQuery_PrimaryScan) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CompiledQuery_PrimaryScan.Unmarshal(m, b)
+}
+func (m *CompiledQuery_PrimaryScan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CompiledQuery_PrimaryScan.Marshal(b, m, deterministic)
+}
+func (dst *CompiledQuery_PrimaryScan) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompiledQuery_PrimaryScan.Merge(dst, src)
+}
+func (m *CompiledQuery_PrimaryScan) XXX_Size() int {
+ return xxx_messageInfo_CompiledQuery_PrimaryScan.Size(m)
+}
+func (m *CompiledQuery_PrimaryScan) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompiledQuery_PrimaryScan.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompiledQuery_PrimaryScan proto.InternalMessageInfo
+
+func (m *CompiledQuery_PrimaryScan) GetIndexName() string {
+ if m != nil && m.IndexName != nil {
+ return *m.IndexName
+ }
+ return ""
+}
+
+func (m *CompiledQuery_PrimaryScan) GetStartKey() string {
+ if m != nil && m.StartKey != nil {
+ return *m.StartKey
+ }
+ return ""
+}
+
+func (m *CompiledQuery_PrimaryScan) GetStartInclusive() bool {
+ if m != nil && m.StartInclusive != nil {
+ return *m.StartInclusive
+ }
+ return false
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndKey() string {
+ if m != nil && m.EndKey != nil {
+ return *m.EndKey
+ }
+ return ""
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndInclusive() bool {
+ if m != nil && m.EndInclusive != nil {
+ return *m.EndInclusive
+ }
+ return false
+}
+
+func (m *CompiledQuery_PrimaryScan) GetStartPostfixValue() []string {
+ if m != nil {
+ return m.StartPostfixValue
+ }
+ return nil
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndPostfixValue() []string {
+ if m != nil {
+ return m.EndPostfixValue
+ }
+ return nil
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 {
+ if m != nil && m.EndUnappliedLogTimestampUs != nil {
+ return *m.EndUnappliedLogTimestampUs
+ }
+ return 0
+}
+
+type CompiledQuery_MergeJoinScan struct {
+ IndexName *string `protobuf:"bytes,8,req,name=index_name,json=indexName" json:"index_name,omitempty"`
+ PrefixValue []string `protobuf:"bytes,9,rep,name=prefix_value,json=prefixValue" json:"prefix_value,omitempty"`
+ ValuePrefix *bool `protobuf:"varint,20,opt,name=value_prefix,json=valuePrefix,def=0" json:"value_prefix,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CompiledQuery_MergeJoinScan) Reset() { *m = CompiledQuery_MergeJoinScan{} }
+func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery_MergeJoinScan) ProtoMessage() {}
+func (*CompiledQuery_MergeJoinScan) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 1}
+}
+func (m *CompiledQuery_MergeJoinScan) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CompiledQuery_MergeJoinScan.Unmarshal(m, b)
+}
+func (m *CompiledQuery_MergeJoinScan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CompiledQuery_MergeJoinScan.Marshal(b, m, deterministic)
+}
+func (dst *CompiledQuery_MergeJoinScan) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompiledQuery_MergeJoinScan.Merge(dst, src)
+}
+func (m *CompiledQuery_MergeJoinScan) XXX_Size() int {
+ return xxx_messageInfo_CompiledQuery_MergeJoinScan.Size(m)
+}
+func (m *CompiledQuery_MergeJoinScan) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompiledQuery_MergeJoinScan.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompiledQuery_MergeJoinScan proto.InternalMessageInfo
+
+const Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false
+
+func (m *CompiledQuery_MergeJoinScan) GetIndexName() string {
+ if m != nil && m.IndexName != nil {
+ return *m.IndexName
+ }
+ return ""
+}
+
+func (m *CompiledQuery_MergeJoinScan) GetPrefixValue() []string {
+ if m != nil {
+ return m.PrefixValue
+ }
+ return nil
+}
+
+func (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool {
+ if m != nil && m.ValuePrefix != nil {
+ return *m.ValuePrefix
+ }
+ return Default_CompiledQuery_MergeJoinScan_ValuePrefix
+}
+
+type CompiledQuery_EntityFilter struct {
+ Distinct *bool `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"`
+ Kind *string `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"`
+ Ancestor *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CompiledQuery_EntityFilter) Reset() { *m = CompiledQuery_EntityFilter{} }
+func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery_EntityFilter) ProtoMessage() {}
+func (*CompiledQuery_EntityFilter) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 2}
+}
+func (m *CompiledQuery_EntityFilter) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CompiledQuery_EntityFilter.Unmarshal(m, b)
+}
+func (m *CompiledQuery_EntityFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CompiledQuery_EntityFilter.Marshal(b, m, deterministic)
+}
+func (dst *CompiledQuery_EntityFilter) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompiledQuery_EntityFilter.Merge(dst, src)
+}
+func (m *CompiledQuery_EntityFilter) XXX_Size() int {
+ return xxx_messageInfo_CompiledQuery_EntityFilter.Size(m)
+}
+func (m *CompiledQuery_EntityFilter) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompiledQuery_EntityFilter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompiledQuery_EntityFilter proto.InternalMessageInfo
+
+const Default_CompiledQuery_EntityFilter_Distinct bool = false
+
+func (m *CompiledQuery_EntityFilter) GetDistinct() bool {
+ if m != nil && m.Distinct != nil {
+ return *m.Distinct
+ }
+ return Default_CompiledQuery_EntityFilter_Distinct
+}
+
+func (m *CompiledQuery_EntityFilter) GetKind() string {
+ if m != nil && m.Kind != nil {
+ return *m.Kind
+ }
+ return ""
+}
+
+func (m *CompiledQuery_EntityFilter) GetAncestor() *Reference {
+ if m != nil {
+ return m.Ancestor
+ }
+ return nil
+}
+
+type CompiledCursor struct {
+ Position *CompiledCursor_Position `protobuf:"group,2,opt,name=Position,json=position" json:"position,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CompiledCursor) Reset() { *m = CompiledCursor{} }
+func (m *CompiledCursor) String() string { return proto.CompactTextString(m) }
+func (*CompiledCursor) ProtoMessage() {}
+func (*CompiledCursor) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17}
+}
+func (m *CompiledCursor) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CompiledCursor.Unmarshal(m, b)
+}
+func (m *CompiledCursor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CompiledCursor.Marshal(b, m, deterministic)
+}
+func (dst *CompiledCursor) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompiledCursor.Merge(dst, src)
+}
+func (m *CompiledCursor) XXX_Size() int {
+ return xxx_messageInfo_CompiledCursor.Size(m)
+}
+func (m *CompiledCursor) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompiledCursor.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompiledCursor proto.InternalMessageInfo
+
+func (m *CompiledCursor) GetPosition() *CompiledCursor_Position {
+ if m != nil {
+ return m.Position
+ }
+ return nil
+}
+
+type CompiledCursor_Position struct {
+ StartKey *string `protobuf:"bytes,27,opt,name=start_key,json=startKey" json:"start_key,omitempty"`
+ Indexvalue []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue,json=indexvalue" json:"indexvalue,omitempty"`
+ Key *Reference `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"`
+ StartInclusive *bool `protobuf:"varint,28,opt,name=start_inclusive,json=startInclusive,def=1" json:"start_inclusive,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CompiledCursor_Position) Reset() { *m = CompiledCursor_Position{} }
+func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) }
+func (*CompiledCursor_Position) ProtoMessage() {}
+func (*CompiledCursor_Position) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17, 0}
+}
+func (m *CompiledCursor_Position) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CompiledCursor_Position.Unmarshal(m, b)
+}
+func (m *CompiledCursor_Position) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CompiledCursor_Position.Marshal(b, m, deterministic)
+}
+func (dst *CompiledCursor_Position) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompiledCursor_Position.Merge(dst, src)
+}
+func (m *CompiledCursor_Position) XXX_Size() int {
+ return xxx_messageInfo_CompiledCursor_Position.Size(m)
+}
+func (m *CompiledCursor_Position) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompiledCursor_Position.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompiledCursor_Position proto.InternalMessageInfo
+
+const Default_CompiledCursor_Position_StartInclusive bool = true
+
+func (m *CompiledCursor_Position) GetStartKey() string {
+ if m != nil && m.StartKey != nil {
+ return *m.StartKey
+ }
+ return ""
+}
+
+func (m *CompiledCursor_Position) GetIndexvalue() []*CompiledCursor_Position_IndexValue {
+ if m != nil {
+ return m.Indexvalue
+ }
+ return nil
+}
+
+func (m *CompiledCursor_Position) GetKey() *Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *CompiledCursor_Position) GetStartInclusive() bool {
+ if m != nil && m.StartInclusive != nil {
+ return *m.StartInclusive
+ }
+ return Default_CompiledCursor_Position_StartInclusive
+}
+
+type CompiledCursor_Position_IndexValue struct {
+ Property *string `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"`
+ Value *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CompiledCursor_Position_IndexValue) Reset() { *m = CompiledCursor_Position_IndexValue{} }
+func (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) }
+func (*CompiledCursor_Position_IndexValue) ProtoMessage() {}
+func (*CompiledCursor_Position_IndexValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17, 0, 0}
+}
+func (m *CompiledCursor_Position_IndexValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CompiledCursor_Position_IndexValue.Unmarshal(m, b)
+}
+func (m *CompiledCursor_Position_IndexValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CompiledCursor_Position_IndexValue.Marshal(b, m, deterministic)
+}
+func (dst *CompiledCursor_Position_IndexValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompiledCursor_Position_IndexValue.Merge(dst, src)
+}
+func (m *CompiledCursor_Position_IndexValue) XXX_Size() int {
+ return xxx_messageInfo_CompiledCursor_Position_IndexValue.Size(m)
+}
+func (m *CompiledCursor_Position_IndexValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompiledCursor_Position_IndexValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompiledCursor_Position_IndexValue proto.InternalMessageInfo
+
+func (m *CompiledCursor_Position_IndexValue) GetProperty() string {
+ if m != nil && m.Property != nil {
+ return *m.Property
+ }
+ return ""
+}
+
+func (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type Cursor struct {
+ Cursor *uint64 `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"`
+ App *string `protobuf:"bytes,2,opt,name=app" json:"app,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Cursor) Reset() { *m = Cursor{} }
+func (m *Cursor) String() string { return proto.CompactTextString(m) }
+func (*Cursor) ProtoMessage() {}
+func (*Cursor) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{18}
+}
+func (m *Cursor) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Cursor.Unmarshal(m, b)
+}
+func (m *Cursor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Cursor.Marshal(b, m, deterministic)
+}
+func (dst *Cursor) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Cursor.Merge(dst, src)
+}
+func (m *Cursor) XXX_Size() int {
+ return xxx_messageInfo_Cursor.Size(m)
+}
+func (m *Cursor) XXX_DiscardUnknown() {
+ xxx_messageInfo_Cursor.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Cursor proto.InternalMessageInfo
+
+func (m *Cursor) GetCursor() uint64 {
+ if m != nil && m.Cursor != nil {
+ return *m.Cursor
+ }
+ return 0
+}
+
+func (m *Cursor) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+type Error struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Error) Reset() { *m = Error{} }
+func (m *Error) String() string { return proto.CompactTextString(m) }
+func (*Error) ProtoMessage() {}
+func (*Error) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{19}
+}
+func (m *Error) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Error.Unmarshal(m, b)
+}
+func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Error.Marshal(b, m, deterministic)
+}
+func (dst *Error) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Error.Merge(dst, src)
+}
+func (m *Error) XXX_Size() int {
+ return xxx_messageInfo_Error.Size(m)
+}
+func (m *Error) XXX_DiscardUnknown() {
+ xxx_messageInfo_Error.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Error proto.InternalMessageInfo
+
+type Cost struct {
+ IndexWrites *int32 `protobuf:"varint,1,opt,name=index_writes,json=indexWrites" json:"index_writes,omitempty"`
+ IndexWriteBytes *int32 `protobuf:"varint,2,opt,name=index_write_bytes,json=indexWriteBytes" json:"index_write_bytes,omitempty"`
+ EntityWrites *int32 `protobuf:"varint,3,opt,name=entity_writes,json=entityWrites" json:"entity_writes,omitempty"`
+ EntityWriteBytes *int32 `protobuf:"varint,4,opt,name=entity_write_bytes,json=entityWriteBytes" json:"entity_write_bytes,omitempty"`
+ Commitcost *Cost_CommitCost `protobuf:"group,5,opt,name=CommitCost,json=commitcost" json:"commitcost,omitempty"`
+ ApproximateStorageDelta *int32 `protobuf:"varint,8,opt,name=approximate_storage_delta,json=approximateStorageDelta" json:"approximate_storage_delta,omitempty"`
+ IdSequenceUpdates *int32 `protobuf:"varint,9,opt,name=id_sequence_updates,json=idSequenceUpdates" json:"id_sequence_updates,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Cost) Reset() { *m = Cost{} }
+func (m *Cost) String() string { return proto.CompactTextString(m) }
+func (*Cost) ProtoMessage() {}
+func (*Cost) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{20}
+}
+func (m *Cost) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Cost.Unmarshal(m, b)
+}
+func (m *Cost) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Cost.Marshal(b, m, deterministic)
+}
+func (dst *Cost) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Cost.Merge(dst, src)
+}
+func (m *Cost) XXX_Size() int {
+ return xxx_messageInfo_Cost.Size(m)
+}
+func (m *Cost) XXX_DiscardUnknown() {
+ xxx_messageInfo_Cost.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Cost proto.InternalMessageInfo
+
+func (m *Cost) GetIndexWrites() int32 {
+ if m != nil && m.IndexWrites != nil {
+ return *m.IndexWrites
+ }
+ return 0
+}
+
+func (m *Cost) GetIndexWriteBytes() int32 {
+ if m != nil && m.IndexWriteBytes != nil {
+ return *m.IndexWriteBytes
+ }
+ return 0
+}
+
+func (m *Cost) GetEntityWrites() int32 {
+ if m != nil && m.EntityWrites != nil {
+ return *m.EntityWrites
+ }
+ return 0
+}
+
+func (m *Cost) GetEntityWriteBytes() int32 {
+ if m != nil && m.EntityWriteBytes != nil {
+ return *m.EntityWriteBytes
+ }
+ return 0
+}
+
+func (m *Cost) GetCommitcost() *Cost_CommitCost {
+ if m != nil {
+ return m.Commitcost
+ }
+ return nil
+}
+
+func (m *Cost) GetApproximateStorageDelta() int32 {
+ if m != nil && m.ApproximateStorageDelta != nil {
+ return *m.ApproximateStorageDelta
+ }
+ return 0
+}
+
+func (m *Cost) GetIdSequenceUpdates() int32 {
+ if m != nil && m.IdSequenceUpdates != nil {
+ return *m.IdSequenceUpdates
+ }
+ return 0
+}
+
+type Cost_CommitCost struct {
+ RequestedEntityPuts *int32 `protobuf:"varint,6,opt,name=requested_entity_puts,json=requestedEntityPuts" json:"requested_entity_puts,omitempty"`
+ RequestedEntityDeletes *int32 `protobuf:"varint,7,opt,name=requested_entity_deletes,json=requestedEntityDeletes" json:"requested_entity_deletes,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Cost_CommitCost) Reset() { *m = Cost_CommitCost{} }
+func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) }
+func (*Cost_CommitCost) ProtoMessage() {}
+func (*Cost_CommitCost) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{20, 0}
+}
+func (m *Cost_CommitCost) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Cost_CommitCost.Unmarshal(m, b)
+}
+func (m *Cost_CommitCost) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Cost_CommitCost.Marshal(b, m, deterministic)
+}
+func (dst *Cost_CommitCost) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Cost_CommitCost.Merge(dst, src)
+}
+func (m *Cost_CommitCost) XXX_Size() int {
+ return xxx_messageInfo_Cost_CommitCost.Size(m)
+}
+func (m *Cost_CommitCost) XXX_DiscardUnknown() {
+ xxx_messageInfo_Cost_CommitCost.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Cost_CommitCost proto.InternalMessageInfo
+
+func (m *Cost_CommitCost) GetRequestedEntityPuts() int32 {
+ if m != nil && m.RequestedEntityPuts != nil {
+ return *m.RequestedEntityPuts
+ }
+ return 0
+}
+
+func (m *Cost_CommitCost) GetRequestedEntityDeletes() int32 {
+ if m != nil && m.RequestedEntityDeletes != nil {
+ return *m.RequestedEntityDeletes
+ }
+ return 0
+}
+
+type GetRequest struct {
+ Header *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"`
+ Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
+ FailoverMs *int64 `protobuf:"varint,3,opt,name=failover_ms,json=failoverMs" json:"failover_ms,omitempty"`
+ Strong *bool `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"`
+ AllowDeferred *bool `protobuf:"varint,5,opt,name=allow_deferred,json=allowDeferred,def=0" json:"allow_deferred,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetRequest) Reset() { *m = GetRequest{} }
+func (m *GetRequest) String() string { return proto.CompactTextString(m) }
+func (*GetRequest) ProtoMessage() {}
+func (*GetRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{21}
+}
+func (m *GetRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetRequest.Unmarshal(m, b)
+}
+func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic)
+}
+func (dst *GetRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetRequest.Merge(dst, src)
+}
+func (m *GetRequest) XXX_Size() int {
+ return xxx_messageInfo_GetRequest.Size(m)
+}
+func (m *GetRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetRequest proto.InternalMessageInfo
+
+const Default_GetRequest_AllowDeferred bool = false
+
+func (m *GetRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *GetRequest) GetKey() []*Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *GetRequest) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *GetRequest) GetFailoverMs() int64 {
+ if m != nil && m.FailoverMs != nil {
+ return *m.FailoverMs
+ }
+ return 0
+}
+
+func (m *GetRequest) GetStrong() bool {
+ if m != nil && m.Strong != nil {
+ return *m.Strong
+ }
+ return false
+}
+
+func (m *GetRequest) GetAllowDeferred() bool {
+ if m != nil && m.AllowDeferred != nil {
+ return *m.AllowDeferred
+ }
+ return Default_GetRequest_AllowDeferred
+}
+
+type GetResponse struct {
+ Entity []*GetResponse_Entity `protobuf:"group,1,rep,name=Entity,json=entity" json:"entity,omitempty"`
+ Deferred []*Reference `protobuf:"bytes,5,rep,name=deferred" json:"deferred,omitempty"`
+ InOrder *bool `protobuf:"varint,6,opt,name=in_order,json=inOrder,def=1" json:"in_order,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetResponse) Reset() { *m = GetResponse{} }
+func (m *GetResponse) String() string { return proto.CompactTextString(m) }
+func (*GetResponse) ProtoMessage() {}
+func (*GetResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{22}
+}
+func (m *GetResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetResponse.Unmarshal(m, b)
+}
+func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic)
+}
+func (dst *GetResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetResponse.Merge(dst, src)
+}
+func (m *GetResponse) XXX_Size() int {
+ return xxx_messageInfo_GetResponse.Size(m)
+}
+func (m *GetResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetResponse proto.InternalMessageInfo
+
+const Default_GetResponse_InOrder bool = true
+
+func (m *GetResponse) GetEntity() []*GetResponse_Entity {
+ if m != nil {
+ return m.Entity
+ }
+ return nil
+}
+
+func (m *GetResponse) GetDeferred() []*Reference {
+ if m != nil {
+ return m.Deferred
+ }
+ return nil
+}
+
+func (m *GetResponse) GetInOrder() bool {
+ if m != nil && m.InOrder != nil {
+ return *m.InOrder
+ }
+ return Default_GetResponse_InOrder
+}
+
+type GetResponse_Entity struct {
+ Entity *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"`
+ Key *Reference `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"`
+ Version *int64 `protobuf:"varint,3,opt,name=version" json:"version,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetResponse_Entity) Reset() { *m = GetResponse_Entity{} }
+func (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) }
+func (*GetResponse_Entity) ProtoMessage() {}
+func (*GetResponse_Entity) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{22, 0}
+}
+func (m *GetResponse_Entity) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetResponse_Entity.Unmarshal(m, b)
+}
+func (m *GetResponse_Entity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetResponse_Entity.Marshal(b, m, deterministic)
+}
+func (dst *GetResponse_Entity) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetResponse_Entity.Merge(dst, src)
+}
+func (m *GetResponse_Entity) XXX_Size() int {
+ return xxx_messageInfo_GetResponse_Entity.Size(m)
+}
+func (m *GetResponse_Entity) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetResponse_Entity.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetResponse_Entity proto.InternalMessageInfo
+
+func (m *GetResponse_Entity) GetEntity() *EntityProto {
+ if m != nil {
+ return m.Entity
+ }
+ return nil
+}
+
+func (m *GetResponse_Entity) GetKey() *Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *GetResponse_Entity) GetVersion() int64 {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return 0
+}
+
+type PutRequest struct {
+ Header *InternalHeader `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"`
+ Entity []*EntityProto `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
+ CompositeIndex []*CompositeIndex `protobuf:"bytes,3,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"`
+ Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
+ Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
+ MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"`
+ Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
+ AutoIdPolicy *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,json=autoIdPolicy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *PutRequest) Reset() { *m = PutRequest{} }
+func (m *PutRequest) String() string { return proto.CompactTextString(m) }
+func (*PutRequest) ProtoMessage() {}
+func (*PutRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{23}
+}
+func (m *PutRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_PutRequest.Unmarshal(m, b)
+}
+func (m *PutRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_PutRequest.Marshal(b, m, deterministic)
+}
+func (dst *PutRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PutRequest.Merge(dst, src)
+}
+func (m *PutRequest) XXX_Size() int {
+ return xxx_messageInfo_PutRequest.Size(m)
+}
+func (m *PutRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_PutRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PutRequest proto.InternalMessageInfo
+
+const Default_PutRequest_Trusted bool = false
+const Default_PutRequest_Force bool = false
+const Default_PutRequest_MarkChanges bool = false
+const Default_PutRequest_AutoIdPolicy PutRequest_AutoIdPolicy = PutRequest_CURRENT
+
+func (m *PutRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *PutRequest) GetEntity() []*EntityProto {
+ if m != nil {
+ return m.Entity
+ }
+ return nil
+}
+
+func (m *PutRequest) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *PutRequest) GetCompositeIndex() []*CompositeIndex {
+ if m != nil {
+ return m.CompositeIndex
+ }
+ return nil
+}
+
+func (m *PutRequest) GetTrusted() bool {
+ if m != nil && m.Trusted != nil {
+ return *m.Trusted
+ }
+ return Default_PutRequest_Trusted
+}
+
+func (m *PutRequest) GetForce() bool {
+ if m != nil && m.Force != nil {
+ return *m.Force
+ }
+ return Default_PutRequest_Force
+}
+
+func (m *PutRequest) GetMarkChanges() bool {
+ if m != nil && m.MarkChanges != nil {
+ return *m.MarkChanges
+ }
+ return Default_PutRequest_MarkChanges
+}
+
+func (m *PutRequest) GetSnapshot() []*Snapshot {
+ if m != nil {
+ return m.Snapshot
+ }
+ return nil
+}
+
+func (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy {
+ if m != nil && m.AutoIdPolicy != nil {
+ return *m.AutoIdPolicy
+ }
+ return Default_PutRequest_AutoIdPolicy
+}
+
+type PutResponse struct {
+ Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ Cost *Cost `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"`
+ Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *PutResponse) Reset() { *m = PutResponse{} }
+func (m *PutResponse) String() string { return proto.CompactTextString(m) }
+func (*PutResponse) ProtoMessage() {}
+func (*PutResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{24}
+}
+func (m *PutResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_PutResponse.Unmarshal(m, b)
+}
+func (m *PutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_PutResponse.Marshal(b, m, deterministic)
+}
+func (dst *PutResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PutResponse.Merge(dst, src)
+}
+func (m *PutResponse) XXX_Size() int {
+ return xxx_messageInfo_PutResponse.Size(m)
+}
+func (m *PutResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_PutResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PutResponse proto.InternalMessageInfo
+
+func (m *PutResponse) GetKey() []*Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *PutResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+func (m *PutResponse) GetVersion() []int64 {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type TouchRequest struct {
+ Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
+ Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ CompositeIndex []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"`
+ Force *bool `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"`
+ Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *TouchRequest) Reset() { *m = TouchRequest{} }
+func (m *TouchRequest) String() string { return proto.CompactTextString(m) }
+func (*TouchRequest) ProtoMessage() {}
+func (*TouchRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{25}
+}
+func (m *TouchRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_TouchRequest.Unmarshal(m, b)
+}
+func (m *TouchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_TouchRequest.Marshal(b, m, deterministic)
+}
+func (dst *TouchRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TouchRequest.Merge(dst, src)
+}
+func (m *TouchRequest) XXX_Size() int {
+ return xxx_messageInfo_TouchRequest.Size(m)
+}
+func (m *TouchRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_TouchRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TouchRequest proto.InternalMessageInfo
+
+const Default_TouchRequest_Force bool = false
+
+func (m *TouchRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *TouchRequest) GetKey() []*Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *TouchRequest) GetCompositeIndex() []*CompositeIndex {
+ if m != nil {
+ return m.CompositeIndex
+ }
+ return nil
+}
+
+func (m *TouchRequest) GetForce() bool {
+ if m != nil && m.Force != nil {
+ return *m.Force
+ }
+ return Default_TouchRequest_Force
+}
+
+func (m *TouchRequest) GetSnapshot() []*Snapshot {
+ if m != nil {
+ return m.Snapshot
+ }
+ return nil
+}
+
+type TouchResponse struct {
+ Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *TouchResponse) Reset() { *m = TouchResponse{} }
+func (m *TouchResponse) String() string { return proto.CompactTextString(m) }
+func (*TouchResponse) ProtoMessage() {}
+func (*TouchResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{26}
+}
+func (m *TouchResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_TouchResponse.Unmarshal(m, b)
+}
+func (m *TouchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_TouchResponse.Marshal(b, m, deterministic)
+}
+func (dst *TouchResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TouchResponse.Merge(dst, src)
+}
+func (m *TouchResponse) XXX_Size() int {
+ return xxx_messageInfo_TouchResponse.Size(m)
+}
+func (m *TouchResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_TouchResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TouchResponse proto.InternalMessageInfo
+
+func (m *TouchResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+type DeleteRequest struct {
+ Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
+ Key []*Reference `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"`
+ Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
+ Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
+ MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"`
+ Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DeleteRequest) Reset() { *m = DeleteRequest{} }
+func (m *DeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteRequest) ProtoMessage() {}
+func (*DeleteRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{27}
+}
+func (m *DeleteRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DeleteRequest.Unmarshal(m, b)
+}
+func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic)
+}
+func (dst *DeleteRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeleteRequest.Merge(dst, src)
+}
+func (m *DeleteRequest) XXX_Size() int {
+ return xxx_messageInfo_DeleteRequest.Size(m)
+}
+func (m *DeleteRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeleteRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo
+
+const Default_DeleteRequest_Trusted bool = false
+const Default_DeleteRequest_Force bool = false
+const Default_DeleteRequest_MarkChanges bool = false
+
+func (m *DeleteRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *DeleteRequest) GetKey() []*Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *DeleteRequest) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *DeleteRequest) GetTrusted() bool {
+ if m != nil && m.Trusted != nil {
+ return *m.Trusted
+ }
+ return Default_DeleteRequest_Trusted
+}
+
+func (m *DeleteRequest) GetForce() bool {
+ if m != nil && m.Force != nil {
+ return *m.Force
+ }
+ return Default_DeleteRequest_Force
+}
+
+func (m *DeleteRequest) GetMarkChanges() bool {
+ if m != nil && m.MarkChanges != nil {
+ return *m.MarkChanges
+ }
+ return Default_DeleteRequest_MarkChanges
+}
+
+func (m *DeleteRequest) GetSnapshot() []*Snapshot {
+ if m != nil {
+ return m.Snapshot
+ }
+ return nil
+}
+
+type DeleteResponse struct {
+ Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
+ Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DeleteResponse) Reset() { *m = DeleteResponse{} }
+func (m *DeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteResponse) ProtoMessage() {}
+func (*DeleteResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{28}
+}
+func (m *DeleteResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DeleteResponse.Unmarshal(m, b)
+}
+func (m *DeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DeleteResponse.Marshal(b, m, deterministic)
+}
+func (dst *DeleteResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeleteResponse.Merge(dst, src)
+}
+func (m *DeleteResponse) XXX_Size() int {
+ return xxx_messageInfo_DeleteResponse.Size(m)
+}
+func (m *DeleteResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeleteResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteResponse proto.InternalMessageInfo
+
+func (m *DeleteResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+func (m *DeleteResponse) GetVersion() []int64 {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type NextRequest struct {
+ Header *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"`
+ Cursor *Cursor `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"`
+ Count *int32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
+ Offset *int32 `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"`
+ Compile *bool `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *NextRequest) Reset() { *m = NextRequest{} }
+func (m *NextRequest) String() string { return proto.CompactTextString(m) }
+func (*NextRequest) ProtoMessage() {}
+func (*NextRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{29}
+}
+func (m *NextRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_NextRequest.Unmarshal(m, b)
+}
+func (m *NextRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_NextRequest.Marshal(b, m, deterministic)
+}
+func (dst *NextRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NextRequest.Merge(dst, src)
+}
+func (m *NextRequest) XXX_Size() int {
+ return xxx_messageInfo_NextRequest.Size(m)
+}
+func (m *NextRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_NextRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NextRequest proto.InternalMessageInfo
+
+const Default_NextRequest_Offset int32 = 0
+const Default_NextRequest_Compile bool = false
+
+func (m *NextRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *NextRequest) GetCursor() *Cursor {
+ if m != nil {
+ return m.Cursor
+ }
+ return nil
+}
+
+func (m *NextRequest) GetCount() int32 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *NextRequest) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return Default_NextRequest_Offset
+}
+
+func (m *NextRequest) GetCompile() bool {
+ if m != nil && m.Compile != nil {
+ return *m.Compile
+ }
+ return Default_NextRequest_Compile
+}
+
+type QueryResult struct {
+ Cursor *Cursor `protobuf:"bytes,1,opt,name=cursor" json:"cursor,omitempty"`
+ Result []*EntityProto `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"`
+ SkippedResults *int32 `protobuf:"varint,7,opt,name=skipped_results,json=skippedResults" json:"skipped_results,omitempty"`
+ MoreResults *bool `protobuf:"varint,3,req,name=more_results,json=moreResults" json:"more_results,omitempty"`
+ KeysOnly *bool `protobuf:"varint,4,opt,name=keys_only,json=keysOnly" json:"keys_only,omitempty"`
+ IndexOnly *bool `protobuf:"varint,9,opt,name=index_only,json=indexOnly" json:"index_only,omitempty"`
+ SmallOps *bool `protobuf:"varint,10,opt,name=small_ops,json=smallOps" json:"small_ops,omitempty"`
+ CompiledQuery *CompiledQuery `protobuf:"bytes,5,opt,name=compiled_query,json=compiledQuery" json:"compiled_query,omitempty"`
+ CompiledCursor *CompiledCursor `protobuf:"bytes,6,opt,name=compiled_cursor,json=compiledCursor" json:"compiled_cursor,omitempty"`
+ Index []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"`
+ Version []int64 `protobuf:"varint,11,rep,name=version" json:"version,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *QueryResult) Reset() { *m = QueryResult{} }
+func (m *QueryResult) String() string { return proto.CompactTextString(m) }
+func (*QueryResult) ProtoMessage() {}
+func (*QueryResult) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{30}
+}
+func (m *QueryResult) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_QueryResult.Unmarshal(m, b)
+}
+func (m *QueryResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_QueryResult.Marshal(b, m, deterministic)
+}
+func (dst *QueryResult) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryResult.Merge(dst, src)
+}
+func (m *QueryResult) XXX_Size() int {
+ return xxx_messageInfo_QueryResult.Size(m)
+}
+func (m *QueryResult) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryResult.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryResult proto.InternalMessageInfo
+
+func (m *QueryResult) GetCursor() *Cursor {
+ if m != nil {
+ return m.Cursor
+ }
+ return nil
+}
+
+func (m *QueryResult) GetResult() []*EntityProto {
+ if m != nil {
+ return m.Result
+ }
+ return nil
+}
+
+func (m *QueryResult) GetSkippedResults() int32 {
+ if m != nil && m.SkippedResults != nil {
+ return *m.SkippedResults
+ }
+ return 0
+}
+
+func (m *QueryResult) GetMoreResults() bool {
+ if m != nil && m.MoreResults != nil {
+ return *m.MoreResults
+ }
+ return false
+}
+
+func (m *QueryResult) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return false
+}
+
+func (m *QueryResult) GetIndexOnly() bool {
+ if m != nil && m.IndexOnly != nil {
+ return *m.IndexOnly
+ }
+ return false
+}
+
+func (m *QueryResult) GetSmallOps() bool {
+ if m != nil && m.SmallOps != nil {
+ return *m.SmallOps
+ }
+ return false
+}
+
+func (m *QueryResult) GetCompiledQuery() *CompiledQuery {
+ if m != nil {
+ return m.CompiledQuery
+ }
+ return nil
+}
+
+func (m *QueryResult) GetCompiledCursor() *CompiledCursor {
+ if m != nil {
+ return m.CompiledCursor
+ }
+ return nil
+}
+
+func (m *QueryResult) GetIndex() []*CompositeIndex {
+ if m != nil {
+ return m.Index
+ }
+ return nil
+}
+
+func (m *QueryResult) GetVersion() []int64 {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type AllocateIdsRequest struct {
+ Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
+ ModelKey *Reference `protobuf:"bytes,1,opt,name=model_key,json=modelKey" json:"model_key,omitempty"`
+ Size *int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"`
+ Max *int64 `protobuf:"varint,3,opt,name=max" json:"max,omitempty"`
+ Reserve []*Reference `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} }
+func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) }
+func (*AllocateIdsRequest) ProtoMessage() {}
+func (*AllocateIdsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{31}
+}
+func (m *AllocateIdsRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_AllocateIdsRequest.Unmarshal(m, b)
+}
+func (m *AllocateIdsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_AllocateIdsRequest.Marshal(b, m, deterministic)
+}
+func (dst *AllocateIdsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AllocateIdsRequest.Merge(dst, src)
+}
+func (m *AllocateIdsRequest) XXX_Size() int {
+ return xxx_messageInfo_AllocateIdsRequest.Size(m)
+}
+func (m *AllocateIdsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_AllocateIdsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AllocateIdsRequest proto.InternalMessageInfo
+
+func (m *AllocateIdsRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AllocateIdsRequest) GetModelKey() *Reference {
+ if m != nil {
+ return m.ModelKey
+ }
+ return nil
+}
+
+func (m *AllocateIdsRequest) GetSize() int64 {
+ if m != nil && m.Size != nil {
+ return *m.Size
+ }
+ return 0
+}
+
+func (m *AllocateIdsRequest) GetMax() int64 {
+ if m != nil && m.Max != nil {
+ return *m.Max
+ }
+ return 0
+}
+
+func (m *AllocateIdsRequest) GetReserve() []*Reference {
+ if m != nil {
+ return m.Reserve
+ }
+ return nil
+}
+
+type AllocateIdsResponse struct {
+ Start *int64 `protobuf:"varint,1,req,name=start" json:"start,omitempty"`
+ End *int64 `protobuf:"varint,2,req,name=end" json:"end,omitempty"`
+ Cost *Cost `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} }
+func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) }
+func (*AllocateIdsResponse) ProtoMessage() {}
+func (*AllocateIdsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{32}
+}
+func (m *AllocateIdsResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_AllocateIdsResponse.Unmarshal(m, b)
+}
+func (m *AllocateIdsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_AllocateIdsResponse.Marshal(b, m, deterministic)
+}
+func (dst *AllocateIdsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AllocateIdsResponse.Merge(dst, src)
+}
+func (m *AllocateIdsResponse) XXX_Size() int {
+ return xxx_messageInfo_AllocateIdsResponse.Size(m)
+}
+func (m *AllocateIdsResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_AllocateIdsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AllocateIdsResponse proto.InternalMessageInfo
+
+func (m *AllocateIdsResponse) GetStart() int64 {
+ if m != nil && m.Start != nil {
+ return *m.Start
+ }
+ return 0
+}
+
+func (m *AllocateIdsResponse) GetEnd() int64 {
+ if m != nil && m.End != nil {
+ return *m.End
+ }
+ return 0
+}
+
+func (m *AllocateIdsResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+type CompositeIndices struct {
+ Index []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CompositeIndices) Reset() { *m = CompositeIndices{} }
+func (m *CompositeIndices) String() string { return proto.CompactTextString(m) }
+func (*CompositeIndices) ProtoMessage() {}
+func (*CompositeIndices) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{33}
+}
+func (m *CompositeIndices) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CompositeIndices.Unmarshal(m, b)
+}
+func (m *CompositeIndices) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CompositeIndices.Marshal(b, m, deterministic)
+}
+func (dst *CompositeIndices) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompositeIndices.Merge(dst, src)
+}
+func (m *CompositeIndices) XXX_Size() int {
+ return xxx_messageInfo_CompositeIndices.Size(m)
+}
+func (m *CompositeIndices) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompositeIndices.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompositeIndices proto.InternalMessageInfo
+
+func (m *CompositeIndices) GetIndex() []*CompositeIndex {
+ if m != nil {
+ return m.Index
+ }
+ return nil
+}
+
+type AddActionsRequest struct {
+ Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"`
+ Action []*Action `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AddActionsRequest) Reset() { *m = AddActionsRequest{} }
+func (m *AddActionsRequest) String() string { return proto.CompactTextString(m) }
+func (*AddActionsRequest) ProtoMessage() {}
+func (*AddActionsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{34}
+}
+func (m *AddActionsRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_AddActionsRequest.Unmarshal(m, b)
+}
+func (m *AddActionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_AddActionsRequest.Marshal(b, m, deterministic)
+}
+func (dst *AddActionsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AddActionsRequest.Merge(dst, src)
+}
+func (m *AddActionsRequest) XXX_Size() int {
+ return xxx_messageInfo_AddActionsRequest.Size(m)
+}
+func (m *AddActionsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_AddActionsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AddActionsRequest proto.InternalMessageInfo
+
+func (m *AddActionsRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AddActionsRequest) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *AddActionsRequest) GetAction() []*Action {
+ if m != nil {
+ return m.Action
+ }
+ return nil
+}
+
+type AddActionsResponse struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AddActionsResponse) Reset() { *m = AddActionsResponse{} }
+func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) }
+func (*AddActionsResponse) ProtoMessage() {}
+func (*AddActionsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{35}
+}
+func (m *AddActionsResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_AddActionsResponse.Unmarshal(m, b)
+}
+func (m *AddActionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_AddActionsResponse.Marshal(b, m, deterministic)
+}
+func (dst *AddActionsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AddActionsResponse.Merge(dst, src)
+}
+func (m *AddActionsResponse) XXX_Size() int {
+ return xxx_messageInfo_AddActionsResponse.Size(m)
+}
+func (m *AddActionsResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_AddActionsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AddActionsResponse proto.InternalMessageInfo
+
+type BeginTransactionRequest struct {
+ Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
+ App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
+ AllowMultipleEg *bool `protobuf:"varint,2,opt,name=allow_multiple_eg,json=allowMultipleEg,def=0" json:"allow_multiple_eg,omitempty"`
+ DatabaseId *string `protobuf:"bytes,4,opt,name=database_id,json=databaseId" json:"database_id,omitempty"`
+ Mode *BeginTransactionRequest_TransactionMode `protobuf:"varint,5,opt,name=mode,enum=appengine.BeginTransactionRequest_TransactionMode,def=0" json:"mode,omitempty"`
+ PreviousTransaction *Transaction `protobuf:"bytes,7,opt,name=previous_transaction,json=previousTransaction" json:"previous_transaction,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} }
+func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) }
+func (*BeginTransactionRequest) ProtoMessage() {}
+func (*BeginTransactionRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{36}
+}
+func (m *BeginTransactionRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_BeginTransactionRequest.Unmarshal(m, b)
+}
+func (m *BeginTransactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_BeginTransactionRequest.Marshal(b, m, deterministic)
+}
+func (dst *BeginTransactionRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BeginTransactionRequest.Merge(dst, src)
+}
+func (m *BeginTransactionRequest) XXX_Size() int {
+ return xxx_messageInfo_BeginTransactionRequest.Size(m)
+}
+func (m *BeginTransactionRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_BeginTransactionRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BeginTransactionRequest proto.InternalMessageInfo
+
+const Default_BeginTransactionRequest_AllowMultipleEg bool = false
+const Default_BeginTransactionRequest_Mode BeginTransactionRequest_TransactionMode = BeginTransactionRequest_UNKNOWN
+
+func (m *BeginTransactionRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *BeginTransactionRequest) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *BeginTransactionRequest) GetAllowMultipleEg() bool {
+ if m != nil && m.AllowMultipleEg != nil {
+ return *m.AllowMultipleEg
+ }
+ return Default_BeginTransactionRequest_AllowMultipleEg
+}
+
+func (m *BeginTransactionRequest) GetDatabaseId() string {
+ if m != nil && m.DatabaseId != nil {
+ return *m.DatabaseId
+ }
+ return ""
+}
+
+func (m *BeginTransactionRequest) GetMode() BeginTransactionRequest_TransactionMode {
+ if m != nil && m.Mode != nil {
+ return *m.Mode
+ }
+ return Default_BeginTransactionRequest_Mode
+}
+
+func (m *BeginTransactionRequest) GetPreviousTransaction() *Transaction {
+ if m != nil {
+ return m.PreviousTransaction
+ }
+ return nil
+}
+
+type CommitResponse struct {
+ Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
+ Version []*CommitResponse_Version `protobuf:"group,3,rep,name=Version,json=version" json:"version,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CommitResponse) Reset() { *m = CommitResponse{} }
+func (m *CommitResponse) String() string { return proto.CompactTextString(m) }
+func (*CommitResponse) ProtoMessage() {}
+func (*CommitResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{37}
+}
+func (m *CommitResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CommitResponse.Unmarshal(m, b)
+}
+func (m *CommitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CommitResponse.Marshal(b, m, deterministic)
+}
+func (dst *CommitResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CommitResponse.Merge(dst, src)
+}
+func (m *CommitResponse) XXX_Size() int {
+ return xxx_messageInfo_CommitResponse.Size(m)
+}
+func (m *CommitResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_CommitResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CommitResponse proto.InternalMessageInfo
+
+func (m *CommitResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+func (m *CommitResponse) GetVersion() []*CommitResponse_Version {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type CommitResponse_Version struct {
+ RootEntityKey *Reference `protobuf:"bytes,4,req,name=root_entity_key,json=rootEntityKey" json:"root_entity_key,omitempty"`
+ Version *int64 `protobuf:"varint,5,req,name=version" json:"version,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CommitResponse_Version) Reset() { *m = CommitResponse_Version{} }
+func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) }
+func (*CommitResponse_Version) ProtoMessage() {}
+func (*CommitResponse_Version) Descriptor() ([]byte, []int) {
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{37, 0}
+}
+func (m *CommitResponse_Version) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CommitResponse_Version.Unmarshal(m, b)
+}
+func (m *CommitResponse_Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CommitResponse_Version.Marshal(b, m, deterministic)
+}
+func (dst *CommitResponse_Version) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CommitResponse_Version.Merge(dst, src)
+}
+func (m *CommitResponse_Version) XXX_Size() int {
+ return xxx_messageInfo_CommitResponse_Version.Size(m)
+}
+func (m *CommitResponse_Version) XXX_DiscardUnknown() {
+ xxx_messageInfo_CommitResponse_Version.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CommitResponse_Version proto.InternalMessageInfo
+
+func (m *CommitResponse_Version) GetRootEntityKey() *Reference {
+ if m != nil {
+ return m.RootEntityKey
+ }
+ return nil
+}
+
+func (m *CommitResponse_Version) GetVersion() int64 {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterType((*Action)(nil), "appengine.Action")
+ proto.RegisterType((*PropertyValue)(nil), "appengine.PropertyValue")
+ proto.RegisterType((*PropertyValue_PointValue)(nil), "appengine.PropertyValue.PointValue")
+ proto.RegisterType((*PropertyValue_UserValue)(nil), "appengine.PropertyValue.UserValue")
+ proto.RegisterType((*PropertyValue_ReferenceValue)(nil), "appengine.PropertyValue.ReferenceValue")
+ proto.RegisterType((*PropertyValue_ReferenceValue_PathElement)(nil), "appengine.PropertyValue.ReferenceValue.PathElement")
+ proto.RegisterType((*Property)(nil), "appengine.Property")
+ proto.RegisterType((*Path)(nil), "appengine.Path")
+ proto.RegisterType((*Path_Element)(nil), "appengine.Path.Element")
+ proto.RegisterType((*Reference)(nil), "appengine.Reference")
+ proto.RegisterType((*User)(nil), "appengine.User")
+ proto.RegisterType((*EntityProto)(nil), "appengine.EntityProto")
+ proto.RegisterType((*CompositeProperty)(nil), "appengine.CompositeProperty")
+ proto.RegisterType((*Index)(nil), "appengine.Index")
+ proto.RegisterType((*Index_Property)(nil), "appengine.Index.Property")
+ proto.RegisterType((*CompositeIndex)(nil), "appengine.CompositeIndex")
+ proto.RegisterType((*IndexPostfix)(nil), "appengine.IndexPostfix")
+ proto.RegisterType((*IndexPostfix_IndexValue)(nil), "appengine.IndexPostfix.IndexValue")
+ proto.RegisterType((*IndexPosition)(nil), "appengine.IndexPosition")
+ proto.RegisterType((*Snapshot)(nil), "appengine.Snapshot")
+ proto.RegisterType((*InternalHeader)(nil), "appengine.InternalHeader")
+ proto.RegisterType((*Transaction)(nil), "appengine.Transaction")
+ proto.RegisterType((*Query)(nil), "appengine.Query")
+ proto.RegisterType((*Query_Filter)(nil), "appengine.Query.Filter")
+ proto.RegisterType((*Query_Order)(nil), "appengine.Query.Order")
+ proto.RegisterType((*CompiledQuery)(nil), "appengine.CompiledQuery")
+ proto.RegisterType((*CompiledQuery_PrimaryScan)(nil), "appengine.CompiledQuery.PrimaryScan")
+ proto.RegisterType((*CompiledQuery_MergeJoinScan)(nil), "appengine.CompiledQuery.MergeJoinScan")
+ proto.RegisterType((*CompiledQuery_EntityFilter)(nil), "appengine.CompiledQuery.EntityFilter")
+ proto.RegisterType((*CompiledCursor)(nil), "appengine.CompiledCursor")
+ proto.RegisterType((*CompiledCursor_Position)(nil), "appengine.CompiledCursor.Position")
+ proto.RegisterType((*CompiledCursor_Position_IndexValue)(nil), "appengine.CompiledCursor.Position.IndexValue")
+ proto.RegisterType((*Cursor)(nil), "appengine.Cursor")
+ proto.RegisterType((*Error)(nil), "appengine.Error")
+ proto.RegisterType((*Cost)(nil), "appengine.Cost")
+ proto.RegisterType((*Cost_CommitCost)(nil), "appengine.Cost.CommitCost")
+ proto.RegisterType((*GetRequest)(nil), "appengine.GetRequest")
+ proto.RegisterType((*GetResponse)(nil), "appengine.GetResponse")
+ proto.RegisterType((*GetResponse_Entity)(nil), "appengine.GetResponse.Entity")
+ proto.RegisterType((*PutRequest)(nil), "appengine.PutRequest")
+ proto.RegisterType((*PutResponse)(nil), "appengine.PutResponse")
+ proto.RegisterType((*TouchRequest)(nil), "appengine.TouchRequest")
+ proto.RegisterType((*TouchResponse)(nil), "appengine.TouchResponse")
+ proto.RegisterType((*DeleteRequest)(nil), "appengine.DeleteRequest")
+ proto.RegisterType((*DeleteResponse)(nil), "appengine.DeleteResponse")
+ proto.RegisterType((*NextRequest)(nil), "appengine.NextRequest")
+ proto.RegisterType((*QueryResult)(nil), "appengine.QueryResult")
+ proto.RegisterType((*AllocateIdsRequest)(nil), "appengine.AllocateIdsRequest")
+ proto.RegisterType((*AllocateIdsResponse)(nil), "appengine.AllocateIdsResponse")
+ proto.RegisterType((*CompositeIndices)(nil), "appengine.CompositeIndices")
+ proto.RegisterType((*AddActionsRequest)(nil), "appengine.AddActionsRequest")
+ proto.RegisterType((*AddActionsResponse)(nil), "appengine.AddActionsResponse")
+ proto.RegisterType((*BeginTransactionRequest)(nil), "appengine.BeginTransactionRequest")
+ proto.RegisterType((*CommitResponse)(nil), "appengine.CommitResponse")
+ proto.RegisterType((*CommitResponse_Version)(nil), "appengine.CommitResponse.Version")
+}
+
+func init() {
+ proto.RegisterFile("google.golang.org/appengine/internal/datastore/datastore_v3.proto", fileDescriptor_datastore_v3_83b17b80c34f6179)
+}
+
+var fileDescriptor_datastore_v3_83b17b80c34f6179 = []byte{
+ // 4156 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcd, 0x73, 0xe3, 0x46,
+ 0x76, 0x37, 0xc1, 0xef, 0x47, 0x89, 0x82, 0x5a, 0xf3, 0xc1, 0xa1, 0x3f, 0x46, 0xc6, 0xac, 0x6d,
+ 0xd9, 0x6b, 0x73, 0x6c, 0xf9, 0x23, 0x5b, 0x4a, 0x76, 0x1d, 0x4a, 0xc4, 0x68, 0x90, 0xa1, 0x48,
+ 0xb9, 0x09, 0xd9, 0x9e, 0x5c, 0x50, 0x18, 0xa2, 0x29, 0x21, 0x43, 0x02, 0x30, 0x00, 0x6a, 0x46,
+ 0x93, 0xe4, 0x90, 0x4b, 0x2a, 0x55, 0x5b, 0xa9, 0x1c, 0x92, 0x4a, 0x25, 0xf9, 0x07, 0x72, 0xc8,
+ 0x39, 0x95, 0xaa, 0x54, 0xf6, 0x98, 0x5b, 0x0e, 0x7b, 0xc9, 0x31, 0x95, 0x73, 0xf2, 0x27, 0x24,
+ 0x39, 0xa4, 0xfa, 0x75, 0x03, 0x02, 0x28, 0x4a, 0x23, 0x6d, 0xf6, 0x90, 0x13, 0xd1, 0xef, 0xfd,
+ 0xba, 0xf1, 0xfa, 0xf5, 0xfb, 0x6c, 0x10, 0xba, 0xc7, 0xbe, 0x7f, 0x3c, 0x65, 0x9d, 0x63, 0x7f,
+ 0x6a, 0x7b, 0xc7, 0x1d, 0x3f, 0x3c, 0x7e, 0x68, 0x07, 0x01, 0xf3, 0x8e, 0x5d, 0x8f, 0x3d, 0x74,
+ 0xbd, 0x98, 0x85, 0x9e, 0x3d, 0x7d, 0xe8, 0xd8, 0xb1, 0x1d, 0xc5, 0x7e, 0xc8, 0xce, 0x9f, 0xac,
+ 0xd3, 0xcf, 0x3b, 0x41, 0xe8, 0xc7, 0x3e, 0xa9, 0xa7, 0x13, 0xb4, 0x1a, 0x54, 0xba, 0xe3, 0xd8,
+ 0xf5, 0x3d, 0xed, 0x1f, 0x2b, 0xb0, 0x7a, 0x18, 0xfa, 0x01, 0x0b, 0xe3, 0xb3, 0x6f, 0xed, 0xe9,
+ 0x9c, 0x91, 0x77, 0x00, 0x5c, 0x2f, 0xfe, 0xea, 0x0b, 0x1c, 0xb5, 0x0a, 0x9b, 0x85, 0xad, 0x22,
+ 0xcd, 0x50, 0x88, 0x06, 0x2b, 0xcf, 0x7c, 0x7f, 0xca, 0x6c, 0x4f, 0x20, 0x94, 0xcd, 0xc2, 0x56,
+ 0x8d, 0xe6, 0x68, 0x64, 0x13, 0x1a, 0x51, 0x1c, 0xba, 0xde, 0xb1, 0x80, 0x14, 0x37, 0x0b, 0x5b,
+ 0x75, 0x9a, 0x25, 0x71, 0x84, 0xe3, 0xcf, 0x9f, 0x4d, 0x99, 0x40, 0x94, 0x36, 0x0b, 0x5b, 0x05,
+ 0x9a, 0x25, 0x91, 0x3d, 0x80, 0xc0, 0x77, 0xbd, 0xf8, 0x14, 0x01, 0xe5, 0xcd, 0xc2, 0x16, 0x6c,
+ 0x3f, 0xe8, 0xa4, 0x7b, 0xe8, 0xe4, 0xa4, 0xee, 0x1c, 0x72, 0x28, 0x3e, 0xd2, 0xcc, 0x34, 0xf2,
+ 0xdb, 0x50, 0x9f, 0x47, 0x2c, 0x14, 0x6b, 0xd4, 0x70, 0x0d, 0xed, 0xd2, 0x35, 0x8e, 0x22, 0x16,
+ 0x8a, 0x25, 0xce, 0x27, 0x91, 0x21, 0x34, 0x43, 0x36, 0x61, 0x21, 0xf3, 0xc6, 0x4c, 0x2c, 0xb3,
+ 0x82, 0xcb, 0x7c, 0x70, 0xe9, 0x32, 0x34, 0x81, 0x8b, 0xb5, 0x16, 0xa6, 0xb7, 0xb7, 0x00, 0xce,
+ 0x85, 0x25, 0x2b, 0x50, 0x78, 0xd9, 0xaa, 0x6c, 0x2a, 0x5b, 0x05, 0x5a, 0x78, 0xc9, 0x47, 0x67,
+ 0xad, 0xaa, 0x18, 0x9d, 0xb5, 0xff, 0xa9, 0x00, 0xf5, 0x54, 0x26, 0x72, 0x0b, 0xca, 0x6c, 0x66,
+ 0xbb, 0xd3, 0x56, 0x7d, 0x53, 0xd9, 0xaa, 0x53, 0x31, 0x20, 0xf7, 0xa1, 0x61, 0xcf, 0xe3, 0x13,
+ 0xcb, 0xf1, 0x67, 0xb6, 0xeb, 0xb5, 0x00, 0x79, 0xc0, 0x49, 0x3d, 0xa4, 0x90, 0x36, 0xd4, 0x3c,
+ 0x77, 0xfc, 0xdc, 0xb3, 0x67, 0xac, 0xd5, 0xc0, 0x73, 0x48, 0xc7, 0xe4, 0x13, 0x20, 0x13, 0xe6,
+ 0xb0, 0xd0, 0x8e, 0x99, 0x63, 0xb9, 0x0e, 0xf3, 0x62, 0x37, 0x3e, 0x6b, 0xdd, 0x46, 0xd4, 0x7a,
+ 0xca, 0x31, 0x24, 0x23, 0x0f, 0x0f, 0x42, 0xff, 0xd4, 0x75, 0x58, 0xd8, 0xba, 0xb3, 0x00, 0x3f,
+ 0x94, 0x8c, 0xf6, 0xbf, 0x17, 0xa0, 0x99, 0xd7, 0x05, 0x51, 0xa1, 0x68, 0x07, 0x41, 0x6b, 0x15,
+ 0xa5, 0xe4, 0x8f, 0xe4, 0x6d, 0x00, 0x2e, 0x8a, 0x15, 0x05, 0xf6, 0x98, 0xb5, 0x6e, 0xe1, 0x5a,
+ 0x75, 0x4e, 0x19, 0x71, 0x02, 0x39, 0x82, 0x46, 0x60, 0xc7, 0x27, 0x6c, 0xca, 0x66, 0xcc, 0x8b,
+ 0x5b, 0xcd, 0xcd, 0xe2, 0x16, 0x6c, 0x7f, 0x7e, 0x4d, 0xd5, 0x77, 0x0e, 0xed, 0xf8, 0x44, 0x17,
+ 0x53, 0x69, 0x76, 0x9d, 0xb6, 0x0e, 0x8d, 0x0c, 0x8f, 0x10, 0x28, 0xc5, 0x67, 0x01, 0x6b, 0xad,
+ 0xa1, 0x5c, 0xf8, 0x4c, 0x9a, 0xa0, 0xb8, 0x4e, 0x4b, 0x45, 0xf3, 0x57, 0x5c, 0x87, 0x63, 0x50,
+ 0x87, 0xeb, 0x28, 0x22, 0x3e, 0x6b, 0xff, 0x51, 0x86, 0x5a, 0x22, 0x00, 0xe9, 0x42, 0x75, 0xc6,
+ 0x6c, 0xcf, 0xf5, 0x8e, 0xd1, 0x69, 0x9a, 0xdb, 0x6f, 0x2e, 0x11, 0xb3, 0x73, 0x20, 0x20, 0x3b,
+ 0x30, 0x18, 0x5a, 0x07, 0x7a, 0x77, 0x60, 0x0c, 0xf6, 0x69, 0x32, 0x8f, 0x1f, 0xa6, 0x7c, 0xb4,
+ 0xe6, 0xa1, 0x8b, 0x9e, 0x55, 0xa7, 0x20, 0x49, 0x47, 0xa1, 0x9b, 0x0a, 0x51, 0x14, 0x82, 0xe2,
+ 0x21, 0x76, 0xa0, 0x9c, 0xb8, 0x88, 0xb2, 0xd5, 0xd8, 0x6e, 0x5d, 0xa6, 0x1c, 0x2a, 0x60, 0xdc,
+ 0x20, 0x66, 0xf3, 0x69, 0xec, 0x06, 0x53, 0xee, 0x76, 0xca, 0x56, 0x8d, 0xa6, 0x63, 0xf2, 0x1e,
+ 0x40, 0xc4, 0xec, 0x70, 0x7c, 0x62, 0x3f, 0x9b, 0xb2, 0x56, 0x85, 0x7b, 0xf6, 0x4e, 0x79, 0x62,
+ 0x4f, 0x23, 0x46, 0x33, 0x0c, 0x62, 0xc3, 0xdd, 0x49, 0x1c, 0x59, 0xb1, 0xff, 0x9c, 0x79, 0xee,
+ 0x2b, 0x9b, 0x07, 0x12, 0xcb, 0x0f, 0xf8, 0x0f, 0xfa, 0x58, 0x73, 0xfb, 0xc3, 0x65, 0x5b, 0x7f,
+ 0x14, 0x47, 0x66, 0x66, 0xc6, 0x10, 0x27, 0xd0, 0xdb, 0x93, 0x65, 0x64, 0xd2, 0x86, 0xca, 0xd4,
+ 0x1f, 0xdb, 0x53, 0xd6, 0xaa, 0x73, 0x2d, 0xec, 0x28, 0xcc, 0xa3, 0x92, 0xa2, 0xfd, 0xb3, 0x02,
+ 0x55, 0xa9, 0x47, 0xd2, 0x84, 0x8c, 0x26, 0xd5, 0x37, 0x48, 0x0d, 0x4a, 0xbb, 0xfd, 0xe1, 0xae,
+ 0xda, 0xe4, 0x4f, 0xa6, 0xfe, 0xbd, 0xa9, 0xae, 0x71, 0xcc, 0xee, 0x53, 0x53, 0x1f, 0x99, 0x94,
+ 0x63, 0x54, 0xb2, 0x0e, 0xab, 0x5d, 0x73, 0x78, 0x60, 0xed, 0x75, 0x4d, 0x7d, 0x7f, 0x48, 0x9f,
+ 0xaa, 0x05, 0xb2, 0x0a, 0x75, 0x24, 0xf5, 0x8d, 0xc1, 0x13, 0x55, 0xe1, 0x33, 0x70, 0x68, 0x1a,
+ 0x66, 0x5f, 0x57, 0x8b, 0x44, 0x85, 0x15, 0x31, 0x63, 0x38, 0x30, 0xf5, 0x81, 0xa9, 0x96, 0x52,
+ 0xca, 0xe8, 0xe8, 0xe0, 0xa0, 0x4b, 0x9f, 0xaa, 0x65, 0xb2, 0x06, 0x0d, 0xa4, 0x74, 0x8f, 0xcc,
+ 0xc7, 0x43, 0xaa, 0x56, 0x48, 0x03, 0xaa, 0xfb, 0x3d, 0xeb, 0xbb, 0xc7, 0xfa, 0x40, 0xad, 0x92,
+ 0x15, 0xa8, 0xed, 0xf7, 0x2c, 0xfd, 0xa0, 0x6b, 0xf4, 0xd5, 0x1a, 0x9f, 0xbd, 0xaf, 0x0f, 0xe9,
+ 0x68, 0x64, 0x1d, 0x0e, 0x8d, 0x81, 0xa9, 0xd6, 0x49, 0x1d, 0xca, 0xfb, 0x3d, 0xcb, 0x38, 0x50,
+ 0x81, 0x10, 0x68, 0xee, 0xf7, 0xac, 0xc3, 0xc7, 0xc3, 0x81, 0x3e, 0x38, 0x3a, 0xd8, 0xd5, 0xa9,
+ 0xda, 0x20, 0xb7, 0x40, 0xe5, 0xb4, 0xe1, 0xc8, 0xec, 0xf6, 0xbb, 0xbd, 0x1e, 0xd5, 0x47, 0x23,
+ 0x75, 0x85, 0x4b, 0xbd, 0xdf, 0xb3, 0x68, 0xd7, 0xe4, 0xfb, 0x5a, 0xe5, 0x2f, 0xe4, 0x7b, 0x7f,
+ 0xa2, 0x3f, 0x55, 0xd7, 0xf9, 0x2b, 0xf4, 0x81, 0x69, 0x98, 0x4f, 0xad, 0x43, 0x3a, 0x34, 0x87,
+ 0xea, 0x06, 0x17, 0xd0, 0x18, 0xf4, 0xf4, 0xef, 0xad, 0x6f, 0xbb, 0xfd, 0x23, 0x5d, 0x25, 0xda,
+ 0x8f, 0xe1, 0xf6, 0xd2, 0x33, 0xe1, 0xaa, 0x7b, 0x6c, 0x1e, 0xf4, 0xd5, 0x02, 0x7f, 0xe2, 0x9b,
+ 0x52, 0x15, 0xed, 0x0f, 0xa0, 0xc4, 0x5d, 0x86, 0x7c, 0x06, 0xd5, 0xc4, 0x1b, 0x0b, 0xe8, 0x8d,
+ 0x77, 0xb3, 0x67, 0x6d, 0xc7, 0x27, 0x9d, 0xc4, 0xe3, 0x12, 0x5c, 0xbb, 0x0b, 0xd5, 0x45, 0x4f,
+ 0x53, 0x2e, 0x78, 0x5a, 0xf1, 0x82, 0xa7, 0x95, 0x32, 0x9e, 0x66, 0x43, 0x3d, 0xf5, 0xed, 0x9b,
+ 0x47, 0x91, 0x07, 0x50, 0xe2, 0xde, 0xdf, 0x6a, 0xa2, 0x87, 0xac, 0x2d, 0x08, 0x4c, 0x91, 0xa9,
+ 0xfd, 0x43, 0x01, 0x4a, 0x3c, 0xda, 0x9e, 0x07, 0xda, 0xc2, 0x15, 0x81, 0x56, 0xb9, 0x32, 0xd0,
+ 0x16, 0xaf, 0x15, 0x68, 0x2b, 0x37, 0x0b, 0xb4, 0xd5, 0x4b, 0x02, 0xad, 0xf6, 0x67, 0x45, 0x68,
+ 0xe8, 0x38, 0xf3, 0x10, 0x13, 0xfd, 0xfb, 0x50, 0x7c, 0xce, 0xce, 0x50, 0x3f, 0x8d, 0xed, 0x5b,
+ 0x99, 0xdd, 0xa6, 0x2a, 0xa4, 0x1c, 0x40, 0xb6, 0x61, 0x45, 0xbc, 0xd0, 0x3a, 0x0e, 0xfd, 0x79,
+ 0xd0, 0x52, 0x97, 0xab, 0xa7, 0x21, 0x40, 0xfb, 0x1c, 0x43, 0xde, 0x83, 0xb2, 0xff, 0xc2, 0x63,
+ 0x21, 0xc6, 0xc1, 0x3c, 0x98, 0x2b, 0x8f, 0x0a, 0x2e, 0x79, 0x08, 0xa5, 0xe7, 0xae, 0xe7, 0xe0,
+ 0x19, 0xe6, 0x23, 0x61, 0x46, 0xd0, 0xce, 0x13, 0xd7, 0x73, 0x28, 0x02, 0xc9, 0x3d, 0xa8, 0xf1,
+ 0x5f, 0x8c, 0x7b, 0x65, 0xdc, 0x68, 0x95, 0x8f, 0x79, 0xd0, 0x7b, 0x08, 0xb5, 0x40, 0xc6, 0x10,
+ 0x4c, 0x00, 0x8d, 0xed, 0x8d, 0x25, 0xe1, 0x85, 0xa6, 0x20, 0xf2, 0x15, 0xac, 0x84, 0xf6, 0x0b,
+ 0x2b, 0x9d, 0xb4, 0x76, 0xf9, 0xa4, 0x46, 0x68, 0xbf, 0x48, 0x23, 0x38, 0x81, 0x52, 0x68, 0x7b,
+ 0xcf, 0x5b, 0x64, 0xb3, 0xb0, 0x55, 0xa6, 0xf8, 0xac, 0x7d, 0x01, 0x25, 0x2e, 0x25, 0x8f, 0x08,
+ 0xfb, 0x3d, 0xf4, 0xff, 0xee, 0x9e, 0xa9, 0x16, 0x12, 0x7f, 0xfe, 0x96, 0x47, 0x03, 0x45, 0x72,
+ 0x0f, 0xf4, 0xd1, 0xa8, 0xbb, 0xaf, 0xab, 0x45, 0xad, 0x07, 0xeb, 0x7b, 0xfe, 0x2c, 0xf0, 0x23,
+ 0x37, 0x66, 0xe9, 0xf2, 0xf7, 0xa0, 0xe6, 0x7a, 0x0e, 0x7b, 0x69, 0xb9, 0x0e, 0x9a, 0x56, 0x91,
+ 0x56, 0x71, 0x6c, 0x38, 0xdc, 0xe4, 0x4e, 0x65, 0x31, 0x55, 0xe4, 0x26, 0x87, 0x03, 0xed, 0x2f,
+ 0x15, 0x28, 0x1b, 0x1c, 0xc1, 0x8d, 0x4f, 0x9e, 0x14, 0x7a, 0x8f, 0x30, 0x4c, 0x10, 0x24, 0x93,
+ 0xfb, 0x50, 0x1b, 0x6a, 0xb6, 0x37, 0x66, 0xbc, 0xe2, 0xc3, 0x3c, 0x50, 0xa3, 0xe9, 0x98, 0x7c,
+ 0x99, 0xd1, 0x9f, 0x82, 0x2e, 0x7b, 0x2f, 0xa3, 0x0a, 0x7c, 0xc1, 0x12, 0x2d, 0xb6, 0xff, 0xaa,
+ 0x90, 0x49, 0x6e, 0xcb, 0x12, 0x4f, 0x1f, 0xea, 0x8e, 0x1b, 0x32, 0xac, 0x23, 0xe5, 0x41, 0x3f,
+ 0xb8, 0x74, 0xe1, 0x4e, 0x2f, 0x81, 0xee, 0xd4, 0xbb, 0xa3, 0x3d, 0x7d, 0xd0, 0xe3, 0x99, 0xef,
+ 0x7c, 0x01, 0xed, 0x23, 0xa8, 0xa7, 0x10, 0x0c, 0xc7, 0x09, 0x48, 0x2d, 0x70, 0xf5, 0xf6, 0xf4,
+ 0x74, 0xac, 0x68, 0x7f, 0xad, 0x40, 0x33, 0xd5, 0xaf, 0xd0, 0xd0, 0x6d, 0xa8, 0xd8, 0x41, 0x90,
+ 0xa8, 0xb6, 0x4e, 0xcb, 0x76, 0x10, 0x18, 0x8e, 0x8c, 0x2d, 0x0a, 0x6a, 0x9b, 0xc7, 0x96, 0x4f,
+ 0x01, 0x1c, 0x36, 0x71, 0x3d, 0x17, 0x85, 0x2e, 0xa2, 0xc1, 0xab, 0x8b, 0x42, 0xd3, 0x0c, 0x86,
+ 0x7c, 0x09, 0xe5, 0x28, 0xb6, 0x63, 0x91, 0x2b, 0x9b, 0xdb, 0xf7, 0x33, 0xe0, 0xbc, 0x08, 0x9d,
+ 0x11, 0x87, 0x51, 0x81, 0x26, 0x5f, 0xc1, 0x2d, 0xdf, 0x9b, 0x9e, 0x59, 0xf3, 0x88, 0x59, 0xee,
+ 0xc4, 0x0a, 0xd9, 0x0f, 0x73, 0x37, 0x64, 0x4e, 0x3e, 0xa7, 0xae, 0x73, 0xc8, 0x51, 0xc4, 0x8c,
+ 0x09, 0x95, 0x7c, 0xed, 0x6b, 0x28, 0xe3, 0x3a, 0x7c, 0xcf, 0xdf, 0x51, 0xc3, 0xd4, 0xad, 0xe1,
+ 0xa0, 0xff, 0x54, 0xe8, 0x80, 0xea, 0xdd, 0x9e, 0x85, 0x44, 0x55, 0xe1, 0xc1, 0xbe, 0xa7, 0xf7,
+ 0x75, 0x53, 0xef, 0xa9, 0x45, 0x9e, 0x3d, 0x74, 0x4a, 0x87, 0x54, 0x2d, 0x69, 0xff, 0x53, 0x80,
+ 0x15, 0x94, 0xe7, 0xd0, 0x8f, 0xe2, 0x89, 0xfb, 0x92, 0xec, 0x41, 0x43, 0x98, 0xdd, 0xa9, 0x2c,
+ 0xe8, 0xb9, 0x33, 0x68, 0x8b, 0x7b, 0x96, 0x68, 0x31, 0x90, 0x75, 0xb4, 0x9b, 0x3e, 0x27, 0x21,
+ 0x45, 0x41, 0xa7, 0xbf, 0x22, 0xa4, 0xbc, 0x05, 0x95, 0x67, 0x6c, 0xe2, 0x87, 0x22, 0x04, 0xd6,
+ 0x76, 0x4a, 0x71, 0x38, 0x67, 0x54, 0xd2, 0xda, 0x36, 0xc0, 0xf9, 0xfa, 0xe4, 0x01, 0xac, 0x26,
+ 0xc6, 0x66, 0xa1, 0x71, 0x89, 0x93, 0x5b, 0x49, 0x88, 0x83, 0x5c, 0x75, 0xa3, 0x5c, 0xab, 0xba,
+ 0xd1, 0xbe, 0x86, 0xd5, 0x64, 0x3f, 0xe2, 0xfc, 0x54, 0x21, 0x79, 0x01, 0x63, 0xca, 0x82, 0x8c,
+ 0xca, 0x45, 0x19, 0xb5, 0x9f, 0x41, 0x6d, 0xe4, 0xd9, 0x41, 0x74, 0xe2, 0xc7, 0xdc, 0x7a, 0xe2,
+ 0x48, 0xfa, 0xaa, 0x12, 0x47, 0x9a, 0x06, 0x15, 0x7e, 0x38, 0xf3, 0x88, 0xbb, 0xbf, 0x31, 0xe8,
+ 0xee, 0x99, 0xc6, 0xb7, 0xba, 0xfa, 0x06, 0x01, 0xa8, 0xc8, 0xe7, 0x82, 0xa6, 0x41, 0xd3, 0x90,
+ 0xed, 0xd8, 0x63, 0x66, 0x3b, 0x2c, 0xe4, 0x12, 0xfc, 0xe0, 0x47, 0x89, 0x04, 0x3f, 0xf8, 0x91,
+ 0xf6, 0x17, 0x05, 0x68, 0x98, 0xa1, 0xed, 0x45, 0xb6, 0x30, 0xf7, 0xcf, 0xa0, 0x72, 0x82, 0x58,
+ 0x74, 0xa3, 0xc6, 0x82, 0x7f, 0x66, 0x17, 0xa3, 0x12, 0x48, 0xee, 0x40, 0xe5, 0xc4, 0xf6, 0x9c,
+ 0xa9, 0xd0, 0x5a, 0x85, 0xca, 0x51, 0x92, 0x1b, 0x95, 0xf3, 0xdc, 0xb8, 0x05, 0x2b, 0x33, 0x3b,
+ 0x7c, 0x6e, 0x8d, 0x4f, 0x6c, 0xef, 0x98, 0x45, 0xf2, 0x60, 0xa4, 0x05, 0x36, 0x38, 0x6b, 0x4f,
+ 0x70, 0xb4, 0xbf, 0x5f, 0x81, 0xf2, 0x37, 0x73, 0x16, 0x9e, 0x65, 0x04, 0xfa, 0xe0, 0xba, 0x02,
+ 0xc9, 0x17, 0x17, 0x2e, 0x4b, 0xca, 0x6f, 0x2f, 0x26, 0x65, 0x22, 0x53, 0x84, 0xc8, 0x95, 0x22,
+ 0x0b, 0x7c, 0x9a, 0x09, 0x63, 0xeb, 0x57, 0xd8, 0xda, 0x79, 0x70, 0x7b, 0x08, 0x95, 0x89, 0x3b,
+ 0x8d, 0x51, 0x75, 0x8b, 0xd5, 0x08, 0xee, 0xa5, 0xf3, 0x08, 0xd9, 0x54, 0xc2, 0xc8, 0xbb, 0xb0,
+ 0x22, 0x2a, 0x59, 0xeb, 0x07, 0xce, 0xc6, 0x82, 0x95, 0xf7, 0xa6, 0x48, 0x13, 0xbb, 0xff, 0x18,
+ 0xca, 0x7e, 0xc8, 0x37, 0x5f, 0xc7, 0x25, 0xef, 0x5c, 0x58, 0x72, 0xc8, 0xb9, 0x54, 0x80, 0xc8,
+ 0x87, 0x50, 0x3a, 0x71, 0xbd, 0x18, 0xb3, 0x46, 0x73, 0xfb, 0xf6, 0x05, 0xf0, 0x63, 0xd7, 0x8b,
+ 0x29, 0x42, 0x78, 0x98, 0x1f, 0xfb, 0x73, 0x2f, 0x6e, 0xdd, 0xc5, 0x0c, 0x23, 0x06, 0xe4, 0x1e,
+ 0x54, 0xfc, 0xc9, 0x24, 0x62, 0x31, 0x76, 0x96, 0xe5, 0x9d, 0xc2, 0xa7, 0x54, 0x12, 0xf8, 0x84,
+ 0xa9, 0x3b, 0x73, 0x63, 0xec, 0x43, 0xca, 0x54, 0x0c, 0xc8, 0x2e, 0xac, 0x8d, 0xfd, 0x59, 0xe0,
+ 0x4e, 0x99, 0x63, 0x8d, 0xe7, 0x61, 0xe4, 0x87, 0xad, 0x77, 0x2e, 0x1c, 0xd3, 0x9e, 0x44, 0xec,
+ 0x21, 0x80, 0x36, 0xc7, 0xb9, 0x31, 0x31, 0x60, 0x83, 0x79, 0x8e, 0xb5, 0xb8, 0xce, 0xfd, 0xd7,
+ 0xad, 0xb3, 0xce, 0x3c, 0x27, 0x4f, 0x4a, 0xc4, 0xc1, 0x48, 0x68, 0x61, 0xcc, 0x68, 0x6d, 0x60,
+ 0x90, 0xb9, 0x77, 0x69, 0xac, 0x14, 0xe2, 0x64, 0xc2, 0xf7, 0x6f, 0xc0, 0x2d, 0x19, 0x22, 0xad,
+ 0x80, 0x85, 0x13, 0x36, 0x8e, 0xad, 0x60, 0x6a, 0x7b, 0x58, 0xca, 0xa5, 0xc6, 0x4a, 0x24, 0xe4,
+ 0x50, 0x20, 0x0e, 0xa7, 0xb6, 0x47, 0x34, 0xa8, 0x3f, 0x67, 0x67, 0x91, 0xc5, 0x23, 0x29, 0x76,
+ 0xae, 0x29, 0xba, 0xc6, 0xe9, 0x43, 0x6f, 0x7a, 0x46, 0x7e, 0x02, 0x8d, 0xf8, 0xdc, 0xdb, 0xb0,
+ 0x61, 0x6d, 0xe4, 0x4e, 0x35, 0xe3, 0x8b, 0x34, 0x0b, 0x25, 0xf7, 0xa1, 0x2a, 0x35, 0xd4, 0xba,
+ 0x97, 0x5d, 0x3b, 0xa1, 0xf2, 0xc4, 0x3c, 0xb1, 0xdd, 0xa9, 0x7f, 0xca, 0x42, 0x6b, 0x16, 0xb5,
+ 0xda, 0xe2, 0xb6, 0x24, 0x21, 0x1d, 0x44, 0xdc, 0x4f, 0xa3, 0x38, 0xf4, 0xbd, 0xe3, 0xd6, 0x26,
+ 0xde, 0x93, 0xc8, 0xd1, 0xc5, 0xe0, 0xf7, 0x2e, 0x66, 0xfe, 0x7c, 0xf0, 0xfb, 0x1c, 0xee, 0x60,
+ 0x65, 0x66, 0x3d, 0x3b, 0xb3, 0xf2, 0x68, 0x0d, 0xd1, 0x1b, 0xc8, 0xdd, 0x3d, 0x3b, 0xcc, 0x4e,
+ 0x6a, 0x43, 0xcd, 0x71, 0xa3, 0xd8, 0xf5, 0xc6, 0x71, 0xab, 0x85, 0xef, 0x4c, 0xc7, 0xe4, 0x33,
+ 0xb8, 0x3d, 0x73, 0x3d, 0x2b, 0xb2, 0x27, 0xcc, 0x8a, 0x5d, 0xee, 0x9b, 0x6c, 0xec, 0x7b, 0x4e,
+ 0xd4, 0x7a, 0x80, 0x82, 0x93, 0x99, 0xeb, 0x8d, 0xec, 0x09, 0x33, 0xdd, 0x19, 0x1b, 0x09, 0x0e,
+ 0xf9, 0x08, 0xd6, 0x11, 0x1e, 0xb2, 0x60, 0xea, 0x8e, 0x6d, 0xf1, 0xfa, 0x1f, 0xe1, 0xeb, 0xd7,
+ 0x38, 0x83, 0x0a, 0x3a, 0xbe, 0xfa, 0x63, 0x68, 0x06, 0x2c, 0x8c, 0xdc, 0x28, 0xb6, 0xa4, 0x45,
+ 0xbf, 0x97, 0xd5, 0xda, 0xaa, 0x64, 0x0e, 0x91, 0xd7, 0xfe, 0xcf, 0x02, 0x54, 0x84, 0x73, 0x92,
+ 0x4f, 0x41, 0xf1, 0x03, 0xbc, 0x06, 0x69, 0x6e, 0x6f, 0x5e, 0xe2, 0xc1, 0x9d, 0x61, 0xc0, 0xeb,
+ 0x5e, 0x3f, 0xa4, 0x8a, 0x1f, 0xdc, 0xb8, 0x28, 0xd4, 0xfe, 0x10, 0x6a, 0xc9, 0x02, 0xbc, 0xbc,
+ 0xe8, 0xeb, 0xa3, 0x91, 0x65, 0x3e, 0xee, 0x0e, 0xd4, 0x02, 0xb9, 0x03, 0x24, 0x1d, 0x5a, 0x43,
+ 0x6a, 0xe9, 0xdf, 0x1c, 0x75, 0xfb, 0xaa, 0x82, 0x5d, 0x1a, 0xd5, 0xbb, 0xa6, 0x4e, 0x05, 0xb2,
+ 0x48, 0xee, 0xc1, 0xed, 0x2c, 0xe5, 0x1c, 0x5c, 0xc2, 0x14, 0x8c, 0x8f, 0x65, 0x52, 0x01, 0xc5,
+ 0x18, 0xa8, 0x15, 0x9e, 0x16, 0xf4, 0xef, 0x8d, 0x91, 0x39, 0x52, 0xab, 0xed, 0xbf, 0x29, 0x40,
+ 0x19, 0xc3, 0x06, 0x3f, 0x9f, 0x54, 0x72, 0x71, 0x5d, 0x73, 0x5e, 0xb9, 0x1a, 0xd9, 0x92, 0xaa,
+ 0x81, 0x01, 0x65, 0x73, 0x79, 0xf4, 0xf9, 0xb5, 0xd6, 0x53, 0x3f, 0x85, 0x12, 0x8f, 0x52, 0xbc,
+ 0x43, 0x1c, 0xd2, 0x9e, 0x4e, 0xad, 0x47, 0x06, 0x1d, 0xf1, 0x2a, 0x97, 0x40, 0xb3, 0x3b, 0xd8,
+ 0xd3, 0x47, 0xe6, 0x30, 0xa1, 0xa1, 0x56, 0x1e, 0x19, 0x7d, 0x33, 0x45, 0x15, 0xb5, 0x9f, 0xd7,
+ 0x60, 0x35, 0x89, 0x09, 0x22, 0x82, 0x3e, 0x82, 0x46, 0x10, 0xba, 0x33, 0x3b, 0x3c, 0x8b, 0xc6,
+ 0xb6, 0x87, 0x49, 0x01, 0xb6, 0x7f, 0xb4, 0x24, 0xaa, 0x88, 0x1d, 0x1d, 0x0a, 0xec, 0x68, 0x6c,
+ 0x7b, 0x34, 0x3b, 0x91, 0xf4, 0x61, 0x75, 0xc6, 0xc2, 0x63, 0xf6, 0x7b, 0xbe, 0xeb, 0xe1, 0x4a,
+ 0x55, 0x8c, 0xc8, 0xef, 0x5f, 0xba, 0xd2, 0x01, 0x47, 0xff, 0x8e, 0xef, 0x7a, 0xb8, 0x56, 0x7e,
+ 0x32, 0xf9, 0x04, 0xea, 0xa2, 0x12, 0x72, 0xd8, 0x04, 0x63, 0xc5, 0xb2, 0xda, 0x4f, 0xd4, 0xe8,
+ 0x3d, 0x36, 0xc9, 0xc4, 0x65, 0xb8, 0x34, 0x2e, 0x37, 0xb2, 0x71, 0xf9, 0xcd, 0x6c, 0x2c, 0x5a,
+ 0x11, 0x55, 0x78, 0x1a, 0x84, 0x2e, 0x38, 0x7c, 0x6b, 0x89, 0xc3, 0x77, 0x60, 0x23, 0xf1, 0x55,
+ 0xcb, 0xf5, 0x26, 0xee, 0x4b, 0x2b, 0x72, 0x5f, 0x89, 0xd8, 0x53, 0xa6, 0xeb, 0x09, 0xcb, 0xe0,
+ 0x9c, 0x91, 0xfb, 0x8a, 0x11, 0x23, 0xe9, 0xe0, 0x64, 0x0e, 0x5c, 0xc5, 0xab, 0xc9, 0xf7, 0x2e,
+ 0x55, 0x8f, 0x68, 0xbe, 0x64, 0x46, 0xcc, 0x4d, 0x6d, 0xff, 0x52, 0x81, 0x46, 0xe6, 0x1c, 0x78,
+ 0xf6, 0x16, 0xca, 0x42, 0x61, 0xc5, 0x55, 0x94, 0x50, 0x1f, 0x4a, 0xfa, 0x26, 0xd4, 0xa3, 0xd8,
+ 0x0e, 0x63, 0x8b, 0x17, 0x57, 0xb2, 0xdd, 0x45, 0xc2, 0x13, 0x76, 0x46, 0x3e, 0x80, 0x35, 0xc1,
+ 0x74, 0xbd, 0xf1, 0x74, 0x1e, 0xb9, 0xa7, 0xa2, 0x99, 0xaf, 0xd1, 0x26, 0x92, 0x8d, 0x84, 0x4a,
+ 0xee, 0x42, 0x95, 0x67, 0x21, 0xbe, 0x86, 0x68, 0xfa, 0x2a, 0xcc, 0x73, 0xf8, 0x0a, 0x0f, 0x60,
+ 0x95, 0x33, 0xce, 0xe7, 0x57, 0xc4, 0x2d, 0x33, 0xf3, 0x9c, 0xf3, 0xd9, 0x1d, 0xd8, 0x10, 0xaf,
+ 0x09, 0x44, 0xf1, 0x2a, 0x2b, 0xdc, 0x3b, 0xa8, 0xd8, 0x75, 0x64, 0xc9, 0xb2, 0x56, 0x14, 0x9c,
+ 0x1f, 0x01, 0xcf, 0x5e, 0x0b, 0xe8, 0xbb, 0x22, 0x94, 0x31, 0xcf, 0xc9, 0x61, 0x77, 0xe1, 0x1d,
+ 0x8e, 0x9d, 0x7b, 0x76, 0x10, 0x4c, 0x5d, 0xe6, 0x58, 0x53, 0xff, 0x18, 0x43, 0x66, 0x14, 0xdb,
+ 0xb3, 0xc0, 0x9a, 0x47, 0xad, 0x0d, 0x0c, 0x99, 0x6d, 0xe6, 0x39, 0x47, 0x09, 0xa8, 0xef, 0x1f,
+ 0x9b, 0x09, 0xe4, 0x28, 0x6a, 0xff, 0x3e, 0xac, 0xe6, 0xec, 0x71, 0x41, 0xa7, 0x35, 0x74, 0xfe,
+ 0x8c, 0x4e, 0xdf, 0x85, 0x95, 0x20, 0x64, 0xe7, 0xa2, 0xd5, 0x51, 0xb4, 0x86, 0xa0, 0x09, 0xb1,
+ 0xb6, 0x60, 0x05, 0x79, 0x96, 0x20, 0xe6, 0xf3, 0x63, 0x03, 0x59, 0x87, 0xc8, 0x69, 0xbf, 0x80,
+ 0x95, 0xec, 0x69, 0x93, 0x77, 0x33, 0x69, 0xa1, 0x99, 0xcb, 0x93, 0x69, 0x76, 0x48, 0x2a, 0xb2,
+ 0xf5, 0x4b, 0x2a, 0x32, 0x72, 0x9d, 0x8a, 0x4c, 0xfb, 0x2f, 0xd9, 0x9c, 0x65, 0x2a, 0x84, 0x9f,
+ 0x41, 0x2d, 0x90, 0xf5, 0x38, 0x5a, 0x52, 0xfe, 0x12, 0x3e, 0x0f, 0xee, 0x24, 0x95, 0x3b, 0x4d,
+ 0xe7, 0xb4, 0xff, 0x56, 0x81, 0x5a, 0x5a, 0xd0, 0xe7, 0x2c, 0xef, 0xcd, 0x05, 0xcb, 0x3b, 0x90,
+ 0x1a, 0x16, 0x0a, 0x7c, 0x1b, 0xa3, 0xc5, 0x27, 0xaf, 0x7f, 0xd7, 0xc5, 0xb6, 0xe7, 0x34, 0xdb,
+ 0xf6, 0x6c, 0xbe, 0xae, 0xed, 0xf9, 0xe4, 0xa2, 0xc1, 0xbf, 0x95, 0xe9, 0x2d, 0x16, 0xcc, 0xbe,
+ 0xfd, 0x7d, 0xae, 0x0f, 0xca, 0x26, 0x84, 0x77, 0xc4, 0x7e, 0xd2, 0x84, 0x90, 0xb6, 0x3f, 0xf7,
+ 0xaf, 0xd7, 0xfe, 0x6c, 0x43, 0x45, 0xea, 0xfc, 0x0e, 0x54, 0x64, 0x4d, 0x27, 0x1b, 0x04, 0x31,
+ 0x3a, 0x6f, 0x10, 0x0a, 0xb2, 0x4e, 0xd7, 0x7e, 0xae, 0x40, 0x59, 0x0f, 0x43, 0x3f, 0xd4, 0xfe,
+ 0x48, 0x81, 0x3a, 0x3e, 0xed, 0xf9, 0x0e, 0xe3, 0xd9, 0x60, 0xb7, 0xdb, 0xb3, 0xa8, 0xfe, 0xcd,
+ 0x91, 0x8e, 0xd9, 0xa0, 0x0d, 0x77, 0xf6, 0x86, 0x83, 0xbd, 0x23, 0x4a, 0xf5, 0x81, 0x69, 0x99,
+ 0xb4, 0x3b, 0x18, 0xf1, 0xb6, 0x67, 0x38, 0x50, 0x15, 0x9e, 0x29, 0x8c, 0x81, 0xa9, 0xd3, 0x41,
+ 0xb7, 0x6f, 0x89, 0x56, 0xb4, 0x88, 0x77, 0xb3, 0xba, 0xde, 0xb3, 0xf0, 0xd6, 0x51, 0x2d, 0xf1,
+ 0x96, 0xd5, 0x34, 0x0e, 0xf4, 0xe1, 0x91, 0xa9, 0x96, 0xc9, 0x6d, 0x58, 0x3f, 0xd4, 0xe9, 0x81,
+ 0x31, 0x1a, 0x19, 0xc3, 0x81, 0xd5, 0xd3, 0x07, 0x86, 0xde, 0x53, 0x2b, 0x7c, 0x9d, 0x5d, 0x63,
+ 0xdf, 0xec, 0xee, 0xf6, 0x75, 0xb9, 0x4e, 0x95, 0x6c, 0xc2, 0x5b, 0x7b, 0xc3, 0x83, 0x03, 0xc3,
+ 0x34, 0xf5, 0x9e, 0xb5, 0x7b, 0x64, 0x5a, 0x23, 0xd3, 0xe8, 0xf7, 0xad, 0xee, 0xe1, 0x61, 0xff,
+ 0x29, 0x4f, 0x60, 0x35, 0x72, 0x17, 0x36, 0xf6, 0xba, 0x87, 0xdd, 0x5d, 0xa3, 0x6f, 0x98, 0x4f,
+ 0xad, 0x9e, 0x31, 0xe2, 0xf3, 0x7b, 0x6a, 0x9d, 0x27, 0x6c, 0x93, 0x3e, 0xb5, 0xba, 0x7d, 0x14,
+ 0xcd, 0xd4, 0xad, 0xdd, 0xee, 0xde, 0x13, 0x7d, 0xd0, 0x53, 0x81, 0x0b, 0x30, 0xea, 0x3e, 0xd2,
+ 0x2d, 0x2e, 0x92, 0x65, 0x0e, 0x87, 0xd6, 0xb0, 0xdf, 0x53, 0x1b, 0xda, 0xbf, 0x14, 0xa1, 0xb4,
+ 0xe7, 0x47, 0x31, 0xf7, 0x46, 0xe1, 0xac, 0x2f, 0x42, 0x37, 0x66, 0xa2, 0x7f, 0x2b, 0x53, 0xd1,
+ 0x4b, 0x7f, 0x87, 0x24, 0x1e, 0x50, 0x32, 0x10, 0xeb, 0xd9, 0x19, 0xc7, 0x29, 0x88, 0x5b, 0x3b,
+ 0xc7, 0xed, 0x72, 0xb2, 0x88, 0x68, 0x78, 0x85, 0x23, 0xd7, 0x2b, 0x22, 0x4e, 0x06, 0x61, 0xb9,
+ 0xe0, 0xc7, 0x40, 0xb2, 0x20, 0xb9, 0x62, 0x09, 0x91, 0x6a, 0x06, 0x29, 0x96, 0xdc, 0x01, 0x18,
+ 0xfb, 0xb3, 0x99, 0x1b, 0x8f, 0xfd, 0x28, 0x96, 0x5f, 0xc8, 0xda, 0x39, 0x63, 0x8f, 0x62, 0x6e,
+ 0xf1, 0x33, 0x37, 0xe6, 0x8f, 0x34, 0x83, 0x26, 0x3b, 0x70, 0xcf, 0x0e, 0x82, 0xd0, 0x7f, 0xe9,
+ 0xce, 0xec, 0x98, 0x59, 0xdc, 0x73, 0xed, 0x63, 0x66, 0x39, 0x6c, 0x1a, 0xdb, 0xd8, 0x13, 0x95,
+ 0xe9, 0xdd, 0x0c, 0x60, 0x24, 0xf8, 0x3d, 0xce, 0xe6, 0x71, 0xd7, 0x75, 0xac, 0x88, 0xfd, 0x30,
+ 0xe7, 0x1e, 0x60, 0xcd, 0x03, 0xc7, 0xe6, 0x62, 0xd6, 0x45, 0x96, 0x72, 0x9d, 0x91, 0xe4, 0x1c,
+ 0x09, 0x46, 0xfb, 0x15, 0xc0, 0xb9, 0x14, 0x64, 0x1b, 0x6e, 0xf3, 0x3a, 0x9e, 0x45, 0x31, 0x73,
+ 0x2c, 0xb9, 0xdb, 0x60, 0x1e, 0x47, 0x18, 0xe2, 0xcb, 0x74, 0x23, 0x65, 0xca, 0x9b, 0xc2, 0x79,
+ 0x1c, 0x91, 0x9f, 0x40, 0xeb, 0xc2, 0x1c, 0x87, 0x4d, 0x19, 0x7f, 0x6d, 0x15, 0xa7, 0xdd, 0x59,
+ 0x98, 0xd6, 0x13, 0x5c, 0xed, 0x4f, 0x14, 0x80, 0x7d, 0x16, 0x53, 0xc1, 0xcd, 0x34, 0xb6, 0x95,
+ 0xeb, 0x36, 0xb6, 0xef, 0x27, 0x17, 0x08, 0xc5, 0xab, 0x63, 0xc0, 0x42, 0x97, 0xa1, 0xdc, 0xa4,
+ 0xcb, 0xc8, 0x35, 0x11, 0xc5, 0x2b, 0x9a, 0x88, 0x52, 0xae, 0x89, 0xf8, 0x18, 0x9a, 0xf6, 0x74,
+ 0xea, 0xbf, 0xe0, 0x05, 0x0d, 0x0b, 0x43, 0xe6, 0xa0, 0x11, 0x9c, 0xd7, 0xdb, 0xc8, 0xec, 0x49,
+ 0x9e, 0xf6, 0xe7, 0x0a, 0x34, 0x50, 0x15, 0x51, 0xe0, 0x7b, 0x11, 0x23, 0x5f, 0x42, 0x45, 0x5e,
+ 0x44, 0x8b, 0x8b, 0xfc, 0xb7, 0x33, 0xb2, 0x66, 0x70, 0xb2, 0x68, 0xa0, 0x12, 0xcc, 0x33, 0x42,
+ 0xe6, 0x75, 0x97, 0x2b, 0x25, 0x45, 0x91, 0xfb, 0x50, 0x73, 0x3d, 0x4b, 0xb4, 0xd4, 0x95, 0x4c,
+ 0x58, 0xac, 0xba, 0x1e, 0xd6, 0xb2, 0xed, 0x57, 0x50, 0x11, 0x2f, 0x21, 0x9d, 0x54, 0xa6, 0x8b,
+ 0xfa, 0xcb, 0xdc, 0x1c, 0xa7, 0xc2, 0xc8, 0xc3, 0x29, 0xbd, 0x2e, 0x40, 0xb7, 0xa0, 0x7a, 0xca,
+ 0x9b, 0x0f, 0xbc, 0xf4, 0xe3, 0xea, 0x4d, 0x86, 0xda, 0x1f, 0x97, 0x00, 0x0e, 0xe7, 0x4b, 0x0c,
+ 0xa4, 0x71, 0x5d, 0x03, 0xe9, 0xe4, 0xf4, 0xf8, 0x7a, 0x99, 0x7f, 0x75, 0x43, 0x59, 0xd2, 0x69,
+ 0x17, 0x6f, 0xda, 0x69, 0xdf, 0x87, 0x6a, 0x1c, 0xce, 0xb9, 0xa3, 0x08, 0x63, 0x4a, 0x5b, 0x5a,
+ 0x49, 0x25, 0x6f, 0x42, 0x79, 0xe2, 0x87, 0x63, 0x86, 0x8e, 0x95, 0xb2, 0x05, 0xed, 0xc2, 0x65,
+ 0x52, 0xed, 0xb2, 0xcb, 0x24, 0xde, 0xa0, 0x45, 0xf2, 0x1e, 0x0d, 0x0b, 0x99, 0x7c, 0x83, 0x96,
+ 0x5c, 0xb1, 0xd1, 0x14, 0x44, 0xbe, 0x81, 0xa6, 0x3d, 0x8f, 0x7d, 0xcb, 0xe5, 0x15, 0xda, 0xd4,
+ 0x1d, 0x9f, 0x61, 0xd9, 0xdd, 0xcc, 0x7f, 0xaf, 0x4f, 0x0f, 0xaa, 0xd3, 0x9d, 0xc7, 0xbe, 0xe1,
+ 0x1c, 0x22, 0x72, 0xa7, 0x2a, 0x93, 0x12, 0x5d, 0xb1, 0x33, 0x64, 0xed, 0xc7, 0xb0, 0x92, 0x85,
+ 0xf1, 0x04, 0x24, 0x81, 0xea, 0x1b, 0x3c, 0x3b, 0x8d, 0x78, 0x6a, 0x1b, 0x98, 0x46, 0xb7, 0xaf,
+ 0x16, 0xb4, 0x18, 0x1a, 0xb8, 0xbc, 0xf4, 0x8e, 0xeb, 0xba, 0xfd, 0x03, 0x28, 0x61, 0xf8, 0x55,
+ 0x2e, 0x7c, 0x0f, 0xc1, 0x98, 0x8b, 0xcc, 0xbc, 0xf9, 0x15, 0xb3, 0xe6, 0xf7, 0xdf, 0x05, 0x58,
+ 0x31, 0xfd, 0xf9, 0xf8, 0xe4, 0xa2, 0x01, 0xc2, 0xaf, 0x3b, 0x42, 0x2d, 0x31, 0x1f, 0xe5, 0xa6,
+ 0xe6, 0x93, 0x5a, 0x47, 0x71, 0x89, 0x75, 0xdc, 0xf4, 0xcc, 0xb5, 0x2f, 0x60, 0x55, 0x6e, 0x5e,
+ 0x6a, 0x3d, 0xd1, 0x66, 0xe1, 0x0a, 0x6d, 0x6a, 0xbf, 0x50, 0x60, 0x55, 0xc4, 0xf7, 0xff, 0xbb,
+ 0xd2, 0x2a, 0x37, 0x0c, 0xeb, 0xe5, 0x1b, 0x5d, 0x1e, 0xfd, 0xbf, 0xf4, 0x34, 0x6d, 0x08, 0xcd,
+ 0x44, 0x7d, 0x37, 0x50, 0xfb, 0x15, 0x46, 0xfc, 0x8b, 0x02, 0x34, 0x06, 0xec, 0xe5, 0x92, 0x20,
+ 0x5a, 0xbe, 0xee, 0x71, 0x7c, 0x98, 0x2b, 0x57, 0x1b, 0xdb, 0xeb, 0x59, 0x19, 0xc4, 0xd5, 0x63,
+ 0x52, 0xc1, 0xa6, 0xb7, 0xa8, 0xca, 0xf2, 0x5b, 0xd4, 0xd2, 0x62, 0xb7, 0x9e, 0xb9, 0xc5, 0x2b,
+ 0x2e, 0xbb, 0xc5, 0xd3, 0xfe, 0xad, 0x08, 0x0d, 0x6c, 0x90, 0x29, 0x8b, 0xe6, 0xd3, 0x38, 0x27,
+ 0x4c, 0xe1, 0x6a, 0x61, 0x3a, 0x50, 0x09, 0x71, 0x92, 0x74, 0xa5, 0x4b, 0x83, 0xbf, 0x40, 0x61,
+ 0x6b, 0xfc, 0xdc, 0x0d, 0x02, 0xe6, 0x58, 0x82, 0x92, 0x14, 0x30, 0x4d, 0x49, 0x16, 0x22, 0x44,
+ 0xbc, 0xfc, 0x9c, 0xf9, 0x21, 0x4b, 0x51, 0x45, 0xbc, 0x4f, 0x68, 0x70, 0x5a, 0x02, 0xc9, 0xdd,
+ 0x37, 0x88, 0xca, 0xe0, 0xfc, 0xbe, 0x21, 0xed, 0x35, 0x91, 0x5b, 0x47, 0xae, 0xe8, 0x35, 0x91,
+ 0xcd, 0xbb, 0xa8, 0x99, 0x3d, 0x9d, 0x5a, 0x7e, 0x10, 0xa1, 0xd3, 0xd4, 0x68, 0x0d, 0x09, 0xc3,
+ 0x20, 0x22, 0x5f, 0x43, 0x7a, 0x5d, 0x2c, 0x6f, 0xc9, 0xc5, 0x39, 0xb6, 0x2e, 0xbb, 0x58, 0xa0,
+ 0xab, 0xe3, 0xdc, 0xfd, 0xcf, 0x92, 0x1b, 0xea, 0xca, 0x4d, 0x6f, 0xa8, 0x1f, 0x42, 0x59, 0xc4,
+ 0xa8, 0xda, 0xeb, 0x62, 0x94, 0xc0, 0x65, 0xed, 0xb3, 0x91, 0xb7, 0xcf, 0x5f, 0x16, 0x80, 0x74,
+ 0xa7, 0x53, 0x7f, 0x6c, 0xc7, 0xcc, 0x70, 0xa2, 0x8b, 0x66, 0x7a, 0xed, 0xcf, 0x2e, 0x9f, 0x41,
+ 0x7d, 0xe6, 0x3b, 0x6c, 0x6a, 0x25, 0xdf, 0x94, 0x2e, 0xad, 0x7e, 0x10, 0xc6, 0x5b, 0x52, 0x02,
+ 0x25, 0xbc, 0xc4, 0x51, 0xb0, 0xee, 0xc0, 0x67, 0xde, 0x84, 0xcd, 0xec, 0x97, 0xb2, 0x14, 0xe1,
+ 0x8f, 0xa4, 0x03, 0xd5, 0x90, 0x45, 0x2c, 0x3c, 0x65, 0x57, 0x16, 0x55, 0x09, 0x48, 0x7b, 0x06,
+ 0x1b, 0xb9, 0x1d, 0x49, 0x47, 0xbe, 0x85, 0x5f, 0x2b, 0xc3, 0x58, 0x7e, 0xb4, 0x12, 0x03, 0xfe,
+ 0x3a, 0xe6, 0x25, 0x9f, 0x41, 0xf9, 0x63, 0xea, 0xf0, 0xc5, 0xab, 0xe2, 0xec, 0x1e, 0xa8, 0x59,
+ 0x4d, 0xbb, 0x63, 0x0c, 0x36, 0xf2, 0x54, 0x0a, 0xd7, 0x3b, 0x15, 0xed, 0xef, 0x0a, 0xb0, 0xde,
+ 0x75, 0x1c, 0xf1, 0x77, 0xc3, 0x25, 0xaa, 0x2f, 0x5e, 0x57, 0xf5, 0x0b, 0x81, 0x58, 0x84, 0x89,
+ 0x6b, 0x05, 0xe2, 0x0f, 0xa1, 0x92, 0xd6, 0x5a, 0xc5, 0x05, 0x77, 0x16, 0x72, 0x51, 0x09, 0xd0,
+ 0x6e, 0x01, 0xc9, 0x0a, 0x2b, 0xb4, 0xaa, 0xfd, 0x69, 0x11, 0xee, 0xee, 0xb2, 0x63, 0xd7, 0xcb,
+ 0xbe, 0xe2, 0x57, 0xdf, 0xc9, 0xc5, 0x4f, 0x65, 0x9f, 0xc1, 0xba, 0x28, 0xe4, 0x93, 0x7f, 0x62,
+ 0x59, 0xec, 0x58, 0x7e, 0x9d, 0x94, 0xb1, 0x6a, 0x0d, 0xf9, 0x07, 0x92, 0xad, 0xe3, 0x7f, 0xc5,
+ 0x1c, 0x3b, 0xb6, 0x9f, 0xd9, 0x11, 0xb3, 0x5c, 0x47, 0xfe, 0x59, 0x06, 0x12, 0x92, 0xe1, 0x90,
+ 0x21, 0x94, 0xb8, 0x0d, 0xa2, 0xeb, 0x36, 0xb7, 0xb7, 0x33, 0x62, 0x5d, 0xb2, 0x95, 0xac, 0x02,
+ 0x0f, 0x7c, 0x87, 0xed, 0x54, 0x8f, 0x06, 0x4f, 0x06, 0xc3, 0xef, 0x06, 0x14, 0x17, 0x22, 0x06,
+ 0xdc, 0x0a, 0x42, 0x76, 0xea, 0xfa, 0xf3, 0xc8, 0xca, 0x9e, 0x44, 0xf5, 0xca, 0x94, 0xb8, 0x91,
+ 0xcc, 0xc9, 0x10, 0xb5, 0x9f, 0xc2, 0xda, 0xc2, 0xcb, 0x78, 0x6d, 0x26, 0x5f, 0xa7, 0xbe, 0x41,
+ 0x56, 0xa1, 0x8e, 0x1f, 0xbb, 0x97, 0x7f, 0xfb, 0xd6, 0xfe, 0xb5, 0x80, 0x57, 0x4c, 0x33, 0x37,
+ 0xbe, 0x59, 0x06, 0xfb, 0xcd, 0x7c, 0x06, 0x83, 0xed, 0x77, 0xf3, 0xe6, 0x9b, 0x59, 0xb0, 0xf3,
+ 0xad, 0x00, 0xa6, 0x41, 0xa4, 0x6d, 0x43, 0x55, 0xd2, 0xc8, 0x6f, 0xc1, 0x5a, 0xe8, 0xfb, 0x71,
+ 0xd2, 0x89, 0x8a, 0x0e, 0xe4, 0xf2, 0x3f, 0xdb, 0xac, 0x72, 0xb0, 0x48, 0x06, 0x4f, 0xf2, 0xbd,
+ 0x48, 0x59, 0xfc, 0x0d, 0x44, 0x0e, 0x77, 0x1b, 0xbf, 0x5b, 0x4f, 0xff, 0xb7, 0xfb, 0xbf, 0x01,
+ 0x00, 0x00, 0xff, 0xff, 0x35, 0x9f, 0x30, 0x98, 0xf2, 0x2b, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
new file mode 100755
index 00000000..497b4d9a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
@@ -0,0 +1,551 @@
+syntax = "proto2";
+option go_package = "datastore";
+
+package appengine;
+
+message Action{}
+
+message PropertyValue {
+ optional int64 int64Value = 1;
+ optional bool booleanValue = 2;
+ optional string stringValue = 3;
+ optional double doubleValue = 4;
+
+ optional group PointValue = 5 {
+ required double x = 6;
+ required double y = 7;
+ }
+
+ optional group UserValue = 8 {
+ required string email = 9;
+ required string auth_domain = 10;
+ optional string nickname = 11;
+ optional string federated_identity = 21;
+ optional string federated_provider = 22;
+ }
+
+ optional group ReferenceValue = 12 {
+ required string app = 13;
+ optional string name_space = 20;
+ repeated group PathElement = 14 {
+ required string type = 15;
+ optional int64 id = 16;
+ optional string name = 17;
+ }
+ }
+}
+
+message Property {
+ enum Meaning {
+ NO_MEANING = 0;
+ BLOB = 14;
+ TEXT = 15;
+ BYTESTRING = 16;
+
+ ATOM_CATEGORY = 1;
+ ATOM_LINK = 2;
+ ATOM_TITLE = 3;
+ ATOM_CONTENT = 4;
+ ATOM_SUMMARY = 5;
+ ATOM_AUTHOR = 6;
+
+ GD_WHEN = 7;
+ GD_EMAIL = 8;
+ GEORSS_POINT = 9;
+ GD_IM = 10;
+
+ GD_PHONENUMBER = 11;
+ GD_POSTALADDRESS = 12;
+
+ GD_RATING = 13;
+
+ BLOBKEY = 17;
+ ENTITY_PROTO = 19;
+
+ INDEX_VALUE = 18;
+ };
+
+ optional Meaning meaning = 1 [default = NO_MEANING];
+ optional string meaning_uri = 2;
+
+ required string name = 3;
+
+ required PropertyValue value = 5;
+
+ required bool multiple = 4;
+
+ optional bool searchable = 6 [default=false];
+
+ enum FtsTokenizationOption {
+ HTML = 1;
+ ATOM = 2;
+ }
+
+ optional FtsTokenizationOption fts_tokenization_option = 8;
+
+ optional string locale = 9 [default = "en"];
+}
+
+message Path {
+ repeated group Element = 1 {
+ required string type = 2;
+ optional int64 id = 3;
+ optional string name = 4;
+ }
+}
+
+message Reference {
+ required string app = 13;
+ optional string name_space = 20;
+ required Path path = 14;
+}
+
+message User {
+ required string email = 1;
+ required string auth_domain = 2;
+ optional string nickname = 3;
+ optional string federated_identity = 6;
+ optional string federated_provider = 7;
+}
+
+message EntityProto {
+ required Reference key = 13;
+ required Path entity_group = 16;
+ optional User owner = 17;
+
+ enum Kind {
+ GD_CONTACT = 1;
+ GD_EVENT = 2;
+ GD_MESSAGE = 3;
+ }
+ optional Kind kind = 4;
+ optional string kind_uri = 5;
+
+ repeated Property property = 14;
+ repeated Property raw_property = 15;
+
+ optional int32 rank = 18;
+}
+
+message CompositeProperty {
+ required int64 index_id = 1;
+ repeated string value = 2;
+}
+
+message Index {
+ required string entity_type = 1;
+ required bool ancestor = 5;
+ repeated group Property = 2 {
+ required string name = 3;
+ enum Direction {
+ ASCENDING = 1;
+ DESCENDING = 2;
+ }
+ optional Direction direction = 4 [default = ASCENDING];
+ }
+}
+
+message CompositeIndex {
+ required string app_id = 1;
+ required int64 id = 2;
+ required Index definition = 3;
+
+ enum State {
+ WRITE_ONLY = 1;
+ READ_WRITE = 2;
+ DELETED = 3;
+ ERROR = 4;
+ }
+ required State state = 4;
+
+ optional bool only_use_if_required = 6 [default = false];
+}
+
+message IndexPostfix {
+ message IndexValue {
+ required string property_name = 1;
+ required PropertyValue value = 2;
+ }
+
+ repeated IndexValue index_value = 1;
+
+ optional Reference key = 2;
+
+ optional bool before = 3 [default=true];
+}
+
+message IndexPosition {
+ optional string key = 1;
+
+ optional bool before = 2 [default=true];
+}
+
+message Snapshot {
+ enum Status {
+ INACTIVE = 0;
+ ACTIVE = 1;
+ }
+
+ required int64 ts = 1;
+}
+
+message InternalHeader {
+ optional string qos = 1;
+}
+
+message Transaction {
+ optional InternalHeader header = 4;
+ required fixed64 handle = 1;
+ required string app = 2;
+ optional bool mark_changes = 3 [default = false];
+}
+
+message Query {
+ optional InternalHeader header = 39;
+
+ required string app = 1;
+ optional string name_space = 29;
+
+ optional string kind = 3;
+ optional Reference ancestor = 17;
+
+ repeated group Filter = 4 {
+ enum Operator {
+ LESS_THAN = 1;
+ LESS_THAN_OR_EQUAL = 2;
+ GREATER_THAN = 3;
+ GREATER_THAN_OR_EQUAL = 4;
+ EQUAL = 5;
+ IN = 6;
+ EXISTS = 7;
+ }
+
+ required Operator op = 6;
+ repeated Property property = 14;
+ }
+
+ optional string search_query = 8;
+
+ repeated group Order = 9 {
+ enum Direction {
+ ASCENDING = 1;
+ DESCENDING = 2;
+ }
+
+ required string property = 10;
+ optional Direction direction = 11 [default = ASCENDING];
+ }
+
+ enum Hint {
+ ORDER_FIRST = 1;
+ ANCESTOR_FIRST = 2;
+ FILTER_FIRST = 3;
+ }
+ optional Hint hint = 18;
+
+ optional int32 count = 23;
+
+ optional int32 offset = 12 [default = 0];
+
+ optional int32 limit = 16;
+
+ optional CompiledCursor compiled_cursor = 30;
+ optional CompiledCursor end_compiled_cursor = 31;
+
+ repeated CompositeIndex composite_index = 19;
+
+ optional bool require_perfect_plan = 20 [default = false];
+
+ optional bool keys_only = 21 [default = false];
+
+ optional Transaction transaction = 22;
+
+ optional bool compile = 25 [default = false];
+
+ optional int64 failover_ms = 26;
+
+ optional bool strong = 32;
+
+ repeated string property_name = 33;
+
+ repeated string group_by_property_name = 34;
+
+ optional bool distinct = 24;
+
+ optional int64 min_safe_time_seconds = 35;
+
+ repeated string safe_replica_name = 36;
+
+ optional bool persist_offset = 37 [default=false];
+}
+
+message CompiledQuery {
+ required group PrimaryScan = 1 {
+ optional string index_name = 2;
+
+ optional string start_key = 3;
+ optional bool start_inclusive = 4;
+ optional string end_key = 5;
+ optional bool end_inclusive = 6;
+
+ repeated string start_postfix_value = 22;
+ repeated string end_postfix_value = 23;
+
+ optional int64 end_unapplied_log_timestamp_us = 19;
+ }
+
+ repeated group MergeJoinScan = 7 {
+ required string index_name = 8;
+
+ repeated string prefix_value = 9;
+
+ optional bool value_prefix = 20 [default=false];
+ }
+
+ optional Index index_def = 21;
+
+ optional int32 offset = 10 [default = 0];
+
+ optional int32 limit = 11;
+
+ required bool keys_only = 12;
+
+ repeated string property_name = 24;
+
+ optional int32 distinct_infix_size = 25;
+
+ optional group EntityFilter = 13 {
+ optional bool distinct = 14 [default=false];
+
+ optional string kind = 17;
+ optional Reference ancestor = 18;
+ }
+}
+
+message CompiledCursor {
+ optional group Position = 2 {
+ optional string start_key = 27;
+
+ repeated group IndexValue = 29 {
+ optional string property = 30;
+ required PropertyValue value = 31;
+ }
+
+ optional Reference key = 32;
+
+ optional bool start_inclusive = 28 [default=true];
+ }
+}
+
+message Cursor {
+ required fixed64 cursor = 1;
+
+ optional string app = 2;
+}
+
+message Error {
+ enum ErrorCode {
+ BAD_REQUEST = 1;
+ CONCURRENT_TRANSACTION = 2;
+ INTERNAL_ERROR = 3;
+ NEED_INDEX = 4;
+ TIMEOUT = 5;
+ PERMISSION_DENIED = 6;
+ BIGTABLE_ERROR = 7;
+ COMMITTED_BUT_STILL_APPLYING = 8;
+ CAPABILITY_DISABLED = 9;
+ TRY_ALTERNATE_BACKEND = 10;
+ SAFE_TIME_TOO_OLD = 11;
+ }
+}
+
+message Cost {
+ optional int32 index_writes = 1;
+ optional int32 index_write_bytes = 2;
+ optional int32 entity_writes = 3;
+ optional int32 entity_write_bytes = 4;
+ optional group CommitCost = 5 {
+ optional int32 requested_entity_puts = 6;
+ optional int32 requested_entity_deletes = 7;
+ };
+ optional int32 approximate_storage_delta = 8;
+ optional int32 id_sequence_updates = 9;
+}
+
+message GetRequest {
+ optional InternalHeader header = 6;
+
+ repeated Reference key = 1;
+ optional Transaction transaction = 2;
+
+ optional int64 failover_ms = 3;
+
+ optional bool strong = 4;
+
+ optional bool allow_deferred = 5 [default=false];
+}
+
+message GetResponse {
+ repeated group Entity = 1 {
+ optional EntityProto entity = 2;
+ optional Reference key = 4;
+
+ optional int64 version = 3;
+ }
+
+ repeated Reference deferred = 5;
+
+ optional bool in_order = 6 [default=true];
+}
+
+message PutRequest {
+ optional InternalHeader header = 11;
+
+ repeated EntityProto entity = 1;
+ optional Transaction transaction = 2;
+ repeated CompositeIndex composite_index = 3;
+
+ optional bool trusted = 4 [default = false];
+
+ optional bool force = 7 [default = false];
+
+ optional bool mark_changes = 8 [default = false];
+ repeated Snapshot snapshot = 9;
+
+ enum AutoIdPolicy {
+ CURRENT = 0;
+ SEQUENTIAL = 1;
+ }
+ optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT];
+}
+
+message PutResponse {
+ repeated Reference key = 1;
+ optional Cost cost = 2;
+ repeated int64 version = 3;
+}
+
+message TouchRequest {
+ optional InternalHeader header = 10;
+
+ repeated Reference key = 1;
+ repeated CompositeIndex composite_index = 2;
+ optional bool force = 3 [default = false];
+ repeated Snapshot snapshot = 9;
+}
+
+message TouchResponse {
+ optional Cost cost = 1;
+}
+
+message DeleteRequest {
+ optional InternalHeader header = 10;
+
+ repeated Reference key = 6;
+ optional Transaction transaction = 5;
+
+ optional bool trusted = 4 [default = false];
+
+ optional bool force = 7 [default = false];
+
+ optional bool mark_changes = 8 [default = false];
+ repeated Snapshot snapshot = 9;
+}
+
+message DeleteResponse {
+ optional Cost cost = 1;
+ repeated int64 version = 3;
+}
+
+message NextRequest {
+ optional InternalHeader header = 5;
+
+ required Cursor cursor = 1;
+ optional int32 count = 2;
+
+ optional int32 offset = 4 [default = 0];
+
+ optional bool compile = 3 [default = false];
+}
+
+message QueryResult {
+ optional Cursor cursor = 1;
+
+ repeated EntityProto result = 2;
+
+ optional int32 skipped_results = 7;
+
+ required bool more_results = 3;
+
+ optional bool keys_only = 4;
+
+ optional bool index_only = 9;
+
+ optional bool small_ops = 10;
+
+ optional CompiledQuery compiled_query = 5;
+
+ optional CompiledCursor compiled_cursor = 6;
+
+ repeated CompositeIndex index = 8;
+
+ repeated int64 version = 11;
+}
+
+message AllocateIdsRequest {
+ optional InternalHeader header = 4;
+
+ optional Reference model_key = 1;
+
+ optional int64 size = 2;
+
+ optional int64 max = 3;
+
+ repeated Reference reserve = 5;
+}
+
+message AllocateIdsResponse {
+ required int64 start = 1;
+ required int64 end = 2;
+ optional Cost cost = 3;
+}
+
+message CompositeIndices {
+ repeated CompositeIndex index = 1;
+}
+
+message AddActionsRequest {
+ optional InternalHeader header = 3;
+
+ required Transaction transaction = 1;
+ repeated Action action = 2;
+}
+
+message AddActionsResponse {
+}
+
+message BeginTransactionRequest {
+ optional InternalHeader header = 3;
+
+ required string app = 1;
+ optional bool allow_multiple_eg = 2 [default = false];
+ optional string database_id = 4;
+
+ enum TransactionMode {
+ UNKNOWN = 0;
+ READ_ONLY = 1;
+ READ_WRITE = 2;
+ }
+ optional TransactionMode mode = 5 [default = UNKNOWN];
+
+ optional Transaction previous_transaction = 7;
+}
+
+message CommitResponse {
+ optional Cost cost = 1;
+
+ repeated group Version = 3 {
+ required Reference root_entity_key = 4;
+ required int64 version = 5;
+ }
+}
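The proto2 schema above leans on optional fields with declared defaults and the classic pointer-field Go mapping. Below is a minimal sketch of building one of these messages from Go, assuming the generated package at google.golang.org/appengine/internal/datastore exposes the usual protoc-gen-go proto2 names (a Query type with App/Kind/Limit/KeysOnly pointer fields); treat the exact identifiers as assumptions rather than a documented API.

    // Sketch only: assumes standard protoc-gen-go proto2 output for the
    // datastore_v3.proto schema shown above.
    package main

    import (
    	"fmt"

    	"github.com/golang/protobuf/proto"
    	pb "google.golang.org/appengine/internal/datastore"
    )

    func main() {
    	// Field numbers follow the schema above: app = 1, kind = 3,
    	// limit = 16, keys_only = 21.
    	q := &pb.Query{
    		App:      proto.String("myapp"),
    		Kind:     proto.String("Article"),
    		Limit:    proto.Int32(10),
    		KeysOnly: proto.Bool(true),
    	}
    	buf, err := proto.Marshal(q)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("encoded Query: %d bytes\n", len(buf))
    }

The proto.String/proto.Int32/proto.Bool helpers exist because proto2 optional fields are pointers, which is how field presence is distinguished from a zero value.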
diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go
new file mode 100644
index 00000000..d538701a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity.go
@@ -0,0 +1,14 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import netcontext "golang.org/x/net/context"
+
+// These functions are implementations of the wrapper functions
+// in ../appengine/identity.go. See that file for commentary.
+
+func AppID(c netcontext.Context) string {
+ return appID(FullyQualifiedAppID(c))
+}
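AppID reduces the result of FullyQualifiedAppID to the bare application ID via the package-internal appID helper, which is defined elsewhere in this package and not part of this hunk. The standalone sketch below shows what that stripping presumably looks like for a partitioned ID such as "s~myapp"; the helper name and exact behaviour here are assumptions, not the vendored implementation.

    package main

    import (
    	"fmt"
    	"strings"
    )

    // appIDSketch drops an optional "partition~" prefix from a fully
    // qualified app ID. Illustrative only; the real appID helper lives
    // elsewhere in the internal package and may handle more cases.
    func appIDSketch(fullyQualified string) string {
    	if i := strings.Index(fullyQualified, "~"); i != -1 {
    		return fullyQualified[i+1:]
    	}
    	return fullyQualified
    }

    func main() {
    	fmt.Println(appIDSketch("s~myapp")) // myapp
    	fmt.Println(appIDSketch("myapp"))   // myapp
    }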
diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go
new file mode 100644
index 00000000..b59603f1
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity_classic.go
@@ -0,0 +1,57 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import (
+ "appengine"
+
+ netcontext "golang.org/x/net/context"
+)
+
+func DefaultVersionHostname(ctx netcontext.Context) string {
+ c := fromContext(ctx)
+ if c == nil {
+ panic(errNotAppEngineContext)
+ }
+ return appengine.DefaultVersionHostname(c)
+}
+
+func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() }
+func ServerSoftware() string { return appengine.ServerSoftware() }
+func InstanceID() string { return appengine.InstanceID() }
+func IsDevAppServer() bool { return appengine.IsDevAppServer() }
+
+func RequestID(ctx netcontext.Context) string {
+ c := fromContext(ctx)
+ if c == nil {
+ panic(errNotAppEngineContext)
+ }
+ return appengine.RequestID(c)
+}
+
+func ModuleName(ctx netcontext.Context) string {
+ c := fromContext(ctx)
+ if c == nil {
+ panic(errNotAppEngineContext)
+ }
+ return appengine.ModuleName(c)
+}
+func VersionID(ctx netcontext.Context) string {
+ c := fromContext(ctx)
+ if c == nil {
+ panic(errNotAppEngineContext)
+ }
+ return appengine.VersionID(c)
+}
+
+func fullyQualifiedAppID(ctx netcontext.Context) string {
+ c := fromContext(ctx)
+ if c == nil {
+ panic(errNotAppEngineContext)
+ }
+ return c.FullyQualifiedAppID()
+}
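identity_classic.go is compiled only under the "+build appengine" constraint and delegates straight to the appengine package, while identity_vm.go below is its "!appengine" counterpart. A minimal, self-contained sketch of that build-tag dispatch pattern follows; the file and function names are hypothetical and not from this repository.

    // platform_vm.go
    //
    // Compiled everywhere except the classic runtime; a sibling file
    // tagged "+build appengine" would define the same function for the
    // first-generation runtime.

    // +build !appengine

    package main

    import "fmt"

    func platform() string { return "not the classic App Engine runtime" }

    func main() { fmt.Println(platform()) }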
diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go
new file mode 100644
index 00000000..5d806726
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity_vm.go
@@ -0,0 +1,134 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+ "log"
+ "net/http"
+ "os"
+ "strings"
+
+ netcontext "golang.org/x/net/context"
+)
+
+// These functions are implementations of the wrapper functions
+// in ../appengine/identity.go. See that file for commentary.
+
+const (
+ hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname"
+ hRequestLogId = "X-AppEngine-Request-Log-Id"
+ hDatacenter = "X-AppEngine-Datacenter"
+)
+
+func ctxHeaders(ctx netcontext.Context) http.Header {
+ c := fromContext(ctx)
+ if c == nil {
+ return nil
+ }
+ return c.Request().Header
+}
+
+func DefaultVersionHostname(ctx netcontext.Context) string {
+ return ctxHeaders(ctx).Get(hDefaultVersionHostname)
+}
+
+func RequestID(ctx netcontext.Context) string {
+ return ctxHeaders(ctx).Get(hRequestLogId)
+}
+
+func Datacenter(ctx netcontext.Context) string {
+ if dc := ctxHeaders(ctx).Get(hDatacenter); dc != "" {
+ return dc
+ }
+ // If the header isn't set, read zone from the metadata service.
+ // It has the format projects/[NUMERIC_PROJECT_ID]/zones/[ZONE]
+ zone, err := getMetadata("instance/zone")
+ if err != nil {
+ log.Printf("Datacenter: %v", err)
+ return ""
+ }
+ parts := strings.Split(string(zone), "/")
+ if len(parts) == 0 {
+ return ""
+ }
+ return parts[len(parts)-1]
+}
+
+func ServerSoftware() string {
+ // TODO(dsymonds): Remove fallback when we've verified this.
+ if s := os.Getenv("SERVER_SOFTWARE"); s != "" {
+ return s
+ }
+ if s := os.Getenv("GAE_ENV"); s != "" {
+ return s
+ }
+ return "Google App Engine/1.x.x"
+}
+
+// TODO(dsymonds): Remove the metadata fetches.
+
+func ModuleName(_ netcontext.Context) string {
+ if s := os.Getenv("GAE_MODULE_NAME"); s != "" {
+ return s
+ }
+ if s := os.Getenv("GAE_SERVICE"); s != "" {
+ return s
+ }
+ return string(mustGetMetadata("instance/attributes/gae_backend_name"))
+}
+
+func VersionID(_ netcontext.Context) string {
+ if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" {
+ return s1 + "." + s2
+ }
+ if s1, s2 := os.Getenv("GAE_VERSION"), os.Getenv("GAE_DEPLOYMENT_ID"); s1 != "" && s2 != "" {
+ return s1 + "." + s2
+ }
+ return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version"))
+}
+
+func InstanceID() string {
+ if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" {
+ return s
+ }
+ if s := os.Getenv("GAE_INSTANCE"); s != "" {
+ return s
+ }
+ return string(mustGetMetadata("instance/attributes/gae_backend_instance"))
+}
+
+func partitionlessAppID() string {
+ // gae_project has everything except the partition prefix.
+ if appID := os.Getenv("GAE_LONG_APP_ID"); appID != "" {
+ return appID
+ }
+ if project := os.Getenv("GOOGLE_CLOUD_PROJECT"); project != "" {
+ return project
+ }
+ return string(mustGetMetadata("instance/attributes/gae_project"))
+}
+
+func fullyQualifiedAppID(_ netcontext.Context) string {
+ if s := os.Getenv("GAE_APPLICATION"); s != "" {
+ return s
+ }
+ appID := partitionlessAppID()
+
+ part := os.Getenv("GAE_PARTITION")
+ if part == "" {
+ part = string(mustGetMetadata("instance/attributes/gae_partition"))
+ }
+
+ if part != "" {
+ appID = part + "~" + appID
+ }
+ return appID
+}
+
+func IsDevAppServer() bool {
+ return os.Getenv("RUN_WITH_DEVAPPSERVER") != ""
+}
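fullyQualifiedAppID prefers GAE_APPLICATION outright and otherwise glues an optional partition onto the partitionless ID. The small sketch below mirrors that composition with the environment and metadata lookups replaced by plain parameters so it runs standalone; the function and variable names are illustrative, not from the vendored code.

    package main

    import "fmt"

    func composeAppID(gaeApplication, partition, partitionlessID string) string {
    	if gaeApplication != "" {
    		return gaeApplication // GAE_APPLICATION wins outright
    	}
    	appID := partitionlessID
    	if partition != "" {
    		appID = partition + "~" + appID // e.g. "s" + "~" + "myapp"
    	}
    	return appID
    }

    func main() {
    	fmt.Println(composeAppID("s~myapp", "", ""))   // s~myapp
    	fmt.Println(composeAppID("", "s", "myapp"))    // s~myapp
    	fmt.Println(composeAppID("", "", "myproject")) // myproject
    }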
diff --git a/vendor/google.golang.org/appengine/internal/internal.go b/vendor/google.golang.org/appengine/internal/internal.go
new file mode 100644
index 00000000..051ea398
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/internal.go
@@ -0,0 +1,110 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package internal provides support for package appengine.
+//
+// Programs should not use this package directly. Its API is not stable.
+// Use packages appengine and appengine/* instead.
+package internal
+
+import (
+ "fmt"
+
+ "github.com/golang/protobuf/proto"
+
+ remotepb "google.golang.org/appengine/internal/remote_api"
+)
+
+// errorCodeMaps is a map of service name to the error code map for the service.
+var errorCodeMaps = make(map[string]map[int32]string)
+
+// RegisterErrorCodeMap is called from API implementations to register their
+// error code map. This should only be called from init functions.
+func RegisterErrorCodeMap(service string, m map[int32]string) {
+ errorCodeMaps[service] = m
+}
+
+type timeoutCodeKey struct {
+ service string
+ code int32
+}
+
+// timeoutCodes is the set of service+code pairs that represent timeouts.
+var timeoutCodes = make(map[timeoutCodeKey]bool)
+
+func RegisterTimeoutErrorCode(service string, code int32) {
+ timeoutCodes[timeoutCodeKey{service, code}] = true
+}
+
+// APIError is the type returned by appengine.Context's Call method
+// when an API call fails in an API-specific way. This may be, for instance,
+// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE.
+type APIError struct {
+ Service string
+ Detail string
+ Code int32 // API-specific error code
+}
+
+func (e *APIError) Error() string {
+ if e.Code == 0 {
+ if e.Detail == "" {
+ return "APIError <empty>"
+ }
+ return e.Detail
+ }
+ s := fmt.Sprintf("API error %d", e.Code)
+ if m, ok := errorCodeMaps[e.Service]; ok {
+ s += " (" + e.Service + ": " + m[e.Code] + ")"
+ } else {
+ // Shouldn't happen, but provide a bit more detail if it does.
+ s = e.Service + " " + s
+ }
+ if e.Detail != "" {
+ s += ": " + e.Detail
+ }
+ return s
+}
+
+func (e *APIError) IsTimeout() bool {
+ return timeoutCodes[timeoutCodeKey{e.Service, e.Code}]
+}
+
+// CallError is the type returned by appengine.Context's Call method when an
+// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED.
+type CallError struct {
+ Detail string
+ Code int32
+ // TODO: Remove this if we get a distinguishable error code.
+ Timeout bool
+}
+
+func (e *CallError) Error() string {
+ var msg string
+ switch remotepb.RpcError_ErrorCode(e.Code) {
+ case remotepb.RpcError_UNKNOWN:
+ return e.Detail
+ case remotepb.RpcError_OVER_QUOTA:
+ msg = "Over quota"
+ case remotepb.RpcError_CAPABILITY_DISABLED:
+ msg = "Capability disabled"
+ case remotepb.RpcError_CANCELLED:
+ msg = "Canceled"
+ default:
+ msg = fmt.Sprintf("Call error %d", e.Code)
+ }
+ s := msg + ": " + e.Detail
+ if e.Timeout {
+ s += " (timeout)"
+ }
+ return s
+}
+
+func (e *CallError) IsTimeout() bool {
+ return e.Timeout
+}
+
+// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace.
+// The function should be prepared to be called on the same message more than once; it should only modify the
+// RPC request the first time.
+var NamespaceMods = make(map[string]func(m proto.Message, namespace string))
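Both error types above implement IsTimeout, so a caller receiving an error from an App Engine API call can branch on the concrete type. The sketch below is purely illustrative: this internal package is explicitly not a stable API, and the constructed error is synthetic.

    package main

    import (
    	"fmt"

    	"google.golang.org/appengine/internal"
    )

    // classify shows the shape of error handling against APIError and
    // CallError; real code would receive err from an actual API call.
    func classify(err error) string {
    	switch e := err.(type) {
    	case *internal.APIError:
    		if e.IsTimeout() {
    			return "service timeout: " + e.Service
    		}
    		return "API error: " + e.Error()
    	case *internal.CallError:
    		if e.IsTimeout() {
    			return "RPC timeout: " + e.Detail
    		}
    		return "RPC error: " + e.Error()
    	default:
    		return "unrecognized error"
    	}
    }

    func main() {
    	err := &internal.APIError{Service: "datastore_v3", Code: 5, Detail: "deadline exceeded"}
    	fmt.Println(classify(err))
    }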
diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
new file mode 100644
index 00000000..8545ac4a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
@@ -0,0 +1,1313 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google.golang.org/appengine/internal/log/log_service.proto
+
+package log
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type LogServiceError_ErrorCode int32
+
+const (
+ LogServiceError_OK LogServiceError_ErrorCode = 0
+ LogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1
+ LogServiceError_STORAGE_ERROR LogServiceError_ErrorCode = 2
+)
+
+var LogServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INVALID_REQUEST",
+ 2: "STORAGE_ERROR",
+}
+var LogServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INVALID_REQUEST": 1,
+ "STORAGE_ERROR": 2,
+}
+
+func (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode {
+ p := new(LogServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x LogServiceError_ErrorCode) String() string {
+ return proto.EnumName(LogServiceError_ErrorCode_name, int32(x))
+}
+func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, "LogServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = LogServiceError_ErrorCode(value)
+ return nil
+}
+func (LogServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{0, 0}
+}
+
+type LogServiceError struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LogServiceError) Reset() { *m = LogServiceError{} }
+func (m *LogServiceError) String() string { return proto.CompactTextString(m) }
+func (*LogServiceError) ProtoMessage() {}
+func (*LogServiceError) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{0}
+}
+func (m *LogServiceError) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LogServiceError.Unmarshal(m, b)
+}
+func (m *LogServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LogServiceError.Marshal(b, m, deterministic)
+}
+func (dst *LogServiceError) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LogServiceError.Merge(dst, src)
+}
+func (m *LogServiceError) XXX_Size() int {
+ return xxx_messageInfo_LogServiceError.Size(m)
+}
+func (m *LogServiceError) XXX_DiscardUnknown() {
+ xxx_messageInfo_LogServiceError.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogServiceError proto.InternalMessageInfo
+
+type UserAppLogLine struct {
+ TimestampUsec *int64 `protobuf:"varint,1,req,name=timestamp_usec,json=timestampUsec" json:"timestamp_usec,omitempty"`
+ Level *int64 `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
+ Message *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UserAppLogLine) Reset() { *m = UserAppLogLine{} }
+func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) }
+func (*UserAppLogLine) ProtoMessage() {}
+func (*UserAppLogLine) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{1}
+}
+func (m *UserAppLogLine) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UserAppLogLine.Unmarshal(m, b)
+}
+func (m *UserAppLogLine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UserAppLogLine.Marshal(b, m, deterministic)
+}
+func (dst *UserAppLogLine) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UserAppLogLine.Merge(dst, src)
+}
+func (m *UserAppLogLine) XXX_Size() int {
+ return xxx_messageInfo_UserAppLogLine.Size(m)
+}
+func (m *UserAppLogLine) XXX_DiscardUnknown() {
+ xxx_messageInfo_UserAppLogLine.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UserAppLogLine proto.InternalMessageInfo
+
+func (m *UserAppLogLine) GetTimestampUsec() int64 {
+ if m != nil && m.TimestampUsec != nil {
+ return *m.TimestampUsec
+ }
+ return 0
+}
+
+func (m *UserAppLogLine) GetLevel() int64 {
+ if m != nil && m.Level != nil {
+ return *m.Level
+ }
+ return 0
+}
+
+func (m *UserAppLogLine) GetMessage() string {
+ if m != nil && m.Message != nil {
+ return *m.Message
+ }
+ return ""
+}
+
+type UserAppLogGroup struct {
+ LogLine []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line,json=logLine" json:"log_line,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UserAppLogGroup) Reset() { *m = UserAppLogGroup{} }
+func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) }
+func (*UserAppLogGroup) ProtoMessage() {}
+func (*UserAppLogGroup) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{2}
+}
+func (m *UserAppLogGroup) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UserAppLogGroup.Unmarshal(m, b)
+}
+func (m *UserAppLogGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UserAppLogGroup.Marshal(b, m, deterministic)
+}
+func (dst *UserAppLogGroup) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UserAppLogGroup.Merge(dst, src)
+}
+func (m *UserAppLogGroup) XXX_Size() int {
+ return xxx_messageInfo_UserAppLogGroup.Size(m)
+}
+func (m *UserAppLogGroup) XXX_DiscardUnknown() {
+ xxx_messageInfo_UserAppLogGroup.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UserAppLogGroup proto.InternalMessageInfo
+
+func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine {
+ if m != nil {
+ return m.LogLine
+ }
+ return nil
+}
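The generated types follow the usual proto2 pattern: scalar fields are pointers, construction goes through the proto.String/proto.Int64 helpers, and the getters are nil-safe. A usage sketch follows, assuming the vendored package is importable as shown; it is an internal package, so this is illustration rather than a recommended API.

    package main

    import (
    	"fmt"

    	"github.com/golang/protobuf/proto"
    	logpb "google.golang.org/appengine/internal/log"
    )

    func main() {
    	line := &logpb.UserAppLogLine{
    		TimestampUsec: proto.Int64(1136239445000000),
    		Level:         proto.Int64(3),
    		Message:       proto.String("request handled"),
    	}
    	group := &logpb.UserAppLogGroup{LogLine: []*logpb.UserAppLogLine{line}}

    	buf, err := proto.Marshal(group)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(len(buf), line.GetMessage())

    	// Getters are nil-safe: a zero-value message reports zero values.
    	var empty logpb.UserAppLogLine
    	fmt.Println(empty.GetLevel(), empty.GetMessage()) // 0 ""
    }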
+
+type FlushRequest struct {
+ Logs []byte `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *FlushRequest) Reset() { *m = FlushRequest{} }
+func (m *FlushRequest) String() string { return proto.CompactTextString(m) }
+func (*FlushRequest) ProtoMessage() {}
+func (*FlushRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{3}
+}
+func (m *FlushRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_FlushRequest.Unmarshal(m, b)
+}
+func (m *FlushRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_FlushRequest.Marshal(b, m, deterministic)
+}
+func (dst *FlushRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_FlushRequest.Merge(dst, src)
+}
+func (m *FlushRequest) XXX_Size() int {
+ return xxx_messageInfo_FlushRequest.Size(m)
+}
+func (m *FlushRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_FlushRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FlushRequest proto.InternalMessageInfo
+
+func (m *FlushRequest) GetLogs() []byte {
+ if m != nil {
+ return m.Logs
+ }
+ return nil
+}
+
+type SetStatusRequest struct {
+ Status *string `protobuf:"bytes,1,req,name=status" json:"status,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SetStatusRequest) Reset() { *m = SetStatusRequest{} }
+func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) }
+func (*SetStatusRequest) ProtoMessage() {}
+func (*SetStatusRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{4}
+}
+func (m *SetStatusRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SetStatusRequest.Unmarshal(m, b)
+}
+func (m *SetStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SetStatusRequest.Marshal(b, m, deterministic)
+}
+func (dst *SetStatusRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SetStatusRequest.Merge(dst, src)
+}
+func (m *SetStatusRequest) XXX_Size() int {
+ return xxx_messageInfo_SetStatusRequest.Size(m)
+}
+func (m *SetStatusRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_SetStatusRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SetStatusRequest proto.InternalMessageInfo
+
+func (m *SetStatusRequest) GetStatus() string {
+ if m != nil && m.Status != nil {
+ return *m.Status
+ }
+ return ""
+}
+
+type LogOffset struct {
+ RequestId []byte `protobuf:"bytes,1,opt,name=request_id,json=requestId" json:"request_id,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LogOffset) Reset() { *m = LogOffset{} }
+func (m *LogOffset) String() string { return proto.CompactTextString(m) }
+func (*LogOffset) ProtoMessage() {}
+func (*LogOffset) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{5}
+}
+func (m *LogOffset) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LogOffset.Unmarshal(m, b)
+}
+func (m *LogOffset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LogOffset.Marshal(b, m, deterministic)
+}
+func (dst *LogOffset) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LogOffset.Merge(dst, src)
+}
+func (m *LogOffset) XXX_Size() int {
+ return xxx_messageInfo_LogOffset.Size(m)
+}
+func (m *LogOffset) XXX_DiscardUnknown() {
+ xxx_messageInfo_LogOffset.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogOffset proto.InternalMessageInfo
+
+func (m *LogOffset) GetRequestId() []byte {
+ if m != nil {
+ return m.RequestId
+ }
+ return nil
+}
+
+type LogLine struct {
+ Time *int64 `protobuf:"varint,1,req,name=time" json:"time,omitempty"`
+ Level *int32 `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
+ LogMessage *string `protobuf:"bytes,3,req,name=log_message,json=logMessage" json:"log_message,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LogLine) Reset() { *m = LogLine{} }
+func (m *LogLine) String() string { return proto.CompactTextString(m) }
+func (*LogLine) ProtoMessage() {}
+func (*LogLine) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{6}
+}
+func (m *LogLine) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LogLine.Unmarshal(m, b)
+}
+func (m *LogLine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LogLine.Marshal(b, m, deterministic)
+}
+func (dst *LogLine) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LogLine.Merge(dst, src)
+}
+func (m *LogLine) XXX_Size() int {
+ return xxx_messageInfo_LogLine.Size(m)
+}
+func (m *LogLine) XXX_DiscardUnknown() {
+ xxx_messageInfo_LogLine.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogLine proto.InternalMessageInfo
+
+func (m *LogLine) GetTime() int64 {
+ if m != nil && m.Time != nil {
+ return *m.Time
+ }
+ return 0
+}
+
+func (m *LogLine) GetLevel() int32 {
+ if m != nil && m.Level != nil {
+ return *m.Level
+ }
+ return 0
+}
+
+func (m *LogLine) GetLogMessage() string {
+ if m != nil && m.LogMessage != nil {
+ return *m.LogMessage
+ }
+ return ""
+}
+
+type RequestLog struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
+ ModuleId *string `protobuf:"bytes,37,opt,name=module_id,json=moduleId,def=default" json:"module_id,omitempty"`
+ VersionId *string `protobuf:"bytes,2,req,name=version_id,json=versionId" json:"version_id,omitempty"`
+ RequestId []byte `protobuf:"bytes,3,req,name=request_id,json=requestId" json:"request_id,omitempty"`
+ Offset *LogOffset `protobuf:"bytes,35,opt,name=offset" json:"offset,omitempty"`
+ Ip *string `protobuf:"bytes,4,req,name=ip" json:"ip,omitempty"`
+ Nickname *string `protobuf:"bytes,5,opt,name=nickname" json:"nickname,omitempty"`
+ StartTime *int64 `protobuf:"varint,6,req,name=start_time,json=startTime" json:"start_time,omitempty"`
+ EndTime *int64 `protobuf:"varint,7,req,name=end_time,json=endTime" json:"end_time,omitempty"`
+ Latency *int64 `protobuf:"varint,8,req,name=latency" json:"latency,omitempty"`
+ Mcycles *int64 `protobuf:"varint,9,req,name=mcycles" json:"mcycles,omitempty"`
+ Method *string `protobuf:"bytes,10,req,name=method" json:"method,omitempty"`
+ Resource *string `protobuf:"bytes,11,req,name=resource" json:"resource,omitempty"`
+ HttpVersion *string `protobuf:"bytes,12,req,name=http_version,json=httpVersion" json:"http_version,omitempty"`
+ Status *int32 `protobuf:"varint,13,req,name=status" json:"status,omitempty"`
+ ResponseSize *int64 `protobuf:"varint,14,req,name=response_size,json=responseSize" json:"response_size,omitempty"`
+ Referrer *string `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"`
+ UserAgent *string `protobuf:"bytes,16,opt,name=user_agent,json=userAgent" json:"user_agent,omitempty"`
+ UrlMapEntry *string `protobuf:"bytes,17,req,name=url_map_entry,json=urlMapEntry" json:"url_map_entry,omitempty"`
+ Combined *string `protobuf:"bytes,18,req,name=combined" json:"combined,omitempty"`
+ ApiMcycles *int64 `protobuf:"varint,19,opt,name=api_mcycles,json=apiMcycles" json:"api_mcycles,omitempty"`
+ Host *string `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"`
+ Cost *float64 `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"`
+ TaskQueueName *string `protobuf:"bytes,22,opt,name=task_queue_name,json=taskQueueName" json:"task_queue_name,omitempty"`
+ TaskName *string `protobuf:"bytes,23,opt,name=task_name,json=taskName" json:"task_name,omitempty"`
+ WasLoadingRequest *bool `protobuf:"varint,24,opt,name=was_loading_request,json=wasLoadingRequest" json:"was_loading_request,omitempty"`
+ PendingTime *int64 `protobuf:"varint,25,opt,name=pending_time,json=pendingTime" json:"pending_time,omitempty"`
+ ReplicaIndex *int32 `protobuf:"varint,26,opt,name=replica_index,json=replicaIndex,def=-1" json:"replica_index,omitempty"`
+ Finished *bool `protobuf:"varint,27,opt,name=finished,def=1" json:"finished,omitempty"`
+ CloneKey []byte `protobuf:"bytes,28,opt,name=clone_key,json=cloneKey" json:"clone_key,omitempty"`
+ Line []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"`
+ LinesIncomplete *bool `protobuf:"varint,36,opt,name=lines_incomplete,json=linesIncomplete" json:"lines_incomplete,omitempty"`
+ AppEngineRelease []byte `protobuf:"bytes,38,opt,name=app_engine_release,json=appEngineRelease" json:"app_engine_release,omitempty"`
+ ExitReason *int32 `protobuf:"varint,30,opt,name=exit_reason,json=exitReason" json:"exit_reason,omitempty"`
+ WasThrottledForTime *bool `protobuf:"varint,31,opt,name=was_throttled_for_time,json=wasThrottledForTime" json:"was_throttled_for_time,omitempty"`
+ WasThrottledForRequests *bool `protobuf:"varint,32,opt,name=was_throttled_for_requests,json=wasThrottledForRequests" json:"was_throttled_for_requests,omitempty"`
+ ThrottledTime *int64 `protobuf:"varint,33,opt,name=throttled_time,json=throttledTime" json:"throttled_time,omitempty"`
+ ServerName []byte `protobuf:"bytes,34,opt,name=server_name,json=serverName" json:"server_name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *RequestLog) Reset() { *m = RequestLog{} }
+func (m *RequestLog) String() string { return proto.CompactTextString(m) }
+func (*RequestLog) ProtoMessage() {}
+func (*RequestLog) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{7}
+}
+func (m *RequestLog) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_RequestLog.Unmarshal(m, b)
+}
+func (m *RequestLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_RequestLog.Marshal(b, m, deterministic)
+}
+func (dst *RequestLog) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RequestLog.Merge(dst, src)
+}
+func (m *RequestLog) XXX_Size() int {
+ return xxx_messageInfo_RequestLog.Size(m)
+}
+func (m *RequestLog) XXX_DiscardUnknown() {
+ xxx_messageInfo_RequestLog.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RequestLog proto.InternalMessageInfo
+
+const Default_RequestLog_ModuleId string = "default"
+const Default_RequestLog_ReplicaIndex int32 = -1
+const Default_RequestLog_Finished bool = true
+
+func (m *RequestLog) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *RequestLog) GetModuleId() string {
+ if m != nil && m.ModuleId != nil {
+ return *m.ModuleId
+ }
+ return Default_RequestLog_ModuleId
+}
+
+func (m *RequestLog) GetVersionId() string {
+ if m != nil && m.VersionId != nil {
+ return *m.VersionId
+ }
+ return ""
+}
+
+func (m *RequestLog) GetRequestId() []byte {
+ if m != nil {
+ return m.RequestId
+ }
+ return nil
+}
+
+func (m *RequestLog) GetOffset() *LogOffset {
+ if m != nil {
+ return m.Offset
+ }
+ return nil
+}
+
+func (m *RequestLog) GetIp() string {
+ if m != nil && m.Ip != nil {
+ return *m.Ip
+ }
+ return ""
+}
+
+func (m *RequestLog) GetNickname() string {
+ if m != nil && m.Nickname != nil {
+ return *m.Nickname
+ }
+ return ""
+}
+
+func (m *RequestLog) GetStartTime() int64 {
+ if m != nil && m.StartTime != nil {
+ return *m.StartTime
+ }
+ return 0
+}
+
+func (m *RequestLog) GetEndTime() int64 {
+ if m != nil && m.EndTime != nil {
+ return *m.EndTime
+ }
+ return 0
+}
+
+func (m *RequestLog) GetLatency() int64 {
+ if m != nil && m.Latency != nil {
+ return *m.Latency
+ }
+ return 0
+}
+
+func (m *RequestLog) GetMcycles() int64 {
+ if m != nil && m.Mcycles != nil {
+ return *m.Mcycles
+ }
+ return 0
+}
+
+func (m *RequestLog) GetMethod() string {
+ if m != nil && m.Method != nil {
+ return *m.Method
+ }
+ return ""
+}
+
+func (m *RequestLog) GetResource() string {
+ if m != nil && m.Resource != nil {
+ return *m.Resource
+ }
+ return ""
+}
+
+func (m *RequestLog) GetHttpVersion() string {
+ if m != nil && m.HttpVersion != nil {
+ return *m.HttpVersion
+ }
+ return ""
+}
+
+func (m *RequestLog) GetStatus() int32 {
+ if m != nil && m.Status != nil {
+ return *m.Status
+ }
+ return 0
+}
+
+func (m *RequestLog) GetResponseSize() int64 {
+ if m != nil && m.ResponseSize != nil {
+ return *m.ResponseSize
+ }
+ return 0
+}
+
+func (m *RequestLog) GetReferrer() string {
+ if m != nil && m.Referrer != nil {
+ return *m.Referrer
+ }
+ return ""
+}
+
+func (m *RequestLog) GetUserAgent() string {
+ if m != nil && m.UserAgent != nil {
+ return *m.UserAgent
+ }
+ return ""
+}
+
+func (m *RequestLog) GetUrlMapEntry() string {
+ if m != nil && m.UrlMapEntry != nil {
+ return *m.UrlMapEntry
+ }
+ return ""
+}
+
+func (m *RequestLog) GetCombined() string {
+ if m != nil && m.Combined != nil {
+ return *m.Combined
+ }
+ return ""
+}
+
+func (m *RequestLog) GetApiMcycles() int64 {
+ if m != nil && m.ApiMcycles != nil {
+ return *m.ApiMcycles
+ }
+ return 0
+}
+
+func (m *RequestLog) GetHost() string {
+ if m != nil && m.Host != nil {
+ return *m.Host
+ }
+ return ""
+}
+
+func (m *RequestLog) GetCost() float64 {
+ if m != nil && m.Cost != nil {
+ return *m.Cost
+ }
+ return 0
+}
+
+func (m *RequestLog) GetTaskQueueName() string {
+ if m != nil && m.TaskQueueName != nil {
+ return *m.TaskQueueName
+ }
+ return ""
+}
+
+func (m *RequestLog) GetTaskName() string {
+ if m != nil && m.TaskName != nil {
+ return *m.TaskName
+ }
+ return ""
+}
+
+func (m *RequestLog) GetWasLoadingRequest() bool {
+ if m != nil && m.WasLoadingRequest != nil {
+ return *m.WasLoadingRequest
+ }
+ return false
+}
+
+func (m *RequestLog) GetPendingTime() int64 {
+ if m != nil && m.PendingTime != nil {
+ return *m.PendingTime
+ }
+ return 0
+}
+
+func (m *RequestLog) GetReplicaIndex() int32 {
+ if m != nil && m.ReplicaIndex != nil {
+ return *m.ReplicaIndex
+ }
+ return Default_RequestLog_ReplicaIndex
+}
+
+func (m *RequestLog) GetFinished() bool {
+ if m != nil && m.Finished != nil {
+ return *m.Finished
+ }
+ return Default_RequestLog_Finished
+}
+
+func (m *RequestLog) GetCloneKey() []byte {
+ if m != nil {
+ return m.CloneKey
+ }
+ return nil
+}
+
+func (m *RequestLog) GetLine() []*LogLine {
+ if m != nil {
+ return m.Line
+ }
+ return nil
+}
+
+func (m *RequestLog) GetLinesIncomplete() bool {
+ if m != nil && m.LinesIncomplete != nil {
+ return *m.LinesIncomplete
+ }
+ return false
+}
+
+func (m *RequestLog) GetAppEngineRelease() []byte {
+ if m != nil {
+ return m.AppEngineRelease
+ }
+ return nil
+}
+
+func (m *RequestLog) GetExitReason() int32 {
+ if m != nil && m.ExitReason != nil {
+ return *m.ExitReason
+ }
+ return 0
+}
+
+func (m *RequestLog) GetWasThrottledForTime() bool {
+ if m != nil && m.WasThrottledForTime != nil {
+ return *m.WasThrottledForTime
+ }
+ return false
+}
+
+func (m *RequestLog) GetWasThrottledForRequests() bool {
+ if m != nil && m.WasThrottledForRequests != nil {
+ return *m.WasThrottledForRequests
+ }
+ return false
+}
+
+func (m *RequestLog) GetThrottledTime() int64 {
+ if m != nil && m.ThrottledTime != nil {
+ return *m.ThrottledTime
+ }
+ return 0
+}
+
+func (m *RequestLog) GetServerName() []byte {
+ if m != nil {
+ return m.ServerName
+ }
+ return nil
+}
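RequestLog carries proto2 field defaults (module_id "default", replica_index -1, finished true), and those defaults surface through the getters of an otherwise empty message. A quick sketch, again treating the internal import as illustration only:

    package main

    import (
    	"fmt"

    	logpb "google.golang.org/appengine/internal/log"
    )

    func main() {
    	var r logpb.RequestLog
    	fmt.Println(r.GetModuleId())     // "default"
    	fmt.Println(r.GetReplicaIndex()) // -1
    	fmt.Println(r.GetFinished())     // true
    	fmt.Println(r.GetStatus())       // 0 (no declared default)
    }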
+
+type LogModuleVersion struct {
+ ModuleId *string `protobuf:"bytes,1,opt,name=module_id,json=moduleId,def=default" json:"module_id,omitempty"`
+ VersionId *string `protobuf:"bytes,2,opt,name=version_id,json=versionId" json:"version_id,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LogModuleVersion) Reset() { *m = LogModuleVersion{} }
+func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) }
+func (*LogModuleVersion) ProtoMessage() {}
+func (*LogModuleVersion) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{8}
+}
+func (m *LogModuleVersion) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LogModuleVersion.Unmarshal(m, b)
+}
+func (m *LogModuleVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LogModuleVersion.Marshal(b, m, deterministic)
+}
+func (dst *LogModuleVersion) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LogModuleVersion.Merge(dst, src)
+}
+func (m *LogModuleVersion) XXX_Size() int {
+ return xxx_messageInfo_LogModuleVersion.Size(m)
+}
+func (m *LogModuleVersion) XXX_DiscardUnknown() {
+ xxx_messageInfo_LogModuleVersion.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogModuleVersion proto.InternalMessageInfo
+
+const Default_LogModuleVersion_ModuleId string = "default"
+
+func (m *LogModuleVersion) GetModuleId() string {
+ if m != nil && m.ModuleId != nil {
+ return *m.ModuleId
+ }
+ return Default_LogModuleVersion_ModuleId
+}
+
+func (m *LogModuleVersion) GetVersionId() string {
+ if m != nil && m.VersionId != nil {
+ return *m.VersionId
+ }
+ return ""
+}
+
+type LogReadRequest struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
+ VersionId []string `protobuf:"bytes,2,rep,name=version_id,json=versionId" json:"version_id,omitempty"`
+ ModuleVersion []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version,json=moduleVersion" json:"module_version,omitempty"`
+ StartTime *int64 `protobuf:"varint,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
+ EndTime *int64 `protobuf:"varint,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"`
+ Offset *LogOffset `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"`
+ RequestId [][]byte `protobuf:"bytes,6,rep,name=request_id,json=requestId" json:"request_id,omitempty"`
+ MinimumLogLevel *int32 `protobuf:"varint,7,opt,name=minimum_log_level,json=minimumLogLevel" json:"minimum_log_level,omitempty"`
+ IncludeIncomplete *bool `protobuf:"varint,8,opt,name=include_incomplete,json=includeIncomplete" json:"include_incomplete,omitempty"`
+ Count *int64 `protobuf:"varint,9,opt,name=count" json:"count,omitempty"`
+ CombinedLogRegex *string `protobuf:"bytes,14,opt,name=combined_log_regex,json=combinedLogRegex" json:"combined_log_regex,omitempty"`
+ HostRegex *string `protobuf:"bytes,15,opt,name=host_regex,json=hostRegex" json:"host_regex,omitempty"`
+ ReplicaIndex *int32 `protobuf:"varint,16,opt,name=replica_index,json=replicaIndex" json:"replica_index,omitempty"`
+ IncludeAppLogs *bool `protobuf:"varint,10,opt,name=include_app_logs,json=includeAppLogs" json:"include_app_logs,omitempty"`
+ AppLogsPerRequest *int32 `protobuf:"varint,17,opt,name=app_logs_per_request,json=appLogsPerRequest" json:"app_logs_per_request,omitempty"`
+ IncludeHost *bool `protobuf:"varint,11,opt,name=include_host,json=includeHost" json:"include_host,omitempty"`
+ IncludeAll *bool `protobuf:"varint,12,opt,name=include_all,json=includeAll" json:"include_all,omitempty"`
+ CacheIterator *bool `protobuf:"varint,13,opt,name=cache_iterator,json=cacheIterator" json:"cache_iterator,omitempty"`
+ NumShards *int32 `protobuf:"varint,18,opt,name=num_shards,json=numShards" json:"num_shards,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LogReadRequest) Reset() { *m = LogReadRequest{} }
+func (m *LogReadRequest) String() string { return proto.CompactTextString(m) }
+func (*LogReadRequest) ProtoMessage() {}
+func (*LogReadRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{9}
+}
+func (m *LogReadRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LogReadRequest.Unmarshal(m, b)
+}
+func (m *LogReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LogReadRequest.Marshal(b, m, deterministic)
+}
+func (dst *LogReadRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LogReadRequest.Merge(dst, src)
+}
+func (m *LogReadRequest) XXX_Size() int {
+ return xxx_messageInfo_LogReadRequest.Size(m)
+}
+func (m *LogReadRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_LogReadRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogReadRequest proto.InternalMessageInfo
+
+func (m *LogReadRequest) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *LogReadRequest) GetVersionId() []string {
+ if m != nil {
+ return m.VersionId
+ }
+ return nil
+}
+
+func (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion {
+ if m != nil {
+ return m.ModuleVersion
+ }
+ return nil
+}
+
+func (m *LogReadRequest) GetStartTime() int64 {
+ if m != nil && m.StartTime != nil {
+ return *m.StartTime
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetEndTime() int64 {
+ if m != nil && m.EndTime != nil {
+ return *m.EndTime
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetOffset() *LogOffset {
+ if m != nil {
+ return m.Offset
+ }
+ return nil
+}
+
+func (m *LogReadRequest) GetRequestId() [][]byte {
+ if m != nil {
+ return m.RequestId
+ }
+ return nil
+}
+
+func (m *LogReadRequest) GetMinimumLogLevel() int32 {
+ if m != nil && m.MinimumLogLevel != nil {
+ return *m.MinimumLogLevel
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetIncludeIncomplete() bool {
+ if m != nil && m.IncludeIncomplete != nil {
+ return *m.IncludeIncomplete
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetCount() int64 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetCombinedLogRegex() string {
+ if m != nil && m.CombinedLogRegex != nil {
+ return *m.CombinedLogRegex
+ }
+ return ""
+}
+
+func (m *LogReadRequest) GetHostRegex() string {
+ if m != nil && m.HostRegex != nil {
+ return *m.HostRegex
+ }
+ return ""
+}
+
+func (m *LogReadRequest) GetReplicaIndex() int32 {
+ if m != nil && m.ReplicaIndex != nil {
+ return *m.ReplicaIndex
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetIncludeAppLogs() bool {
+ if m != nil && m.IncludeAppLogs != nil {
+ return *m.IncludeAppLogs
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetAppLogsPerRequest() int32 {
+ if m != nil && m.AppLogsPerRequest != nil {
+ return *m.AppLogsPerRequest
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetIncludeHost() bool {
+ if m != nil && m.IncludeHost != nil {
+ return *m.IncludeHost
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetIncludeAll() bool {
+ if m != nil && m.IncludeAll != nil {
+ return *m.IncludeAll
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetCacheIterator() bool {
+ if m != nil && m.CacheIterator != nil {
+ return *m.CacheIterator
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetNumShards() int32 {
+ if m != nil && m.NumShards != nil {
+ return *m.NumShards
+ }
+ return 0
+}
+
+type LogReadResponse struct {
+ Log []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"`
+ Offset *LogOffset `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"`
+ LastEndTime *int64 `protobuf:"varint,3,opt,name=last_end_time,json=lastEndTime" json:"last_end_time,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LogReadResponse) Reset() { *m = LogReadResponse{} }
+func (m *LogReadResponse) String() string { return proto.CompactTextString(m) }
+func (*LogReadResponse) ProtoMessage() {}
+func (*LogReadResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{10}
+}
+func (m *LogReadResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LogReadResponse.Unmarshal(m, b)
+}
+func (m *LogReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LogReadResponse.Marshal(b, m, deterministic)
+}
+func (dst *LogReadResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LogReadResponse.Merge(dst, src)
+}
+func (m *LogReadResponse) XXX_Size() int {
+ return xxx_messageInfo_LogReadResponse.Size(m)
+}
+func (m *LogReadResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_LogReadResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogReadResponse proto.InternalMessageInfo
+
+func (m *LogReadResponse) GetLog() []*RequestLog {
+ if m != nil {
+ return m.Log
+ }
+ return nil
+}
+
+func (m *LogReadResponse) GetOffset() *LogOffset {
+ if m != nil {
+ return m.Offset
+ }
+ return nil
+}
+
+func (m *LogReadResponse) GetLastEndTime() int64 {
+ if m != nil && m.LastEndTime != nil {
+ return *m.LastEndTime
+ }
+ return 0
+}
+
+type LogUsageRecord struct {
+ VersionId *string `protobuf:"bytes,1,opt,name=version_id,json=versionId" json:"version_id,omitempty"`
+ StartTime *int32 `protobuf:"varint,2,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
+ EndTime *int32 `protobuf:"varint,3,opt,name=end_time,json=endTime" json:"end_time,omitempty"`
+ Count *int64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"`
+ TotalSize *int64 `protobuf:"varint,5,opt,name=total_size,json=totalSize" json:"total_size,omitempty"`
+ Records *int32 `protobuf:"varint,6,opt,name=records" json:"records,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LogUsageRecord) Reset() { *m = LogUsageRecord{} }
+func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) }
+func (*LogUsageRecord) ProtoMessage() {}
+func (*LogUsageRecord) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{11}
+}
+func (m *LogUsageRecord) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LogUsageRecord.Unmarshal(m, b)
+}
+func (m *LogUsageRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LogUsageRecord.Marshal(b, m, deterministic)
+}
+func (dst *LogUsageRecord) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LogUsageRecord.Merge(dst, src)
+}
+func (m *LogUsageRecord) XXX_Size() int {
+ return xxx_messageInfo_LogUsageRecord.Size(m)
+}
+func (m *LogUsageRecord) XXX_DiscardUnknown() {
+ xxx_messageInfo_LogUsageRecord.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogUsageRecord proto.InternalMessageInfo
+
+func (m *LogUsageRecord) GetVersionId() string {
+ if m != nil && m.VersionId != nil {
+ return *m.VersionId
+ }
+ return ""
+}
+
+func (m *LogUsageRecord) GetStartTime() int32 {
+ if m != nil && m.StartTime != nil {
+ return *m.StartTime
+ }
+ return 0
+}
+
+func (m *LogUsageRecord) GetEndTime() int32 {
+ if m != nil && m.EndTime != nil {
+ return *m.EndTime
+ }
+ return 0
+}
+
+func (m *LogUsageRecord) GetCount() int64 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *LogUsageRecord) GetTotalSize() int64 {
+ if m != nil && m.TotalSize != nil {
+ return *m.TotalSize
+ }
+ return 0
+}
+
+func (m *LogUsageRecord) GetRecords() int32 {
+ if m != nil && m.Records != nil {
+ return *m.Records
+ }
+ return 0
+}
+
+type LogUsageRequest struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
+ VersionId []string `protobuf:"bytes,2,rep,name=version_id,json=versionId" json:"version_id,omitempty"`
+ StartTime *int32 `protobuf:"varint,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
+ EndTime *int32 `protobuf:"varint,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"`
+ ResolutionHours *uint32 `protobuf:"varint,5,opt,name=resolution_hours,json=resolutionHours,def=1" json:"resolution_hours,omitempty"`
+ CombineVersions *bool `protobuf:"varint,6,opt,name=combine_versions,json=combineVersions" json:"combine_versions,omitempty"`
+ UsageVersion *int32 `protobuf:"varint,7,opt,name=usage_version,json=usageVersion" json:"usage_version,omitempty"`
+ VersionsOnly *bool `protobuf:"varint,8,opt,name=versions_only,json=versionsOnly" json:"versions_only,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LogUsageRequest) Reset() { *m = LogUsageRequest{} }
+func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) }
+func (*LogUsageRequest) ProtoMessage() {}
+func (*LogUsageRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{12}
+}
+func (m *LogUsageRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LogUsageRequest.Unmarshal(m, b)
+}
+func (m *LogUsageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LogUsageRequest.Marshal(b, m, deterministic)
+}
+func (dst *LogUsageRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LogUsageRequest.Merge(dst, src)
+}
+func (m *LogUsageRequest) XXX_Size() int {
+ return xxx_messageInfo_LogUsageRequest.Size(m)
+}
+func (m *LogUsageRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_LogUsageRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogUsageRequest proto.InternalMessageInfo
+
+const Default_LogUsageRequest_ResolutionHours uint32 = 1
+
+func (m *LogUsageRequest) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *LogUsageRequest) GetVersionId() []string {
+ if m != nil {
+ return m.VersionId
+ }
+ return nil
+}
+
+func (m *LogUsageRequest) GetStartTime() int32 {
+ if m != nil && m.StartTime != nil {
+ return *m.StartTime
+ }
+ return 0
+}
+
+func (m *LogUsageRequest) GetEndTime() int32 {
+ if m != nil && m.EndTime != nil {
+ return *m.EndTime
+ }
+ return 0
+}
+
+func (m *LogUsageRequest) GetResolutionHours() uint32 {
+ if m != nil && m.ResolutionHours != nil {
+ return *m.ResolutionHours
+ }
+ return Default_LogUsageRequest_ResolutionHours
+}
+
+func (m *LogUsageRequest) GetCombineVersions() bool {
+ if m != nil && m.CombineVersions != nil {
+ return *m.CombineVersions
+ }
+ return false
+}
+
+func (m *LogUsageRequest) GetUsageVersion() int32 {
+ if m != nil && m.UsageVersion != nil {
+ return *m.UsageVersion
+ }
+ return 0
+}
+
+func (m *LogUsageRequest) GetVersionsOnly() bool {
+ if m != nil && m.VersionsOnly != nil {
+ return *m.VersionsOnly
+ }
+ return false
+}
+
+type LogUsageResponse struct {
+ Usage []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"`
+ Summary *LogUsageRecord `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LogUsageResponse) Reset() { *m = LogUsageResponse{} }
+func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) }
+func (*LogUsageResponse) ProtoMessage() {}
+func (*LogUsageResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_log_service_f054fd4b5012319d, []int{13}
+}
+func (m *LogUsageResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LogUsageResponse.Unmarshal(m, b)
+}
+func (m *LogUsageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LogUsageResponse.Marshal(b, m, deterministic)
+}
+func (dst *LogUsageResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LogUsageResponse.Merge(dst, src)
+}
+func (m *LogUsageResponse) XXX_Size() int {
+ return xxx_messageInfo_LogUsageResponse.Size(m)
+}
+func (m *LogUsageResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_LogUsageResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogUsageResponse proto.InternalMessageInfo
+
+func (m *LogUsageResponse) GetUsage() []*LogUsageRecord {
+ if m != nil {
+ return m.Usage
+ }
+ return nil
+}
+
+func (m *LogUsageResponse) GetSummary() *LogUsageRecord {
+ if m != nil {
+ return m.Summary
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*LogServiceError)(nil), "appengine.LogServiceError")
+ proto.RegisterType((*UserAppLogLine)(nil), "appengine.UserAppLogLine")
+ proto.RegisterType((*UserAppLogGroup)(nil), "appengine.UserAppLogGroup")
+ proto.RegisterType((*FlushRequest)(nil), "appengine.FlushRequest")
+ proto.RegisterType((*SetStatusRequest)(nil), "appengine.SetStatusRequest")
+ proto.RegisterType((*LogOffset)(nil), "appengine.LogOffset")
+ proto.RegisterType((*LogLine)(nil), "appengine.LogLine")
+ proto.RegisterType((*RequestLog)(nil), "appengine.RequestLog")
+ proto.RegisterType((*LogModuleVersion)(nil), "appengine.LogModuleVersion")
+ proto.RegisterType((*LogReadRequest)(nil), "appengine.LogReadRequest")
+ proto.RegisterType((*LogReadResponse)(nil), "appengine.LogReadResponse")
+ proto.RegisterType((*LogUsageRecord)(nil), "appengine.LogUsageRecord")
+ proto.RegisterType((*LogUsageRequest)(nil), "appengine.LogUsageRequest")
+ proto.RegisterType((*LogUsageResponse)(nil), "appengine.LogUsageResponse")
+}
+
+func init() {
+ proto.RegisterFile("google.golang.org/appengine/internal/log/log_service.proto", fileDescriptor_log_service_f054fd4b5012319d)
+}
+
+var fileDescriptor_log_service_f054fd4b5012319d = []byte{
+ // 1553 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x72, 0xdb, 0xc6,
+ 0x15, 0x2e, 0x48, 0x51, 0x24, 0x0f, 0x49, 0x91, 0x5a, 0xcb, 0xce, 0xda, 0xae, 0x6b, 0x1a, 0x4e,
+ 0x1c, 0xd6, 0x93, 0x48, 0x93, 0xa4, 0x57, 0xca, 0x95, 0xd3, 0x2a, 0x8e, 0x26, 0xb4, 0xd5, 0x40,
+ 0x72, 0x3a, 0xd3, 0x1b, 0x0c, 0x0a, 0x1c, 0x81, 0x18, 0x2f, 0xb1, 0xc8, 0xee, 0xc2, 0x91, 0x72,
+ 0xdb, 0xdb, 0x3e, 0x46, 0x1f, 0xa2, 0xaf, 0xd2, 0xb7, 0xe9, 0xec, 0xd9, 0x05, 0x44, 0x2a, 0x4d,
+ 0xc6, 0x33, 0xb9, 0xe0, 0x10, 0xfb, 0x9d, 0x83, 0xdd, 0xf3, 0xf3, 0x9d, 0x6f, 0x01, 0xc7, 0xb9,
+ 0x94, 0xb9, 0xc0, 0xc3, 0x5c, 0x8a, 0xa4, 0xcc, 0x0f, 0xa5, 0xca, 0x8f, 0x92, 0xaa, 0xc2, 0x32,
+ 0x2f, 0x4a, 0x3c, 0x2a, 0x4a, 0x83, 0xaa, 0x4c, 0xc4, 0x91, 0x90, 0xb9, 0xfd, 0xc5, 0x1a, 0xd5,
+ 0xbb, 0x22, 0xc5, 0xc3, 0x4a, 0x49, 0x23, 0xd9, 0xb0, 0xf5, 0x0c, 0x5f, 0xc3, 0x74, 0x29, 0xf3,
+ 0x73, 0x67, 0x3e, 0x51, 0x4a, 0xaa, 0xf0, 0x4b, 0x18, 0xd2, 0xc3, 0x9f, 0x65, 0x86, 0x6c, 0x17,
+ 0x3a, 0x67, 0xdf, 0xce, 0x7e, 0xc7, 0xee, 0xc0, 0xf4, 0xf4, 0xf5, 0xf7, 0x2f, 0x96, 0xa7, 0x7f,
+ 0x89, 0xa3, 0x93, 0xef, 0xde, 0x9c, 0x9c, 0x5f, 0xcc, 0x02, 0xb6, 0x0f, 0x93, 0xf3, 0x8b, 0xb3,
+ 0xe8, 0xc5, 0xcb, 0x93, 0xf8, 0x24, 0x8a, 0xce, 0xa2, 0x59, 0x27, 0xcc, 0x61, 0xef, 0x8d, 0x46,
+ 0xf5, 0xa2, 0xaa, 0x96, 0x32, 0x5f, 0x16, 0x25, 0xb2, 0x8f, 0x60, 0xcf, 0x14, 0x6b, 0xd4, 0x26,
+ 0x59, 0x57, 0x71, 0xad, 0x31, 0xe5, 0xc1, 0xbc, 0xb3, 0xe8, 0x46, 0x93, 0x16, 0x7d, 0xa3, 0x31,
+ 0x65, 0x07, 0xd0, 0x13, 0xf8, 0x0e, 0x05, 0xef, 0x90, 0xd5, 0x2d, 0x18, 0x87, 0xfe, 0x1a, 0xb5,
+ 0x4e, 0x72, 0xe4, 0xdd, 0x79, 0x67, 0x31, 0x8c, 0x9a, 0x65, 0xf8, 0x12, 0xa6, 0x37, 0x07, 0xbd,
+ 0x54, 0xb2, 0xae, 0xd8, 0x9f, 0x60, 0x60, 0x73, 0x15, 0x45, 0x89, 0xbc, 0x33, 0xef, 0x2e, 0x46,
+ 0x9f, 0xdf, 0x3f, 0x6c, 0x33, 0x3d, 0xdc, 0x0e, 0x2b, 0xea, 0x0b, 0xf7, 0x10, 0x86, 0x30, 0xfe,
+ 0x5a, 0xd4, 0x7a, 0x15, 0xe1, 0x0f, 0x35, 0x6a, 0xc3, 0x18, 0xec, 0x08, 0x99, 0x6b, 0x1e, 0xcc,
+ 0x83, 0xc5, 0x38, 0xa2, 0xe7, 0xf0, 0x39, 0xcc, 0xce, 0xd1, 0x9c, 0x9b, 0xc4, 0xd4, 0xba, 0xf1,
+ 0xbb, 0x07, 0xbb, 0x9a, 0x00, 0xca, 0x67, 0x18, 0xf9, 0x55, 0xf8, 0x1c, 0x86, 0x4b, 0x99, 0x9f,
+ 0x5d, 0x5e, 0x6a, 0x34, 0xec, 0x11, 0x80, 0x72, 0xfe, 0x71, 0x91, 0xf9, 0x2d, 0x87, 0x1e, 0x39,
+ 0xcd, 0xc2, 0x0b, 0xe8, 0x37, 0x65, 0x62, 0xb0, 0x63, 0x0b, 0xe2, 0x8b, 0x43, 0xcf, 0xdb, 0x35,
+ 0xe9, 0x35, 0x35, 0x79, 0x0c, 0x23, 0x9b, 0xe6, 0x76, 0x5d, 0x40, 0xc8, 0xfc, 0x95, 0x2f, 0xcd,
+ 0x3f, 0x01, 0xc0, 0x47, 0xb9, 0x94, 0x39, 0xbb, 0x0b, 0xbb, 0x49, 0x55, 0xb9, 0xf3, 0xad, 0x6b,
+ 0x2f, 0xa9, 0xaa, 0xd3, 0x8c, 0x7d, 0x08, 0xc3, 0xb5, 0xcc, 0x6a, 0x81, 0xd6, 0xf2, 0xd1, 0x3c,
+ 0x58, 0x0c, 0x8f, 0xfb, 0x19, 0x5e, 0x26, 0xb5, 0x30, 0xd1, 0xc0, 0x59, 0x4e, 0x33, 0x9b, 0xc0,
+ 0x3b, 0x54, 0xba, 0x90, 0xa5, 0x75, 0xeb, 0xd0, 0x06, 0x43, 0x8f, 0x38, 0xf3, 0x46, 0x7e, 0x36,
+ 0x94, 0xcd, 0xfc, 0xd8, 0x27, 0xb0, 0x2b, 0xa9, 0x10, 0xfc, 0xe9, 0x3c, 0x58, 0x8c, 0x3e, 0x3f,
+ 0xd8, 0xe8, 0x47, 0x5b, 0xa4, 0xc8, 0xfb, 0xb0, 0x3d, 0xe8, 0x14, 0x15, 0xdf, 0xa1, 0x33, 0x3a,
+ 0x45, 0xc5, 0x1e, 0xc0, 0xa0, 0x2c, 0xd2, 0xb7, 0x65, 0xb2, 0x46, 0xde, 0xb3, 0x01, 0x46, 0xed,
+ 0xda, 0x1e, 0xac, 0x4d, 0xa2, 0x4c, 0x4c, 0x45, 0xdb, 0xa5, 0xa2, 0x0d, 0x09, 0xb9, 0xb0, 0x95,
+ 0xbb, 0x0f, 0x03, 0x2c, 0x33, 0x67, 0xec, 0x93, 0xb1, 0x8f, 0x65, 0x46, 0x26, 0x0e, 0x7d, 0x91,
+ 0x18, 0x2c, 0xd3, 0x6b, 0x3e, 0x70, 0x16, 0xbf, 0x24, 0xb2, 0xa5, 0xd7, 0xa9, 0x40, 0xcd, 0x87,
+ 0xce, 0xe2, 0x97, 0xb6, 0xd7, 0x6b, 0x34, 0x2b, 0x99, 0x71, 0x70, 0xbd, 0x76, 0x2b, 0x1b, 0xa1,
+ 0x42, 0x2d, 0x6b, 0x95, 0x22, 0x1f, 0x91, 0xa5, 0x5d, 0xb3, 0x27, 0x30, 0x5e, 0x19, 0x53, 0xc5,
+ 0xbe, 0x58, 0x7c, 0x4c, 0xf6, 0x91, 0xc5, 0xbe, 0x77, 0xd0, 0x06, 0x85, 0x26, 0xd4, 0x60, 0xbf,
+ 0x62, 0x4f, 0x61, 0xa2, 0x50, 0x57, 0xb2, 0xd4, 0x18, 0xeb, 0xe2, 0x27, 0xe4, 0x7b, 0x14, 0xce,
+ 0xb8, 0x01, 0xcf, 0x8b, 0x9f, 0xd0, 0x9d, 0x7d, 0x89, 0x4a, 0xa1, 0xe2, 0x53, 0x57, 0x9d, 0x66,
+ 0x6d, 0xab, 0x53, 0x6b, 0x54, 0x71, 0x92, 0x63, 0x69, 0xf8, 0x8c, 0xac, 0x43, 0x8b, 0xbc, 0xb0,
+ 0x00, 0x0b, 0x61, 0x52, 0x2b, 0x11, 0xaf, 0x93, 0x2a, 0xc6, 0xd2, 0xa8, 0x6b, 0xbe, 0xef, 0x62,
+ 0xab, 0x95, 0x78, 0x95, 0x54, 0x27, 0x16, 0xb2, 0xdb, 0xa7, 0x72, 0xfd, 0x8f, 0xa2, 0xc4, 0x8c,
+ 0x33, 0x97, 0x5a, 0xb3, 0xb6, 0x0c, 0x4c, 0xaa, 0x22, 0x6e, 0x8a, 0x75, 0x67, 0x1e, 0x2c, 0xba,
+ 0x11, 0x24, 0x55, 0xf1, 0xca, 0xd7, 0x8b, 0xc1, 0xce, 0x4a, 0x6a, 0xc3, 0x0f, 0xe8, 0x64, 0x7a,
+ 0xb6, 0x58, 0x6a, 0xb1, 0xbb, 0xf3, 0x60, 0x11, 0x44, 0xf4, 0xcc, 0x9e, 0xc1, 0xd4, 0x24, 0xfa,
+ 0x6d, 0xfc, 0x43, 0x8d, 0x35, 0xc6, 0xd4, 0xe8, 0x7b, 0xf4, 0xca, 0xc4, 0xc2, 0xdf, 0x59, 0xf4,
+ 0xb5, 0xed, 0xf6, 0x43, 0x18, 0x92, 0x1f, 0x79, 0x7c, 0xe0, 0x92, 0xb5, 0x00, 0x19, 0x0f, 0xe1,
+ 0xce, 0x8f, 0x89, 0x8e, 0x85, 0x4c, 0xb2, 0xa2, 0xcc, 0x63, 0xcf, 0x3e, 0xce, 0xe7, 0xc1, 0x62,
+ 0x10, 0xed, 0xff, 0x98, 0xe8, 0xa5, 0xb3, 0x34, 0x83, 0xfb, 0x04, 0xc6, 0x15, 0x96, 0xe4, 0x4b,
+ 0xfc, 0xb8, 0x4f, 0xe1, 0x8f, 0x3c, 0x46, 0x1c, 0xf9, 0xd8, 0x36, 0xa0, 0x12, 0x45, 0x9a, 0xc4,
+ 0x45, 0x99, 0xe1, 0x15, 0x7f, 0x30, 0x0f, 0x16, 0xbd, 0xe3, 0xce, 0xa7, 0x9f, 0xd9, 0x26, 0x90,
+ 0xe1, 0xd4, 0xe2, 0x6c, 0x0e, 0x83, 0xcb, 0xa2, 0x2c, 0xf4, 0x0a, 0x33, 0xfe, 0xd0, 0x1e, 0x78,
+ 0xbc, 0x63, 0x54, 0x8d, 0x51, 0x8b, 0xda, 0xd0, 0x53, 0x21, 0x4b, 0x8c, 0xdf, 0xe2, 0x35, 0xff,
+ 0x3d, 0x09, 0xc0, 0x80, 0x80, 0x6f, 0xf1, 0x9a, 0x3d, 0x83, 0x1d, 0x52, 0xab, 0x47, 0xa4, 0x56,
+ 0x6c, 0x7b, 0x3a, 0x48, 0xa6, 0xc8, 0xce, 0xfe, 0x08, 0x33, 0xfb, 0xaf, 0xe3, 0xa2, 0x4c, 0xe5,
+ 0xba, 0x12, 0x68, 0x90, 0x7f, 0x48, 0xf9, 0x4d, 0x09, 0x3f, 0x6d, 0x61, 0xf6, 0x09, 0x30, 0x3b,
+ 0xed, 0x6e, 0x9b, 0x58, 0xa1, 0xc0, 0x44, 0x23, 0x7f, 0x46, 0x07, 0xcf, 0x92, 0xaa, 0x3a, 0x21,
+ 0x43, 0xe4, 0x70, 0xdb, 0x49, 0xbc, 0x2a, 0x4c, 0xac, 0x30, 0xd1, 0xb2, 0xe4, 0x7f, 0xb0, 0x69,
+ 0x46, 0x60, 0xa1, 0x88, 0x10, 0xf6, 0x05, 0xdc, 0xb3, 0xc5, 0x35, 0x2b, 0x25, 0x8d, 0x11, 0x98,
+ 0xc5, 0x97, 0x52, 0xb9, 0xb2, 0x3d, 0xa6, 0xf3, 0x6d, 0xe9, 0x2f, 0x1a, 0xe3, 0xd7, 0x52, 0x51,
+ 0xf9, 0xbe, 0x84, 0x07, 0x3f, 0x7f, 0xc9, 0xf7, 0x45, 0xf3, 0x39, 0xbd, 0xf8, 0xc1, 0xad, 0x17,
+ 0x7d, 0x77, 0x34, 0xdd, 0x17, 0xed, 0x8b, 0x74, 0xd2, 0x13, 0x6a, 0xd0, 0xa4, 0x45, 0xe9, 0x8c,
+ 0xc7, 0x30, 0xb2, 0x97, 0x1a, 0x2a, 0x47, 0x8a, 0x90, 0x12, 0x04, 0x07, 0x59, 0x5a, 0x84, 0x7f,
+ 0x83, 0xd9, 0x52, 0xe6, 0xaf, 0x48, 0xc8, 0x9a, 0x81, 0xdb, 0xd2, 0xbc, 0xe0, 0x7d, 0x35, 0x2f,
+ 0xd8, 0xd2, 0xbc, 0xf0, 0xbf, 0x3d, 0xd8, 0x5b, 0xca, 0x3c, 0xc2, 0x24, 0x6b, 0x28, 0xf5, 0x0b,
+ 0x12, 0x7b, 0x7b, 0xa3, 0xee, 0xb6, 0x78, 0x7e, 0x05, 0x7b, 0x3e, 0x9a, 0x46, 0x23, 0xee, 0x10,
+ 0x0f, 0x1e, 0x6e, 0xf3, 0x60, 0x2b, 0x85, 0x68, 0xb2, 0xde, 0xca, 0x68, 0x5b, 0x07, 0xbb, 0x54,
+ 0xa9, 0x5f, 0xd0, 0xc1, 0x1d, 0x32, 0xb6, 0x3a, 0x78, 0xa3, 0xcd, 0xbd, 0xf7, 0xd0, 0xe6, 0x6d,
+ 0xa1, 0xdf, 0x9d, 0x77, 0xb7, 0x85, 0xfe, 0x39, 0xec, 0xaf, 0x8b, 0xb2, 0x58, 0xd7, 0xeb, 0x98,
+ 0xae, 0x60, 0xba, 0xb5, 0xfa, 0xc4, 0xa6, 0xa9, 0x37, 0x58, 0x46, 0xd3, 0xfd, 0xf5, 0x29, 0xb0,
+ 0xa2, 0x4c, 0x45, 0x9d, 0xe1, 0x26, 0x9d, 0x07, 0x6e, 0x5c, 0xbd, 0x65, 0x83, 0xd0, 0x07, 0xd0,
+ 0x4b, 0x65, 0x5d, 0x1a, 0x3e, 0xa4, 0xf8, 0xdd, 0xc2, 0xd2, 0xbc, 0x91, 0x23, 0x3a, 0x51, 0x61,
+ 0x8e, 0x57, 0x7c, 0x8f, 0x7a, 0x35, 0x6b, 0x2c, 0xd4, 0xa5, 0x1c, 0xaf, 0x6c, 0xf4, 0x56, 0x83,
+ 0xbc, 0x97, 0x53, 0xcb, 0xa1, 0x45, 0x9c, 0xf9, 0xe9, 0xed, 0x71, 0x9f, 0x51, 0xe4, 0xdb, 0xa3,
+ 0xbe, 0x80, 0x59, 0x13, 0xb6, 0xed, 0x35, 0x7d, 0x23, 0x00, 0x05, 0xbd, 0xe7, 0x71, 0xf7, 0x75,
+ 0xa1, 0xd9, 0x11, 0x1c, 0x34, 0x1e, 0x71, 0x85, 0x2d, 0xf3, 0xf9, 0x3e, 0xed, 0xba, 0x9f, 0x38,
+ 0xb7, 0xbf, 0xa2, 0xda, 0x50, 0xa4, 0x66, 0x6b, 0x92, 0xcd, 0x11, 0x6d, 0x3b, 0xf2, 0xd8, 0x37,
+ 0x56, 0x29, 0x1f, 0xc3, 0xa8, 0x3d, 0x5d, 0x08, 0x3e, 0x26, 0x0f, 0x68, 0x0e, 0x16, 0xc2, 0x8e,
+ 0x4d, 0x9a, 0xa4, 0x2b, 0x8c, 0x0b, 0x83, 0x2a, 0x31, 0x52, 0xf1, 0x09, 0xf9, 0x4c, 0x08, 0x3d,
+ 0xf5, 0xa0, 0xad, 0x44, 0x59, 0xaf, 0x63, 0xbd, 0x4a, 0x54, 0xa6, 0x39, 0xa3, 0x88, 0x86, 0x65,
+ 0xbd, 0x3e, 0x27, 0x20, 0xfc, 0x57, 0x40, 0xdf, 0x83, 0x8e, 0xdb, 0xee, 0xb2, 0x61, 0x1f, 0x43,
+ 0x57, 0xc8, 0x9c, 0x07, 0xc4, 0xcd, 0xbb, 0x1b, 0x2c, 0xb9, 0xf9, 0xc6, 0x88, 0xac, 0xc7, 0x06,
+ 0xa3, 0x3a, 0xef, 0xc1, 0xa8, 0x10, 0x26, 0x22, 0xd1, 0x26, 0x6e, 0xf9, 0xe9, 0xc8, 0x3b, 0xb2,
+ 0xe0, 0x89, 0xe3, 0x68, 0xf8, 0x9f, 0x80, 0x46, 0xed, 0x8d, 0xfd, 0xac, 0x89, 0x30, 0x95, 0xea,
+ 0xf6, 0x4c, 0x05, 0xb7, 0x86, 0xf3, 0xd6, 0x3c, 0x74, 0x5c, 0x7e, 0xff, 0x7f, 0x1e, 0xba, 0x64,
+ 0x6c, 0xe7, 0xa1, 0xe5, 0xd9, 0xce, 0x26, 0xcf, 0x1e, 0x01, 0x18, 0x69, 0x12, 0xe1, 0xee, 0xe1,
+ 0x9e, 0x9b, 0x2f, 0x42, 0xe8, 0x12, 0xe6, 0xd0, 0x57, 0x14, 0x97, 0xe6, 0xbb, 0x6e, 0x3b, 0xbf,
+ 0x0c, 0xff, 0xdd, 0xa1, 0x4a, 0xfa, 0xd0, 0x7f, 0x8b, 0x4c, 0xfc, 0x7c, 0xc4, 0x7b, 0xbf, 0x36,
+ 0xe2, 0xbd, 0xcd, 0x11, 0x9f, 0xd9, 0xcf, 0x11, 0x51, 0x1b, 0xbb, 0xf7, 0x4a, 0xd6, 0x4a, 0x53,
+ 0x0a, 0x93, 0xe3, 0xe0, 0xb3, 0x68, 0x7a, 0x63, 0xfa, 0xc6, 0x5a, 0xec, 0x25, 0xe3, 0x07, 0xa7,
+ 0xd1, 0x23, 0x97, 0xd4, 0x20, 0x9a, 0x7a, 0xdc, 0x8b, 0x0e, 0x7d, 0xa0, 0xd4, 0x36, 0xb1, 0x56,
+ 0xb8, 0xdc, 0xa8, 0x8f, 0x09, 0x6c, 0xa4, 0xe9, 0x29, 0x4c, 0x9a, 0x7d, 0x62, 0x59, 0x8a, 0x6b,
+ 0x3f, 0xe2, 0xe3, 0x06, 0x3c, 0x2b, 0xc5, 0x75, 0x78, 0x45, 0x2a, 0xed, 0xab, 0xe4, 0x09, 0x77,
+ 0x04, 0x3d, 0xda, 0xc8, 0x53, 0xee, 0xfe, 0x36, 0x8d, 0x36, 0xc8, 0x10, 0x39, 0x3f, 0xf6, 0x05,
+ 0xf4, 0x75, 0xbd, 0x5e, 0x27, 0xea, 0xda, 0x33, 0xef, 0x57, 0x5e, 0x69, 0x3c, 0xbf, 0xea, 0xfd,
+ 0xdd, 0x92, 0xf6, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x70, 0xd9, 0xa0, 0xf8, 0x48, 0x0d, 0x00,
+ 0x00,
+}
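
The generated accessors above follow the usual proto2 pattern: optional and defaulted scalars become pointer fields, and each GetX method nil-checks both the receiver and the field before dereferencing, falling back to the declared default (for example Default_RequestLog_ReplicaIndex and Default_RequestLog_Finished) or to the zero value. A minimal sketch of how calling code leans on these getters instead of touching the pointer fields directly, assuming the file's existing fmt import; the summarize helper and its format string are invented for illustration:

func summarizeRequestLog(rl *RequestLog) string {
	// GetFinished falls back to Default_RequestLog_Finished (true) when the
	// field is unset; GetReplicaIndex falls back to its proto default of -1.
	state := "finished"
	if !rl.GetFinished() {
		state = "pending"
	}
	return fmt.Sprintf("%s %s -> %d (%s, replica %d)",
		rl.GetMethod(), rl.GetResource(), rl.GetStatus(), state, rl.GetReplicaIndex())
}
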
diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.proto b/vendor/google.golang.org/appengine/internal/log/log_service.proto
new file mode 100644
index 00000000..8981dc47
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/log/log_service.proto
@@ -0,0 +1,150 @@
+syntax = "proto2";
+option go_package = "log";
+
+package appengine;
+
+message LogServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INVALID_REQUEST = 1;
+ STORAGE_ERROR = 2;
+ }
+}
+
+message UserAppLogLine {
+ required int64 timestamp_usec = 1;
+ required int64 level = 2;
+ required string message = 3;
+}
+
+message UserAppLogGroup {
+ repeated UserAppLogLine log_line = 2;
+}
+
+message FlushRequest {
+ optional bytes logs = 1;
+}
+
+message SetStatusRequest {
+ required string status = 1;
+}
+
+
+message LogOffset {
+ optional bytes request_id = 1;
+}
+
+message LogLine {
+ required int64 time = 1;
+ required int32 level = 2;
+ required string log_message = 3;
+}
+
+message RequestLog {
+ required string app_id = 1;
+ optional string module_id = 37 [default="default"];
+ required string version_id = 2;
+ required bytes request_id = 3;
+ optional LogOffset offset = 35;
+ required string ip = 4;
+ optional string nickname = 5;
+ required int64 start_time = 6;
+ required int64 end_time = 7;
+ required int64 latency = 8;
+ required int64 mcycles = 9;
+ required string method = 10;
+ required string resource = 11;
+ required string http_version = 12;
+ required int32 status = 13;
+ required int64 response_size = 14;
+ optional string referrer = 15;
+ optional string user_agent = 16;
+ required string url_map_entry = 17;
+ required string combined = 18;
+ optional int64 api_mcycles = 19;
+ optional string host = 20;
+ optional double cost = 21;
+
+ optional string task_queue_name = 22;
+ optional string task_name = 23;
+
+ optional bool was_loading_request = 24;
+ optional int64 pending_time = 25;
+ optional int32 replica_index = 26 [default = -1];
+ optional bool finished = 27 [default = true];
+ optional bytes clone_key = 28;
+
+ repeated LogLine line = 29;
+
+ optional bool lines_incomplete = 36;
+ optional bytes app_engine_release = 38;
+
+ optional int32 exit_reason = 30;
+ optional bool was_throttled_for_time = 31;
+ optional bool was_throttled_for_requests = 32;
+ optional int64 throttled_time = 33;
+
+ optional bytes server_name = 34;
+}
+
+message LogModuleVersion {
+ optional string module_id = 1 [default="default"];
+ optional string version_id = 2;
+}
+
+message LogReadRequest {
+ required string app_id = 1;
+ repeated string version_id = 2;
+ repeated LogModuleVersion module_version = 19;
+
+ optional int64 start_time = 3;
+ optional int64 end_time = 4;
+ optional LogOffset offset = 5;
+ repeated bytes request_id = 6;
+
+ optional int32 minimum_log_level = 7;
+ optional bool include_incomplete = 8;
+ optional int64 count = 9;
+
+ optional string combined_log_regex = 14;
+ optional string host_regex = 15;
+ optional int32 replica_index = 16;
+
+ optional bool include_app_logs = 10;
+ optional int32 app_logs_per_request = 17;
+ optional bool include_host = 11;
+ optional bool include_all = 12;
+ optional bool cache_iterator = 13;
+ optional int32 num_shards = 18;
+}
+
+message LogReadResponse {
+ repeated RequestLog log = 1;
+ optional LogOffset offset = 2;
+ optional int64 last_end_time = 3;
+}
+
+message LogUsageRecord {
+ optional string version_id = 1;
+ optional int32 start_time = 2;
+ optional int32 end_time = 3;
+ optional int64 count = 4;
+ optional int64 total_size = 5;
+ optional int32 records = 6;
+}
+
+message LogUsageRequest {
+ required string app_id = 1;
+ repeated string version_id = 2;
+ optional int32 start_time = 3;
+ optional int32 end_time = 4;
+ optional uint32 resolution_hours = 5 [default = 1];
+ optional bool combine_versions = 6;
+ optional int32 usage_version = 7;
+ optional bool versions_only = 8;
+}
+
+message LogUsageResponse {
+ repeated LogUsageRecord usage = 1;
+ optional LogUsageRecord summary = 2;
+}
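
Because every scalar field in this proto2 schema is optional or required rather than proto3-style implicit, the generated Go structs hold pointers, and requests are normally assembled with the proto.String/proto.Int64/proto.Int32/proto.Bool helpers. A small sketch of building a LogReadRequest, written as if inside the generated package; the app id, time window and other values are made up:

func exampleLogReadRequest() *LogReadRequest {
	return &LogReadRequest{
		AppId:           proto.String("example-app"),   // hypothetical app id
		StartTime:       proto.Int64(1500000000000000), // microseconds, made up
		EndTime:         proto.Int64(1500000600000000),
		MinimumLogLevel: proto.Int32(2),
		IncludeAppLogs:  proto.Bool(true),
		Count:           proto.Int64(100),
	}
}
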
diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go
new file mode 100644
index 00000000..49036163
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/main.go
@@ -0,0 +1,15 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import (
+ "appengine_internal"
+)
+
+func Main() {
+ appengine_internal.Main()
+}
diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go
new file mode 100644
index 00000000..822e784a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/main_vm.go
@@ -0,0 +1,48 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+ "io"
+ "log"
+ "net/http"
+ "net/url"
+ "os"
+)
+
+func Main() {
+ installHealthChecker(http.DefaultServeMux)
+
+ port := "8080"
+ if s := os.Getenv("PORT"); s != "" {
+ port = s
+ }
+
+ host := ""
+ if IsDevAppServer() {
+ host = "127.0.0.1"
+ }
+ if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil {
+ log.Fatalf("http.ListenAndServe: %v", err)
+ }
+}
+
+func installHealthChecker(mux *http.ServeMux) {
+ // If no health check handler has been installed by this point, add a trivial one.
+ const healthPath = "/_ah/health"
+ hreq := &http.Request{
+ Method: "GET",
+ URL: &url.URL{
+ Path: healthPath,
+ },
+ }
+ if _, pat := mux.Handler(hreq); pat != healthPath {
+ mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) {
+ io.WriteString(w, "ok")
+ })
+ }
+}
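
installHealthChecker only registers the trivial handler when nothing is already mounted at exactly /_ah/health, so an application that wants a real readiness probe can claim the path itself before Main is called. A sketch under that assumption; the ready flag and its maintenance are hypothetical:

// Registering "/_ah/health" on the default mux ahead of Main means
// installHealthChecker sees the pattern already taken and leaves it alone.
var ready bool // hypothetical readiness flag maintained elsewhere

func init() {
	http.HandleFunc("/_ah/health", func(w http.ResponseWriter, r *http.Request) {
		if !ready {
			http.Error(w, "not ready", http.StatusServiceUnavailable)
			return
		}
		io.WriteString(w, "ok")
	})
}
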
diff --git a/vendor/google.golang.org/appengine/internal/metadata.go b/vendor/google.golang.org/appengine/internal/metadata.go
new file mode 100644
index 00000000..c4ba63bb
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/metadata.go
@@ -0,0 +1,60 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+// This file has code for accessing metadata.
+//
+// References:
+// https://cloud.google.com/compute/docs/metadata
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+)
+
+const (
+ metadataHost = "metadata"
+ metadataPath = "/computeMetadata/v1/"
+)
+
+var (
+ metadataRequestHeaders = http.Header{
+ "Metadata-Flavor": []string{"Google"},
+ }
+)
+
+// TODO(dsymonds): Do we need to support default values, like Python?
+func mustGetMetadata(key string) []byte {
+ b, err := getMetadata(key)
+ if err != nil {
+ panic(fmt.Sprintf("Metadata fetch failed for '%s': %v", key, err))
+ }
+ return b
+}
+
+func getMetadata(key string) ([]byte, error) {
+ // TODO(dsymonds): May need to use url.Parse to support keys with query args.
+ req := &http.Request{
+ Method: "GET",
+ URL: &url.URL{
+ Scheme: "http",
+ Host: metadataHost,
+ Path: metadataPath + key,
+ },
+ Header: metadataRequestHeaders,
+ Host: metadataHost,
+ }
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != 200 {
+ return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode)
+ }
+ return ioutil.ReadAll(resp.Body)
+}
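
getMetadata issues a plain GET against the metadata server with the mandatory Metadata-Flavor: Google header and hands back the raw body. A short sketch of a caller in the same package; project/project-id is one documented metadata path, but the helper name and its use here are illustrative only:

// projectIDFromMetadata is a hypothetical helper built on getMetadata.
func projectIDFromMetadata() (string, error) {
	b, err := getMetadata("project/project-id")
	if err != nil {
		return "", err
	}
	return string(b), nil
}
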
diff --git a/vendor/google.golang.org/appengine/internal/net.go b/vendor/google.golang.org/appengine/internal/net.go
new file mode 100644
index 00000000..3b94cf0c
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/net.go
@@ -0,0 +1,56 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+// This file implements a network dialer that limits the number of concurrent connections.
+// It is only used for API calls.
+
+import (
+ "log"
+ "net"
+ "runtime"
+ "sync"
+ "time"
+)
+
+var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable.
+
+func limitRelease() {
+ // non-blocking
+ select {
+ case <-limitSem:
+ default:
+ // This should not normally happen.
+ log.Print("appengine: unbalanced limitSem release!")
+ }
+}
+
+func limitDial(network, addr string) (net.Conn, error) {
+ limitSem <- 1
+
+ // Dial with a timeout in case the API host is MIA.
+ // The connection should normally be very fast.
+ conn, err := net.DialTimeout(network, addr, 500*time.Millisecond)
+ if err != nil {
+ limitRelease()
+ return nil, err
+ }
+ lc := &limitConn{Conn: conn}
+ runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required
+ return lc, nil
+}
+
+type limitConn struct {
+ close sync.Once
+ net.Conn
+}
+
+func (lc *limitConn) Close() error {
+ defer lc.close.Do(func() {
+ limitRelease()
+ runtime.SetFinalizer(lc, nil)
+ })
+ return lc.Conn.Close()
+}
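
limitDial has the classic func(network, addr string) (net.Conn, error) shape, so it can be plugged straight into an http.Transport via its Dial hook; the semaphore then caps the client at cap(limitSem) concurrent connections, and limitConn releases a slot exactly once on Close (with the finalizer as a backstop). A sketch of that wiring, assuming net/http is imported in the same package; the client variable is hypothetical:

// apiClient is a hypothetical HTTP client whose outbound connections are
// throttled by the semaphore above.
var apiClient = &http.Client{
	Transport: &http.Transport{
		Dial: limitDial, // at most cap(limitSem) connections in flight
	},
}
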
diff --git a/vendor/google.golang.org/appengine/internal/regen.sh b/vendor/google.golang.org/appengine/internal/regen.sh
new file mode 100755
index 00000000..2fdb546a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/regen.sh
@@ -0,0 +1,40 @@
+#!/bin/bash -e
+#
+# This script rebuilds the generated code for the protocol buffers.
+# To run this you will need protoc and goprotobuf installed;
+# see https://github.com/golang/protobuf for instructions.
+
+PKG=google.golang.org/appengine
+
+function die() {
+ echo 1>&2 $*
+ exit 1
+}
+
+# Sanity check that the right tools are accessible.
+for tool in go protoc protoc-gen-go; do
+ q=$(which $tool) || die "didn't find $tool"
+ echo 1>&2 "$tool: $q"
+done
+
+echo -n 1>&2 "finding package dir... "
+pkgdir=$(go list -f '{{.Dir}}' $PKG)
+echo 1>&2 $pkgdir
+base=$(echo $pkgdir | sed "s,/$PKG\$,,")
+echo 1>&2 "base: $base"
+cd $base
+
+# Run protoc once per package.
+for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do
+ echo 1>&2 "* $dir"
+ protoc --go_out=. $dir/*.proto
+done
+
+for f in $(find $PKG/internal -name '*.pb.go'); do
+ # Remove proto.RegisterEnum calls.
+ # These cause duplicate registration panics when these packages
+ # are used on classic App Engine. proto.RegisterEnum only affects
+ # parsing the text format; we don't care about that.
+ # https://code.google.com/p/googleappengine/issues/detail?id=11670#c17
+ sed -i '/proto.RegisterEnum/d' $f
+done
diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
new file mode 100644
index 00000000..8d782a38
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
@@ -0,0 +1,361 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google.golang.org/appengine/internal/remote_api/remote_api.proto
+
+package remote_api
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type RpcError_ErrorCode int32
+
+const (
+ RpcError_UNKNOWN RpcError_ErrorCode = 0
+ RpcError_CALL_NOT_FOUND RpcError_ErrorCode = 1
+ RpcError_PARSE_ERROR RpcError_ErrorCode = 2
+ RpcError_SECURITY_VIOLATION RpcError_ErrorCode = 3
+ RpcError_OVER_QUOTA RpcError_ErrorCode = 4
+ RpcError_REQUEST_TOO_LARGE RpcError_ErrorCode = 5
+ RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6
+ RpcError_FEATURE_DISABLED RpcError_ErrorCode = 7
+ RpcError_BAD_REQUEST RpcError_ErrorCode = 8
+ RpcError_RESPONSE_TOO_LARGE RpcError_ErrorCode = 9
+ RpcError_CANCELLED RpcError_ErrorCode = 10
+ RpcError_REPLAY_ERROR RpcError_ErrorCode = 11
+ RpcError_DEADLINE_EXCEEDED RpcError_ErrorCode = 12
+)
+
+var RpcError_ErrorCode_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "CALL_NOT_FOUND",
+ 2: "PARSE_ERROR",
+ 3: "SECURITY_VIOLATION",
+ 4: "OVER_QUOTA",
+ 5: "REQUEST_TOO_LARGE",
+ 6: "CAPABILITY_DISABLED",
+ 7: "FEATURE_DISABLED",
+ 8: "BAD_REQUEST",
+ 9: "RESPONSE_TOO_LARGE",
+ 10: "CANCELLED",
+ 11: "REPLAY_ERROR",
+ 12: "DEADLINE_EXCEEDED",
+}
+var RpcError_ErrorCode_value = map[string]int32{
+ "UNKNOWN": 0,
+ "CALL_NOT_FOUND": 1,
+ "PARSE_ERROR": 2,
+ "SECURITY_VIOLATION": 3,
+ "OVER_QUOTA": 4,
+ "REQUEST_TOO_LARGE": 5,
+ "CAPABILITY_DISABLED": 6,
+ "FEATURE_DISABLED": 7,
+ "BAD_REQUEST": 8,
+ "RESPONSE_TOO_LARGE": 9,
+ "CANCELLED": 10,
+ "REPLAY_ERROR": 11,
+ "DEADLINE_EXCEEDED": 12,
+}
+
+func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode {
+ p := new(RpcError_ErrorCode)
+ *p = x
+ return p
+}
+func (x RpcError_ErrorCode) String() string {
+ return proto.EnumName(RpcError_ErrorCode_name, int32(x))
+}
+func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = RpcError_ErrorCode(value)
+ return nil
+}
+func (RpcError_ErrorCode) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_remote_api_1978114ec33a273d, []int{2, 0}
+}
+
+type Request struct {
+ ServiceName *string `protobuf:"bytes,2,req,name=service_name,json=serviceName" json:"service_name,omitempty"`
+ Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"`
+ Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"`
+ RequestId *string `protobuf:"bytes,5,opt,name=request_id,json=requestId" json:"request_id,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Request) Reset() { *m = Request{} }
+func (m *Request) String() string { return proto.CompactTextString(m) }
+func (*Request) ProtoMessage() {}
+func (*Request) Descriptor() ([]byte, []int) {
+ return fileDescriptor_remote_api_1978114ec33a273d, []int{0}
+}
+func (m *Request) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Request.Unmarshal(m, b)
+}
+func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Request.Marshal(b, m, deterministic)
+}
+func (dst *Request) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Request.Merge(dst, src)
+}
+func (m *Request) XXX_Size() int {
+ return xxx_messageInfo_Request.Size(m)
+}
+func (m *Request) XXX_DiscardUnknown() {
+ xxx_messageInfo_Request.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Request proto.InternalMessageInfo
+
+func (m *Request) GetServiceName() string {
+ if m != nil && m.ServiceName != nil {
+ return *m.ServiceName
+ }
+ return ""
+}
+
+func (m *Request) GetMethod() string {
+ if m != nil && m.Method != nil {
+ return *m.Method
+ }
+ return ""
+}
+
+func (m *Request) GetRequest() []byte {
+ if m != nil {
+ return m.Request
+ }
+ return nil
+}
+
+func (m *Request) GetRequestId() string {
+ if m != nil && m.RequestId != nil {
+ return *m.RequestId
+ }
+ return ""
+}
+
+type ApplicationError struct {
+ Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
+ Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ApplicationError) Reset() { *m = ApplicationError{} }
+func (m *ApplicationError) String() string { return proto.CompactTextString(m) }
+func (*ApplicationError) ProtoMessage() {}
+func (*ApplicationError) Descriptor() ([]byte, []int) {
+ return fileDescriptor_remote_api_1978114ec33a273d, []int{1}
+}
+func (m *ApplicationError) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ApplicationError.Unmarshal(m, b)
+}
+func (m *ApplicationError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ApplicationError.Marshal(b, m, deterministic)
+}
+func (dst *ApplicationError) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ApplicationError.Merge(dst, src)
+}
+func (m *ApplicationError) XXX_Size() int {
+ return xxx_messageInfo_ApplicationError.Size(m)
+}
+func (m *ApplicationError) XXX_DiscardUnknown() {
+ xxx_messageInfo_ApplicationError.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ApplicationError proto.InternalMessageInfo
+
+func (m *ApplicationError) GetCode() int32 {
+ if m != nil && m.Code != nil {
+ return *m.Code
+ }
+ return 0
+}
+
+func (m *ApplicationError) GetDetail() string {
+ if m != nil && m.Detail != nil {
+ return *m.Detail
+ }
+ return ""
+}
+
+type RpcError struct {
+ Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
+ Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *RpcError) Reset() { *m = RpcError{} }
+func (m *RpcError) String() string { return proto.CompactTextString(m) }
+func (*RpcError) ProtoMessage() {}
+func (*RpcError) Descriptor() ([]byte, []int) {
+ return fileDescriptor_remote_api_1978114ec33a273d, []int{2}
+}
+func (m *RpcError) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_RpcError.Unmarshal(m, b)
+}
+func (m *RpcError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_RpcError.Marshal(b, m, deterministic)
+}
+func (dst *RpcError) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RpcError.Merge(dst, src)
+}
+func (m *RpcError) XXX_Size() int {
+ return xxx_messageInfo_RpcError.Size(m)
+}
+func (m *RpcError) XXX_DiscardUnknown() {
+ xxx_messageInfo_RpcError.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RpcError proto.InternalMessageInfo
+
+func (m *RpcError) GetCode() int32 {
+ if m != nil && m.Code != nil {
+ return *m.Code
+ }
+ return 0
+}
+
+func (m *RpcError) GetDetail() string {
+ if m != nil && m.Detail != nil {
+ return *m.Detail
+ }
+ return ""
+}
+
+type Response struct {
+ Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
+ Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"`
+ ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error,json=applicationError" json:"application_error,omitempty"`
+ JavaException []byte `protobuf:"bytes,4,opt,name=java_exception,json=javaException" json:"java_exception,omitempty"`
+ RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error,json=rpcError" json:"rpc_error,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Response) Reset() { *m = Response{} }
+func (m *Response) String() string { return proto.CompactTextString(m) }
+func (*Response) ProtoMessage() {}
+func (*Response) Descriptor() ([]byte, []int) {
+ return fileDescriptor_remote_api_1978114ec33a273d, []int{3}
+}
+func (m *Response) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Response.Unmarshal(m, b)
+}
+func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Response.Marshal(b, m, deterministic)
+}
+func (dst *Response) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Response.Merge(dst, src)
+}
+func (m *Response) XXX_Size() int {
+ return xxx_messageInfo_Response.Size(m)
+}
+func (m *Response) XXX_DiscardUnknown() {
+ xxx_messageInfo_Response.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Response proto.InternalMessageInfo
+
+func (m *Response) GetResponse() []byte {
+ if m != nil {
+ return m.Response
+ }
+ return nil
+}
+
+func (m *Response) GetException() []byte {
+ if m != nil {
+ return m.Exception
+ }
+ return nil
+}
+
+func (m *Response) GetApplicationError() *ApplicationError {
+ if m != nil {
+ return m.ApplicationError
+ }
+ return nil
+}
+
+func (m *Response) GetJavaException() []byte {
+ if m != nil {
+ return m.JavaException
+ }
+ return nil
+}
+
+func (m *Response) GetRpcError() *RpcError {
+ if m != nil {
+ return m.RpcError
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*Request)(nil), "remote_api.Request")
+ proto.RegisterType((*ApplicationError)(nil), "remote_api.ApplicationError")
+ proto.RegisterType((*RpcError)(nil), "remote_api.RpcError")
+ proto.RegisterType((*Response)(nil), "remote_api.Response")
+}
+
+func init() {
+ proto.RegisterFile("google.golang.org/appengine/internal/remote_api/remote_api.proto", fileDescriptor_remote_api_1978114ec33a273d)
+}
+
+var fileDescriptor_remote_api_1978114ec33a273d = []byte{
+ // 531 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x51, 0x6e, 0xd3, 0x40,
+ 0x10, 0x86, 0xb1, 0x9b, 0x34, 0xf1, 0xc4, 0x2d, 0xdb, 0xa5, 0x14, 0x0b, 0x15, 0x29, 0x44, 0x42,
+ 0xca, 0x53, 0x2a, 0x38, 0x00, 0x62, 0x63, 0x6f, 0x91, 0x85, 0x65, 0xa7, 0x6b, 0xbb, 0x50, 0x5e,
+ 0x56, 0x2b, 0x67, 0x65, 0x8c, 0x12, 0xaf, 0xd9, 0x98, 0x8a, 0x17, 0x6e, 0xc0, 0xb5, 0x38, 0x0c,
+ 0xb7, 0x40, 0x36, 0x6e, 0x63, 0xf5, 0x89, 0xb7, 0x7f, 0x7e, 0x7b, 0xe6, 0x1b, 0xcd, 0xcc, 0xc2,
+ 0xbb, 0x5c, 0xa9, 0x7c, 0x23, 0x17, 0xb9, 0xda, 0x88, 0x32, 0x5f, 0x28, 0x9d, 0x5f, 0x88, 0xaa,
+ 0x92, 0x65, 0x5e, 0x94, 0xf2, 0xa2, 0x28, 0x6b, 0xa9, 0x4b, 0xb1, 0xb9, 0xd0, 0x72, 0xab, 0x6a,
+ 0xc9, 0x45, 0x55, 0xf4, 0xe4, 0xa2, 0xd2, 0xaa, 0x56, 0x18, 0xf6, 0xce, 0xec, 0x27, 0x8c, 0x98,
+ 0xfc, 0xf6, 0x5d, 0xee, 0x6a, 0xfc, 0x12, 0xec, 0x9d, 0xd4, 0xb7, 0x45, 0x26, 0x79, 0x29, 0xb6,
+ 0xd2, 0x31, 0xa7, 0xe6, 0xdc, 0x62, 0x93, 0xce, 0x0b, 0xc5, 0x56, 0xe2, 0x33, 0x38, 0xdc, 0xca,
+ 0xfa, 0x8b, 0x5a, 0x3b, 0x07, 0xed, 0xc7, 0x2e, 0xc2, 0x0e, 0x8c, 0xf4, 0xbf, 0x2a, 0xce, 0x60,
+ 0x6a, 0xce, 0x6d, 0x76, 0x17, 0xe2, 0x17, 0x00, 0x9d, 0xe4, 0xc5, 0xda, 0x19, 0x4e, 0x8d, 0xb9,
+ 0xc5, 0xac, 0xce, 0xf1, 0xd7, 0xb3, 0xb7, 0x80, 0x48, 0x55, 0x6d, 0x8a, 0x4c, 0xd4, 0x85, 0x2a,
+ 0xa9, 0xd6, 0x4a, 0x63, 0x0c, 0x83, 0x4c, 0xad, 0xa5, 0x63, 0x4c, 0xcd, 0xf9, 0x90, 0xb5, 0xba,
+ 0x01, 0xaf, 0x65, 0x2d, 0x8a, 0x4d, 0xd7, 0x55, 0x17, 0xcd, 0x7e, 0x9b, 0x30, 0x66, 0x55, 0xf6,
+ 0x7f, 0x89, 0x46, 0x2f, 0xf1, 0x97, 0x09, 0x56, 0x9b, 0xe5, 0x36, 0x7f, 0x4d, 0x60, 0x94, 0x86,
+ 0x1f, 0xc2, 0xe8, 0x63, 0x88, 0x1e, 0x61, 0x0c, 0xc7, 0x2e, 0x09, 0x02, 0x1e, 0x46, 0x09, 0xbf,
+ 0x8c, 0xd2, 0xd0, 0x43, 0x06, 0x7e, 0x0c, 0x93, 0x15, 0x61, 0x31, 0xe5, 0x94, 0xb1, 0x88, 0x21,
+ 0x13, 0x9f, 0x01, 0x8e, 0xa9, 0x9b, 0x32, 0x3f, 0xb9, 0xe1, 0xd7, 0x7e, 0x14, 0x90, 0xc4, 0x8f,
+ 0x42, 0x74, 0x80, 0x8f, 0x01, 0xa2, 0x6b, 0xca, 0xf8, 0x55, 0x1a, 0x25, 0x04, 0x0d, 0xf0, 0x53,
+ 0x38, 0x61, 0xf4, 0x2a, 0xa5, 0x71, 0xc2, 0x93, 0x28, 0xe2, 0x01, 0x61, 0xef, 0x29, 0x1a, 0xe2,
+ 0x67, 0xf0, 0xc4, 0x25, 0x2b, 0xb2, 0xf4, 0x83, 0xa6, 0x80, 0xe7, 0xc7, 0x64, 0x19, 0x50, 0x0f,
+ 0x1d, 0xe2, 0x53, 0x40, 0x97, 0x94, 0x24, 0x29, 0xa3, 0x7b, 0x77, 0xd4, 0xe0, 0x97, 0xc4, 0xe3,
+ 0x5d, 0x25, 0x34, 0x6e, 0xf0, 0x8c, 0xc6, 0xab, 0x28, 0x8c, 0x69, 0xaf, 0xae, 0x85, 0x8f, 0xc0,
+ 0x72, 0x49, 0xe8, 0xd2, 0xa0, 0xc9, 0x03, 0x8c, 0xc0, 0x66, 0x74, 0x15, 0x90, 0x9b, 0xae, 0xef,
+ 0x49, 0xd3, 0x8f, 0x47, 0x89, 0x17, 0xf8, 0x21, 0xe5, 0xf4, 0x93, 0x4b, 0xa9, 0x47, 0x3d, 0x64,
+ 0xcf, 0xfe, 0x18, 0x30, 0x66, 0x72, 0x57, 0xa9, 0x72, 0x27, 0xf1, 0x73, 0x18, 0xeb, 0x4e, 0x3b,
+ 0xc6, 0xd4, 0x98, 0xdb, 0xec, 0x3e, 0xc6, 0xe7, 0x60, 0xc9, 0x1f, 0x99, 0xac, 0x9a, 0x75, 0xb5,
+ 0x23, 0xb5, 0xd9, 0xde, 0xc0, 0x3e, 0x9c, 0x88, 0xfd, 0x3a, 0xb9, 0x6c, 0x06, 0xec, 0x1c, 0x4c,
+ 0x8d, 0xf9, 0xe4, 0xcd, 0xf9, 0xa2, 0x77, 0x87, 0x0f, 0x77, 0xce, 0x90, 0x78, 0x78, 0x05, 0xaf,
+ 0xe0, 0xf8, 0xab, 0xb8, 0x15, 0x7c, 0x4f, 0x1b, 0xb4, 0xb4, 0xa3, 0xc6, 0xa5, 0xf7, 0xc4, 0xd7,
+ 0x60, 0xe9, 0x2a, 0xeb, 0x48, 0xc3, 0x96, 0x74, 0xda, 0x27, 0xdd, 0x1d, 0x07, 0x1b, 0xeb, 0x4e,
+ 0x2d, 0xed, 0xcf, 0xbd, 0x07, 0xf0, 0x37, 0x00, 0x00, 0xff, 0xff, 0x38, 0xd1, 0x0f, 0x22, 0x4f,
+ 0x03, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
new file mode 100644
index 00000000..f21763a4
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
@@ -0,0 +1,44 @@
+syntax = "proto2";
+option go_package = "remote_api";
+
+package remote_api;
+
+message Request {
+ required string service_name = 2;
+ required string method = 3;
+ required bytes request = 4;
+ optional string request_id = 5;
+}
+
+message ApplicationError {
+ required int32 code = 1;
+ required string detail = 2;
+}
+
+message RpcError {
+ enum ErrorCode {
+ UNKNOWN = 0;
+ CALL_NOT_FOUND = 1;
+ PARSE_ERROR = 2;
+ SECURITY_VIOLATION = 3;
+ OVER_QUOTA = 4;
+ REQUEST_TOO_LARGE = 5;
+ CAPABILITY_DISABLED = 6;
+ FEATURE_DISABLED = 7;
+ BAD_REQUEST = 8;
+ RESPONSE_TOO_LARGE = 9;
+ CANCELLED = 10;
+ REPLAY_ERROR = 11;
+ DEADLINE_EXCEEDED = 12;
+ }
+ required int32 code = 1;
+ optional string detail = 2;
+}
+
+message Response {
+ optional bytes response = 1;
+ optional bytes exception = 2;
+ optional ApplicationError application_error = 3;
+ optional bytes java_exception = 4;
+ optional RpcError rpc_error = 5;
+}
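
Every App Engine API call travels inside this envelope: the inner service request is serialized first and carried opaquely in the request bytes, and the reply comes back the same way in Response. A sketch of wrapping a call, written as if inside the generated remote_api package so the internal import restriction does not get in the way; the service and method names are placeholders:

// wrapCall is a hypothetical helper that serializes an inner API request
// and wraps it in the remote_api envelope.
func wrapCall(service, method string, inner proto.Message) ([]byte, error) {
	payload, err := proto.Marshal(inner)
	if err != nil {
		return nil, err
	}
	env := &Request{
		ServiceName: proto.String(service),
		Method:      proto.String(method),
		Request:     payload,
	}
	return proto.Marshal(env)
}
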
diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go
new file mode 100644
index 00000000..9006ae65
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/transaction.go
@@ -0,0 +1,115 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+// This file implements hooks for applying datastore transactions.
+
+import (
+ "errors"
+ "reflect"
+
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+
+ basepb "google.golang.org/appengine/internal/base"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+var transactionSetters = make(map[reflect.Type]reflect.Value)
+
+// RegisterTransactionSetter registers a function that sets transaction information
+// in a protocol buffer message. f should be a function with two arguments,
+// the first being a protocol buffer type, and the second being *datastore.Transaction.
+func RegisterTransactionSetter(f interface{}) {
+ v := reflect.ValueOf(f)
+ transactionSetters[v.Type().In(0)] = v
+}
+
+// applyTransaction applies the transaction t to message pb
+// by using the relevant setter passed to RegisterTransactionSetter.
+func applyTransaction(pb proto.Message, t *pb.Transaction) {
+ v := reflect.ValueOf(pb)
+ if f, ok := transactionSetters[v.Type()]; ok {
+ f.Call([]reflect.Value{v, reflect.ValueOf(t)})
+ }
+}
+
+var transactionKey = "used for *Transaction"
+
+func transactionFromContext(ctx netcontext.Context) *transaction {
+ t, _ := ctx.Value(&transactionKey).(*transaction)
+ return t
+}
+
+func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context {
+ return netcontext.WithValue(ctx, &transactionKey, t)
+}
+
+type transaction struct {
+ transaction pb.Transaction
+ finished bool
+}
+
+var ErrConcurrentTransaction = errors.New("internal: concurrent transaction")
+
+func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) {
+ if transactionFromContext(c) != nil {
+ return nil, errors.New("nested transactions are not supported")
+ }
+
+ // Begin the transaction.
+ t := &transaction{}
+ req := &pb.BeginTransactionRequest{
+ App: proto.String(FullyQualifiedAppID(c)),
+ }
+ if xg {
+ req.AllowMultipleEg = proto.Bool(true)
+ }
+ if previousTransaction != nil {
+ req.PreviousTransaction = previousTransaction
+ }
+ if readOnly {
+ req.Mode = pb.BeginTransactionRequest_READ_ONLY.Enum()
+ } else {
+ req.Mode = pb.BeginTransactionRequest_READ_WRITE.Enum()
+ }
+ if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil {
+ return nil, err
+ }
+
+ // Call f, rolling back the transaction if f returns a non-nil error, or panics.
+ // The panic is not recovered.
+ defer func() {
+ if t.finished {
+ return
+ }
+ t.finished = true
+ // Ignore the error return value, since we are already returning a non-nil
+ // error (or we're panicking).
+ Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{})
+ }()
+ if err := f(withTransaction(c, t)); err != nil {
+ return &t.transaction, err
+ }
+ t.finished = true
+
+ // Commit the transaction.
+ res := &pb.CommitResponse{}
+ err := Call(c, "datastore_v3", "Commit", &t.transaction, res)
+ if ae, ok := err.(*APIError); ok {
+ /* TODO: restore this conditional
+ if appengine.IsDevAppServer() {
+ */
+ // The Python Dev AppServer raises an ApplicationError with error code 2 (which is
+ // Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.".
+ if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." {
+ return &t.transaction, ErrConcurrentTransaction
+ }
+ if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) {
+ return &t.transaction, ErrConcurrentTransaction
+ }
+ }
+ return &t.transaction, err
+}
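
RegisterTransactionSetter is the inversion that keeps this package from importing every API package: each service registers a small function keyed by its own request type, and applyTransaction looks the setter up by reflect.Type when a call is made inside an open transaction. A sketch of such a registration, written as if in this package and assuming a generated datastore request message that carries an optional Transaction field; the exact message used is illustrative:

// Hypothetical registration: stamp the open transaction onto a datastore
// request type before the RPC is issued.
func init() {
	RegisterTransactionSetter(func(req *pb.GetRequest, t *pb.Transaction) {
		req.Transaction = t
	})
}
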
diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
new file mode 100644
index 00000000..5f727750
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
@@ -0,0 +1,527 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
+
+package urlfetch
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type URLFetchServiceError_ErrorCode int32
+
+const (
+ URLFetchServiceError_OK URLFetchServiceError_ErrorCode = 0
+ URLFetchServiceError_INVALID_URL URLFetchServiceError_ErrorCode = 1
+ URLFetchServiceError_FETCH_ERROR URLFetchServiceError_ErrorCode = 2
+ URLFetchServiceError_UNSPECIFIED_ERROR URLFetchServiceError_ErrorCode = 3
+ URLFetchServiceError_RESPONSE_TOO_LARGE URLFetchServiceError_ErrorCode = 4
+ URLFetchServiceError_DEADLINE_EXCEEDED URLFetchServiceError_ErrorCode = 5
+ URLFetchServiceError_SSL_CERTIFICATE_ERROR URLFetchServiceError_ErrorCode = 6
+ URLFetchServiceError_DNS_ERROR URLFetchServiceError_ErrorCode = 7
+ URLFetchServiceError_CLOSED URLFetchServiceError_ErrorCode = 8
+ URLFetchServiceError_INTERNAL_TRANSIENT_ERROR URLFetchServiceError_ErrorCode = 9
+ URLFetchServiceError_TOO_MANY_REDIRECTS URLFetchServiceError_ErrorCode = 10
+ URLFetchServiceError_MALFORMED_REPLY URLFetchServiceError_ErrorCode = 11
+ URLFetchServiceError_CONNECTION_ERROR URLFetchServiceError_ErrorCode = 12
+)
+
+var URLFetchServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INVALID_URL",
+ 2: "FETCH_ERROR",
+ 3: "UNSPECIFIED_ERROR",
+ 4: "RESPONSE_TOO_LARGE",
+ 5: "DEADLINE_EXCEEDED",
+ 6: "SSL_CERTIFICATE_ERROR",
+ 7: "DNS_ERROR",
+ 8: "CLOSED",
+ 9: "INTERNAL_TRANSIENT_ERROR",
+ 10: "TOO_MANY_REDIRECTS",
+ 11: "MALFORMED_REPLY",
+ 12: "CONNECTION_ERROR",
+}
+var URLFetchServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INVALID_URL": 1,
+ "FETCH_ERROR": 2,
+ "UNSPECIFIED_ERROR": 3,
+ "RESPONSE_TOO_LARGE": 4,
+ "DEADLINE_EXCEEDED": 5,
+ "SSL_CERTIFICATE_ERROR": 6,
+ "DNS_ERROR": 7,
+ "CLOSED": 8,
+ "INTERNAL_TRANSIENT_ERROR": 9,
+ "TOO_MANY_REDIRECTS": 10,
+ "MALFORMED_REPLY": 11,
+ "CONNECTION_ERROR": 12,
+}
+
+func (x URLFetchServiceError_ErrorCode) Enum() *URLFetchServiceError_ErrorCode {
+ p := new(URLFetchServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x URLFetchServiceError_ErrorCode) String() string {
+ return proto.EnumName(URLFetchServiceError_ErrorCode_name, int32(x))
+}
+func (x *URLFetchServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(URLFetchServiceError_ErrorCode_value, data, "URLFetchServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = URLFetchServiceError_ErrorCode(value)
+ return nil
+}
+func (URLFetchServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{0, 0}
+}
+
+type URLFetchRequest_RequestMethod int32
+
+const (
+ URLFetchRequest_GET URLFetchRequest_RequestMethod = 1
+ URLFetchRequest_POST URLFetchRequest_RequestMethod = 2
+ URLFetchRequest_HEAD URLFetchRequest_RequestMethod = 3
+ URLFetchRequest_PUT URLFetchRequest_RequestMethod = 4
+ URLFetchRequest_DELETE URLFetchRequest_RequestMethod = 5
+ URLFetchRequest_PATCH URLFetchRequest_RequestMethod = 6
+)
+
+var URLFetchRequest_RequestMethod_name = map[int32]string{
+ 1: "GET",
+ 2: "POST",
+ 3: "HEAD",
+ 4: "PUT",
+ 5: "DELETE",
+ 6: "PATCH",
+}
+var URLFetchRequest_RequestMethod_value = map[string]int32{
+ "GET": 1,
+ "POST": 2,
+ "HEAD": 3,
+ "PUT": 4,
+ "DELETE": 5,
+ "PATCH": 6,
+}
+
+func (x URLFetchRequest_RequestMethod) Enum() *URLFetchRequest_RequestMethod {
+ p := new(URLFetchRequest_RequestMethod)
+ *p = x
+ return p
+}
+func (x URLFetchRequest_RequestMethod) String() string {
+ return proto.EnumName(URLFetchRequest_RequestMethod_name, int32(x))
+}
+func (x *URLFetchRequest_RequestMethod) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(URLFetchRequest_RequestMethod_value, data, "URLFetchRequest_RequestMethod")
+ if err != nil {
+ return err
+ }
+ *x = URLFetchRequest_RequestMethod(value)
+ return nil
+}
+func (URLFetchRequest_RequestMethod) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1, 0}
+}
+
+type URLFetchServiceError struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *URLFetchServiceError) Reset() { *m = URLFetchServiceError{} }
+func (m *URLFetchServiceError) String() string { return proto.CompactTextString(m) }
+func (*URLFetchServiceError) ProtoMessage() {}
+func (*URLFetchServiceError) Descriptor() ([]byte, []int) {
+ return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{0}
+}
+func (m *URLFetchServiceError) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_URLFetchServiceError.Unmarshal(m, b)
+}
+func (m *URLFetchServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_URLFetchServiceError.Marshal(b, m, deterministic)
+}
+func (dst *URLFetchServiceError) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_URLFetchServiceError.Merge(dst, src)
+}
+func (m *URLFetchServiceError) XXX_Size() int {
+ return xxx_messageInfo_URLFetchServiceError.Size(m)
+}
+func (m *URLFetchServiceError) XXX_DiscardUnknown() {
+ xxx_messageInfo_URLFetchServiceError.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_URLFetchServiceError proto.InternalMessageInfo
+
+type URLFetchRequest struct {
+ Method *URLFetchRequest_RequestMethod `protobuf:"varint,1,req,name=Method,enum=appengine.URLFetchRequest_RequestMethod" json:"Method,omitempty"`
+ Url *string `protobuf:"bytes,2,req,name=Url" json:"Url,omitempty"`
+ Header []*URLFetchRequest_Header `protobuf:"group,3,rep,name=Header,json=header" json:"header,omitempty"`
+ Payload []byte `protobuf:"bytes,6,opt,name=Payload" json:"Payload,omitempty"`
+ FollowRedirects *bool `protobuf:"varint,7,opt,name=FollowRedirects,def=1" json:"FollowRedirects,omitempty"`
+ Deadline *float64 `protobuf:"fixed64,8,opt,name=Deadline" json:"Deadline,omitempty"`
+ MustValidateServerCertificate *bool `protobuf:"varint,9,opt,name=MustValidateServerCertificate,def=1" json:"MustValidateServerCertificate,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *URLFetchRequest) Reset() { *m = URLFetchRequest{} }
+func (m *URLFetchRequest) String() string { return proto.CompactTextString(m) }
+func (*URLFetchRequest) ProtoMessage() {}
+func (*URLFetchRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1}
+}
+func (m *URLFetchRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_URLFetchRequest.Unmarshal(m, b)
+}
+func (m *URLFetchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_URLFetchRequest.Marshal(b, m, deterministic)
+}
+func (dst *URLFetchRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_URLFetchRequest.Merge(dst, src)
+}
+func (m *URLFetchRequest) XXX_Size() int {
+ return xxx_messageInfo_URLFetchRequest.Size(m)
+}
+func (m *URLFetchRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_URLFetchRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_URLFetchRequest proto.InternalMessageInfo
+
+const Default_URLFetchRequest_FollowRedirects bool = true
+const Default_URLFetchRequest_MustValidateServerCertificate bool = true
+
+func (m *URLFetchRequest) GetMethod() URLFetchRequest_RequestMethod {
+ if m != nil && m.Method != nil {
+ return *m.Method
+ }
+ return URLFetchRequest_GET
+}
+
+func (m *URLFetchRequest) GetUrl() string {
+ if m != nil && m.Url != nil {
+ return *m.Url
+ }
+ return ""
+}
+
+func (m *URLFetchRequest) GetHeader() []*URLFetchRequest_Header {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *URLFetchRequest) GetPayload() []byte {
+ if m != nil {
+ return m.Payload
+ }
+ return nil
+}
+
+func (m *URLFetchRequest) GetFollowRedirects() bool {
+ if m != nil && m.FollowRedirects != nil {
+ return *m.FollowRedirects
+ }
+ return Default_URLFetchRequest_FollowRedirects
+}
+
+func (m *URLFetchRequest) GetDeadline() float64 {
+ if m != nil && m.Deadline != nil {
+ return *m.Deadline
+ }
+ return 0
+}
+
+func (m *URLFetchRequest) GetMustValidateServerCertificate() bool {
+ if m != nil && m.MustValidateServerCertificate != nil {
+ return *m.MustValidateServerCertificate
+ }
+ return Default_URLFetchRequest_MustValidateServerCertificate
+}
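+
+// Note on proto2 defaults: the getters above fall back to the declared
+// defaults when a field is unset, so a zero-value request already follows
+// redirects and validates certificates (illustrative only):
+//
+//	req := &URLFetchRequest{}
+//	_ = req.GetFollowRedirects()               // true (Default_URLFetchRequest_FollowRedirects)
+//	_ = req.GetMustValidateServerCertificate() // true (Default_URLFetchRequest_MustValidateServerCertificate)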
+
+type URLFetchRequest_Header struct {
+ Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
+ Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *URLFetchRequest_Header) Reset() { *m = URLFetchRequest_Header{} }
+func (m *URLFetchRequest_Header) String() string { return proto.CompactTextString(m) }
+func (*URLFetchRequest_Header) ProtoMessage() {}
+func (*URLFetchRequest_Header) Descriptor() ([]byte, []int) {
+ return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1, 0}
+}
+func (m *URLFetchRequest_Header) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_URLFetchRequest_Header.Unmarshal(m, b)
+}
+func (m *URLFetchRequest_Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_URLFetchRequest_Header.Marshal(b, m, deterministic)
+}
+func (dst *URLFetchRequest_Header) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_URLFetchRequest_Header.Merge(dst, src)
+}
+func (m *URLFetchRequest_Header) XXX_Size() int {
+ return xxx_messageInfo_URLFetchRequest_Header.Size(m)
+}
+func (m *URLFetchRequest_Header) XXX_DiscardUnknown() {
+ xxx_messageInfo_URLFetchRequest_Header.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_URLFetchRequest_Header proto.InternalMessageInfo
+
+func (m *URLFetchRequest_Header) GetKey() string {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return ""
+}
+
+func (m *URLFetchRequest_Header) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type URLFetchResponse struct {
+ Content []byte `protobuf:"bytes,1,opt,name=Content" json:"Content,omitempty"`
+ StatusCode *int32 `protobuf:"varint,2,req,name=StatusCode" json:"StatusCode,omitempty"`
+ Header []*URLFetchResponse_Header `protobuf:"group,3,rep,name=Header,json=header" json:"header,omitempty"`
+ ContentWasTruncated *bool `protobuf:"varint,6,opt,name=ContentWasTruncated,def=0" json:"ContentWasTruncated,omitempty"`
+ ExternalBytesSent *int64 `protobuf:"varint,7,opt,name=ExternalBytesSent" json:"ExternalBytesSent,omitempty"`
+ ExternalBytesReceived *int64 `protobuf:"varint,8,opt,name=ExternalBytesReceived" json:"ExternalBytesReceived,omitempty"`
+ FinalUrl *string `protobuf:"bytes,9,opt,name=FinalUrl" json:"FinalUrl,omitempty"`
+ ApiCpuMilliseconds *int64 `protobuf:"varint,10,opt,name=ApiCpuMilliseconds,def=0" json:"ApiCpuMilliseconds,omitempty"`
+ ApiBytesSent *int64 `protobuf:"varint,11,opt,name=ApiBytesSent,def=0" json:"ApiBytesSent,omitempty"`
+ ApiBytesReceived *int64 `protobuf:"varint,12,opt,name=ApiBytesReceived,def=0" json:"ApiBytesReceived,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *URLFetchResponse) Reset() { *m = URLFetchResponse{} }
+func (m *URLFetchResponse) String() string { return proto.CompactTextString(m) }
+func (*URLFetchResponse) ProtoMessage() {}
+func (*URLFetchResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{2}
+}
+func (m *URLFetchResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_URLFetchResponse.Unmarshal(m, b)
+}
+func (m *URLFetchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_URLFetchResponse.Marshal(b, m, deterministic)
+}
+func (dst *URLFetchResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_URLFetchResponse.Merge(dst, src)
+}
+func (m *URLFetchResponse) XXX_Size() int {
+ return xxx_messageInfo_URLFetchResponse.Size(m)
+}
+func (m *URLFetchResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_URLFetchResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_URLFetchResponse proto.InternalMessageInfo
+
+const Default_URLFetchResponse_ContentWasTruncated bool = false
+const Default_URLFetchResponse_ApiCpuMilliseconds int64 = 0
+const Default_URLFetchResponse_ApiBytesSent int64 = 0
+const Default_URLFetchResponse_ApiBytesReceived int64 = 0
+
+func (m *URLFetchResponse) GetContent() []byte {
+ if m != nil {
+ return m.Content
+ }
+ return nil
+}
+
+func (m *URLFetchResponse) GetStatusCode() int32 {
+ if m != nil && m.StatusCode != nil {
+ return *m.StatusCode
+ }
+ return 0
+}
+
+func (m *URLFetchResponse) GetHeader() []*URLFetchResponse_Header {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *URLFetchResponse) GetContentWasTruncated() bool {
+ if m != nil && m.ContentWasTruncated != nil {
+ return *m.ContentWasTruncated
+ }
+ return Default_URLFetchResponse_ContentWasTruncated
+}
+
+func (m *URLFetchResponse) GetExternalBytesSent() int64 {
+ if m != nil && m.ExternalBytesSent != nil {
+ return *m.ExternalBytesSent
+ }
+ return 0
+}
+
+func (m *URLFetchResponse) GetExternalBytesReceived() int64 {
+ if m != nil && m.ExternalBytesReceived != nil {
+ return *m.ExternalBytesReceived
+ }
+ return 0
+}
+
+func (m *URLFetchResponse) GetFinalUrl() string {
+ if m != nil && m.FinalUrl != nil {
+ return *m.FinalUrl
+ }
+ return ""
+}
+
+func (m *URLFetchResponse) GetApiCpuMilliseconds() int64 {
+ if m != nil && m.ApiCpuMilliseconds != nil {
+ return *m.ApiCpuMilliseconds
+ }
+ return Default_URLFetchResponse_ApiCpuMilliseconds
+}
+
+func (m *URLFetchResponse) GetApiBytesSent() int64 {
+ if m != nil && m.ApiBytesSent != nil {
+ return *m.ApiBytesSent
+ }
+ return Default_URLFetchResponse_ApiBytesSent
+}
+
+func (m *URLFetchResponse) GetApiBytesReceived() int64 {
+ if m != nil && m.ApiBytesReceived != nil {
+ return *m.ApiBytesReceived
+ }
+ return Default_URLFetchResponse_ApiBytesReceived
+}
+
+type URLFetchResponse_Header struct {
+ Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
+ Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *URLFetchResponse_Header) Reset() { *m = URLFetchResponse_Header{} }
+func (m *URLFetchResponse_Header) String() string { return proto.CompactTextString(m) }
+func (*URLFetchResponse_Header) ProtoMessage() {}
+func (*URLFetchResponse_Header) Descriptor() ([]byte, []int) {
+ return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{2, 0}
+}
+func (m *URLFetchResponse_Header) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_URLFetchResponse_Header.Unmarshal(m, b)
+}
+func (m *URLFetchResponse_Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_URLFetchResponse_Header.Marshal(b, m, deterministic)
+}
+func (dst *URLFetchResponse_Header) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_URLFetchResponse_Header.Merge(dst, src)
+}
+func (m *URLFetchResponse_Header) XXX_Size() int {
+ return xxx_messageInfo_URLFetchResponse_Header.Size(m)
+}
+func (m *URLFetchResponse_Header) XXX_DiscardUnknown() {
+ xxx_messageInfo_URLFetchResponse_Header.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_URLFetchResponse_Header proto.InternalMessageInfo
+
+func (m *URLFetchResponse_Header) GetKey() string {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return ""
+}
+
+func (m *URLFetchResponse_Header) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterType((*URLFetchServiceError)(nil), "appengine.URLFetchServiceError")
+ proto.RegisterType((*URLFetchRequest)(nil), "appengine.URLFetchRequest")
+ proto.RegisterType((*URLFetchRequest_Header)(nil), "appengine.URLFetchRequest.Header")
+ proto.RegisterType((*URLFetchResponse)(nil), "appengine.URLFetchResponse")
+ proto.RegisterType((*URLFetchResponse_Header)(nil), "appengine.URLFetchResponse.Header")
+}
+
+func init() {
+ proto.RegisterFile("google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto", fileDescriptor_urlfetch_service_b245a7065f33bced)
+}
+
+var fileDescriptor_urlfetch_service_b245a7065f33bced = []byte{
+ // 770 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xdd, 0x6e, 0xe3, 0x54,
+ 0x10, 0xc6, 0x76, 0x7e, 0xa7, 0x5d, 0x7a, 0x76, 0xb6, 0x45, 0x66, 0xb5, 0xa0, 0x10, 0x09, 0x29,
+ 0x17, 0x90, 0x2e, 0x2b, 0x24, 0x44, 0xaf, 0x70, 0xed, 0x93, 0xad, 0xa9, 0x63, 0x47, 0xc7, 0x4e,
+ 0x61, 0xb9, 0xb1, 0xac, 0x78, 0x9a, 0x5a, 0xb2, 0xec, 0x60, 0x9f, 0x2c, 0xf4, 0x35, 0x78, 0x0d,
+ 0xde, 0x87, 0xa7, 0xe1, 0x02, 0x9d, 0xc4, 0xc9, 0x6e, 0xbb, 0xd1, 0x4a, 0x5c, 0x65, 0xe6, 0x9b,
+ 0xef, 0xcc, 0x99, 0x7c, 0xdf, 0xf8, 0x80, 0xb3, 0x2c, 0xcb, 0x65, 0x4e, 0xe3, 0x65, 0x99, 0x27,
+ 0xc5, 0x72, 0x5c, 0x56, 0xcb, 0xf3, 0x64, 0xb5, 0xa2, 0x62, 0x99, 0x15, 0x74, 0x9e, 0x15, 0x92,
+ 0xaa, 0x22, 0xc9, 0xcf, 0xd7, 0x55, 0x7e, 0x4b, 0x72, 0x71, 0xb7, 0x0f, 0xe2, 0x9a, 0xaa, 0xb7,
+ 0xd9, 0x82, 0xc6, 0xab, 0xaa, 0x94, 0x25, 0xf6, 0xf7, 0x67, 0x86, 0x7f, 0xeb, 0x70, 0x3a, 0x17,
+ 0xde, 0x44, 0xb1, 0xc2, 0x2d, 0x89, 0x57, 0x55, 0x59, 0x0d, 0xff, 0xd2, 0xa1, 0xbf, 0x89, 0xec,
+ 0x32, 0x25, 0xec, 0x80, 0x1e, 0x5c, 0xb3, 0x4f, 0xf0, 0x04, 0x8e, 0x5c, 0xff, 0xc6, 0xf2, 0x5c,
+ 0x27, 0x9e, 0x0b, 0x8f, 0x69, 0x0a, 0x98, 0xf0, 0xc8, 0xbe, 0x8a, 0xb9, 0x10, 0x81, 0x60, 0x3a,
+ 0x9e, 0xc1, 0xd3, 0xb9, 0x1f, 0xce, 0xb8, 0xed, 0x4e, 0x5c, 0xee, 0x34, 0xb0, 0x81, 0x9f, 0x01,
+ 0x0a, 0x1e, 0xce, 0x02, 0x3f, 0xe4, 0x71, 0x14, 0x04, 0xb1, 0x67, 0x89, 0xd7, 0x9c, 0xb5, 0x14,
+ 0xdd, 0xe1, 0x96, 0xe3, 0xb9, 0x3e, 0x8f, 0xf9, 0xaf, 0x36, 0xe7, 0x0e, 0x77, 0x58, 0x1b, 0x3f,
+ 0x87, 0xb3, 0x30, 0xf4, 0x62, 0x9b, 0x8b, 0xc8, 0x9d, 0xb8, 0xb6, 0x15, 0xf1, 0xa6, 0x53, 0x07,
+ 0x9f, 0x40, 0xdf, 0xf1, 0xc3, 0x26, 0xed, 0x22, 0x40, 0xc7, 0xf6, 0x82, 0x90, 0x3b, 0xac, 0x87,
+ 0x2f, 0xc0, 0x74, 0xfd, 0x88, 0x0b, 0xdf, 0xf2, 0xe2, 0x48, 0x58, 0x7e, 0xe8, 0x72, 0x3f, 0x6a,
+ 0x98, 0x7d, 0x35, 0x82, 0xba, 0x79, 0x6a, 0xf9, 0x6f, 0x62, 0xc1, 0x1d, 0x57, 0x70, 0x3b, 0x0a,
+ 0x19, 0xe0, 0x33, 0x38, 0x99, 0x5a, 0xde, 0x24, 0x10, 0x53, 0xee, 0xc4, 0x82, 0xcf, 0xbc, 0x37,
+ 0xec, 0x08, 0x4f, 0x81, 0xd9, 0x81, 0xef, 0x73, 0x3b, 0x72, 0x03, 0xbf, 0x69, 0x71, 0x3c, 0xfc,
+ 0xc7, 0x80, 0x93, 0x9d, 0x5a, 0x82, 0x7e, 0x5f, 0x53, 0x2d, 0xf1, 0x27, 0xe8, 0x4c, 0x49, 0xde,
+ 0x95, 0xa9, 0xa9, 0x0d, 0xf4, 0xd1, 0xa7, 0xaf, 0x46, 0xe3, 0xbd, 0xba, 0xe3, 0x47, 0xdc, 0x71,
+ 0xf3, 0xbb, 0xe5, 0x8b, 0xe6, 0x1c, 0x32, 0x30, 0xe6, 0x55, 0x6e, 0xea, 0x03, 0x7d, 0xd4, 0x17,
+ 0x2a, 0xc4, 0x1f, 0xa1, 0x73, 0x47, 0x49, 0x4a, 0x95, 0x69, 0x0c, 0x8c, 0x11, 0xbc, 0xfa, 0xea,
+ 0x23, 0x3d, 0xaf, 0x36, 0x44, 0xd1, 0x1c, 0xc0, 0x17, 0xd0, 0x9d, 0x25, 0xf7, 0x79, 0x99, 0xa4,
+ 0x66, 0x67, 0xa0, 0x8d, 0x8e, 0x2f, 0xf5, 0x9e, 0x26, 0x76, 0x10, 0x8e, 0xe1, 0x64, 0x52, 0xe6,
+ 0x79, 0xf9, 0x87, 0xa0, 0x34, 0xab, 0x68, 0x21, 0x6b, 0xb3, 0x3b, 0xd0, 0x46, 0xbd, 0x8b, 0x96,
+ 0xac, 0xd6, 0x24, 0x1e, 0x17, 0xf1, 0x39, 0xf4, 0x1c, 0x4a, 0xd2, 0x3c, 0x2b, 0xc8, 0xec, 0x0d,
+ 0xb4, 0x91, 0x26, 0xf6, 0x39, 0xfe, 0x0c, 0x5f, 0x4c, 0xd7, 0xb5, 0xbc, 0x49, 0xf2, 0x2c, 0x4d,
+ 0x24, 0xa9, 0xed, 0xa1, 0xca, 0xa6, 0x4a, 0x66, 0xb7, 0xd9, 0x22, 0x91, 0x64, 0xf6, 0xdf, 0xeb,
+ 0xfc, 0x71, 0xea, 0xf3, 0x97, 0xd0, 0xd9, 0xfe, 0x0f, 0x25, 0xc6, 0x35, 0xdd, 0x9b, 0xad, 0xad,
+ 0x18, 0xd7, 0x74, 0x8f, 0xa7, 0xd0, 0xbe, 0x49, 0xf2, 0x35, 0x99, 0xed, 0x0d, 0xb6, 0x4d, 0x86,
+ 0x1e, 0x3c, 0x79, 0xa0, 0x26, 0x76, 0xc1, 0x78, 0xcd, 0x23, 0xa6, 0x61, 0x0f, 0x5a, 0xb3, 0x20,
+ 0x8c, 0x98, 0xae, 0xa2, 0x2b, 0x6e, 0x39, 0xcc, 0x50, 0xc5, 0xd9, 0x3c, 0x62, 0x2d, 0xb5, 0x2e,
+ 0x0e, 0xf7, 0x78, 0xc4, 0x59, 0x1b, 0xfb, 0xd0, 0x9e, 0x59, 0x91, 0x7d, 0xc5, 0x3a, 0xc3, 0x7f,
+ 0x0d, 0x60, 0xef, 0x84, 0xad, 0x57, 0x65, 0x51, 0x13, 0x9a, 0xd0, 0xb5, 0xcb, 0x42, 0x52, 0x21,
+ 0x4d, 0x4d, 0x49, 0x29, 0x76, 0x29, 0x7e, 0x09, 0x10, 0xca, 0x44, 0xae, 0x6b, 0xf5, 0x71, 0x6c,
+ 0x8c, 0x6b, 0x8b, 0xf7, 0x10, 0xbc, 0x78, 0xe4, 0xdf, 0xf0, 0xa0, 0x7f, 0xdb, 0x6b, 0x1e, 0x1b,
+ 0xf8, 0x03, 0x3c, 0x6b, 0xae, 0xf9, 0x25, 0xa9, 0xa3, 0x6a, 0x5d, 0x28, 0x81, 0xb6, 0x66, 0xf6,
+ 0x2e, 0xda, 0xb7, 0x49, 0x5e, 0x93, 0x38, 0xc4, 0xc0, 0x6f, 0xe0, 0x29, 0xff, 0x73, 0xfb, 0x02,
+ 0x5c, 0xde, 0x4b, 0xaa, 0x43, 0x35, 0xb8, 0x72, 0xd7, 0x10, 0x1f, 0x16, 0xf0, 0x7b, 0x38, 0x7b,
+ 0x00, 0x0a, 0x5a, 0x50, 0xf6, 0x96, 0xd2, 0x8d, 0xcd, 0x86, 0x38, 0x5c, 0x54, 0xfb, 0x30, 0xc9,
+ 0x8a, 0x24, 0x57, 0xfb, 0xaa, 0xec, 0xed, 0x8b, 0x7d, 0x8e, 0xdf, 0x01, 0x5a, 0xab, 0xcc, 0x5e,
+ 0xad, 0xa7, 0x59, 0x9e, 0x67, 0x35, 0x2d, 0xca, 0x22, 0xad, 0x4d, 0x50, 0xed, 0x2e, 0xb4, 0x97,
+ 0xe2, 0x40, 0x11, 0xbf, 0x86, 0x63, 0x6b, 0x95, 0xbd, 0x9b, 0xf6, 0x68, 0x47, 0x7e, 0x00, 0xe3,
+ 0xb7, 0xc0, 0x76, 0xf9, 0x7e, 0xcc, 0xe3, 0x1d, 0xf5, 0x83, 0xd2, 0xff, 0x5f, 0xa6, 0x4b, 0xf8,
+ 0xad, 0xb7, 0x7b, 0x2a, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x1d, 0x9f, 0x6d, 0x24, 0x63, 0x05,
+ 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
new file mode 100644
index 00000000..f695edf6
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
@@ -0,0 +1,64 @@
+syntax = "proto2";
+option go_package = "urlfetch";
+
+package appengine;
+
+message URLFetchServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INVALID_URL = 1;
+ FETCH_ERROR = 2;
+ UNSPECIFIED_ERROR = 3;
+ RESPONSE_TOO_LARGE = 4;
+ DEADLINE_EXCEEDED = 5;
+ SSL_CERTIFICATE_ERROR = 6;
+ DNS_ERROR = 7;
+ CLOSED = 8;
+ INTERNAL_TRANSIENT_ERROR = 9;
+ TOO_MANY_REDIRECTS = 10;
+ MALFORMED_REPLY = 11;
+ CONNECTION_ERROR = 12;
+ }
+}
+
+message URLFetchRequest {
+ enum RequestMethod {
+ GET = 1;
+ POST = 2;
+ HEAD = 3;
+ PUT = 4;
+ DELETE = 5;
+ PATCH = 6;
+ }
+ required RequestMethod Method = 1;
+ required string Url = 2;
+ repeated group Header = 3 {
+ required string Key = 4;
+ required string Value = 5;
+ }
+ optional bytes Payload = 6 [ctype=CORD];
+
+ optional bool FollowRedirects = 7 [default=true];
+
+ optional double Deadline = 8;
+
+ optional bool MustValidateServerCertificate = 9 [default=true];
+}
+
+message URLFetchResponse {
+ optional bytes Content = 1;
+ required int32 StatusCode = 2;
+ repeated group Header = 3 {
+ required string Key = 4;
+ required string Value = 5;
+ }
+ optional bool ContentWasTruncated = 6 [default=false];
+ optional int64 ExternalBytesSent = 7;
+ optional int64 ExternalBytesReceived = 8;
+
+ optional string FinalUrl = 9;
+
+ optional int64 ApiCpuMilliseconds = 10 [default=0];
+ optional int64 ApiBytesSent = 11 [default=0];
+ optional int64 ApiBytesReceived = 12 [default=0];
+}
diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
new file mode 100644
index 00000000..6ffe1e6d
--- /dev/null
+++ b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
@@ -0,0 +1,210 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package urlfetch provides an http.RoundTripper implementation
+// for fetching URLs via App Engine's urlfetch service.
+package urlfetch // import "google.golang.org/appengine/urlfetch"
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/urlfetch"
+)
+
+// Transport is an implementation of http.RoundTripper for
+// App Engine. Users should generally create an http.Client using
+// this transport and use the Client rather than using this transport
+// directly.
+type Transport struct {
+ Context context.Context
+
+ // Controls whether the application checks the validity of SSL certificates
+ // over HTTPS connections. A value of false (the default) instructs the
+ // application to send a request to the server only if the certificate is
+ // valid and signed by a trusted certificate authority (CA), and also
+ // includes a hostname that matches the certificate. A value of true
+ // instructs the application to perform no certificate validation.
+ AllowInvalidServerCertificate bool
+}
+
+// Verify statically that *Transport implements http.RoundTripper.
+var _ http.RoundTripper = (*Transport)(nil)
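+
+// For the uncommon case where the defaults of Client below are not suitable,
+// an http.Client can be built around the Transport directly. A minimal sketch
+// (ctx is assumed to be an App Engine request context):
+//
+//	client := &http.Client{
+//		Transport: &urlfetch.Transport{
+//			Context:                       ctx,
+//			AllowInvalidServerCertificate: true, // skip certificate checks
+//		},
+//	}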
+
+// Client returns an *http.Client using a default urlfetch Transport. This
+// client will have the default deadline of 5 seconds, and will check the
+// validity of SSL certificates.
+//
+// Any deadline of the provided context will be used for requests through this client;
+// if the context does not have a deadline then a 5 second default is used.
+func Client(ctx context.Context) *http.Client {
+ return &http.Client{
+ Transport: &Transport{
+ Context: ctx,
+ },
+ }
+}
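+
+// A typical use from a request handler looks like the sketch below;
+// appengine.NewContext (from google.golang.org/appengine) and the target URL
+// are assumptions for illustration, not part of this file:
+//
+//	func handler(w http.ResponseWriter, r *http.Request) {
+//		ctx := appengine.NewContext(r)
+//		resp, err := urlfetch.Client(ctx).Get("https://www.google.com/")
+//		if err != nil {
+//			http.Error(w, err.Error(), http.StatusInternalServerError)
+//			return
+//		}
+//		defer resp.Body.Close()
+//		fmt.Fprintf(w, "HTTP GET returned status %v", resp.Status)
+//	}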
+
+type bodyReader struct {
+ content []byte
+ truncated bool
+ closed bool
+}
+
+// ErrTruncatedBody is the error returned after the final Read() from a
+// response's Body if the body has been truncated by App Engine's proxy.
+var ErrTruncatedBody = errors.New("urlfetch: truncated body")
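+
+// Callers that need to tell a truncated response apart from a normal EOF can
+// compare the read error against this sentinel (sketch; res is assumed to be
+// a response obtained through this package):
+//
+//	body, err := ioutil.ReadAll(res.Body)
+//	if err == urlfetch.ErrTruncatedBody {
+//		// body holds only the prefix that the urlfetch proxy returned.
+//	}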
+
+func statusCodeToText(code int) string {
+ if t := http.StatusText(code); t != "" {
+ return t
+ }
+ return strconv.Itoa(code)
+}
+
+func (br *bodyReader) Read(p []byte) (n int, err error) {
+ if br.closed {
+ if br.truncated {
+ return 0, ErrTruncatedBody
+ }
+ return 0, io.EOF
+ }
+ n = copy(p, br.content)
+ if n > 0 {
+ br.content = br.content[n:]
+ return
+ }
+ if br.truncated {
+ br.closed = true
+ return 0, ErrTruncatedBody
+ }
+ return 0, io.EOF
+}
+
+func (br *bodyReader) Close() error {
+ br.closed = true
+ br.content = nil
+ return nil
+}
+
+// A map of the URL Fetch-accepted methods that take a request body.
+var methodAcceptsRequestBody = map[string]bool{
+ "POST": true,
+ "PUT": true,
+ "PATCH": true,
+}
+
+// urlString returns a valid string given a URL. This function is necessary because
+// the String method of URL doesn't correctly handle URLs with non-empty Opaque values.
+// See http://code.google.com/p/go/issues/detail?id=4860.
+func urlString(u *url.URL) string {
+ if u.Opaque == "" || strings.HasPrefix(u.Opaque, "//") {
+ return u.String()
+ }
+ aux := *u
+ aux.Opaque = "//" + aux.Host + aux.Opaque
+ return aux.String()
+}
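+
+// For example, a URL written without the "//" authority marker, such as
+// "http:example.com/index.html", parses with a non-empty Opaque value, and
+// the rewrite above produces a fetchable absolute form (illustrative values):
+//
+//	u, _ := url.Parse("http:example.com/index.html") // u.Opaque == "example.com/index.html"
+//	s := urlString(u)                                // "http://example.com/index.html"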
+
+// RoundTrip issues a single HTTP request and returns its response. Per the
+// http.RoundTripper interface, RoundTrip only returns an error if there
+// was an unsupported request or the URL Fetch proxy fails.
+// Note that HTTP response codes such as 5xx, 403, 404, etc. are not
+// errors as far as the transport is concerned and will be returned
+// with err set to nil.
+func (t *Transport) RoundTrip(req *http.Request) (res *http.Response, err error) {
+ methNum, ok := pb.URLFetchRequest_RequestMethod_value[req.Method]
+ if !ok {
+ return nil, fmt.Errorf("urlfetch: unsupported HTTP method %q", req.Method)
+ }
+
+ method := pb.URLFetchRequest_RequestMethod(methNum)
+
+ freq := &pb.URLFetchRequest{
+ Method: &method,
+ Url: proto.String(urlString(req.URL)),
+ FollowRedirects: proto.Bool(false), // http.Client's responsibility
+ MustValidateServerCertificate: proto.Bool(!t.AllowInvalidServerCertificate),
+ }
+ if deadline, ok := t.Context.Deadline(); ok {
+ freq.Deadline = proto.Float64(deadline.Sub(time.Now()).Seconds())
+ }
+
+ for k, vals := range req.Header {
+ for _, val := range vals {
+ freq.Header = append(freq.Header, &pb.URLFetchRequest_Header{
+ Key: proto.String(k),
+ Value: proto.String(val),
+ })
+ }
+ }
+ if methodAcceptsRequestBody[req.Method] && req.Body != nil {
+ // Avoid a []byte copy if req.Body has a Bytes method.
+ switch b := req.Body.(type) {
+ case interface {
+ Bytes() []byte
+ }:
+ freq.Payload = b.Bytes()
+ default:
+ freq.Payload, err = ioutil.ReadAll(req.Body)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ fres := &pb.URLFetchResponse{}
+ if err := internal.Call(t.Context, "urlfetch", "Fetch", freq, fres); err != nil {
+ return nil, err
+ }
+
+ res = &http.Response{}
+ res.StatusCode = int(*fres.StatusCode)
+ res.Status = fmt.Sprintf("%d %s", res.StatusCode, statusCodeToText(res.StatusCode))
+ res.Header = make(http.Header)
+ res.Request = req
+
+ // Faked:
+ res.ProtoMajor = 1
+ res.ProtoMinor = 1
+ res.Proto = "HTTP/1.1"
+ res.Close = true
+
+ for _, h := range fres.Header {
+ hkey := http.CanonicalHeaderKey(*h.Key)
+ hval := *h.Value
+ if hkey == "Content-Length" {
+ // Will get filled in below for all but HEAD requests.
+ if req.Method == "HEAD" {
+ res.ContentLength, _ = strconv.ParseInt(hval, 10, 64)
+ }
+ continue
+ }
+ res.Header.Add(hkey, hval)
+ }
+
+ if req.Method != "HEAD" {
+ res.ContentLength = int64(len(fres.Content))
+ }
+
+ truncated := fres.GetContentWasTruncated()
+ res.Body = &bodyReader{content: fres.Content, truncated: truncated}
+ return
+}
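+
+// Since non-2xx statuses are not reported as errors, callers must inspect the
+// status code themselves (sketch; client is assumed to come from Client above):
+//
+//	res, err := client.Get("https://example.com/missing")
+//	if err == nil && res.StatusCode == http.StatusNotFound {
+//		// The fetch itself succeeded; handle the 404 explicitly.
+//	}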
+
+func init() {
+ internal.RegisterErrorCodeMap("urlfetch", pb.URLFetchServiceError_ErrorCode_name)
+ internal.RegisterTimeoutErrorCode("urlfetch", int32(pb.URLFetchServiceError_DEADLINE_EXCEEDED))
+}