diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..d7b881e
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,286 @@
+# Created by .ignore support plugin (hsz.mobi)
+### Emacs template
+# -*- mode: gitignore; -*-
+*~
+\#*\#
+/.emacs.desktop
+/.emacs.desktop.lock
+*.elc
+auto-save-list
+tramp
+.\#*
+
+# Org-mode
+.org-id-locations
+*_archive
+
+# flymake-mode
+*_flymake.*
+
+# eshell files
+/eshell/history
+/eshell/lastdir
+
+# elpa packages
+/elpa/
+
+# reftex files
+*.rel
+
+# AUCTeX auto folder
+/auto/
+
+# cask packages
+.cask/
+dist/
+
+# Flycheck
+flycheck_*.el
+
+# server auth directory
+/server/
+
+# Projectile files
+.projectile
+
+# directory configuration
+.dir-locals.el
+
+# network security
+/network-security.data
+
+
+### JetBrains template
+# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
+# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
+
+# User-specific stuff
+.idea/**/workspace.xml
+.idea/**/tasks.xml
+.idea/**/usage.statistics.xml
+.idea/**/dictionaries
+.idea/**/shelf
+
+# Generated files
+.idea/**/contentModel.xml
+
+# Sensitive or high-churn files
+.idea/**/dataSources/
+.idea/**/dataSources.ids
+.idea/**/dataSources.local.xml
+.idea/**/sqlDataSources.xml
+.idea/**/dynamic.xml
+.idea/**/uiDesigner.xml
+.idea/**/dbnavigator.xml
+
+# Gradle
+.idea/**/gradle.xml
+.idea/**/libraries
+
+# Gradle and Maven with auto-import
+# When using Gradle or Maven with auto-import, you should exclude module files,
+# since they will be recreated, and may cause churn. Uncomment if using
+# auto-import.
+# .idea/artifacts
+# .idea/compiler.xml
+# .idea/modules.xml
+# .idea/*.iml
+# .idea/modules
+# *.iml
+# *.ipr
+
+# CMake
+cmake-build-*/
+
+# Mongo Explorer plugin
+.idea/**/mongoSettings.xml
+
+# File-based project format
+*.iws
+
+# IntelliJ
+out/
+
+# mpeltonen/sbt-idea plugin
+.idea_modules/
+
+# JIRA plugin
+atlassian-ide-plugin.xml
+
+# Cursive Clojure plugin
+.idea/replstate.xml
+
+# Crashlytics plugin (for Android Studio and IntelliJ)
+com_crashlytics_export_strings.xml
+crashlytics.properties
+crashlytics-build.properties
+fabric.properties
+
+# Editor-based Rest Client
+.idea/httpRequests
+
+# Android studio 3.1+ serialized cache file
+.idea/caches/build_file_checksums.ser
+
+### Vim template
+# Swap
+[._]*.s[a-v][a-z]
+# Comment out the next line if you do not need vector (.svg) files
+!*.svg
+[._]*.sw[a-p]
+[._]s[a-rt-v][a-z]
+[._]ss[a-gi-z]
+[._]sw[a-p]
+
+# Session
+Session.vim
+Sessionx.vim
+
+# Temporary
+.netrwhist
+*~
+# Auto-generated tag files
+tags
+# Persistent undo
+[._]*.un~
+
+### macOS template
+# General
+.DS_Store
+.AppleDouble
+.LSOverride
+
+# Icon must end with two \r
+Icon
+
+# Thumbnails
+._*
+
+# Files that might appear in the root of a volume
+.DocumentRevisions-V100
+.fseventsd
+.Spotlight-V100
+.TemporaryItems
+.Trashes
+.VolumeIcon.icns
+.com.apple.timemachine.donotpresent
+
+# Directories potentially created on remote AFP share
+.AppleDB
+.AppleDesktop
+Network Trash Folder
+Temporary Items
+.apdisk
+
+### Xcode template
+# Xcode
+#
+# gitignore contributors: remember to update Global/Xcode.gitignore, Objective-C.gitignore & Swift.gitignore
+
+## User settings
+xcuserdata/
+
+## compatibility with Xcode 8 and earlier (ignoring not required starting Xcode 9)
+*.xcscmblueprint
+*.xccheckout
+
+## compatibility with Xcode 3 and earlier (ignoring not required starting Xcode 4)
+build/
+DerivedData/
+*.moved-aside
+*.pbxuser
+!default.pbxuser
+*.mode1v3
+!default.mode1v3
+*.mode2v3
+!default.mode2v3
+*.perspectivev3
+!default.perspectivev3
+
+## Gcc Patch
+/*.gcno
+
+### Go template
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Dependency directories (remove the comment below to include it)
+# vendor/
+
+### Eclipse template
+.metadata
+bin/
+tmp/
+*.tmp
+*.bak
+*.swp
+*~.nib
+local.properties
+.settings/
+.loadpath
+.recommenders
+
+# External tool builders
+.externalToolBuilders/
+
+# Locally stored "Eclipse launch configurations"
+*.launch
+
+# PyDev specific (Python IDE for Eclipse)
+*.pydevproject
+
+# CDT-specific (C/C++ Development Tooling)
+.cproject
+
+# CDT- autotools
+.autotools
+
+# Java annotation processor (APT)
+.factorypath
+
+# PDT-specific (PHP Development Tools)
+.buildpath
+
+# sbteclipse plugin
+.target
+
+# Tern plugin
+.tern-project
+
+# TeXlipse plugin
+.texlipse
+
+# STS (Spring Tool Suite)
+.springBeans
+
+# Code Recommenders
+.recommenders/
+
+# Annotation Processing
+.apt_generated/
+.apt_generated_test/
+
+# Scala IDE specific (Scala & Java development for Eclipse)
+.cache-main
+.scala_dependencies
+.worksheet
+
+### VisualStudioCode template
+.vscode/*
+!.vscode/settings.json
+!.vscode/tasks.json
+!.vscode/launch.json
+!.vscode/extensions.json
+*.code-workspace
+
+/rc-*
+rc-steering
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..a556c4f
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,19 @@
+FROM docker.io/golang:1.17 as gobuilder
+
+FROM docker.io/cyrilix/tflite-builder:v2.6.0
+
+COPY --from=gobuilder /usr/local/go /usr/local/go
+ENV GOPATH /go
+ENV PATH /usr/local/go/bin:$GOPATH/bin:/usr/local/go/bin:$PATH
+
+WORKDIR /src
+
+ADD go.mod .
+ADD go.sum .
+ADD vendor/ vendor
+ADD pkg/ pkg
+ADD cmd/ cmd
+
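+# The CGO flags below point at the TensorFlow Lite headers and libraries that the
+# tflite-builder base image is assumed to install under /usr/local.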
+RUN CGO_CPPFLAGS="-I/usr/local/include" \
+ CGO_LDFLAGS="-L/usr/local/lib/x86_64-linux-gnu" \
+ go build -v -a ./cmd/rc-steering
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/build-docker.sh b/build-docker.sh
new file mode 100755
index 0000000..efb658c
--- /dev/null
+++ b/build-docker.sh
@@ -0,0 +1,120 @@
+#! /bin/bash
+
+IMAGE_NAME=robocar-steering-tflite-edgetpu
+TAG=$(git describe)
+FULL_IMAGE_NAME=docker.io/cyrilix/${IMAGE_NAME}:${TAG}
+BINARY=rc-steering
+TFLITE_VERSION=2.6.0
+GOLANG_VERSION=1.17
+
+GOTAGS="-tags netgo"
+BUILDER_CONTAINER="${IMAGE_NAME}-builder"
+
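+# Cross-compile the rc-steering binary for amd64, armhf and arm64 inside a single builder
+# container; the required cross toolchains are assumed to ship with the tflite-builder image.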
+image_build_binaries(){
+ local containerName="$BUILDER_CONTAINER"
+
+ buildah --os "linux" --arch "amd64" --name "$containerName" from "docker.io/cyrilix/tflite-builder:v${TFLITE_VERSION}"
+
+ printf "Copy go binary\n"
+ buildah copy --from=docker.io/library/golang:${GOLANG_VERSION} "$containerName" /usr/local/go /usr/local/go
+ buildah config \
+ --env GOPATH=/go \
+ --env PATH=/usr/local/go/bin:/go/bin:/usr/local/go/bin:/usr/local/bin:/usr/bin:/bin \
+ $containerName
+
+ buildah config --workingdir /src $containerName
+
+ printf "Copy go sources\n"
+ buildah add $containerName go.mod .
+ buildah add $containerName go.sum .
+ buildah add $containerName vendor/ vendor
+ buildah add $containerName pkg/ pkg
+ buildah add $containerName cmd/ cmd
+
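+  # libedgetpu ships only the versioned shared object; add unversioned symlinks so the
+  # linker can resolve -ledgetpu when cgo builds the EdgeTPU delegate.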
+ buildah run $containerName ln -s /usr/lib/arm-linux-gnueabihf/libedgetpu.so.1 /usr/lib/arm-linux-gnueabihf/libedgetpu.so
+ buildah run $containerName ln -s /usr/lib/aarch64-linux-gnu/libedgetpu.so.1 /usr/lib/aarch64-linux-gnu/libedgetpu.so
+
+ printf "Compile for linux/amd64\n"
+ buildah run \
+ --env CGO_ENABLED=1 \
+ --env CC=gcc \
+ --env CXX=g++ \
+ --env GOOS=linux \
+ --env GOARCH=amd64 \
+ --env CGO_CPPFLAGS="-I/usr/local/include" \
+ --env CGO_LDFLAGS="-L /usr/local/lib/x86_64-linux-gnu -L /usr/lib/x86_64-linux-gnu" \
+ $containerName \
+ go build -a -o rc-steering.amd64 ./cmd/rc-steering
+ #--env CGO_CXXFLAGS="--std=c++1z" \
+
+ printf "Compile for linux/arm/v7\n"
+ buildah run \
+ --env CGO_ENABLED=1 \
+ --env CC=arm-linux-gnueabihf-gcc \
+ --env CXX=arm-linux-gnueabihf-g++ \
+ --env GOOS=linux \
+ --env GOARCH=arm \
+ --env GOARM=7 \
+ --env CGO_CPPFLAGS="-I/usr/local/include" \
+ --env CGO_LDFLAGS="-L /usr/lib/arm-linux-gnueabihf -L /usr/local/lib/arm-linux-gnueabihf" \
+ $containerName \
+ go build -a -o rc-steering.armhf ./cmd/rc-steering
+
+ printf "Compile for linux/arm64\n"
+ buildah run \
+ --env CGO_ENABLED=1 \
+ --env CC=aarch64-linux-gnu-gcc \
+ --env CXX=aarch64-linux-gnu-g++ \
+ --env GOOS=linux \
+ --env GOARCH=arm64 \
+ --env CGO_CPPFLAGS="-I/usr/local/include" \
+ --env CGO_LDFLAGS="-L /usr/lib/aarch64-linux-gnu -L /usr/local/lib/aarch64-linux-gnu" \
+ $containerName \
+ go build -a -o rc-steering.arm64 ./cmd/rc-steering
+}
+
+
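+# Build a runtime image for a single platform (e.g. linux/arm/v7): derive GOOS/GOARCH/GOARM
+# from the platform string, copy the matching pre-built binary from the builder container and
+# add the result to the local manifest list.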
+image_build(){
+ local platform=$1
+
+
+  GOOS=$(echo "$platform" | cut -f1 -d/)
+  GOARCH=$(echo "$platform" | cut -f2 -d/)
+  GOARM=$(echo "$platform" | cut -f3 -d/ | sed "s/v//")
+  VARIANT="--variant $(echo "$platform" | cut -f3 -d/)"
+ if [[ -z "$GOARM" ]] ;
+ then
+ VARIANT=""
+ binary_suffix="$GOARCH"
+ else
+ binary_suffix="armhf"
+ fi
+
+
+ local containerName="$IMAGE_NAME-$GOARCH$GOARM"
+
+
+ buildah --os "$GOOS" --arch "$GOARCH" $VARIANT --name "$containerName" from "docker.io/cyrilix/tflite-runtime:v${TFLITE_VERSION}"
+ buildah config --user 1234 "$containerName"
+ buildah copy --from="$BUILDER_CONTAINER" "$containerName" "/src/${BINARY}.${binary_suffix}" /go/bin/$BINARY
+ buildah config --entrypoint '["/go/bin/'$BINARY'"]' "${containerName}"
+
+ buildah commit --rm --manifest $IMAGE_NAME "${containerName}" "${containerName}"
+}
+
+#buildah rmi localhost/$IMAGE_NAME
+#buildah manifest rm localhost/${IMAGE_NAME}
+
+image_build_binaries
+
+image_build linux/amd64
+image_build linux/arm64
+image_build linux/arm/v7
+
+
+# push image
+printf "\n\nPush manifest to %s\n\n" "${FULL_IMAGE_NAME}"
+buildah manifest push --rm -f v2s2 "localhost/$IMAGE_NAME" "docker://$FULL_IMAGE_NAME" --all
+
+buildah rm $BUILDER_CONTAINER
\ No newline at end of file
diff --git a/cmd/rc-steering/rc-steering.go b/cmd/rc-steering/rc-steering.go
new file mode 100644
index 0000000..c507f73
--- /dev/null
+++ b/cmd/rc-steering/rc-steering.go
@@ -0,0 +1,65 @@
+package main
+
+import (
+ "flag"
+ "github.com/cyrilix/robocar-base/cli"
+ "github.com/cyrilix/robocar-steering-tflite-edgetpu/pkg/steering"
+ "go.uber.org/zap"
+ "log"
+ "os"
+)
+
+const (
+ DefaultClientId = "robocar-steering-tflite-edgetpu"
+)
+
+func main() {
+ var mqttBroker, username, password, clientId string
+ var cameraTopic, steeringTopic string
+ var modelPath string
+ var edgeVerbosity int
+
+ lgr, err := zap.NewProductionConfig().Build()
+ if err != nil {
+ log.Fatalf("unable to init logger: %v", err)
+ }
+ zap.ReplaceGlobals(lgr)
+
+ mqttQos := cli.InitIntFlag("MQTT_QOS", 0)
+ _, mqttRetain := os.LookupEnv("MQTT_RETAIN")
+
+ cli.InitMqttFlags(DefaultClientId, &mqttBroker, &username, &password, &clientId, &mqttQos, &mqttRetain)
+
+ flag.StringVar(&modelPath, "model", "", "path to model file")
+	flag.StringVar(&steeringTopic, "mqtt-topic-steering", os.Getenv("MQTT_TOPIC_STEERING"), "Mqtt topic to publish steering result, use MQTT_TOPIC_STEERING if args not set")
+ flag.StringVar(&cameraTopic, "mqtt-topic-camera", os.Getenv("MQTT_TOPIC_CAMERA"), "Mqtt topic that contains camera frame values, use MQTT_TOPIC_CAMERA if args not set")
+ flag.IntVar(&edgeVerbosity, "edge-verbosity", 0, "Edge TPU Verbosity")
+
+ flag.Parse()
+ if len(os.Args) <= 1 {
+ flag.PrintDefaults()
+ os.Exit(1)
+ }
+
+ if modelPath == "" {
+ zap.L().Error("model path is mandatory")
+ flag.PrintDefaults()
+ os.Exit(1)
+ }
+
+ client, err := cli.Connect(mqttBroker, username, password, clientId)
+ if err != nil {
+ zap.L().Fatal("unable to connect to mqtt bus", zap.Error(err))
+ }
+ defer client.Disconnect(50)
+
+	p := steering.NewPart(client, modelPath, steeringTopic, cameraTopic, edgeVerbosity)
+ defer p.Stop()
+
+ cli.HandleExit(p)
+
+ err = p.Start()
+ if err != nil {
+ zap.L().Fatal("unable to start service", zap.Error(err))
+ }
+}
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..c98f8aa
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,21 @@
+module github.com/cyrilix/robocar-steering-tflite-edgetpu
+
+go 1.17
+
+require (
+ github.com/cyrilix/robocar-base v0.1.4
+ github.com/cyrilix/robocar-protobuf/go v1.0.3
+ github.com/eclipse/paho.mqtt.golang v1.3.5
+ github.com/golang/protobuf v1.5.2
+ github.com/mattn/go-tflite v1.0.2-0.20210524070808-ba19dc99415b
+ go.uber.org/zap v1.19.1
+)
+
+require (
+ github.com/gorilla/websocket v1.4.2 // indirect
+ github.com/mattn/go-pointer v0.0.1 // indirect
+ go.uber.org/atomic v1.7.0 // indirect
+ go.uber.org/multierr v1.6.0 // indirect
+ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 // indirect
+ google.golang.org/protobuf v1.26.0 // indirect
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..86c5126
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,198 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
+github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
+github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
+github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/cyrilix/robocar-base v0.1.4 h1:nfnjRwAcCfS7xGu6tW9rZhmc/HZIsuDJX5NFhgX5dWE=
+github.com/cyrilix/robocar-base v0.1.4/go.mod h1:Tt04UmbGBiQtU0Cn3wFD0q7XoyokTwIlWYQxThKI+04=
+github.com/cyrilix/robocar-protobuf/go v1.0.3 h1:iPHw2+7FVXG2C4+Th1m11hQ+2RpAQzlxKhc5M7XOa6Q=
+github.com/cyrilix/robocar-protobuf/go v1.0.3/go.mod h1:xb95cK07lYXnKcHZKnGafmAgYRrqZWZgV9LMiJAp+gE=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
+github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v17.12.0-ce-rc1.0.20200916142827-bd33bbf0497b+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/eclipse/paho.mqtt.golang v1.3.5 h1:sWtmgNxYM9P2sP+xEItMozsR3w0cqZFlqnNN1bdl41Y=
+github.com/eclipse/paho.mqtt.golang v1.3.5/go.mod h1:eTzb4gxwwyWpqBUHGQZ4ABAV7+Jgm1PklsYT/eo8Hcc=
+github.com/faiface/glhf v0.0.0-20181018222622-82a6317ac380/go.mod h1:zqnPFFIuYFFxl7uH2gYByJwIVKG7fRqlqQCbzAnHs9g=
+github.com/faiface/mainthread v0.0.0-20171120011319-8b78f0a41ae3/go.mod h1:VEPNJUlxl5KdWjDvz6Q1l+rJlxF2i6xqDeGuGAxa87M=
+github.com/faiface/pixel v0.9.0/go.mod h1:WkLfLymV31e/Ogv5OR3vtrNxRktTO3WXGWXiiSEg/j4=
+github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
+github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
+github.com/go-gl/gl v0.0.0-20180407155706-68e253793080/go.mod h1:482civXOzJJCPzJ4ZOX/pwvXBWSnzD4OKMdH4ClKGbk=
+github.com/go-gl/gl v0.0.0-20190320180904-bf2b1f2f34d7/go.mod h1:482civXOzJJCPzJ4ZOX/pwvXBWSnzD4OKMdH4ClKGbk=
+github.com/go-gl/glfw v0.0.0-20180426074136-46a8d530c326/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/mathgl v0.0.0-20190416160123-c4601bc793c7/go.mod h1:yhpkQzEiH9yPyxDUGzkmgScbaBVlhC06qodikEM0ZwQ=
+github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
+github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
+github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI=
+github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
+github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
+github.com/llgcode/draw2d v0.0.0-20200110163050-b96d8208fcfc/go.mod h1:mVa0dA29Db2S4LVqDYLlsePDzRJLDfdhVZiI15uY0FA=
+github.com/llgcode/ps v0.0.0-20150911083025-f1443b32eedb/go.mod h1:1l8ky+Ew27CMX29uG+a2hNOKpeNYEQjjtiALiBlFQbY=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-pointer v0.0.1 h1:n+XhsuGeVO6MEAp7xyEukFINEa+Quek5psIR/ylA6o0=
+github.com/mattn/go-pointer v0.0.1/go.mod h1:2zXcozF6qYGgmsG+SeTZz3oAbFLdD3OWqnUbNvJZAlc=
+github.com/mattn/go-tflite v1.0.2-0.20210524070808-ba19dc99415b h1:N+6lhlgfFYrWN3P4PrO08EDctBQ9cdVIaL80KUMw52w=
+github.com/mattn/go-tflite v1.0.2-0.20210524070808-ba19dc99415b/go.mod h1:FzxmEIGLoG5vM6vVmA6gcCsLg1NGbKMebCrZxZohJ1E=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/testcontainers/testcontainers-go v0.9.0/go.mod h1:b22BFXhRbg4PJmeMVWh6ftqjyZHgiIl3w274e9r3C2E=
+github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
+github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
+github.com/vincent-petithory/dataurl v0.0.0-20191104211930-d1553a71de50/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 h1:sHOAIxRGBp443oHZIPB+HsUGaksVCXVQENPxwTfQdH4=
+go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI=
+go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
+gocv.io/x/gocv v0.27.0/go.mod h1:n4LnYjykU6y9gn48yZf4eLCdtuSb77XxSkW6g0wGf/A=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
+golang.org/x/image v0.0.0-20190321063152-3fc05d484e9f/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190523035834-f03afa92d3ff/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180810170437-e96c4e24768d/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
+gonum.org/v1/plot v0.7.0/go.mod h1:2wtU6YrrdQAhAF9+MTd5tOQjrov/zF70b1i99Npjvgo=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gotest.tools v0.0.0-20181223230014-1083505acf35/go.mod h1:R//lfYlUuTOTfblYI3lGoAAAebUdzjvbmQsuB7Ykd90=
+honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
diff --git a/pkg/steering/steering.go b/pkg/steering/steering.go
new file mode 100644
index 0000000..96a1fc6
--- /dev/null
+++ b/pkg/steering/steering.go
@@ -0,0 +1,211 @@
+package steering
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/cyrilix/robocar-base/service"
+ "github.com/cyrilix/robocar-protobuf/go/events"
+ mqtt "github.com/eclipse/paho.mqtt.golang"
+ "github.com/golang/protobuf/proto"
+ "github.com/mattn/go-tflite"
+ "github.com/mattn/go-tflite/delegates/edgetpu"
+ "go.uber.org/zap"
+	"image"
+	// register the JPEG decoder so image.Decode can handle camera frames
+	// (frames are assumed to be JPEG encoded)
+	_ "image/jpeg"
+	"sort"
+)
+
+// NewPart builds a steering Part; the TFLite model at modelPath is loaded when Start is called.
+func NewPart(client mqtt.Client, modelPath, steeringTopic, cameraTopic string, edgeVerbosity int) *Part {
+	return &Part{
+		client:        client,
+		modelPath:     modelPath,
+		steeringTopic: steeringTopic,
+		cameraTopic:   cameraTopic,
+		edgeVerbosity: edgeVerbosity,
+	}
+}
+
+type Part struct {
+ client mqtt.Client
+ steeringTopic string
+ cameraTopic string
+
+ cancel chan interface{}
+
+ options *tflite.InterpreterOptions
+ interpreter *tflite.Interpreter
+ modelPath string
+ model *tflite.Model
+	edgeVerbosity int
+}
+
+func (p *Part) Start() error {
+ p.model = tflite.NewModelFromFile(p.modelPath)
+ if p.model == nil {
+ return fmt.Errorf("cannot load model %v", p.modelPath)
+ }
+
+ // Get the list of devices
+ devices, err := edgetpu.DeviceList()
+ if err != nil {
+ return fmt.Errorf("could not get EdgeTPU devices: %w", err)
+ }
+ if len(devices) == 0 {
+ return fmt.Errorf("no edge TPU devices found")
+ }
+
+ // Print the EdgeTPU version
+ edgetpuVersion, err := edgetpu.Version()
+ if err != nil {
+ return fmt.Errorf("cannot get EdgeTPU version: %w", err)
+ }
+	zap.S().Infof("EdgeTPU Version: %s", edgetpuVersion)
+	edgetpu.Verbosity(p.edgeVerbosity)
+
+ p.options = tflite.NewInterpreterOptions()
+ //options.SetNumThread(4)
+ p.options.SetErrorReporter(func(msg string, userData interface{}) {
+ zap.L().Error(msg,
+ zap.Any("userData", userData),
+ )
+ }, nil)
+
+ // Add the first EdgeTPU device
+ d := edgetpu.New(devices[0])
+ p.options.AddDelegate(d)
+
+ p.interpreter = tflite.NewInterpreter(p.model, p.options)
+ if p.interpreter == nil {
+ return fmt.Errorf("cannot create interpreter")
+ }
+
+ if err := registerCallbacks(p); err != nil {
+ zap.S().Errorf("unable to register callbacks: %v", err)
+ return err
+ }
+
+ p.cancel = make(chan interface{})
+ <-p.cancel
+ return nil
+}
+
+func (p *Part) Stop() {
+ close(p.cancel)
+ service.StopService("steering", p.client, p.cameraTopic)
+	if p.interpreter != nil {
+		p.interpreter.Delete()
+	}
+ if p.options != nil {
+ p.options.Delete()
+ }
+ if p.model != nil {
+ p.model.Delete()
+ }
+}
+
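+// onFrame is the MQTT callback for camera frames: it decodes the frame, runs the
+// model on it and publishes the predicted steering value.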
+func (p *Part) onFrame(_ mqtt.Client, message mqtt.Message) {
+ var msg events.FrameMessage
+ err := proto.Unmarshal(message.Payload(), &msg)
+ if err != nil {
+ zap.S().Errorf("unable to unmarshal protobuf %T message: %v", &msg, err)
+ return
+ }
+
+	img, _, err := image.Decode(bytes.NewReader(msg.GetFrame()))
+	if err != nil {
+		zap.L().Error("unable to decode frame", zap.Error(err))
+		return
+	}
+
+	steering, confidence, err := p.Value(img)
+	if err != nil {
+		zap.L().Error("unable to compute steering from frame", zap.Error(err))
+		return
+	}
+	msgSteering := &events.SteeringMessage{
+		Steering:   steering,
+		Confidence: confidence,
+		FrameRef:   msg.Id,
+	}
+
+	payload, err := proto.Marshal(msgSteering)
+	if err != nil {
+		zap.L().Error("unable to marshal protobuf message", zap.Error(err))
+		return
+	}
+	publish(p.client, p.steeringTopic, payload)
+}
+
+func (p *Part) Value(img image.Image) (float32, float32, error) {
+ status := p.interpreter.AllocateTensors()
+ if status != tflite.OK {
+ return 0., 0., fmt.Errorf("tensor allocate failed: %v", status)
+ }
+
+ input := p.interpreter.GetInputTensor(0)
+
+ dx := img.Bounds().Dx()
+ dy := img.Bounds().Dy()
+
+	// Build the uint8 RGB input buffer expected by the quantized model
+	// (row-major, height x width x 3); RGBA() returns 16-bit channels, so
+	// shift them down to 8 bits.
+	bb := make([]byte, dx*dy*3)
+	for y := 0; y < dy; y++ {
+		for x := 0; x < dx; x++ {
+			r, g, b, _ := img.At(x, y).RGBA()
+			bb[(y*dx+x)*3+0] = byte(r >> 8)
+			bb[(y*dx+x)*3+1] = byte(g >> 8)
+			bb[(y*dx+x)*3+2] = byte(b >> 8)
+		}
+	}
+ status = input.CopyFromBuffer(bb)
+ if status != tflite.OK {
+ return 0., 0., fmt.Errorf("input copy from buffer failed: %v", status)
+ }
+
+ status = p.interpreter.Invoke()
+ if status != tflite.OK {
+ return 0., 0., fmt.Errorf("invoke failed: %v", status)
+ }
+
+ output := p.interpreter.GetOutputTensor(0)
+ outputSize := output.Dim(output.NumDims() - 1)
+ b := make([]byte, outputSize)
+ type result struct {
+ score float64
+ index int
+ }
+ status = output.CopyToBuffer(&b[0])
+ if status != tflite.OK {
+ return 0., 0., fmt.Errorf("output failed: %v", status)
+ }
+
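+	// The output tensor holds quantized uint8 scores: rescale each steering bucket
+	// to [0, 1] and keep only the buckets above a minimal confidence.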
+ var results []result
+ minScore := 0.2
+ for i := 0; i < outputSize; i++ {
+ score := float64(b[i]) / 255.0
+ if score < minScore {
+ continue
+ }
+ results = append(results, result{score: score, index: i})
+ }
+
+ if len(results) == 0 {
+		zap.L().Warn(fmt.Sprintf("no steering value with score > %0.2f found", minScore))
+ return 0., 0., nil
+ }
+ sort.Slice(results, func(i, j int) bool {
+ return results[i].score > results[j].score
+ })
+
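+	// Map the index of the best scoring bucket onto the steering range [-1, 1].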
+ steering := float64(results[0].index)*(2./float64(outputSize)) - 1
+ zap.L().Debug("found steering",
+ zap.Float64("steering", steering),
+ zap.Float64("score", results[0].score),
+ )
+ return float32(steering), float32(results[0].score), nil
+}
+
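+// registerCallbacks and publish are package-level variables rather than plain functions,
+// presumably so they can be stubbed out in tests.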
+var registerCallbacks = func(p *Part) error {
+ err := service.RegisterCallback(p.client, p.cameraTopic, p.onFrame)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+var publish = func(client mqtt.Client, topic string, payload []byte) {
+ client.Publish(topic, 0, false, payload)
+}
diff --git a/vendor/github.com/cyrilix/robocar-base/LICENSE b/vendor/github.com/cyrilix/robocar-base/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/cyrilix/robocar-base/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/cyrilix/robocar-base/cli/cli.go b/vendor/github.com/cyrilix/robocar-base/cli/cli.go
new file mode 100644
index 0000000..7824862
--- /dev/null
+++ b/vendor/github.com/cyrilix/robocar-base/cli/cli.go
@@ -0,0 +1,119 @@
+package cli
+
+import (
+ "flag"
+ "fmt"
+ "github.com/cyrilix/robocar-base/service"
+ MQTT "github.com/eclipse/paho.mqtt.golang"
+ "log"
+ "os"
+ "os/signal"
+ "strconv"
+ "syscall"
+)
+
+func SetDefaultValueFromEnv(value *string, key string, defaultValue string) {
+ if os.Getenv(key) != "" {
+ *value = os.Getenv(key)
+ } else {
+ *value = defaultValue
+ }
+}
+func SetIntDefaultValueFromEnv(value *int, key string, defaultValue int) error {
+ var sVal string
+ if os.Getenv(key) != "" {
+ sVal = os.Getenv(key)
+ val, err := strconv.Atoi(sVal)
+ if err != nil {
+ log.Printf("unable to convert string to int: %v", err)
+ return err
+ }
+ *value = val
+ } else {
+ *value = defaultValue
+ }
+ return nil
+}
+func SetFloat64DefaultValueFromEnv(value *float64, key string, defaultValue float64) error {
+ var sVal string
+ if os.Getenv(key) != "" {
+ sVal = os.Getenv(key)
+ val, err := strconv.ParseFloat(sVal, 64)
+ if err != nil {
+ log.Printf("unable to convert string to float: %v", err)
+ return err
+ }
+ *value = val
+ } else {
+ *value = defaultValue
+ }
+ return nil
+}
+
+func HandleExit(p service.Part) {
+ signals := make(chan os.Signal, 1)
+ signal.Notify(signals, os.Kill, os.Interrupt, syscall.SIGTERM)
+
+ go func() {
+ <-signals
+ p.Stop()
+ os.Exit(0)
+ }()
+}
+
+func InitMqttFlagSet(flagSet *flag.FlagSet, defaultClientId string, mqttBroker, username, password, clientId *string, mqttQos *int, mqttRetain *bool) {
+ SetDefaultValueFromEnv(clientId, "MQTT_CLIENT_ID", defaultClientId)
+ SetDefaultValueFromEnv(mqttBroker, "MQTT_BROKER", "tcp://127.0.0.1:1883")
+
+	flagSet.StringVar(mqttBroker, "mqtt-broker", *mqttBroker, "Broker URI, use MQTT_BROKER env if arg not set")
+	flagSet.StringVar(username, "mqtt-username", os.Getenv("MQTT_USERNAME"), "Broker username, use MQTT_USERNAME env if arg not set")
+	flagSet.StringVar(password, "mqtt-password", os.Getenv("MQTT_PASSWORD"), "Broker password, use MQTT_PASSWORD env if arg not set")
+	flagSet.StringVar(clientId, "mqtt-client-id", *clientId, "MQTT client id, use MQTT_CLIENT_ID env if arg not set")
+	flagSet.IntVar(mqttQos, "mqtt-qos", *mqttQos, "QoS used to publish messages, use MQTT_QOS env if arg not set")
+	flagSet.BoolVar(mqttRetain, "mqtt-retain", *mqttRetain, "Retain MQTT messages; if arg not set, defaults to true when MQTT_RETAIN env variable is set")
+}
+
+func InitMqttFlags(defaultClientId string, mqttBroker, username, password, clientId *string, mqttQos *int, mqttRetain *bool) {
+ InitMqttFlagSet(flag.CommandLine, defaultClientId, mqttBroker, username, password, clientId, mqttQos, mqttRetain)
+}
+
+func InitIntFlag(key string, defValue int) int {
+ var value int
+ err := SetIntDefaultValueFromEnv(&value, key, defValue)
+ if err != nil {
+ log.Panicf("invalid int value: %v", err)
+ }
+ return value
+}
+
+func InitFloat64Flag(key string, defValue float64) float64 {
+ var value float64
+ err := SetFloat64DefaultValueFromEnv(&value, key, defValue)
+ if err != nil {
+ log.Panicf("invalid value: %v", err)
+ }
+ return value
+}
+
+func Connect(uri, username, password, clientId string) (MQTT.Client, error) {
+	//create a ClientOptions struct with the broker address, credentials and client id,
+	//enable auto-reconnect and set the default message handler
+ opts := MQTT.NewClientOptions().AddBroker(uri)
+ opts.SetUsername(username)
+ opts.SetPassword(password)
+ opts.SetClientID(clientId)
+ opts.SetAutoReconnect(true)
+ opts.SetDefaultPublishHandler(
+ //define a function for the default message handler
+ func(client MQTT.Client, msg MQTT.Message) {
+ fmt.Printf("TOPIC: %s\n", msg.Topic())
+ fmt.Printf("MSG: %s\n", msg.Payload())
+ })
+
+ //create and start a client using the above ClientOptions
+ client := MQTT.NewClient(opts)
+ if token := client.Connect(); token.Wait() && token.Error() != nil {
+ return nil, fmt.Errorf("unable to connect to mqtt bus: %v", token.Error())
+ }
+ return client, nil
+}
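
The helpers in cli.go are meant to be composed in a part's main: the Init*Flags functions seed flag defaults from MQTT_* environment variables, flag.Parse lets the command line override them, Connect opens the broker session, and HandleExit ties the part's Stop to process signals. The sketch below is one plausible wiring, not code from this repository; demoPart and the "demo-part" client id are hypothetical stand-ins for a real service.Part implementation.

```go
package main

import (
	"flag"
	"log"

	"github.com/cyrilix/robocar-base/cli"
)

// demoPart is a hypothetical service.Part used only to show the wiring;
// a real part would subscribe to topics and do work in Start.
type demoPart struct {
	done chan struct{}
}

func (p *demoPart) Start() error {
	<-p.done // block until Stop is called
	return nil
}

func (p *demoPart) Stop() { close(p.done) }

func main() {
	var broker, username, password, clientId string
	var qos int
	var retain bool

	// Seed defaults from MQTT_* env vars, then let command-line flags override them.
	cli.InitMqttFlags("demo-part", &broker, &username, &password, &clientId, &qos, &retain)
	flag.Parse()

	client, err := cli.Connect(broker, username, password, clientId)
	if err != nil {
		log.Fatalf("unable to connect to broker: %v", err)
	}
	defer client.Disconnect(50)

	part := &demoPart{done: make(chan struct{})}
	cli.HandleExit(part) // calls part.Stop and exits on SIGINT/SIGTERM

	if err := part.Start(); err != nil {
		log.Fatalf("part stopped with error: %v", err)
	}
}
```

Run it with MQTT_BROKER=tcp://broker:1883 in the environment or pass -mqtt-broker explicitly; both paths end up in the same variables.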
diff --git a/vendor/github.com/cyrilix/robocar-base/service/part.go b/vendor/github.com/cyrilix/robocar-base/service/part.go
new file mode 100644
index 0000000..4a79869
--- /dev/null
+++ b/vendor/github.com/cyrilix/robocar-base/service/part.go
@@ -0,0 +1,32 @@
+package service
+
+import (
+ "fmt"
+ mqtt "github.com/eclipse/paho.mqtt.golang"
+ "log"
+)
+
+func StopService(name string, client mqtt.Client, topics ...string) {
+ log.Printf("Stop %s service", name)
+ token := client.Unsubscribe(topics...)
+ token.Wait()
+ if token.Error() != nil {
+ log.Printf("unable to unsubscribe service: %v", token.Error())
+ }
+ client.Disconnect(50)
+}
+
+func RegisterCallback(client mqtt.Client, topic string, callback mqtt.MessageHandler) error {
+ log.Printf("Register callback on topic %v", topic)
+ token := client.Subscribe(topic, 0, callback)
+ token.Wait()
+ if token.Error() != nil {
+ return fmt.Errorf("unable to register callback on topic %s: %v", topic, token.Error())
+ }
+ return nil
+}
+
+type Part interface {
+ Start() error
+ Stop()
+}
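
Part is the lifecycle contract the robocar services are built around: Start blocks while the part runs, Stop tears it down, and RegisterCallback/StopService take care of the MQTT plumbing. A minimal, hypothetical implementation that just logs incoming payloads could look like the following; the package name, the "echo" service name, and the topic handling are illustrative only.

```go
package echo

import (
	"log"

	"github.com/cyrilix/robocar-base/service"
	mqtt "github.com/eclipse/paho.mqtt.golang"
)

// Echo is a hypothetical Part that logs every message published on one topic.
type Echo struct {
	client mqtt.Client
	topic  string
	done   chan struct{}
}

func New(client mqtt.Client, topic string) *Echo {
	return &Echo{client: client, topic: topic, done: make(chan struct{})}
}

// Start subscribes through the shared helper and blocks until Stop is called.
func (e *Echo) Start() error {
	if err := service.RegisterCallback(e.client, e.topic, e.onMessage); err != nil {
		return err
	}
	<-e.done
	return nil
}

// Stop unsubscribes, disconnects the client and releases Start.
func (e *Echo) Stop() {
	service.StopService("echo", e.client, e.topic)
	close(e.done)
}

func (e *Echo) onMessage(_ mqtt.Client, msg mqtt.Message) {
	log.Printf("received %d bytes on %s", len(msg.Payload()), msg.Topic())
}
```

A main would construct it with the client returned by cli.Connect, hand it to cli.HandleExit, and then call Start.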
diff --git a/vendor/github.com/cyrilix/robocar-protobuf/go/LICENSE b/vendor/github.com/cyrilix/robocar-protobuf/go/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/cyrilix/robocar-protobuf/go/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/cyrilix/robocar-protobuf/go/events/events.pb.go b/vendor/github.com/cyrilix/robocar-protobuf/go/events/events.pb.go
new file mode 100644
index 0000000..b5e1bb2
--- /dev/null
+++ b/vendor/github.com/cyrilix/robocar-protobuf/go/events/events.pb.go
@@ -0,0 +1,1196 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.12.4
+// source: events/events.proto
+
+package events
+
+import (
+ timestamp "github.com/golang/protobuf/ptypes/timestamp"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type DriveMode int32
+
+const (
+ DriveMode_INVALID DriveMode = 0
+ DriveMode_USER DriveMode = 1
+ DriveMode_PILOT DriveMode = 2
+)
+
+// Enum value maps for DriveMode.
+var (
+ DriveMode_name = map[int32]string{
+ 0: "INVALID",
+ 1: "USER",
+ 2: "PILOT",
+ }
+ DriveMode_value = map[string]int32{
+ "INVALID": 0,
+ "USER": 1,
+ "PILOT": 2,
+ }
+)
+
+func (x DriveMode) Enum() *DriveMode {
+ p := new(DriveMode)
+ *p = x
+ return p
+}
+
+func (x DriveMode) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (DriveMode) Descriptor() protoreflect.EnumDescriptor {
+ return file_events_events_proto_enumTypes[0].Descriptor()
+}
+
+func (DriveMode) Type() protoreflect.EnumType {
+ return &file_events_events_proto_enumTypes[0]
+}
+
+func (x DriveMode) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use DriveMode.Descriptor instead.
+func (DriveMode) EnumDescriptor() ([]byte, []int) {
+ return file_events_events_proto_rawDescGZIP(), []int{0}
+}
+
+type TypeObject int32
+
+const (
+ TypeObject_ANY TypeObject = 0
+ TypeObject_CAR TypeObject = 1
+ TypeObject_BUMP TypeObject = 2
+ TypeObject_PLOT TypeObject = 3
+)
+
+// Enum value maps for TypeObject.
+var (
+ TypeObject_name = map[int32]string{
+ 0: "ANY",
+ 1: "CAR",
+ 2: "BUMP",
+ 3: "PLOT",
+ }
+ TypeObject_value = map[string]int32{
+ "ANY": 0,
+ "CAR": 1,
+ "BUMP": 2,
+ "PLOT": 3,
+ }
+)
+
+func (x TypeObject) Enum() *TypeObject {
+ p := new(TypeObject)
+ *p = x
+ return p
+}
+
+func (x TypeObject) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (TypeObject) Descriptor() protoreflect.EnumDescriptor {
+ return file_events_events_proto_enumTypes[1].Descriptor()
+}
+
+func (TypeObject) Type() protoreflect.EnumType {
+ return &file_events_events_proto_enumTypes[1]
+}
+
+func (x TypeObject) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use TypeObject.Descriptor instead.
+func (TypeObject) EnumDescriptor() ([]byte, []int) {
+ return file_events_events_proto_rawDescGZIP(), []int{1}
+}
+
+type FrameRef struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
+ CreatedAt *timestamp.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
+}
+
+func (x *FrameRef) Reset() {
+ *x = FrameRef{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_events_events_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FrameRef) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FrameRef) ProtoMessage() {}
+
+func (x *FrameRef) ProtoReflect() protoreflect.Message {
+ mi := &file_events_events_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FrameRef.ProtoReflect.Descriptor instead.
+func (*FrameRef) Descriptor() ([]byte, []int) {
+ return file_events_events_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *FrameRef) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *FrameRef) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *FrameRef) GetCreatedAt() *timestamp.Timestamp {
+ if x != nil {
+ return x.CreatedAt
+ }
+ return nil
+}
+
+type FrameMessage struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id *FrameRef `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ Frame []byte `protobuf:"bytes,2,opt,name=frame,proto3" json:"frame,omitempty"`
+}
+
+func (x *FrameMessage) Reset() {
+ *x = FrameMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_events_events_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FrameMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FrameMessage) ProtoMessage() {}
+
+func (x *FrameMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_events_events_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FrameMessage.ProtoReflect.Descriptor instead.
+func (*FrameMessage) Descriptor() ([]byte, []int) {
+ return file_events_events_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *FrameMessage) GetId() *FrameRef {
+ if x != nil {
+ return x.Id
+ }
+ return nil
+}
+
+func (x *FrameMessage) GetFrame() []byte {
+ if x != nil {
+ return x.Frame
+ }
+ return nil
+}
+
+type SteeringMessage struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Steering float32 `protobuf:"fixed32,1,opt,name=steering,proto3" json:"steering,omitempty"`
+ Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
+ FrameRef *FrameRef `protobuf:"bytes,3,opt,name=frame_ref,json=frameRef,proto3" json:"frame_ref,omitempty"`
+}
+
+func (x *SteeringMessage) Reset() {
+ *x = SteeringMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_events_events_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SteeringMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SteeringMessage) ProtoMessage() {}
+
+func (x *SteeringMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_events_events_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SteeringMessage.ProtoReflect.Descriptor instead.
+func (*SteeringMessage) Descriptor() ([]byte, []int) {
+ return file_events_events_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *SteeringMessage) GetSteering() float32 {
+ if x != nil {
+ return x.Steering
+ }
+ return 0
+}
+
+func (x *SteeringMessage) GetConfidence() float32 {
+ if x != nil {
+ return x.Confidence
+ }
+ return 0
+}
+
+func (x *SteeringMessage) GetFrameRef() *FrameRef {
+ if x != nil {
+ return x.FrameRef
+ }
+ return nil
+}
+
+type ThrottleMessage struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Throttle float32 `protobuf:"fixed32,1,opt,name=throttle,proto3" json:"throttle,omitempty"`
+ Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
+ FrameRef *FrameRef `protobuf:"bytes,3,opt,name=frame_ref,json=frameRef,proto3" json:"frame_ref,omitempty"`
+}
+
+func (x *ThrottleMessage) Reset() {
+ *x = ThrottleMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_events_events_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ThrottleMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ThrottleMessage) ProtoMessage() {}
+
+func (x *ThrottleMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_events_events_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ThrottleMessage.ProtoReflect.Descriptor instead.
+func (*ThrottleMessage) Descriptor() ([]byte, []int) {
+ return file_events_events_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *ThrottleMessage) GetThrottle() float32 {
+ if x != nil {
+ return x.Throttle
+ }
+ return 0
+}
+
+func (x *ThrottleMessage) GetConfidence() float32 {
+ if x != nil {
+ return x.Confidence
+ }
+ return 0
+}
+
+func (x *ThrottleMessage) GetFrameRef() *FrameRef {
+ if x != nil {
+ return x.FrameRef
+ }
+ return nil
+}
+
+type DriveModeMessage struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ DriveMode DriveMode `protobuf:"varint,1,opt,name=drive_mode,json=driveMode,proto3,enum=robocar.events.DriveMode" json:"drive_mode,omitempty"`
+}
+
+func (x *DriveModeMessage) Reset() {
+ *x = DriveModeMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_events_events_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DriveModeMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DriveModeMessage) ProtoMessage() {}
+
+func (x *DriveModeMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_events_events_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DriveModeMessage.ProtoReflect.Descriptor instead.
+func (*DriveModeMessage) Descriptor() ([]byte, []int) {
+ return file_events_events_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *DriveModeMessage) GetDriveMode() DriveMode {
+ if x != nil {
+ return x.DriveMode
+ }
+ return DriveMode_INVALID
+}
+
+type ObjectsMessage struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Objects []*Object `protobuf:"bytes,1,rep,name=objects,proto3" json:"objects,omitempty"`
+ FrameRef *FrameRef `protobuf:"bytes,2,opt,name=frame_ref,json=frameRef,proto3" json:"frame_ref,omitempty"`
+}
+
+func (x *ObjectsMessage) Reset() {
+ *x = ObjectsMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_events_events_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ObjectsMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ObjectsMessage) ProtoMessage() {}
+
+func (x *ObjectsMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_events_events_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ObjectsMessage.ProtoReflect.Descriptor instead.
+func (*ObjectsMessage) Descriptor() ([]byte, []int) {
+ return file_events_events_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *ObjectsMessage) GetObjects() []*Object {
+ if x != nil {
+ return x.Objects
+ }
+ return nil
+}
+
+func (x *ObjectsMessage) GetFrameRef() *FrameRef {
+ if x != nil {
+ return x.FrameRef
+ }
+ return nil
+}
+
+// BoundingBox that contains an object
+type Object struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Type TypeObject `protobuf:"varint,1,opt,name=type,proto3,enum=robocar.events.TypeObject" json:"type,omitempty"`
+ Left int32 `protobuf:"varint,2,opt,name=left,proto3" json:"left,omitempty"`
+ Top int32 `protobuf:"varint,3,opt,name=top,proto3" json:"top,omitempty"`
+ Right int32 `protobuf:"varint,4,opt,name=right,proto3" json:"right,omitempty"`
+ Bottom int32 `protobuf:"varint,5,opt,name=bottom,proto3" json:"bottom,omitempty"`
+ Confidence float32 `protobuf:"fixed32,6,opt,name=confidence,proto3" json:"confidence,omitempty"`
+}
+
+func (x *Object) Reset() {
+ *x = Object{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_events_events_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Object) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Object) ProtoMessage() {}
+
+func (x *Object) ProtoReflect() protoreflect.Message {
+ mi := &file_events_events_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Object.ProtoReflect.Descriptor instead.
+func (*Object) Descriptor() ([]byte, []int) {
+ return file_events_events_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *Object) GetType() TypeObject {
+ if x != nil {
+ return x.Type
+ }
+ return TypeObject_ANY
+}
+
+func (x *Object) GetLeft() int32 {
+ if x != nil {
+ return x.Left
+ }
+ return 0
+}
+
+func (x *Object) GetTop() int32 {
+ if x != nil {
+ return x.Top
+ }
+ return 0
+}
+
+func (x *Object) GetRight() int32 {
+ if x != nil {
+ return x.Right
+ }
+ return 0
+}
+
+func (x *Object) GetBottom() int32 {
+ if x != nil {
+ return x.Bottom
+ }
+ return 0
+}
+
+func (x *Object) GetConfidence() float32 {
+ if x != nil {
+ return x.Confidence
+ }
+ return 0
+}
+
+type SwitchRecordMessage struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
+}
+
+func (x *SwitchRecordMessage) Reset() {
+ *x = SwitchRecordMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_events_events_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SwitchRecordMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SwitchRecordMessage) ProtoMessage() {}
+
+func (x *SwitchRecordMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_events_events_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SwitchRecordMessage.ProtoReflect.Descriptor instead.
+func (*SwitchRecordMessage) Descriptor() ([]byte, []int) {
+ return file_events_events_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *SwitchRecordMessage) GetEnabled() bool {
+ if x != nil {
+ return x.Enabled
+ }
+ return false
+}
+
+// Road description
+type RoadMessage struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+	Contour  []*Point  `protobuf:"bytes,1,rep,name=contour,proto3" json:"contour,omitempty"`   // list of points that describe the road contour
+	Ellipse  *Ellipse  `protobuf:"bytes,2,opt,name=ellipse,proto3" json:"ellipse,omitempty"`   // rotated ellipse that fits the road contour
+	FrameRef *FrameRef `protobuf:"bytes,3,opt,name=frame_ref,json=frameRef,proto3" json:"frame_ref,omitempty"` // Reference to the frame used for the computation
+}
+
+func (x *RoadMessage) Reset() {
+ *x = RoadMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_events_events_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RoadMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RoadMessage) ProtoMessage() {}
+
+func (x *RoadMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_events_events_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RoadMessage.ProtoReflect.Descriptor instead.
+func (*RoadMessage) Descriptor() ([]byte, []int) {
+ return file_events_events_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *RoadMessage) GetContour() []*Point {
+ if x != nil {
+ return x.Contour
+ }
+ return nil
+}
+
+func (x *RoadMessage) GetEllipse() *Ellipse {
+ if x != nil {
+ return x.Ellipse
+ }
+ return nil
+}
+
+func (x *RoadMessage) GetFrameRef() *FrameRef {
+ if x != nil {
+ return x.FrameRef
+ }
+ return nil
+}
+
+type Point struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ X int32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
+ Y int32 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
+}
+
+func (x *Point) Reset() {
+ *x = Point{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_events_events_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Point) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Point) ProtoMessage() {}
+
+func (x *Point) ProtoReflect() protoreflect.Message {
+ mi := &file_events_events_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Point.ProtoReflect.Descriptor instead.
+func (*Point) Descriptor() ([]byte, []int) {
+ return file_events_events_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *Point) GetX() int32 {
+ if x != nil {
+ return x.X
+ }
+ return 0
+}
+
+func (x *Point) GetY() int32 {
+ if x != nil {
+ return x.Y
+ }
+ return 0
+}
+
+type Ellipse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Center *Point `protobuf:"bytes,1,opt,name=center,proto3" json:"center,omitempty"`
+ Width int32 `protobuf:"varint,2,opt,name=width,proto3" json:"width,omitempty"`
+ Height int32 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"`
+ Angle float32 `protobuf:"fixed32,4,opt,name=angle,proto3" json:"angle,omitempty"`
+ Confidence float32 `protobuf:"fixed32,5,opt,name=confidence,proto3" json:"confidence,omitempty"`
+}
+
+func (x *Ellipse) Reset() {
+ *x = Ellipse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_events_events_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Ellipse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Ellipse) ProtoMessage() {}
+
+func (x *Ellipse) ProtoReflect() protoreflect.Message {
+ mi := &file_events_events_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Ellipse.ProtoReflect.Descriptor instead.
+func (*Ellipse) Descriptor() ([]byte, []int) {
+ return file_events_events_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *Ellipse) GetCenter() *Point {
+ if x != nil {
+ return x.Center
+ }
+ return nil
+}
+
+func (x *Ellipse) GetWidth() int32 {
+ if x != nil {
+ return x.Width
+ }
+ return 0
+}
+
+func (x *Ellipse) GetHeight() int32 {
+ if x != nil {
+ return x.Height
+ }
+ return 0
+}
+
+func (x *Ellipse) GetAngle() float32 {
+ if x != nil {
+ return x.Angle
+ }
+ return 0
+}
+
+func (x *Ellipse) GetConfidence() float32 {
+ if x != nil {
+ return x.Confidence
+ }
+ return 0
+}
+
+// Record message used for TensorFlow training
+type RecordMessage struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Frame *FrameMessage `protobuf:"bytes,1,opt,name=frame,proto3" json:"frame,omitempty"`
+ Steering *SteeringMessage `protobuf:"bytes,2,opt,name=steering,proto3" json:"steering,omitempty"`
+ RecordSet string `protobuf:"bytes,3,opt,name=recordSet,proto3" json:"recordSet,omitempty"` // Record set name
+}
+
+func (x *RecordMessage) Reset() {
+ *x = RecordMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_events_events_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RecordMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RecordMessage) ProtoMessage() {}
+
+func (x *RecordMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_events_events_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RecordMessage.ProtoReflect.Descriptor instead.
+func (*RecordMessage) Descriptor() ([]byte, []int) {
+ return file_events_events_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *RecordMessage) GetFrame() *FrameMessage {
+ if x != nil {
+ return x.Frame
+ }
+ return nil
+}
+
+func (x *RecordMessage) GetSteering() *SteeringMessage {
+ if x != nil {
+ return x.Steering
+ }
+ return nil
+}
+
+func (x *RecordMessage) GetRecordSet() string {
+ if x != nil {
+ return x.RecordSet
+ }
+ return ""
+}
+
+var File_events_events_proto protoreflect.FileDescriptor
+
+var file_events_events_proto_rawDesc = []byte{
+ 0x0a, 0x13, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x72, 0x6f, 0x62, 0x6f, 0x63, 0x61, 0x72, 0x2e, 0x65,
+ 0x76, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x69, 0x0a, 0x08, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52,
+ 0x65, 0x66, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65,
+ 0x64, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
+ 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41,
+ 0x74, 0x22, 0x4e, 0x0a, 0x0c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
+ 0x65, 0x12, 0x28, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e,
+ 0x72, 0x6f, 0x62, 0x6f, 0x63, 0x61, 0x72, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x46,
+ 0x72, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x66, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66,
+ 0x72, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x66, 0x72, 0x61, 0x6d,
+ 0x65, 0x22, 0x84, 0x01, 0x0a, 0x0f, 0x53, 0x74, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x65,
+ 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x74, 0x65, 0x65, 0x72, 0x69, 0x6e,
+ 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x08, 0x73, 0x74, 0x65, 0x65, 0x72, 0x69, 0x6e,
+ 0x67, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x64, 0x65, 0x6e, 0x63, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x64, 0x65, 0x6e, 0x63,
+ 0x65, 0x12, 0x35, 0x0a, 0x09, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x72, 0x6f, 0x62, 0x6f, 0x63, 0x61, 0x72, 0x2e, 0x65,
+ 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x66, 0x52, 0x08,
+ 0x66, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x66, 0x22, 0x84, 0x01, 0x0a, 0x0f, 0x54, 0x68, 0x72,
+ 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1a, 0x0a, 0x08,
+ 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x08,
+ 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x64, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0a, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x64, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x35, 0x0a, 0x09, 0x66, 0x72, 0x61, 0x6d,
+ 0x65, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x72, 0x6f,
+ 0x62, 0x6f, 0x63, 0x61, 0x72, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x46, 0x72, 0x61,
+ 0x6d, 0x65, 0x52, 0x65, 0x66, 0x52, 0x08, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x66, 0x22,
+ 0x4c, 0x0a, 0x10, 0x44, 0x72, 0x69, 0x76, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x73, 0x73,
+ 0x61, 0x67, 0x65, 0x12, 0x38, 0x0a, 0x0a, 0x64, 0x72, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x6f, 0x64,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x72, 0x6f, 0x62, 0x6f, 0x63, 0x61,
+ 0x72, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x44, 0x72, 0x69, 0x76, 0x65, 0x4d, 0x6f,
+ 0x64, 0x65, 0x52, 0x09, 0x64, 0x72, 0x69, 0x76, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x22, 0x79, 0x0a,
+ 0x0e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12,
+ 0x30, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x16, 0x2e, 0x72, 0x6f, 0x62, 0x6f, 0x63, 0x61, 0x72, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74,
+ 0x73, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x73, 0x12, 0x35, 0x0a, 0x09, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x72, 0x6f, 0x62, 0x6f, 0x63, 0x61, 0x72, 0x2e, 0x65,
+ 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x66, 0x52, 0x08,
+ 0x66, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x66, 0x22, 0xac, 0x01, 0x0a, 0x06, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x12, 0x2e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0e, 0x32, 0x1a, 0x2e, 0x72, 0x6f, 0x62, 0x6f, 0x63, 0x61, 0x72, 0x2e, 0x65, 0x76, 0x65, 0x6e,
+ 0x74, 0x73, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x04, 0x74,
+ 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x65, 0x66, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x05, 0x52, 0x04, 0x6c, 0x65, 0x66, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x6f, 0x70, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x74, 0x6f, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x69, 0x67,
+ 0x68, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x72, 0x69, 0x67, 0x68, 0x74, 0x12,
+ 0x16, 0x0a, 0x06, 0x62, 0x6f, 0x74, 0x74, 0x6f, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52,
+ 0x06, 0x62, 0x6f, 0x74, 0x74, 0x6f, 0x6d, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x64, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0a, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x64, 0x65, 0x6e, 0x63, 0x65, 0x22, 0x2f, 0x0a, 0x13, 0x53, 0x77, 0x69, 0x74, 0x63,
+ 0x68, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x18,
+ 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0xa8, 0x01, 0x0a, 0x0b, 0x52, 0x6f, 0x61,
+ 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74,
+ 0x6f, 0x75, 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x72, 0x6f, 0x62, 0x6f,
+ 0x63, 0x61, 0x72, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x50, 0x6f, 0x69, 0x6e, 0x74,
+ 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x6f, 0x75, 0x72, 0x12, 0x31, 0x0a, 0x07, 0x65, 0x6c, 0x6c,
+ 0x69, 0x70, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x72, 0x6f, 0x62,
+ 0x6f, 0x63, 0x61, 0x72, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x45, 0x6c, 0x6c, 0x69,
+ 0x70, 0x73, 0x65, 0x52, 0x07, 0x65, 0x6c, 0x6c, 0x69, 0x70, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x09,
+ 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x18, 0x2e, 0x72, 0x6f, 0x62, 0x6f, 0x63, 0x61, 0x72, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73,
+ 0x2e, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x66, 0x52, 0x08, 0x66, 0x72, 0x61, 0x6d, 0x65,
+ 0x52, 0x65, 0x66, 0x22, 0x23, 0x0a, 0x05, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x0c, 0x0a, 0x01,
+ 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x01, 0x78, 0x12, 0x0c, 0x0a, 0x01, 0x79, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x01, 0x79, 0x22, 0x9c, 0x01, 0x0a, 0x07, 0x45, 0x6c, 0x6c,
+ 0x69, 0x70, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x06, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x72, 0x6f, 0x62, 0x6f, 0x63, 0x61, 0x72, 0x2e, 0x65,
+ 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x06, 0x63, 0x65, 0x6e,
+ 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x77, 0x69, 0x64, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x05, 0x52, 0x05, 0x77, 0x69, 0x64, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69,
+ 0x67, 0x68, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68,
+ 0x74, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x6e, 0x67, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x02,
+ 0x52, 0x05, 0x61, 0x6e, 0x67, 0x6c, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x64, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0a, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x64, 0x65, 0x6e, 0x63, 0x65, 0x22, 0x9e, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x63, 0x6f,
+ 0x72, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x32, 0x0a, 0x05, 0x66, 0x72, 0x61,
+ 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x72, 0x6f, 0x62, 0x6f, 0x63,
+ 0x61, 0x72, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x4d,
+ 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x05, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a,
+ 0x08, 0x73, 0x74, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1f, 0x2e, 0x72, 0x6f, 0x62, 0x6f, 0x63, 0x61, 0x72, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73,
+ 0x2e, 0x53, 0x74, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+ 0x52, 0x08, 0x73, 0x74, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65,
+ 0x63, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72,
+ 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x74, 0x2a, 0x2d, 0x0a, 0x09, 0x44, 0x72, 0x69, 0x76,
+ 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44,
+ 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x55, 0x53, 0x45, 0x52, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05,
+ 0x50, 0x49, 0x4c, 0x4f, 0x54, 0x10, 0x02, 0x2a, 0x32, 0x0a, 0x0a, 0x54, 0x79, 0x70, 0x65, 0x4f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x59, 0x10, 0x00, 0x12, 0x07,
+ 0x0a, 0x03, 0x43, 0x41, 0x52, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x55, 0x4d, 0x50, 0x10,
+ 0x02, 0x12, 0x08, 0x0a, 0x04, 0x50, 0x4c, 0x4f, 0x54, 0x10, 0x03, 0x42, 0x0a, 0x5a, 0x08, 0x2e,
+ 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_events_events_proto_rawDescOnce sync.Once
+ file_events_events_proto_rawDescData = file_events_events_proto_rawDesc
+)
+
+func file_events_events_proto_rawDescGZIP() []byte {
+ file_events_events_proto_rawDescOnce.Do(func() {
+ file_events_events_proto_rawDescData = protoimpl.X.CompressGZIP(file_events_events_proto_rawDescData)
+ })
+ return file_events_events_proto_rawDescData
+}
+
+var file_events_events_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_events_events_proto_msgTypes = make([]protoimpl.MessageInfo, 12)
+var file_events_events_proto_goTypes = []interface{}{
+ (DriveMode)(0), // 0: robocar.events.DriveMode
+ (TypeObject)(0), // 1: robocar.events.TypeObject
+ (*FrameRef)(nil), // 2: robocar.events.FrameRef
+ (*FrameMessage)(nil), // 3: robocar.events.FrameMessage
+ (*SteeringMessage)(nil), // 4: robocar.events.SteeringMessage
+ (*ThrottleMessage)(nil), // 5: robocar.events.ThrottleMessage
+ (*DriveModeMessage)(nil), // 6: robocar.events.DriveModeMessage
+ (*ObjectsMessage)(nil), // 7: robocar.events.ObjectsMessage
+ (*Object)(nil), // 8: robocar.events.Object
+ (*SwitchRecordMessage)(nil), // 9: robocar.events.SwitchRecordMessage
+ (*RoadMessage)(nil), // 10: robocar.events.RoadMessage
+ (*Point)(nil), // 11: robocar.events.Point
+ (*Ellipse)(nil), // 12: robocar.events.Ellipse
+ (*RecordMessage)(nil), // 13: robocar.events.RecordMessage
+ (*timestamp.Timestamp)(nil), // 14: google.protobuf.Timestamp
+}
+var file_events_events_proto_depIdxs = []int32{
+ 14, // 0: robocar.events.FrameRef.created_at:type_name -> google.protobuf.Timestamp
+ 2, // 1: robocar.events.FrameMessage.id:type_name -> robocar.events.FrameRef
+ 2, // 2: robocar.events.SteeringMessage.frame_ref:type_name -> robocar.events.FrameRef
+ 2, // 3: robocar.events.ThrottleMessage.frame_ref:type_name -> robocar.events.FrameRef
+ 0, // 4: robocar.events.DriveModeMessage.drive_mode:type_name -> robocar.events.DriveMode
+ 8, // 5: robocar.events.ObjectsMessage.objects:type_name -> robocar.events.Object
+ 2, // 6: robocar.events.ObjectsMessage.frame_ref:type_name -> robocar.events.FrameRef
+ 1, // 7: robocar.events.Object.type:type_name -> robocar.events.TypeObject
+ 11, // 8: robocar.events.RoadMessage.contour:type_name -> robocar.events.Point
+ 12, // 9: robocar.events.RoadMessage.ellipse:type_name -> robocar.events.Ellipse
+ 2, // 10: robocar.events.RoadMessage.frame_ref:type_name -> robocar.events.FrameRef
+ 11, // 11: robocar.events.Ellipse.center:type_name -> robocar.events.Point
+ 3, // 12: robocar.events.RecordMessage.frame:type_name -> robocar.events.FrameMessage
+ 4, // 13: robocar.events.RecordMessage.steering:type_name -> robocar.events.SteeringMessage
+ 14, // [14:14] is the sub-list for method output_type
+ 14, // [14:14] is the sub-list for method input_type
+ 14, // [14:14] is the sub-list for extension type_name
+ 14, // [14:14] is the sub-list for extension extendee
+ 0, // [0:14] is the sub-list for field type_name
+}
+
+func init() { file_events_events_proto_init() }
+func file_events_events_proto_init() {
+ if File_events_events_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_events_events_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FrameRef); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_events_events_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FrameMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_events_events_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SteeringMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_events_events_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ThrottleMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_events_events_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DriveModeMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_events_events_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ObjectsMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_events_events_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Object); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_events_events_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SwitchRecordMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_events_events_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RoadMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_events_events_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Point); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_events_events_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Ellipse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_events_events_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RecordMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_events_events_proto_rawDesc,
+ NumEnums: 2,
+ NumMessages: 12,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_events_events_proto_goTypes,
+ DependencyIndexes: file_events_events_proto_depIdxs,
+ EnumInfos: file_events_events_proto_enumTypes,
+ MessageInfos: file_events_events_proto_msgTypes,
+ }.Build()
+ File_events_events_proto = out.File
+ file_events_events_proto_rawDesc = nil
+ file_events_events_proto_goTypes = nil
+ file_events_events_proto_depIdxs = nil
+}
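
The generated types above are ordinary protobuf messages, so the usual flow in a part is to build one, proto.Marshal it, and publish the bytes as an MQTT payload; subscribers proto.Unmarshal on receipt. The sketch below assumes google.golang.org/protobuf is available (the generated code already depends on it, and recent github.com/golang/protobuf releases alias the well-known types, so a timestamppb value fits the CreatedAt field); the frame name, id, and steering values are invented placeholders.

```go
package main

import (
	"log"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/timestamppb"

	"github.com/cyrilix/robocar-protobuf/go/events"
)

func main() {
	// Build a steering event that references the frame it was computed from.
	msg := &events.SteeringMessage{
		Steering:   -0.25,
		Confidence: 0.9,
		FrameRef: &events.FrameRef{
			Name:      "camera/front",
			Id:        "frame-000042",
			CreatedAt: timestamppb.Now(),
		},
	}

	// Marshal to the wire format; these bytes are what a part would publish
	// as an MQTT payload.
	payload, err := proto.Marshal(msg)
	if err != nil {
		log.Fatalf("unable to marshal steering message: %v", err)
	}

	// A subscriber does the inverse to recover the typed message.
	var decoded events.SteeringMessage
	if err := proto.Unmarshal(payload, &decoded); err != nil {
		log.Fatalf("unable to unmarshal steering message: %v", err)
	}
	log.Printf("steering=%.2f confidence=%.2f frame=%s",
		decoded.GetSteering(), decoded.GetConfidence(), decoded.GetFrameRef().GetId())
}
```

The same round trip applies to FrameMessage, ThrottleMessage, and the other message types in this file.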
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/.gitignore b/vendor/github.com/eclipse/paho.mqtt.golang/.gitignore
new file mode 100644
index 0000000..47bb0de
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/.gitignore
@@ -0,0 +1,36 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+*.msg
+*.lok
+
+samples/trivial
+samples/trivial2
+samples/sample
+samples/reconnect
+samples/ssl
+samples/custom_store
+samples/simple
+samples/stdinpub
+samples/stdoutsub
+samples/routing
\ No newline at end of file
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/CONTRIBUTING.md b/vendor/github.com/eclipse/paho.mqtt.golang/CONTRIBUTING.md
new file mode 100644
index 0000000..9791dc6
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/CONTRIBUTING.md
@@ -0,0 +1,56 @@
+Contributing to Paho
+====================
+
+Thanks for your interest in this project.
+
+Project description:
+--------------------
+
+The Paho project has been created to provide scalable open-source implementations of open and standard messaging protocols aimed at new, existing, and emerging applications for Machine-to-Machine (M2M) and Internet of Things (IoT).
+Paho reflects the inherent physical and cost constraints of device connectivity. Its objectives include effective levels of decoupling between devices and applications, designed to keep markets open and encourage the rapid growth of scalable Web and Enterprise middleware and applications. Paho is being kicked off with MQTT publish/subscribe client implementations for use on embedded platforms, along with corresponding server support as determined by the community.
+
+- https://projects.eclipse.org/projects/technology.paho
+
+Developer resources:
+--------------------
+
+Information regarding source code management, builds, coding standards, and more.
+
+- https://projects.eclipse.org/projects/technology.paho/developer
+
+Contributor License Agreement:
+------------------------------
+
+Before your contribution can be accepted by the project, you need to create and electronically sign the Eclipse Foundation Contributor License Agreement (CLA).
+
+- http://www.eclipse.org/legal/CLA.php
+
+Contributing Code:
+------------------
+
+The Go client is developed on GitHub; see their documentation on the process of forking and pull requests: https://help.github.com/categories/collaborating-on-projects-using-pull-requests/
+
+Git commit messages should follow the style described here;
+
+http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
+
+Contact:
+--------
+
+Contact the project developers via the project's "dev" list.
+
+- https://dev.eclipse.org/mailman/listinfo/paho-dev
+
+Search for bugs:
+----------------
+
+This project uses GitHub issues to track ongoing development and issues.
+
+- https://github.com/eclipse/paho.mqtt.golang/issues
+
+Create a new bug:
+-----------------
+
+Be sure to search for existing bugs before you create another one. Remember that contributions are always welcome!
+
+- https://github.com/eclipse/paho.mqtt.golang/issues
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/DISTRIBUTION b/vendor/github.com/eclipse/paho.mqtt.golang/DISTRIBUTION
new file mode 100644
index 0000000..34e4973
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/DISTRIBUTION
@@ -0,0 +1,15 @@
+
+
+Eclipse Distribution License - v 1.0
+
+Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors.
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+ Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+ Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+ Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/LICENSE b/vendor/github.com/eclipse/paho.mqtt.golang/LICENSE
new file mode 100644
index 0000000..9b9a9eb
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/LICENSE
@@ -0,0 +1,20 @@
+This project is dual licensed under the Eclipse Public License 1.0 and the
+Eclipse Distribution License 1.0 as described in the epl-v10 and edl-v10 files.
+
+The EDL is copied below in order to pass the pkg.go.dev license check (https://pkg.go.dev/license-policy).
+
+****
+Eclipse Distribution License - v 1.0
+
+Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors.
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+ Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+ Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+ Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/README.md b/vendor/github.com/eclipse/paho.mqtt.golang/README.md
new file mode 100644
index 0000000..c1acf4e
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/README.md
@@ -0,0 +1,177 @@
+
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/eclipse/paho.mqtt.golang)](https://pkg.go.dev/github.com/eclipse/paho.mqtt.golang)
+[![Go Report Card](https://goreportcard.com/badge/github.com/eclipse/paho.mqtt.golang)](https://goreportcard.com/report/github.com/eclipse/paho.mqtt.golang)
+
+Eclipse Paho MQTT Go client
+===========================
+
+
+This repository contains the source code for the [Eclipse Paho](https://eclipse.org/paho) MQTT 3.1/3.1.1 Go client library.
+
+This code builds a library which enables applications to connect to an [MQTT](https://mqtt.org) broker to publish
+messages, and to subscribe to topics and receive published messages.
+
+This library supports a fully asynchronous mode of operation.
+
+A client supporting MQTT V5 is [also available](https://github.com/eclipse/paho.golang).
+
+Installation and Build
+----------------------
+
+The process depends upon whether you are using [modules](https://golang.org/ref/mod) (recommended) or `GOPATH`.
+
+#### Modules
+
+If you are using [modules](https://blog.golang.org/using-go-modules) then `import "github.com/eclipse/paho.mqtt.golang"`
+and start using it. The necessary packages will be downloaded automatically when you run `go build`.
+
+Note that the latest release will be downloaded and changes may have been made since the release. If you have
+encountered an issue, or wish to try the latest code for another reason, then run
+`go get github.com/eclipse/paho.mqtt.golang@master` to get the latest commit.
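+
+For example, a typical import looks like this (the `mqtt` alias is a common convention, not a requirement):
+
+```go
+import (
+	mqtt "github.com/eclipse/paho.mqtt.golang"
+)
+```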
+
+#### GOPATH
+
+Installation is as easy as:
+
+```
+go get github.com/eclipse/paho.mqtt.golang
+```
+
+The client depends on Google's [proxy](https://godoc.org/golang.org/x/net/proxy) package and the
+[websockets](https://godoc.org/github.com/gorilla/websocket) package, also easily installed with the commands:
+
+```
+go get github.com/gorilla/websocket
+go get golang.org/x/net/proxy
+```
+
+
+Usage and API
+-------------
+
+Detailed API documentation is available by using the godoc tool, or can be browsed online
+using the [pkg.go.dev](https://pkg.go.dev/github.com/eclipse/paho.mqtt.golang) service.
+
+Samples are available in the `cmd` directory for reference.
+
+Note:
+
+The library also supports using MQTT over websockets by using the `ws://` (insecure) or `wss://` (secure) prefix in the
+URI. If the client is running behind a corporate http/https proxy then the following environment variables `HTTP_PROXY`,
+`HTTPS_PROXY` and `NO_PROXY` are taken into account when establishing the connection.
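+
+As a brief sketch (the broker URI below is an assumption for illustration), connecting over websockets only requires
+the appropriate URI scheme; everything else works as for a TCP connection:
+
+```go
+func main() {
+	opts := mqtt.NewClientOptions().
+		AddBroker("wss://broker.example.com:443/mqtt"). // use ws:// for an unencrypted connection
+		SetClientID("ws-example")
+	client := mqtt.NewClient(opts)
+	if token := client.Connect(); token.Wait() && token.Error() != nil {
+		log.Fatalln(token.Error()) // connection failed
+	}
+	defer client.Disconnect(250)
+
+	// Subscribe, Publish etc. as usual
+}
+```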
+
+Troubleshooting
+---------------
+
+If you are new to MQTT and your application is not working as expected, reviewing the
+[MQTT specification](https://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html), which this library implements,
+is a good first step. [MQTT.org](https://mqtt.org) has some [good resources](https://mqtt.org/getting-started/) that answer many
+common questions.
+
+### Error Handling
+
+The asynchronous nature of this library makes it easy to forget to check for errors. Consider using a go routine to
+log these:
+
+```go
+t := client.Publish("topic", qos, retained, msg)
+go func() {
+ _ = t.Wait() // Can also use '<-t.Done()' in releases > 1.2.0
+ if t.Error() != nil {
+ log.Error(t.Error()) // Use your preferred logging technique (or just fmt.Printf)
+ }
+}()
+```
+
+### Logging
+
+If you are encountering issues then enabling logging, both within this library and on your broker, is a good way to
+begin troubleshooting. This library can produce various levels of logging by assigning loggers to the endpoints ERROR,
+CRITICAL, WARN and DEBUG. For example:
+
+```go
+func main() {
+ mqtt.ERROR = log.New(os.Stdout, "[ERROR] ", 0)
+ mqtt.CRITICAL = log.New(os.Stdout, "[CRIT] ", 0)
+ mqtt.WARN = log.New(os.Stdout, "[WARN] ", 0)
+ mqtt.DEBUG = log.New(os.Stdout, "[DEBUG] ", 0)
+
+ // Connect, Subscribe, Publish etc..
+}
+```
+
+### Common Problems
+
+* Seemingly random disconnections may be caused by another client connecting to the broker with the same client
+identifier; this is as per the [spec](https://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc384800405).
+* Unless ordered delivery of messages is essential (and you have configured your broker to support this, e.g.
+ `max_inflight_messages=1` in mosquitto), set `ClientOptions.SetOrderMatters(false)`. Doing so avoids the
+ issue described below (deadlocks due to blocking message handlers); see the options sketch after this list.
+* A `MessageHandler` (called when a new message is received) must not block (unless
+ `ClientOptions.SetOrderMatters(false)` is set). If you wish to perform a long-running task, or publish a message, then
+ please use a go routine (blocking in the handler is a common cause of unexpected `pingresp
+not received, disconnecting` errors).
+* When QOS1+ subscriptions have been created previously and you connect with `CleanSession` set to false, it is possible that the broker will deliver retained
+messages before `Subscribe` can be called. To process these messages either configure a handler with `AddRoute` or
+set a `DefaultPublishHandler`.
+* Loss of network connectivity may not be detected immediately. If this is an issue then consider setting
+`ClientOptions.KeepAlive` (sends regular messages to check the link is active).
+* Brokers offer many configuration options; some settings may lead to unexpected results. If using Mosquitto, check
+`max_inflight_messages`, `max_queued_messages`, `persistence` (the defaults may not be what you expect).
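+
+The sketch below (the broker address, client ID and handler are assumptions for illustration) shows how several of the
+options mentioned above are commonly combined:
+
+```go
+opts := mqtt.NewClientOptions().
+	AddBroker("tcp://localhost:1883"). // example broker address
+	SetClientID("options-example").
+	SetOrderMatters(false).         // handlers may block or publish without risking a deadlock
+	SetKeepAlive(30 * time.Second). // detect loss of connectivity more quickly
+	SetCleanSession(false).
+	SetDefaultPublishHandler(func(_ mqtt.Client, m mqtt.Message) {
+		// receives anything delivered before Subscribe is processed (e.g. queued QOS1+ messages)
+		fmt.Printf("unexpected message on %s\n", m.Topic())
+	})
+client := mqtt.NewClient(opts)
+```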
+
+Reporting bugs
+--------------
+
+Please report bugs by raising issues for this project on GitHub: https://github.com/eclipse/paho.mqtt.golang/issues
+
+*A limited number of contributors monitor the issues section so if you have a general question please consider the
+resources in the [more information](#more-information) section (your question will be seen by more people, and you are
+likely to receive an answer more quickly).*
+
+We welcome bug reports, but it is important they are actionable. A significant percentage of issues reported are not
+resolved due to a lack of information. If we cannot replicate the problem then it is unlikely we will be able to fix it.
+The information required will vary from issue to issue but consider including:
+
+* Which version of the package you are using (tag or commit - this should be in your go.mod file)
+* A [Minimal, Reproducible Example](https://stackoverflow.com/help/minimal-reproducible-example). Providing an example
+is the best way to demonstrate the issue you are facing; it is important this includes all relevant information
+(including broker configuration). Docker (see `cmd/docker`) makes it relatively simple to provide a working end-to-end
+example.
+* A full, clear, description of the problem (detail what you are expecting vs what actually happens).
+* Details of your attempts to resolve the issue (what have you tried, what worked, what did not).
+* [Application Logs](#logging) covering the period the issue occurred. Unless you have isolated the root cause of the issue please include a link to a full log (including data from well before the problem arose).
+* Broker Logs covering the period the issue occurred.
+
+It is important to remember that this library does not stand alone; it communicates with a broker and any issues you are
+seeing may be due to:
+
+* Bugs in your code.
+* Bugs in this library.
+* The broker configuration.
+* Bugs in the broker.
+* Issues with whatever you are communicating with.
+
+When submitting an issue, please ensure that you provide sufficient details to enable us to eliminate causes outside of
+this library.
+
+Contributing
+------------
+
+We welcome pull requests but before your contribution can be accepted by the project, you need to create and
+electronically sign the Eclipse Contributor Agreement (ECA) and sign off on the Eclipse Foundation Certificate of Origin.
+
+More information is available in the
+[Eclipse Development Resources](http://wiki.eclipse.org/Development_Resources/Contributing_via_Git); please take special
+note of the requirement that the commit record contain a "Signed-off-by" entry.
+
+More information
+----------------
+
+Discussion of the Paho clients takes place on the [Eclipse paho-dev mailing list](https://dev.eclipse.org/mailman/listinfo/paho-dev).
+
+General questions about the MQTT protocol are discussed in the [MQTT Google Group](https://groups.google.com/forum/?hl=en-US&fromgroups#!forum/mqtt).
+
+There is much more information available via the [MQTT community site](http://mqtt.org).
+
+[Stack Overflow](https://stackoverflow.com/questions/tagged/mqtt+go) has a range of questions covering many common
+issues (both relating to use of this library and MQTT in general).
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/about.html b/vendor/github.com/eclipse/paho.mqtt.golang/about.html
new file mode 100644
index 0000000..b183f41
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/about.html
@@ -0,0 +1,41 @@
+
+
+
+About
+
+
+About This Content
+
+December 9, 2013
+License
+
+The Eclipse Foundation makes available all content in this plug-in ("Content"). Unless otherwise
+indicated below, the Content is provided to you under the terms and conditions of the
+Eclipse Public License Version 1.0 ("EPL") and Eclipse Distribution License Version 1.0 ("EDL").
+A copy of the EPL is available at
+http://www.eclipse.org/legal/epl-v10.html
+and a copy of the EDL is available at
+http://www.eclipse.org/org/documents/edl-v10.php.
+For purposes of the EPL, "Program" will mean the Content.
+
+If you did not receive this Content directly from the Eclipse Foundation, the Content is
+being redistributed by another party ("Redistributor") and different terms and conditions may
+apply to your use of any object code in the Content. Check the Redistributor's license that was
+provided with the Content. If no such license exists, contact the Redistributor. Unless otherwise
+indicated below, the terms and conditions of the EPL still apply to any source code in the Content
+and such source code may be obtained at http://www.eclipse.org.
+
+
+ Third Party Content
+ The Content includes items that have been sourced from third parties as set out below. If you
+ did not receive this Content directly from the Eclipse Foundation, the following is provided
+ for informational purposes only, and you should look to the Redistributor's license for
+ terms and conditions of use.
+
+ None
+
+
+
+
+
+
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/client.go b/vendor/github.com/eclipse/paho.mqtt.golang/client.go
new file mode 100644
index 0000000..847daee
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/client.go
@@ -0,0 +1,1127 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Seth Hoenig
+ * Allan Stockdill-Mander
+ * Mike Robertson
+ */
+
+// Portions copyright © 2018 TIBCO Software Inc.
+
+// Package mqtt provides an MQTT v3.1.1 client library.
+package mqtt
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/eclipse/paho.mqtt.golang/packets"
+)
+
+const (
+ disconnected uint32 = iota
+ connecting
+ reconnecting
+ connected
+)
+
+// Client is the interface definition for a Client as used by this
+// library; the interface exists primarily to allow mocking in tests.
+//
+// It is an MQTT v3.1.1 client for communicating
+// with an MQTT server using non-blocking methods that allow work
+// to be done in the background.
+// An application may connect to an MQTT server using:
+// A plain TCP socket
+// A secure SSL/TLS socket
+// A websocket
+// To enable ensured message delivery at Quality of Service (QoS) levels
+// described in the MQTT spec, a message persistence mechanism must be
+// used. This is done by providing a type which implements the Store
+// interface. For convenience, FileStore and MemoryStore are provided
+// implementations that should be sufficient for most use cases. More
+// information can be found in their respective documentation.
+// Numerous connection options may be specified by configuring
+// and then supplying a ClientOptions type.
+// Implementations of Client must be safe for concurrent use by multiple
+// goroutines
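+// A minimal usage sketch (the broker address below is an example only, not
+// something provided by this package):
+//
+//	opts := NewClientOptions().AddBroker("tcp://localhost:1883").SetClientID("sample")
+//	c := NewClient(opts)
+//	if token := c.Connect(); token.Wait() && token.Error() != nil {
+//		// handle the connection error
+//	}
+//	defer c.Disconnect(250)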
+type Client interface {
+ // IsConnected returns a bool signifying whether
+ // the client is connected or not.
+ IsConnected() bool
+ // IsConnectionOpen returns a bool signifying whether the client has an active
+ // connection to the mqtt broker, i.e. not in disconnected or reconnect mode
+ IsConnectionOpen() bool
+ // Connect will create a connection to the message broker; by default
+ // it will attempt to connect at v3.1.1 and auto retry at v3.1 if that
+ // fails
+ Connect() Token
+ // Disconnect will end the connection with the server, but not before waiting
+ // the specified number of milliseconds for existing work to be
+ // completed.
+ Disconnect(quiesce uint)
+ // Publish will publish a message with the specified QoS and content
+ // to the specified topic.
+ // Returns a token to track delivery of the message to the broker
+ Publish(topic string, qos byte, retained bool, payload interface{}) Token
+ // Subscribe starts a new subscription. Provide a MessageHandler to be executed when
+ // a message is published on the topic provided, or nil for the default handler.
+ //
+ // If options.OrderMatters is true (the default) then callback must not block or
+ // call functions within this package that may block (e.g. Publish) other than in
+ // a new go routine.
+ // callback must be safe for concurrent use by multiple goroutines.
+ Subscribe(topic string, qos byte, callback MessageHandler) Token
+ // SubscribeMultiple starts a new subscription for multiple topics. Provide a MessageHandler to
+ // be executed when a message is published on one of the topics provided, or nil for the
+ // default handler.
+ //
+ // If options.OrderMatters is true (the default) then callback must not block or
+ // call functions within this package that may block (e.g. Publish) other than in
+ // a new go routine.
+ // callback must be safe for concurrent use by multiple goroutines.
+ SubscribeMultiple(filters map[string]byte, callback MessageHandler) Token
+ // Unsubscribe will end the subscription from each of the topics provided.
+ // Messages published to those topics from other clients will no longer be
+ // received.
+ Unsubscribe(topics ...string) Token
+ // AddRoute allows you to add a handler for messages on a specific topic
+ // without making a subscription. For example having a different handler
+ // for parts of a wildcard subscription or for receiving retained messages
+ // upon connection (before Subscribe can be processed).
+ //
+ // If options.OrderMatters is true (the default) then callback must not block or
+ // call functions within this package that may block (e.g. Publish) other than in
+ // a new go routine.
+ // callback must be safe for concurrent use by multiple goroutines.
+ AddRoute(topic string, callback MessageHandler)
+ // OptionsReader returns a ClientOptionsReader which is a copy of the ClientOptions
+ // in use by the client.
+ OptionsReader() ClientOptionsReader
+}
+
+// client implements the Client interface
+// clients are safe for concurrent use by multiple
+// goroutines
+type client struct {
+ lastSent atomic.Value // time.Time - the last time a packet was successfully sent to network
+ lastReceived atomic.Value // time.Time - the last time a packet was successfully received from network
+ pingOutstanding int32 // set to 1 if a ping has been sent but response not yet received
+
+ status uint32 // see const definitions at top of file for possible values
+ sync.RWMutex // Protects the above two variables (note: atomic writes are also used somewhat inconsistently)
+
+ messageIds // effectively a map from message id to token completor
+
+ obound chan *PacketAndToken // outgoing publish packet
+ oboundP chan *PacketAndToken // outgoing 'priority' packet (anything other than publish)
+ msgRouter *router // routes topics to handlers
+ persist Store
+ options ClientOptions
+ optionsMu sync.Mutex // Protects the options in a few limited cases where needed for testing
+
+ conn net.Conn // the network connection, must only be set with connMu locked (only used when starting/stopping workers)
+ connMu sync.Mutex // mutex for the connection (again only used in two functions)
+
+ stop chan struct{} // Closed to request that workers stop
+ workers sync.WaitGroup // used to wait for workers to complete (ping, keepalive, errwatch, resume)
+ commsStopped chan struct{} // closed when the comms routines have stopped (kept running until after workers have closed to avoid deadlocks)
+}
+
+// NewClient will create an MQTT v3.1.1 client with all of the options specified
+// in the provided ClientOptions. The client must have the Connect method called
+// on it before it may be used. This is to make sure resources (such as a net
+// connection) are created before the application is actually ready.
+func NewClient(o *ClientOptions) Client {
+ c := &client{}
+ c.options = *o
+
+ if c.options.Store == nil {
+ c.options.Store = NewMemoryStore()
+ }
+ switch c.options.ProtocolVersion {
+ case 3, 4:
+ c.options.protocolVersionExplicit = true
+ case 0x83, 0x84:
+ c.options.protocolVersionExplicit = true
+ default:
+ c.options.ProtocolVersion = 4
+ c.options.protocolVersionExplicit = false
+ }
+ c.persist = c.options.Store
+ c.status = disconnected
+ c.messageIds = messageIds{index: make(map[uint16]tokenCompletor)}
+ c.msgRouter = newRouter()
+ c.msgRouter.setDefaultHandler(c.options.DefaultPublishHandler)
+ c.obound = make(chan *PacketAndToken)
+ c.oboundP = make(chan *PacketAndToken)
+ return c
+}
+
+// AddRoute allows you to add a handler for messages on a specific topic
+// without making a subscription. For example having a different handler
+// for parts of a wildcard subscription
+//
+// If options.OrderMatters is true (the default) then callback must not block or
+// call functions within this package that may block (e.g. Publish) other than in
+// a new go routine.
+// callback must be safe for concurrent use by multiple goroutines.
+func (c *client) AddRoute(topic string, callback MessageHandler) {
+ if callback != nil {
+ c.msgRouter.addRoute(topic, callback)
+ }
+}
+
+// IsConnected returns a bool signifying whether
+// the client is connected or not.
+// connected means that the connection is up now OR it will
+// be established/reestablished automatically when possible
+func (c *client) IsConnected() bool {
+ c.RLock()
+ defer c.RUnlock()
+ status := atomic.LoadUint32(&c.status)
+ switch {
+ case status == connected:
+ return true
+ case c.options.AutoReconnect && status > connecting:
+ return true
+ case c.options.ConnectRetry && status == connecting:
+ return true
+ default:
+ return false
+ }
+}
+
+// IsConnectionOpen returns a bool signifying whether the client has an active
+// connection to the mqtt broker, i.e. not in disconnected or reconnect mode
+func (c *client) IsConnectionOpen() bool {
+ c.RLock()
+ defer c.RUnlock()
+ status := atomic.LoadUint32(&c.status)
+ switch {
+ case status == connected:
+ return true
+ default:
+ return false
+ }
+}
+
+func (c *client) connectionStatus() uint32 {
+ c.RLock()
+ defer c.RUnlock()
+ status := atomic.LoadUint32(&c.status)
+ return status
+}
+
+func (c *client) setConnected(status uint32) {
+ c.Lock()
+ defer c.Unlock()
+ atomic.StoreUint32(&c.status, status)
+}
+
+// ErrNotConnected is the error returned from function calls that are
+// made when the client is not connected to a broker
+var ErrNotConnected = errors.New("not Connected")
+
+// Connect will create a connection to the message broker; by default
+// it will attempt to connect at v3.1.1 and auto retry at v3.1 if that
+// fails
+// Note: If using QOS1+ and CleanSession=false it is advisable to add
+// routes (or a DefaultPublishHandler) prior to calling Connect()
+// because queued messages may be delivered immediately post connection
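+// For example (the topic and handler below are illustrative only):
+//
+//	c.AddRoute("sensors/#", func(_ Client, m Message) { /* process m */ })
+//	token := c.Connect()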
+func (c *client) Connect() Token {
+ t := newToken(packets.Connect).(*ConnectToken)
+ DEBUG.Println(CLI, "Connect()")
+
+ if c.options.ConnectRetry && atomic.LoadUint32(&c.status) != disconnected {
+ // if in any state other than disconnected and ConnectRetry is
+ // enabled then the connection will come up automatically
+ // client can assume connection is up
+ WARN.Println(CLI, "Connect() called but not disconnected")
+ t.returnCode = packets.Accepted
+ t.flowComplete()
+ return t
+ }
+
+ c.persist.Open()
+ if c.options.ConnectRetry {
+ c.reserveStoredPublishIDs() // Reserve IDs to allow publish before connect complete
+ }
+ c.setConnected(connecting)
+
+ go func() {
+ if len(c.options.Servers) == 0 {
+ t.setError(fmt.Errorf("no servers defined to connect to"))
+ return
+ }
+
+ RETRYCONN:
+ var conn net.Conn
+ var rc byte
+ var err error
+ conn, rc, t.sessionPresent, err = c.attemptConnection()
+ if err != nil {
+ if c.options.ConnectRetry {
+ DEBUG.Println(CLI, "Connect failed, sleeping for", int(c.options.ConnectRetryInterval.Seconds()), "seconds and will then retry")
+ time.Sleep(c.options.ConnectRetryInterval)
+
+ if atomic.LoadUint32(&c.status) == connecting {
+ goto RETRYCONN
+ }
+ }
+ ERROR.Println(CLI, "Failed to connect to a broker")
+ c.setConnected(disconnected)
+ c.persist.Close()
+ t.returnCode = rc
+ t.setError(err)
+ return
+ }
+ inboundFromStore := make(chan packets.ControlPacket) // there may be some inbound comms packets in the store that are awaiting processing
+ if c.startCommsWorkers(conn, inboundFromStore) {
+ // Take care of any messages in the store
+ if !c.options.CleanSession {
+ c.resume(c.options.ResumeSubs, inboundFromStore)
+ } else {
+ c.persist.Reset()
+ }
+ } else {
+ WARN.Println(CLI, "Connect() called but connection established in another goroutine")
+ }
+
+ close(inboundFromStore)
+ t.flowComplete()
+ DEBUG.Println(CLI, "exit startClient")
+ }()
+ return t
+}
+
+// internal function used to reconnect the client when it loses its connection
+func (c *client) reconnect() {
+ DEBUG.Println(CLI, "enter reconnect")
+ var (
+ sleep = 1 * time.Second
+ conn net.Conn
+ )
+
+ for {
+ if nil != c.options.OnReconnecting {
+ c.options.OnReconnecting(c, &c.options)
+ }
+ var err error
+ conn, _, _, err = c.attemptConnection()
+ if err == nil {
+ break
+ }
+ DEBUG.Println(CLI, "Reconnect failed, sleeping for", int(sleep.Seconds()), "seconds:", err)
+ time.Sleep(sleep)
+ if sleep < c.options.MaxReconnectInterval {
+ sleep *= 2
+ }
+
+ if sleep > c.options.MaxReconnectInterval {
+ sleep = c.options.MaxReconnectInterval
+ }
+ // Disconnect may have been called
+ if atomic.LoadUint32(&c.status) == disconnected {
+ break
+ }
+ }
+
+ // Disconnect() must have been called while we were trying to reconnect.
+ if c.connectionStatus() == disconnected {
+ if conn != nil {
+ conn.Close()
+ }
+ DEBUG.Println(CLI, "Client moved to disconnected state while reconnecting, abandoning reconnect")
+ return
+ }
+
+ inboundFromStore := make(chan packets.ControlPacket) // there may be some inbound comms packets in the store that are awaiting processing
+ if c.startCommsWorkers(conn, inboundFromStore) {
+ c.resume(c.options.ResumeSubs, inboundFromStore)
+ }
+ close(inboundFromStore)
+}
+
+// attemptConnection makes a single attempt to connect to each of the brokers
+// the protocol version to use is passed in (as c.options.ProtocolVersion)
+// Note: Does not set c.conn in order to minimise race conditions
+// Returns:
+// net.Conn - Connected network connection
+// byte - Return code (packets.Accepted indicates a successful connection).
+// bool - SessionPresent flag from the connect ack (only valid if packets.Accepted)
+// err - Error (err == nil guarantees that conn has been set to an active connection).
+func (c *client) attemptConnection() (net.Conn, byte, bool, error) {
+ protocolVersion := c.options.ProtocolVersion
+ var (
+ sessionPresent bool
+ conn net.Conn
+ err error
+ rc byte
+ )
+
+ c.optionsMu.Lock() // Protect c.options.Servers so that servers can be added in test cases
+ brokers := c.options.Servers
+ c.optionsMu.Unlock()
+ for _, broker := range brokers {
+ cm := newConnectMsgFromOptions(&c.options, broker)
+ DEBUG.Println(CLI, "about to write new connect msg")
+ CONN:
+ tlsCfg := c.options.TLSConfig
+ if c.options.OnConnectAttempt != nil {
+ DEBUG.Println(CLI, "using custom onConnectAttempt handler...")
+ tlsCfg = c.options.OnConnectAttempt(broker, c.options.TLSConfig)
+ }
+ // Start by opening the network connection (tcp, tls, ws) etc
+ conn, err = openConnection(broker, tlsCfg, c.options.ConnectTimeout, c.options.HTTPHeaders, c.options.WebsocketOptions)
+ if err != nil {
+ ERROR.Println(CLI, err.Error())
+ WARN.Println(CLI, "failed to connect to broker, trying next")
+ rc = packets.ErrNetworkError
+ continue
+ }
+ DEBUG.Println(CLI, "socket connected to broker")
+
+ // Now we perform the MQTT connection handshake
+ rc, sessionPresent, err = connectMQTT(conn, cm, protocolVersion)
+ if rc == packets.Accepted {
+ break // successfully connected
+ }
+
+ // We may have to attempt the connection with MQTT 3.1
+ if conn != nil {
+ _ = conn.Close()
+ }
+ if !c.options.protocolVersionExplicit && protocolVersion == 4 { // try falling back to 3.1?
+ DEBUG.Println(CLI, "Trying reconnect using MQTT 3.1 protocol")
+ protocolVersion = 3
+ goto CONN
+ }
+ if c.options.protocolVersionExplicit { // to maintain logging from previous version
+ ERROR.Println(CLI, "Connecting to", broker, "CONNACK was not CONN_ACCEPTED, but rather", packets.ConnackReturnCodes[rc])
+ }
+ }
+ // If the connection was successful we set the member variable and lock in the protocol version for future connection attempts (and users)
+ if rc == packets.Accepted {
+ c.options.ProtocolVersion = protocolVersion
+ c.options.protocolVersionExplicit = true
+ } else {
+ // Maintain same error format as used previously
+ if rc != packets.ErrNetworkError { // mqtt error
+ err = packets.ConnErrors[rc]
+ } else { // network error (if this occurred in ConnectMQTT then err will be nil)
+ err = fmt.Errorf("%s : %s", packets.ConnErrors[rc], err)
+ }
+ }
+ return conn, rc, sessionPresent, err
+}
+
+// Disconnect will end the connection with the server, but not before waiting
+// the specified number of milliseconds for existing work to be
+// completed.
+func (c *client) Disconnect(quiesce uint) {
+ status := atomic.LoadUint32(&c.status)
+ if status == connected {
+ DEBUG.Println(CLI, "disconnecting")
+ c.setConnected(disconnected)
+
+ dm := packets.NewControlPacket(packets.Disconnect).(*packets.DisconnectPacket)
+ dt := newToken(packets.Disconnect)
+ disconnectSent := false
+ select {
+ case c.oboundP <- &PacketAndToken{p: dm, t: dt}:
+ disconnectSent = true
+ case <-c.commsStopped:
+ WARN.Println("Disconnect packet could not be sent because comms stopped")
+ case <-time.After(time.Duration(quiesce) * time.Millisecond):
+ WARN.Println("Disconnect packet not sent due to timeout")
+ }
+
+ // wait for work to finish, or quiesce time consumed
+ if disconnectSent {
+ DEBUG.Println(CLI, "calling WaitTimeout")
+ dt.WaitTimeout(time.Duration(quiesce) * time.Millisecond)
+ DEBUG.Println(CLI, "WaitTimeout done")
+ }
+ } else {
+ WARN.Println(CLI, "Disconnect() called but not connected (disconnected/reconnecting)")
+ c.setConnected(disconnected)
+ }
+
+ c.disconnect()
+}
+
+// forceDisconnect will end the connection with the mqtt broker immediately (used for tests only)
+func (c *client) forceDisconnect() {
+ if !c.IsConnected() {
+ WARN.Println(CLI, "already disconnected")
+ return
+ }
+ c.setConnected(disconnected)
+ DEBUG.Println(CLI, "forcefully disconnecting")
+ c.disconnect()
+}
+
+// disconnect cleans up after a final disconnection (user requested so no auto reconnection)
+func (c *client) disconnect() {
+ done := c.stopCommsWorkers()
+ if done != nil {
+ <-done // Wait until the disconnect is complete (to limit chance that another connection will be started)
+ DEBUG.Println(CLI, "forcefully disconnecting")
+ c.messageIds.cleanUp()
+ DEBUG.Println(CLI, "disconnected")
+ c.persist.Close()
+ }
+}
+
+// internalConnLost cleanup when connection is lost or an error occurs
+// Note: This function will not block
+func (c *client) internalConnLost(err error) {
+ // It is possible that internalConnLost will be called multiple times simultaneously
+ // (including after sending a DisconnectPacket) as such we only do cleanup etc if the
+ // routines were actually running and are not being disconnected at users request
+ DEBUG.Println(CLI, "internalConnLost called")
+ stopDone := c.stopCommsWorkers()
+ if stopDone != nil { // stopDone will be nil if workers already in the process of stopping or stopped
+ go func() {
+ DEBUG.Println(CLI, "internalConnLost waiting on workers")
+ <-stopDone
+ DEBUG.Println(CLI, "internalConnLost workers stopped")
+ // It is possible that Disconnect was called which led to this error so reconnection depends upon status
+ reconnect := c.options.AutoReconnect && c.connectionStatus() > connecting
+
+ if c.options.CleanSession && !reconnect {
+ c.messageIds.cleanUp()
+ }
+ if reconnect {
+ c.setConnected(reconnecting)
+ go c.reconnect()
+ } else {
+ c.setConnected(disconnected)
+ }
+ if c.options.OnConnectionLost != nil {
+ go c.options.OnConnectionLost(c, err)
+ }
+ DEBUG.Println(CLI, "internalConnLost complete")
+ }()
+ }
+}
+
+// startCommsWorkers is called when the connection is up.
+// It starts off all of the routines needed to process incoming and outgoing messages.
+// Returns true if the comms workers were started (i.e. they were not already running)
+func (c *client) startCommsWorkers(conn net.Conn, inboundFromStore <-chan packets.ControlPacket) bool {
+ DEBUG.Println(CLI, "startCommsWorkers called")
+ c.connMu.Lock()
+ defer c.connMu.Unlock()
+ if c.conn != nil {
+ WARN.Println(CLI, "startCommsWorkers called when commsworkers already running")
+ conn.Close() // No use for the new network connection
+ return false
+ }
+ c.conn = conn // Store the connection
+
+ c.stop = make(chan struct{})
+ if c.options.KeepAlive != 0 {
+ atomic.StoreInt32(&c.pingOutstanding, 0)
+ c.lastReceived.Store(time.Now())
+ c.lastSent.Store(time.Now())
+ c.workers.Add(1)
+ go keepalive(c, conn)
+ }
+
+ // matchAndDispatch will process messages received from the network. It may generate acknowledgements
+ // It will complete when incomingPubChan is closed and will close ackOut prior to exiting
+ incomingPubChan := make(chan *packets.PublishPacket)
+ c.workers.Add(1) // Done will be called when ackOut is closed
+ ackOut := c.msgRouter.matchAndDispatch(incomingPubChan, c.options.Order, c)
+
+ c.setConnected(connected)
+ DEBUG.Println(CLI, "client is connected/reconnected")
+ if c.options.OnConnect != nil {
+ go c.options.OnConnect(c)
+ }
+
+ // c.oboundP and c.obound need to stay active for the life of the client because, depending upon the options,
+ // messages may be published while the client is disconnected (they will block unless in a goroutine). However,
+ // to keep the comms routines clean we want to shut down the input messages it uses, so create our own channels
+ // and copy data across.
+ commsobound := make(chan *PacketAndToken) // outgoing publish packets
+ commsoboundP := make(chan *PacketAndToken) // outgoing 'priority' packet
+ c.workers.Add(1)
+ go func() {
+ defer c.workers.Done()
+ for {
+ select {
+ case msg := <-c.oboundP:
+ commsoboundP <- msg
+ case msg := <-c.obound:
+ commsobound <- msg
+ case msg, ok := <-ackOut:
+ if !ok {
+ ackOut = nil // ignore channel going forward
+ c.workers.Done() // matchAndDispatch has completed
+ continue // await next message
+ }
+ commsoboundP <- msg
+ case <-c.stop:
+ // Attempt to transmit any outstanding acknowledgements (this may well fail but should work if this is a clean disconnect)
+ if ackOut != nil {
+ for msg := range ackOut {
+ commsoboundP <- msg
+ }
+ c.workers.Done() // matchAndDispatch has completed
+ }
+ close(commsoboundP) // Nothing sending to these channels anymore so close them and allow comms routines to exit
+ close(commsobound)
+ DEBUG.Println(CLI, "startCommsWorkers output redirector finished")
+ return
+ }
+ }
+ }()
+
+ commsIncomingPub, commsErrors := startComms(c.conn, c, inboundFromStore, commsoboundP, commsobound)
+ c.commsStopped = make(chan struct{})
+ go func() {
+ for {
+ if commsIncomingPub == nil && commsErrors == nil {
+ break
+ }
+ select {
+ case pub, ok := <-commsIncomingPub:
+ if !ok {
+ // Incoming comms has shutdown
+ close(incomingPubChan) // stop the router
+ commsIncomingPub = nil
+ continue
+ }
+ // Care is needed here because an error elsewhere could trigger a deadlock
+ sendPubLoop:
+ for {
+ select {
+ case incomingPubChan <- pub:
+ break sendPubLoop
+ case err, ok := <-commsErrors:
+ if !ok { // commsErrors has been closed so we can ignore it
+ commsErrors = nil
+ continue
+ }
+ ERROR.Println(CLI, "Connect comms goroutine - error triggered during send Pub", err)
+ c.internalConnLost(err) // no harm in calling this if the connection is already down (or shutdown is in progress)
+ continue
+ }
+ }
+ case err, ok := <-commsErrors:
+ if !ok {
+ commsErrors = nil
+ continue
+ }
+ ERROR.Println(CLI, "Connect comms goroutine - error triggered", err)
+ c.internalConnLost(err) // no harm in calling this if the connection is already down (or shutdown is in progress)
+ continue
+ }
+ }
+ DEBUG.Println(CLI, "incoming comms goroutine done")
+ close(c.commsStopped)
+ }()
+ DEBUG.Println(CLI, "startCommsWorkers done")
+ return true
+}
+
+// stopCommsWorkers - Cleanly shuts down worker go routines (including the comms routines) and waits until everything has stopped
+// Returns nil if workers did not need to be stopped; otherwise returns a channel which will be closed when the stop is complete
+// Note: This may block so run as a go routine if calling from any of the comms routines
+func (c *client) stopCommsWorkers() chan struct{} {
+ DEBUG.Println(CLI, "stopCommsWorkers called")
+ // It is possible that this function will be called multiple times simultaneously due to the way things get shutdown
+ c.connMu.Lock()
+ if c.conn == nil {
+ DEBUG.Println(CLI, "stopCommsWorkers done (not running)")
+ c.connMu.Unlock()
+ return nil
+ }
+
+ // It is important that everything is stopped in the correct order to avoid deadlocks. The main issue here is
+ // the router because it both receives incoming publish messages and also sends outgoing acknowledgements. To
+ // avoid issues we signal the workers to stop and close the connection (it is probably already closed but
+ // there is no harm in being sure). We can then wait for the workers to finish before closing outbound comms
+ // channels which will allow the comms routines to exit.
+
+ // We stop all non-comms related workers first (ping, keepalive, errwatch, resume etc) so they don't get blocked waiting on comms
+ close(c.stop) // Signal for workers to stop
+ c.conn.Close() // Possible that this is already closed but no harm in closing again
+ c.conn = nil // Important that this is the only place that this is set to nil
+ c.connMu.Unlock() // As the connection is now nil we can unlock the mu (allowing subsequent calls to exit immediately)
+
+ doneChan := make(chan struct{})
+
+ go func() {
+ DEBUG.Println(CLI, "stopCommsWorkers waiting for workers")
+ c.workers.Wait()
+
+ // Stopping the workers will allow the comms routines to exit; we wait for these to complete
+ DEBUG.Println(CLI, "stopCommsWorkers waiting for comms")
+ <-c.commsStopped // wait for comms routine to stop
+
+ DEBUG.Println(CLI, "stopCommsWorkers done")
+ close(doneChan)
+ }()
+ return doneChan
+}
+
+// Publish will publish a message with the specified QoS and content
+// to the specified topic.
+// Returns a token to track delivery of the message to the broker
+func (c *client) Publish(topic string, qos byte, retained bool, payload interface{}) Token {
+ token := newToken(packets.Publish).(*PublishToken)
+ DEBUG.Println(CLI, "enter Publish")
+ switch {
+ case !c.IsConnected():
+ token.setError(ErrNotConnected)
+ return token
+ case c.connectionStatus() == reconnecting && qos == 0:
+ token.flowComplete()
+ return token
+ }
+ pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
+ pub.Qos = qos
+ pub.TopicName = topic
+ pub.Retain = retained
+ switch p := payload.(type) {
+ case string:
+ pub.Payload = []byte(p)
+ case []byte:
+ pub.Payload = p
+ case bytes.Buffer:
+ pub.Payload = p.Bytes()
+ default:
+ token.setError(fmt.Errorf("unknown payload type"))
+ return token
+ }
+
+ if pub.Qos != 0 && pub.MessageID == 0 {
+ mID := c.getID(token)
+ if mID == 0 {
+ token.setError(fmt.Errorf("no message IDs available"))
+ return token
+ }
+ pub.MessageID = mID
+ token.messageID = mID
+ }
+ persistOutbound(c.persist, pub)
+ switch c.connectionStatus() {
+ case connecting:
+ DEBUG.Println(CLI, "storing publish message (connecting), topic:", topic)
+ case reconnecting:
+ DEBUG.Println(CLI, "storing publish message (reconnecting), topic:", topic)
+ default:
+ DEBUG.Println(CLI, "sending publish message, topic:", topic)
+ publishWaitTimeout := c.options.WriteTimeout
+ if publishWaitTimeout == 0 {
+ publishWaitTimeout = time.Second * 30
+ }
+ select {
+ case c.obound <- &PacketAndToken{p: pub, t: token}:
+ case <-time.After(publishWaitTimeout):
+ token.setError(errors.New("publish was broken by timeout"))
+ }
+ }
+ return token
+}
+
+// Subscribe starts a new subscription. Provide a MessageHandler to be executed when
+// a message is published on the topic provided.
+//
+// If options.OrderMatters is true (the default) then callback must not block or
+// call functions within this package that may block (e.g. Publish) other than in
+// a new go routine.
+// callback must be safe for concurrent use by multiple goroutines.
+func (c *client) Subscribe(topic string, qos byte, callback MessageHandler) Token {
+ token := newToken(packets.Subscribe).(*SubscribeToken)
+ DEBUG.Println(CLI, "enter Subscribe")
+ if !c.IsConnected() {
+ token.setError(ErrNotConnected)
+ return token
+ }
+ if !c.IsConnectionOpen() {
+ switch {
+ case !c.options.ResumeSubs:
+ // if not connected and resumesubs not set this sub will be thrown away
+ token.setError(fmt.Errorf("not currently connected and ResumeSubs not set"))
+ return token
+ case c.options.CleanSession && c.connectionStatus() == reconnecting:
+ // if reconnecting and cleansession is true this sub will be thrown away
+ token.setError(fmt.Errorf("reconnecting state and cleansession is true"))
+ return token
+ }
+ }
+ sub := packets.NewControlPacket(packets.Subscribe).(*packets.SubscribePacket)
+ if err := validateTopicAndQos(topic, qos); err != nil {
+ token.setError(err)
+ return token
+ }
+ sub.Topics = append(sub.Topics, topic)
+ sub.Qoss = append(sub.Qoss, qos)
+
+ if strings.HasPrefix(topic, "$share/") {
+ topic = strings.Join(strings.Split(topic, "/")[2:], "/")
+ }
+
+ if strings.HasPrefix(topic, "$queue/") {
+ topic = strings.TrimPrefix(topic, "$queue/")
+ }
+
+ if callback != nil {
+ c.msgRouter.addRoute(topic, callback)
+ }
+
+ token.subs = append(token.subs, topic)
+
+ if sub.MessageID == 0 {
+ mID := c.getID(token)
+ if mID == 0 {
+ token.setError(fmt.Errorf("no message IDs available"))
+ return token
+ }
+ sub.MessageID = mID
+ token.messageID = mID
+ }
+ DEBUG.Println(CLI, sub.String())
+
+ persistOutbound(c.persist, sub)
+ switch c.connectionStatus() {
+ case connecting:
+ DEBUG.Println(CLI, "storing subscribe message (connecting), topic:", topic)
+ case reconnecting:
+ DEBUG.Println(CLI, "storing subscribe message (reconnecting), topic:", topic)
+ default:
+ DEBUG.Println(CLI, "sending subscribe message, topic:", topic)
+ subscribeWaitTimeout := c.options.WriteTimeout
+ if subscribeWaitTimeout == 0 {
+ subscribeWaitTimeout = time.Second * 30
+ }
+ select {
+ case c.oboundP <- &PacketAndToken{p: sub, t: token}:
+ case <-time.After(subscribeWaitTimeout):
+ token.setError(errors.New("subscribe was broken by timeout"))
+ }
+ }
+ DEBUG.Println(CLI, "exit Subscribe")
+ return token
+}
+
+// SubscribeMultiple starts a new subscription for multiple topics. Provide a MessageHandler to
+// be executed when a message is published on one of the topics provided.
+//
+// If options.OrderMatters is true (the default) then callback must not block or
+// call functions within this package that may block (e.g. Publish) other than in
+// a new go routine.
+// callback must be safe for concurrent use by multiple goroutines.
+func (c *client) SubscribeMultiple(filters map[string]byte, callback MessageHandler) Token {
+ var err error
+ token := newToken(packets.Subscribe).(*SubscribeToken)
+ DEBUG.Println(CLI, "enter SubscribeMultiple")
+ if !c.IsConnected() {
+ token.setError(ErrNotConnected)
+ return token
+ }
+ if !c.IsConnectionOpen() {
+ switch {
+ case !c.options.ResumeSubs:
+ // if not connected and resumesubs not set this sub will be thrown away
+ token.setError(fmt.Errorf("not currently connected and ResumeSubs not set"))
+ return token
+ case c.options.CleanSession && c.connectionStatus() == reconnecting:
+ // if reconnecting and cleansession is true this sub will be thrown away
+ token.setError(fmt.Errorf("reconnecting state and cleansession is true"))
+ return token
+ }
+ }
+ sub := packets.NewControlPacket(packets.Subscribe).(*packets.SubscribePacket)
+ if sub.Topics, sub.Qoss, err = validateSubscribeMap(filters); err != nil {
+ token.setError(err)
+ return token
+ }
+
+ if callback != nil {
+ for topic := range filters {
+ c.msgRouter.addRoute(topic, callback)
+ }
+ }
+ token.subs = make([]string, len(sub.Topics))
+ copy(token.subs, sub.Topics)
+
+ if sub.MessageID == 0 {
+ mID := c.getID(token)
+ if mID == 0 {
+ token.setError(fmt.Errorf("no message IDs available"))
+ return token
+ }
+ sub.MessageID = mID
+ token.messageID = mID
+ }
+ persistOutbound(c.persist, sub)
+ switch c.connectionStatus() {
+ case connecting:
+ DEBUG.Println(CLI, "storing subscribe message (connecting), topics:", sub.Topics)
+ case reconnecting:
+ DEBUG.Println(CLI, "storing subscribe message (reconnecting), topics:", sub.Topics)
+ default:
+ DEBUG.Println(CLI, "sending subscribe message, topics:", sub.Topics)
+ subscribeWaitTimeout := c.options.WriteTimeout
+ if subscribeWaitTimeout == 0 {
+ subscribeWaitTimeout = time.Second * 30
+ }
+ select {
+ case c.oboundP <- &PacketAndToken{p: sub, t: token}:
+ case <-time.After(subscribeWaitTimeout):
+ token.setError(errors.New("subscribe was broken by timeout"))
+ }
+ }
+ DEBUG.Println(CLI, "exit SubscribeMultiple")
+ return token
+}
+
+// reserveStoredPublishIDs reserves the ids for publish packets in the persistent store to ensure these are not duplicated
+func (c *client) reserveStoredPublishIDs() {
+ // The resume function sets the stored id for publish packets only (some other packets
+ // will get new ids in net code). This means that the only keys we need to ensure are
+ // unique are the publish ones (and these will be completed/replaced in resume() )
+ if !c.options.CleanSession {
+ storedKeys := c.persist.All()
+ for _, key := range storedKeys {
+ packet := c.persist.Get(key)
+ if packet == nil {
+ continue
+ }
+ switch packet.(type) {
+ case *packets.PublishPacket:
+ details := packet.Details()
+ token := &PlaceHolderToken{id: details.MessageID}
+ c.claimID(token, details.MessageID)
+ }
+ }
+ }
+}
+
+// Load all stored messages and resend them
+// Call this to preserve QoS 1 and 2 delivery guarantees even after an application crash
+// Note: This function will exit if c.stop is closed (this allows the shutdown to proceed avoiding a potential deadlock)
+//
+func (c *client) resume(subscription bool, ibound chan packets.ControlPacket) {
+ DEBUG.Println(STR, "enter Resume")
+
+ storedKeys := c.persist.All()
+ for _, key := range storedKeys {
+ packet := c.persist.Get(key)
+ if packet == nil {
+ DEBUG.Println(STR, fmt.Sprintf("resume found NIL packet (%s)", key))
+ continue
+ }
+ details := packet.Details()
+ if isKeyOutbound(key) {
+ switch p := packet.(type) {
+ case *packets.SubscribePacket:
+ if subscription {
+ DEBUG.Println(STR, fmt.Sprintf("loaded pending subscribe (%d)", details.MessageID))
+ subPacket := packet.(*packets.SubscribePacket)
+ token := newToken(packets.Subscribe).(*SubscribeToken)
+ token.messageID = details.MessageID
+ token.subs = append(token.subs, subPacket.Topics...)
+ c.claimID(token, details.MessageID)
+ select {
+ case c.oboundP <- &PacketAndToken{p: packet, t: token}:
+ case <-c.stop:
+ DEBUG.Println(STR, "resume exiting due to stop")
+ return
+ }
+ } else {
+ c.persist.Del(key) // Unsubscribe packets should not be retained following a reconnect
+ }
+ case *packets.UnsubscribePacket:
+ if subscription {
+ DEBUG.Println(STR, fmt.Sprintf("loaded pending unsubscribe (%d)", details.MessageID))
+ token := newToken(packets.Unsubscribe).(*UnsubscribeToken)
+ select {
+ case c.oboundP <- &PacketAndToken{p: packet, t: token}:
+ case <-c.stop:
+ DEBUG.Println(STR, "resume exiting due to stop")
+ return
+ }
+ } else {
+ c.persist.Del(key) // Unsubscribe packets should not be retained following a reconnect
+ }
+ case *packets.PubrelPacket:
+ DEBUG.Println(STR, fmt.Sprintf("loaded pending pubrel (%d)", details.MessageID))
+ select {
+ case c.oboundP <- &PacketAndToken{p: packet, t: nil}:
+ case <-c.stop:
+ DEBUG.Println(STR, "resume exiting due to stop")
+ return
+ }
+ case *packets.PublishPacket:
+ // spec: If the DUP flag is set to 0, it indicates that this is the first occasion that the Client or
+ // Server has attempted to send this MQTT PUBLISH Packet. If the DUP flag is set to 1, it indicates that
+ // this might be re-delivery of an earlier attempt to send the Packet.
+ //
+ // If the message is in the store then an attempt at delivery has been made (note that the message may
+ // never have made it onto the wire but tracking that would be complicated!).
+ if p.Qos != 0 { // spec: The DUP flag MUST be set to 0 for all QoS 0 messages
+ p.Dup = true
+ }
+ token := newToken(packets.Publish).(*PublishToken)
+ token.messageID = details.MessageID
+ c.claimID(token, details.MessageID)
+ DEBUG.Println(STR, fmt.Sprintf("loaded pending publish (%d)", details.MessageID))
+ DEBUG.Println(STR, details)
+ select {
+ case c.obound <- &PacketAndToken{p: p, t: token}:
+ case <-c.stop:
+ DEBUG.Println(STR, "resume exiting due to stop")
+ return
+ }
+ default:
+ ERROR.Println(STR, "invalid message type in store (discarded)")
+ c.persist.Del(key)
+ }
+ } else {
+ switch packet.(type) {
+ case *packets.PubrelPacket:
+ DEBUG.Println(STR, fmt.Sprintf("loaded pending incoming (%d)", details.MessageID))
+ select {
+ case ibound <- packet:
+ case <-c.stop:
+ DEBUG.Println(STR, "resume exiting due to stop (ibound <- packet)")
+ return
+ }
+ default:
+ ERROR.Println(STR, "invalid message type in store (discarded)")
+ c.persist.Del(key)
+ }
+ }
+ }
+ DEBUG.Println(STR, "exit resume")
+}
+
+// Unsubscribe will end the subscription from each of the topics provided.
+// Messages published to those topics from other clients will no longer be
+// received.
+func (c *client) Unsubscribe(topics ...string) Token {
+ token := newToken(packets.Unsubscribe).(*UnsubscribeToken)
+ DEBUG.Println(CLI, "enter Unsubscribe")
+ if !c.IsConnected() {
+ token.setError(ErrNotConnected)
+ return token
+ }
+ if !c.IsConnectionOpen() {
+ switch {
+ case !c.options.ResumeSubs:
+ // if not connected and resumesubs not set this unsub will be thrown away
+ token.setError(fmt.Errorf("not currently connected and ResumeSubs not set"))
+ return token
+ case c.options.CleanSession && c.connectionStatus() == reconnecting:
+ // if reconnecting and cleansession is true this unsub will be thrown away
+ token.setError(fmt.Errorf("reconnecting state and cleansession is true"))
+ return token
+ }
+ }
+ unsub := packets.NewControlPacket(packets.Unsubscribe).(*packets.UnsubscribePacket)
+ unsub.Topics = make([]string, len(topics))
+ copy(unsub.Topics, topics)
+
+ if unsub.MessageID == 0 {
+ mID := c.getID(token)
+ if mID == 0 {
+ token.setError(fmt.Errorf("no message IDs available"))
+ return token
+ }
+ unsub.MessageID = mID
+ token.messageID = mID
+ }
+
+ persistOutbound(c.persist, unsub)
+
+ switch c.connectionStatus() {
+ case connecting:
+ DEBUG.Println(CLI, "storing unsubscribe message (connecting), topics:", topics)
+ case reconnecting:
+ DEBUG.Println(CLI, "storing unsubscribe message (reconnecting), topics:", topics)
+ default:
+ DEBUG.Println(CLI, "sending unsubscribe message, topics:", topics)
+ subscribeWaitTimeout := c.options.WriteTimeout
+ if subscribeWaitTimeout == 0 {
+ subscribeWaitTimeout = time.Second * 30
+ }
+ select {
+ case c.oboundP <- &PacketAndToken{p: unsub, t: token}:
+ for _, topic := range topics {
+ c.msgRouter.deleteRoute(topic)
+ }
+ case <-time.After(subscribeWaitTimeout):
+ token.setError(errors.New("unsubscribe was broken by timeout"))
+ }
+ }
+
+ DEBUG.Println(CLI, "exit Unsubscribe")
+ return token
+}
+
+// OptionsReader returns a ClientOptionsReader which is a copy of the ClientOptions
+// in use by the client.
+func (c *client) OptionsReader() ClientOptionsReader {
+ r := ClientOptionsReader{options: &c.options}
+ return r
+}
+
+// DefaultConnectionLostHandler is a definition of a function that simply
+// reports to the DEBUG log the reason for the client losing a connection.
+func DefaultConnectionLostHandler(client Client, reason error) {
+ DEBUG.Println("Connection lost:", reason.Error())
+}
+
+// UpdateLastReceived - Will be called whenever a packet is received off the network
+// This is used by the keepalive routine to track when traffic was last received.
+func (c *client) UpdateLastReceived() {
+ if c.options.KeepAlive != 0 {
+ c.lastReceived.Store(time.Now())
+ }
+}
+
+// UpdateLastSent - Will be called whenever a packet is successfully transmitted to the network
+func (c *client) UpdateLastSent() {
+ if c.options.KeepAlive != 0 {
+ c.lastSent.Store(time.Now())
+ }
+}
+
+// getWriteTimeOut returns the writetimeout (duration to wait when writing to the connection) or 0 if none
+func (c *client) getWriteTimeOut() time.Duration {
+ return c.options.WriteTimeout
+}
+
+// persistOutbound adds the packet to the outbound store
+func (c *client) persistOutbound(m packets.ControlPacket) {
+ persistOutbound(c.persist, m)
+}
+
+// persistInbound adds the packet to the inbound store
+func (c *client) persistInbound(m packets.ControlPacket) {
+ persistInbound(c.persist, m)
+}
+
+// pingRespReceived will be called by the network routines when a ping response is received
+func (c *client) pingRespReceived() {
+ atomic.StoreInt32(&c.pingOutstanding, 0)
+}
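For reference, a minimal sketch of how the Unsubscribe call above is typically driven from application code. The broker address, client id and topic names are placeholders, not values taken from this repository:

package main

import (
    "log"
    "time"

    mqtt "github.com/eclipse/paho.mqtt.golang"
)

func main() {
    opts := mqtt.NewClientOptions().
        AddBroker("tcp://localhost:1883"). // placeholder broker
        SetClientID("example-unsub")       // placeholder client id
    client := mqtt.NewClient(opts)
    if token := client.Connect(); token.Wait() && token.Error() != nil {
        log.Fatal(token.Error())
    }

    // Unsubscribe returns immediately with a token; WaitTimeout bounds how
    // long we block waiting for the broker's UNSUBACK.
    token := client.Unsubscribe("sensors/temp", "sensors/humidity")
    if !token.WaitTimeout(5 * time.Second) {
        log.Println("unsubscribe timed out waiting for UNSUBACK")
    } else if token.Error() != nil {
        log.Println("unsubscribe failed:", token.Error())
    }
    client.Disconnect(250)
}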
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/components.go b/vendor/github.com/eclipse/paho.mqtt.golang/components.go
new file mode 100644
index 0000000..23ade20
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/components.go
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Seth Hoenig
+ * Allan Stockdill-Mander
+ * Mike Robertson
+ */
+
+package mqtt
+
+type component string
+
+// Component names for debug output
+const (
+ NET component = "[net] "
+ PNG component = "[pinger] "
+ CLI component = "[client] "
+ DEC component = "[decode] "
+ MES component = "[message] "
+ STR component = "[store] "
+ MID component = "[msgids] "
+ TST component = "[test] "
+ STA component = "[state] "
+ ERR component = "[error] "
+ ROU component = "[router] "
+)
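These component constants are only prefixes passed to the package's trace loggers (DEBUG, WARN, ERROR, CRITICAL). A minimal sketch of enabling that trace output, assuming the package-level logger variables accept a *log.Logger as in upstream paho:

package main

import (
    "log"
    "os"

    mqtt "github.com/eclipse/paho.mqtt.golang"
)

func main() {
    // Route the library's trace output to stderr; the component names above
    // ("[client] ", "[store] ", ...) are passed as the first argument to each
    // Println call, so they show up in every trace line.
    mqtt.DEBUG = log.New(os.Stderr, "", 0)
    mqtt.WARN = log.New(os.Stderr, "", 0)
    mqtt.ERROR = log.New(os.Stderr, "", 0)
    mqtt.CRITICAL = log.New(os.Stderr, "", 0)

    // ... build ClientOptions and connect as usual ...
}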
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/edl-v10 b/vendor/github.com/eclipse/paho.mqtt.golang/edl-v10
new file mode 100644
index 0000000..cf989f1
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/edl-v10
@@ -0,0 +1,15 @@
+
+Eclipse Distribution License - v 1.0
+
+Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors.
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+ Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+ Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+ Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/epl-v10 b/vendor/github.com/eclipse/paho.mqtt.golang/epl-v10
new file mode 100644
index 0000000..79e486c
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/epl-v10
@@ -0,0 +1,70 @@
+Eclipse Public License - v 1.0
+
+THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
+
+1. DEFINITIONS
+
+"Contribution" means:
+
+a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and
+b) in the case of each subsequent Contributor:
+i) changes to the Program, and
+ii) additions to the Program;
+where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program.
+"Contributor" means any person or entity that distributes the Program.
+
+"Licensed Patents" mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program.
+
+"Program" means the Contributions distributed in accordance with this Agreement.
+
+"Recipient" means anyone who receives the Program under this Agreement, including all Contributors.
+
+2. GRANT OF RIGHTS
+
+a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form.
+b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder.
+c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program.
+d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement.
+3. REQUIREMENTS
+
+A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that:
+
+a) it complies with the terms and conditions of this Agreement; and
+b) its license agreement:
+i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose;
+ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits;
+iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and
+iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange.
+When the Program is made available in source code form:
+
+a) it must be made available under this Agreement; and
+b) a copy of this Agreement must be included with each copy of the Program.
+Contributors may not remove or alter any copyright notices contained within the Program.
+
+Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution.
+
+4. COMMERCIAL DISTRIBUTION
+
+Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense.
+
+For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages.
+
+5. NO WARRANTY
+
+EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement , including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations.
+
+6. DISCLAIMER OF LIABILITY
+
+EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+
+7. GENERAL
+
+If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable.
+
+If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed.
+
+All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive.
+
+Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved.
+
+This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation.
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/filestore.go b/vendor/github.com/eclipse/paho.mqtt.golang/filestore.go
new file mode 100644
index 0000000..a4bc109
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/filestore.go
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Seth Hoenig
+ * Allan Stockdill-Mander
+ * Mike Robertson
+ */
+
+package mqtt
+
+import (
+ "io/ioutil"
+ "os"
+ "path"
+ "sort"
+ "sync"
+
+ "github.com/eclipse/paho.mqtt.golang/packets"
+)
+
+const (
+ msgExt = ".msg"
+ tmpExt = ".tmp"
+ corruptExt = ".CORRUPT"
+)
+
+// FileStore implements the store interface using the filesystem to provide
+// true persistence, even across client failure. This is designed to use a
+// single directory per running client. If you are running multiple clients
+// on the same filesystem, you will need to be careful to specify unique
+// store directories for each.
+type FileStore struct {
+ sync.RWMutex
+ directory string
+ opened bool
+}
+
+// NewFileStore will create a new FileStore which stores its messages in the
+// directory provided.
+func NewFileStore(directory string) *FileStore {
+ store := &FileStore{
+ directory: directory,
+ opened: false,
+ }
+ return store
+}
+
+// Open will allow the FileStore to be used.
+func (store *FileStore) Open() {
+ store.Lock()
+ defer store.Unlock()
+ // if no store directory was specified in ClientOpts, by default use the
+ // current working directory
+ if store.directory == "" {
+ store.directory, _ = os.Getwd()
+ }
+
+ // if store dir exists, great, otherwise, create it
+ if !exists(store.directory) {
+ perms := os.FileMode(0770)
+ merr := os.MkdirAll(store.directory, perms)
+ chkerr(merr)
+ }
+ store.opened = true
+ DEBUG.Println(STR, "store is opened at", store.directory)
+}
+
+// Close will disallow the FileStore from being used.
+func (store *FileStore) Close() {
+ store.Lock()
+ defer store.Unlock()
+ store.opened = false
+ DEBUG.Println(STR, "store is closed")
+}
+
+// Put will put a message into the store, associated with the provided
+// key value.
+func (store *FileStore) Put(key string, m packets.ControlPacket) {
+ store.Lock()
+ defer store.Unlock()
+ if !store.opened {
+ ERROR.Println(STR, "Trying to use file store, but not open")
+ return
+ }
+ full := fullpath(store.directory, key)
+ write(store.directory, key, m)
+ if !exists(full) {
+ ERROR.Println(STR, "file not created:", full)
+ }
+}
+
+// Get will retrieve a message from the store, the one associated with
+// the provided key value.
+func (store *FileStore) Get(key string) packets.ControlPacket {
+ store.RLock()
+ defer store.RUnlock()
+ if !store.opened {
+ ERROR.Println(STR, "trying to use file store, but not open")
+ return nil
+ }
+ filepath := fullpath(store.directory, key)
+ if !exists(filepath) {
+ return nil
+ }
+ mfile, oerr := os.Open(filepath)
+ chkerr(oerr)
+ msg, rerr := packets.ReadPacket(mfile)
+ chkerr(mfile.Close())
+
+ // Message was unreadable, return nil
+ if rerr != nil {
+ newpath := corruptpath(store.directory, key)
+ WARN.Println(STR, "corrupted file detected:", rerr.Error(), "archived at:", newpath)
+ if err := os.Rename(filepath, newpath); err != nil {
+ ERROR.Println(STR, err)
+ }
+ return nil
+ }
+ return msg
+}
+
+// All will provide a list of all of the keys associated with messages
+// currently residing in the FileStore.
+func (store *FileStore) All() []string {
+ store.RLock()
+ defer store.RUnlock()
+ return store.all()
+}
+
+// Del will remove the persisted message associated with the provided
+// key from the FileStore.
+func (store *FileStore) Del(key string) {
+ store.Lock()
+ defer store.Unlock()
+ store.del(key)
+}
+
+// Reset will remove all persisted messages from the FileStore.
+func (store *FileStore) Reset() {
+ store.Lock()
+ defer store.Unlock()
+ WARN.Println(STR, "FileStore Reset")
+ for _, key := range store.all() {
+ store.del(key)
+ }
+}
+
+// lockless
+func (store *FileStore) all() []string {
+ var err error
+ var keys []string
+ var files fileInfos
+
+ if !store.opened {
+ ERROR.Println(STR, "trying to use file store, but not open")
+ return nil
+ }
+
+ files, err = ioutil.ReadDir(store.directory)
+ chkerr(err)
+ sort.Sort(files)
+ for _, f := range files {
+ DEBUG.Println(STR, "file in All():", f.Name())
+ name := f.Name()
+ if name[len(name)-4:] != msgExt {
+ DEBUG.Println(STR, "skipping file, doesn't have right extension: ", name)
+ continue
+ }
+ key := name[0 : len(name)-4] // remove file extension
+ keys = append(keys, key)
+ }
+ return keys
+}
+
+// lockless
+func (store *FileStore) del(key string) {
+ if !store.opened {
+ ERROR.Println(STR, "trying to use file store, but not open")
+ return
+ }
+ DEBUG.Println(STR, "store del filepath:", store.directory)
+ DEBUG.Println(STR, "store delete key:", key)
+ filepath := fullpath(store.directory, key)
+ DEBUG.Println(STR, "path of deletion:", filepath)
+ if !exists(filepath) {
+ WARN.Println(STR, "store could not delete key:", key)
+ return
+ }
+ rerr := os.Remove(filepath)
+ chkerr(rerr)
+ DEBUG.Println(STR, "del msg:", key)
+ if exists(filepath) {
+ ERROR.Println(STR, "file not deleted:", filepath)
+ }
+}
+
+func fullpath(store string, key string) string {
+ p := path.Join(store, key+msgExt)
+ return p
+}
+
+func tmppath(store string, key string) string {
+ p := path.Join(store, key+tmpExt)
+ return p
+}
+
+func corruptpath(store string, key string) string {
+ p := path.Join(store, key+corruptExt)
+ return p
+}
+
+// write creates a file called "X.[messageid].tmp" in the store directory,
+// writes the bytes of the message as its contents, then renames it to
+// "X.[messageid].msg", overwriting any existing message with the same id.
+// X will be 'i' for inbound messages and 'o' for outbound messages.
+func write(store, key string, m packets.ControlPacket) {
+ temppath := tmppath(store, key)
+ f, err := os.Create(temppath)
+ chkerr(err)
+ werr := m.Write(f)
+ chkerr(werr)
+ cerr := f.Close()
+ chkerr(cerr)
+ rerr := os.Rename(temppath, fullpath(store, key))
+ chkerr(rerr)
+}
+
+func exists(file string) bool {
+ if _, err := os.Stat(file); err != nil {
+ if os.IsNotExist(err) {
+ return false
+ }
+ chkerr(err)
+ }
+ return true
+}
+
+type fileInfos []os.FileInfo
+
+func (f fileInfos) Len() int {
+ return len(f)
+}
+
+func (f fileInfos) Swap(i, j int) {
+ f[i], f[j] = f[j], f[i]
+}
+
+func (f fileInfos) Less(i, j int) bool {
+ return f[i].ModTime().Before(f[j].ModTime())
+}
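FileStore is normally handed to the client via ClientOptions rather than used directly. A sketch under that assumption; the store directory and broker address are placeholders, and each client instance needs its own directory, as the FileStore comment notes:

package main

import (
    "log"

    mqtt "github.com/eclipse/paho.mqtt.golang"
)

func main() {
    // Placeholder path; every running client must get its own store directory.
    store := mqtt.NewFileStore("/var/lib/myapp/mqtt-client-1")

    opts := mqtt.NewClientOptions().
        AddBroker("tcp://localhost:1883"). // placeholder broker
        SetClientID("client-1").
        SetCleanSession(false). // persistence only matters when the session is resumed
        SetStore(store)

    client := mqtt.NewClient(opts)
    if token := client.Connect(); token.Wait() && token.Error() != nil {
        log.Fatal(token.Error())
    }
    defer client.Disconnect(250)
}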
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/memstore.go b/vendor/github.com/eclipse/paho.mqtt.golang/memstore.go
new file mode 100644
index 0000000..d245a5f
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/memstore.go
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Seth Hoenig
+ * Allan Stockdill-Mander
+ * Mike Robertson
+ */
+
+package mqtt
+
+import (
+ "sync"
+
+ "github.com/eclipse/paho.mqtt.golang/packets"
+)
+
+// MemoryStore implements the store interface to provide a "persistence"
+// mechanism wholly stored in memory. This is only useful for
+// as long as the client instance exists.
+type MemoryStore struct {
+ sync.RWMutex
+ messages map[string]packets.ControlPacket
+ opened bool
+}
+
+// NewMemoryStore returns a pointer to a new instance of
+// MemoryStore. The instance is not initialized and ready to
+// use until Open() has been called on it.
+func NewMemoryStore() *MemoryStore {
+ store := &MemoryStore{
+ messages: make(map[string]packets.ControlPacket),
+ opened: false,
+ }
+ return store
+}
+
+// Open initializes a MemoryStore instance.
+func (store *MemoryStore) Open() {
+ store.Lock()
+ defer store.Unlock()
+ store.opened = true
+ DEBUG.Println(STR, "memorystore initialized")
+}
+
+// Put takes a key and a pointer to a Message and stores the
+// message.
+func (store *MemoryStore) Put(key string, message packets.ControlPacket) {
+ store.Lock()
+ defer store.Unlock()
+ if !store.opened {
+ ERROR.Println(STR, "Trying to use memory store, but not open")
+ return
+ }
+ store.messages[key] = message
+}
+
+// Get takes a key and looks in the store for a matching Message
+// returning either the Message pointer or nil.
+func (store *MemoryStore) Get(key string) packets.ControlPacket {
+ store.RLock()
+ defer store.RUnlock()
+ if !store.opened {
+ ERROR.Println(STR, "Trying to use memory store, but not open")
+ return nil
+ }
+ mid := mIDFromKey(key)
+ m := store.messages[key]
+ if m == nil {
+ CRITICAL.Println(STR, "memorystore get: message", mid, "not found")
+ } else {
+ DEBUG.Println(STR, "memorystore get: message", mid, "found")
+ }
+ return m
+}
+
+// All returns a slice of strings containing all the keys currently
+// in the MemoryStore.
+func (store *MemoryStore) All() []string {
+ store.RLock()
+ defer store.RUnlock()
+ if !store.opened {
+ ERROR.Println(STR, "Trying to use memory store, but not open")
+ return nil
+ }
+ var keys []string
+ for k := range store.messages {
+ keys = append(keys, k)
+ }
+ return keys
+}
+
+// Del takes a key, searches the MemoryStore and if the key is found
+// deletes the Message pointer associated with it.
+func (store *MemoryStore) Del(key string) {
+ store.Lock()
+ defer store.Unlock()
+ if !store.opened {
+ ERROR.Println(STR, "Trying to use memory store, but not open")
+ return
+ }
+ mid := mIDFromKey(key)
+ m := store.messages[key]
+ if m == nil {
+ WARN.Println(STR, "memorystore del: message", mid, "not found")
+ } else {
+ delete(store.messages, key)
+ DEBUG.Println(STR, "memorystore del: message", mid, "was deleted")
+ }
+}
+
+// Close will disallow modifications to the state of the store.
+func (store *MemoryStore) Close() {
+ store.Lock()
+ defer store.Unlock()
+ if !store.opened {
+ ERROR.Println(STR, "Trying to close memory store, but not open")
+ return
+ }
+ store.opened = false
+ DEBUG.Println(STR, "memorystore closed")
+}
+
+// Reset eliminates all persisted message data in the store.
+func (store *MemoryStore) Reset() {
+ store.Lock()
+ defer store.Unlock()
+ if !store.opened {
+ ERROR.Println(STR, "Trying to reset memory store, but not open")
+ }
+ store.messages = make(map[string]packets.ControlPacket)
+ WARN.Println(STR, "memorystore wiped")
+}
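MemoryStore is typically what the client falls back to when no store is configured, so setting it explicitly is usually redundant; the sketch below (placeholder broker address) only makes the relationship to ClientOptions concrete:

package main

import mqtt "github.com/eclipse/paho.mqtt.golang"

func main() {
    // Explicitly selecting the in-memory store; in-flight QoS 1/2 messages
    // survive a reconnect but not a process restart.
    opts := mqtt.NewClientOptions().
        AddBroker("tcp://localhost:1883"). // placeholder broker
        SetStore(mqtt.NewMemoryStore())
    _ = mqtt.NewClient(opts)
}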
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/message.go b/vendor/github.com/eclipse/paho.mqtt.golang/message.go
new file mode 100644
index 0000000..0f8ca60
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/message.go
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Seth Hoenig
+ * Allan Stockdill-Mander
+ * Mike Robertson
+ */
+
+package mqtt
+
+import (
+ "net/url"
+ "sync"
+
+ "github.com/eclipse/paho.mqtt.golang/packets"
+)
+
+// Message defines the externals that a message implementation must support.
+// These are received messages that are passed to the callbacks, not internal
+// messages.
+type Message interface {
+ Duplicate() bool
+ Qos() byte
+ Retained() bool
+ Topic() string
+ MessageID() uint16
+ Payload() []byte
+ Ack()
+}
+
+type message struct {
+ duplicate bool
+ qos byte
+ retained bool
+ topic string
+ messageID uint16
+ payload []byte
+ once sync.Once
+ ack func()
+}
+
+func (m *message) Duplicate() bool {
+ return m.duplicate
+}
+
+func (m *message) Qos() byte {
+ return m.qos
+}
+
+func (m *message) Retained() bool {
+ return m.retained
+}
+
+func (m *message) Topic() string {
+ return m.topic
+}
+
+func (m *message) MessageID() uint16 {
+ return m.messageID
+}
+
+func (m *message) Payload() []byte {
+ return m.payload
+}
+
+func (m *message) Ack() {
+ m.once.Do(m.ack)
+}
+
+func messageFromPublish(p *packets.PublishPacket, ack func()) Message {
+ return &message{
+ duplicate: p.Dup,
+ qos: p.Qos,
+ retained: p.Retain,
+ topic: p.TopicName,
+ messageID: p.MessageID,
+ payload: p.Payload,
+ ack: ack,
+ }
+}
+
+func newConnectMsgFromOptions(options *ClientOptions, broker *url.URL) *packets.ConnectPacket {
+ m := packets.NewControlPacket(packets.Connect).(*packets.ConnectPacket)
+
+ m.CleanSession = options.CleanSession
+ m.WillFlag = options.WillEnabled
+ m.WillRetain = options.WillRetained
+ m.ClientIdentifier = options.ClientID
+
+ if options.WillEnabled {
+ m.WillQos = options.WillQos
+ m.WillTopic = options.WillTopic
+ m.WillMessage = options.WillPayload
+ }
+
+ username := options.Username
+ password := options.Password
+ if broker.User != nil {
+ username = broker.User.Username()
+ if pwd, ok := broker.User.Password(); ok {
+ password = pwd
+ }
+ }
+ if options.CredentialsProvider != nil {
+ username, password = options.CredentialsProvider()
+ }
+
+ if username != "" {
+ m.UsernameFlag = true
+ m.Username = username
+ // a password may only be sent when a username is also set
+ if password != "" {
+ m.PasswordFlag = true
+ m.Password = []byte(password)
+ }
+ }
+
+ m.Keepalive = uint16(options.KeepAlive)
+
+ return m
+}
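The Message interface above is what subscription callbacks receive. A sketch of a handler reading its accessors; the broker address, client id and topic filter are placeholders:

package main

import (
    "log"

    mqtt "github.com/eclipse/paho.mqtt.golang"
)

// handler demonstrates the read-only accessors exposed by Message.
func handler(client mqtt.Client, msg mqtt.Message) {
    log.Printf("topic=%s qos=%d retained=%t dup=%t id=%d payload=%s",
        msg.Topic(), msg.Qos(), msg.Retained(), msg.Duplicate(), msg.MessageID(), msg.Payload())
}

func main() {
    opts := mqtt.NewClientOptions().
        AddBroker("tcp://localhost:1883"). // placeholder broker
        SetClientID("client-sub")          // placeholder client id
    client := mqtt.NewClient(opts)
    if token := client.Connect(); token.Wait() && token.Error() != nil {
        log.Fatal(token.Error())
    }
    client.Subscribe("sensors/#", 1, handler).Wait()
    select {} // block forever; handler runs on the client's router goroutine
}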
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/messageids.go b/vendor/github.com/eclipse/paho.mqtt.golang/messageids.go
new file mode 100644
index 0000000..26c2c0d
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/messageids.go
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Seth Hoenig
+ * Allan Stockdill-Mander
+ * Mike Robertson
+ */
+
+package mqtt
+
+import (
+ "fmt"
+ "sync"
+ "time"
+)
+
+// MId is a 16-bit message id as specified by the MQTT spec.
+// In general, these values should not be depended upon by
+// the client application.
+type MId uint16
+
+type messageIds struct {
+ sync.RWMutex
+ index map[uint16]tokenCompletor
+
+ lastIssuedID uint16 // The most recently issued ID. Used so we cycle through ids rather than immediately reusing them (can make debugging easier)
+}
+
+const (
+ midMin uint16 = 1
+ midMax uint16 = 65535
+)
+
+func (mids *messageIds) cleanUp() {
+ mids.Lock()
+ for _, token := range mids.index {
+ switch token.(type) {
+ case *PublishToken:
+ token.setError(fmt.Errorf("connection lost before Publish completed"))
+ case *SubscribeToken:
+ token.setError(fmt.Errorf("connection lost before Subscribe completed"))
+ case *UnsubscribeToken:
+ token.setError(fmt.Errorf("connection lost before Unsubscribe completed"))
+ case nil:
+ continue
+ }
+ token.flowComplete()
+ }
+ mids.index = make(map[uint16]tokenCompletor)
+ mids.Unlock()
+ DEBUG.Println(MID, "cleaned up")
+}
+
+func (mids *messageIds) freeID(id uint16) {
+ mids.Lock()
+ delete(mids.index, id)
+ mids.Unlock()
+}
+
+func (mids *messageIds) claimID(token tokenCompletor, id uint16) {
+ mids.Lock()
+ defer mids.Unlock()
+ if _, ok := mids.index[id]; !ok {
+ mids.index[id] = token
+ } else {
+ old := mids.index[id]
+ old.flowComplete()
+ mids.index[id] = token
+ }
+ if id > mids.lastIssuedID {
+ mids.lastIssuedID = id
+ }
+}
+
+// getID will return an available id or 0 if none available
+// The id will generally be the previous id + 1 (because this makes tracing messages a bit simpler)
+func (mids *messageIds) getID(t tokenCompletor) uint16 {
+ mids.Lock()
+ defer mids.Unlock()
+ i := mids.lastIssuedID // note: the only situation where lastIssuedID is 0 is at startup, when the map will be empty
+ looped := false // uint16 will loop from 65535->0
+ for {
+ i++
+ if i == 0 { // skip 0 because it's not a valid id (Control Packets MUST contain a non-zero 16-bit Packet Identifier [MQTT-2.3.1-1])
+ i++
+ looped = true
+ }
+ if _, ok := mids.index[i]; !ok {
+ mids.index[i] = t
+ mids.lastIssuedID = i
+ return i
+ }
+ if (looped && i == mids.lastIssuedID) || (mids.lastIssuedID == 0 && i == midMax) { // lastIssuedID will be 0 at startup
+ return 0 // no free ids
+ }
+ }
+}
+
+func (mids *messageIds) getToken(id uint16) tokenCompletor {
+ mids.RLock()
+ defer mids.RUnlock()
+ if token, ok := mids.index[id]; ok {
+ return token
+ }
+ return &DummyToken{id: id}
+}
+
+type DummyToken struct {
+ id uint16
+}
+
+// Wait implements the Token Wait method.
+func (d *DummyToken) Wait() bool {
+ return true
+}
+
+// WaitTimeout implements the Token WaitTimeout method.
+func (d *DummyToken) WaitTimeout(t time.Duration) bool {
+ return true
+}
+
+// Done implements the Token Done method.
+func (d *DummyToken) Done() <-chan struct{} {
+ ch := make(chan struct{})
+ close(ch)
+ return ch
+}
+
+func (d *DummyToken) flowComplete() {
+ ERROR.Printf("A lookup for token %d returned nil\n", d.id)
+}
+
+func (d *DummyToken) Error() error {
+ return nil
+}
+
+func (d *DummyToken) setError(e error) {}
+
+// PlaceHolderToken does nothing and was implemented to allow a messageid to be reserved.
+// It differs from DummyToken in that calling flowComplete does not generate an error (it
+// is expected that flowComplete will be called when the token is overwritten with a real token).
+type PlaceHolderToken struct {
+ id uint16
+}
+
+// Wait implements the Token Wait method.
+func (p *PlaceHolderToken) Wait() bool {
+ return true
+}
+
+// WaitTimeout implements the Token WaitTimeout method.
+func (p *PlaceHolderToken) WaitTimeout(t time.Duration) bool {
+ return true
+}
+
+// Done implements the Token Done method.
+func (p *PlaceHolderToken) Done() <-chan struct{} {
+ ch := make(chan struct{})
+ close(ch)
+ return ch
+}
+
+func (p *PlaceHolderToken) flowComplete() {
+}
+
+func (p *PlaceHolderToken) Error() error {
+ return nil
+}
+
+func (p *PlaceHolderToken) setError(e error) {}
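The allocation policy in getID (cycle forward from the last issued id, skip 0, give up after one full pass) can be illustrated with a standalone sketch. This is not the library's code, just the same idea in isolation with a hypothetical nextID helper:

package main

import "fmt"

// nextID mimics the cycling allocation used by messageIds.getID: start just
// after the last issued id, skip 0, and report 0 when the whole 16-bit space
// is in use.
func nextID(inUse map[uint16]bool, last uint16) uint16 {
    i := last
    for n := 0; n < 65535; n++ { // at most one full pass over the id space
        i++
        if i == 0 { // 0 is not a valid packet identifier
            i = 1
        }
        if !inUse[i] {
            return i
        }
    }
    return 0 // no free ids
}

func main() {
    inUse := map[uint16]bool{1: true, 2: true}
    fmt.Println(nextID(inUse, 0))     // 3: skips the ids already claimed
    fmt.Println(nextID(inUse, 65535)) // wraps past 0 and again lands on 3
}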
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/net.go b/vendor/github.com/eclipse/paho.mqtt.golang/net.go
new file mode 100644
index 0000000..485bb2a
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/net.go
@@ -0,0 +1,464 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Seth Hoenig
+ * Allan Stockdill-Mander
+ * Mike Robertson
+ */
+
+package mqtt
+
+import (
+ "errors"
+ "io"
+ "net"
+ "reflect"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/eclipse/paho.mqtt.golang/packets"
+)
+
+const closedNetConnErrorText = "use of closed network connection" // error string for closed conn (https://golang.org/src/net/error_test.go)
+
+// ConnectMQTT takes a connected net.Conn and performs the initial MQTT handshake. Parameters are:
+// conn - Connected net.Conn
+// cm - Connect Packet with everything other than the protocol name/version populated (historical reasons)
+// protocolVersion - The protocol version to attempt to connect with
+//
+// Note that, for backward compatibility, ConnectMQTT() suppresses the actual connection error (compare to connectMQTT()).
+func ConnectMQTT(conn net.Conn, cm *packets.ConnectPacket, protocolVersion uint) (byte, bool) {
+ rc, sessionPresent, _ := connectMQTT(conn, cm, protocolVersion)
+ return rc, sessionPresent
+}
+
+func connectMQTT(conn io.ReadWriter, cm *packets.ConnectPacket, protocolVersion uint) (byte, bool, error) {
+ switch protocolVersion {
+ case 3:
+ DEBUG.Println(CLI, "Using MQTT 3.1 protocol")
+ cm.ProtocolName = "MQIsdp"
+ cm.ProtocolVersion = 3
+ case 0x83:
+ DEBUG.Println(CLI, "Using MQTT 3.1b protocol")
+ cm.ProtocolName = "MQIsdp"
+ cm.ProtocolVersion = 0x83
+ case 0x84:
+ DEBUG.Println(CLI, "Using MQTT 3.1.1b protocol")
+ cm.ProtocolName = "MQTT"
+ cm.ProtocolVersion = 0x84
+ default:
+ DEBUG.Println(CLI, "Using MQTT 3.1.1 protocol")
+ cm.ProtocolName = "MQTT"
+ cm.ProtocolVersion = 4
+ }
+
+ if err := cm.Write(conn); err != nil {
+ ERROR.Println(CLI, err)
+ return packets.ErrNetworkError, false, err
+ }
+
+ rc, sessionPresent, err := verifyCONNACK(conn)
+ return rc, sessionPresent, err
+}
+
+// This function is only used for receiving a connack
+// when the connection is first started.
+// This prevents receiving incoming data while resume
+// is in progress if clean session is false.
+func verifyCONNACK(conn io.Reader) (byte, bool, error) {
+ DEBUG.Println(NET, "connect started")
+
+ ca, err := packets.ReadPacket(conn)
+ if err != nil {
+ ERROR.Println(NET, "connect got error", err)
+ return packets.ErrNetworkError, false, err
+ }
+
+ if ca == nil {
+ ERROR.Println(NET, "received nil packet")
+ return packets.ErrNetworkError, false, errors.New("nil CONNACK packet")
+ }
+
+ msg, ok := ca.(*packets.ConnackPacket)
+ if !ok {
+ ERROR.Println(NET, "received msg that was not CONNACK")
+ return packets.ErrNetworkError, false, errors.New("non-CONNACK first packet received")
+ }
+
+ DEBUG.Println(NET, "received connack")
+ return msg.ReturnCode, msg.SessionPresent, nil
+}
+
+// inbound encapsulates the output from startIncoming.
+// err - If != nil then an error has occurred
+// cp - A control packet received over the network link
+type inbound struct {
+ err error
+ cp packets.ControlPacket
+}
+
+// startIncoming initiates a goroutine that reads incoming messages off the wire and sends them to the channel (returned).
+// If there are any issues with the network connection then the returned channel will be closed and the goroutine will exit
+// (so closing the connection will terminate the goroutine)
+func startIncoming(conn io.Reader) <-chan inbound {
+ var err error
+ var cp packets.ControlPacket
+ ibound := make(chan inbound)
+
+ DEBUG.Println(NET, "incoming started")
+
+ go func() {
+ for {
+ if cp, err = packets.ReadPacket(conn); err != nil {
+ // We do not want to log the error if it is due to the network connection having been closed
+ // elsewhere (i.e. after sending DisconnectPacket). Detecting this situation is the subject of
+ // https://github.com/golang/go/issues/4373
+ if !strings.Contains(err.Error(), closedNetConnErrorText) {
+ ibound <- inbound{err: err}
+ }
+ close(ibound)
+ DEBUG.Println(NET, "incoming complete")
+ return
+ }
+ DEBUG.Println(NET, "startIncoming Received Message")
+ ibound <- inbound{cp: cp}
+ }
+ }()
+
+ return ibound
+}
+
+// incomingComms encapsulates the possible output of the incomingComms routine. If err != nil then an error has occurred and
+// the routine will have terminated; otherwise one of the other members should be non-nil
+type incomingComms struct {
+ err error // If non-nil then there has been an error (ignore everything else)
+ outbound *PacketAndToken // Packet (with token) that needs to be sent out (e.g. an acknowledgement)
+ incomingPub *packets.PublishPacket // A new publish has been received; this will need to be passed on to our user
+}
+
+// startIncomingComms initiates incoming communications; this includes starting a goroutine to process incoming
+// messages.
+// Accepts a channel of inbound messages from the store (persisted messages); note this must be closed as soon as
+// everything in the store has been sent.
+// Returns a channel that will be passed any received packets; this will be closed on a network error (and inboundFromStore closed)
+func startIncomingComms(conn io.Reader,
+ c commsFns,
+ inboundFromStore <-chan packets.ControlPacket,
+) <-chan incomingComms {
+ ibound := startIncoming(conn) // Start goroutine that reads from network connection
+ output := make(chan incomingComms)
+
+ DEBUG.Println(NET, "startIncomingComms started")
+ go func() {
+ for {
+ if inboundFromStore == nil && ibound == nil {
+ close(output)
+ DEBUG.Println(NET, "startIncomingComms goroutine complete")
+ return // As soon as ibound is closed we can exit (should have already processed an error)
+ }
+ DEBUG.Println(NET, "logic waiting for msg on ibound")
+
+ var msg packets.ControlPacket
+ var ok bool
+ select {
+ case msg, ok = <-inboundFromStore:
+ if !ok {
+ DEBUG.Println(NET, "startIncomingComms: inboundFromStore complete")
+ inboundFromStore = nil // should happen quickly as this is only for persisted messages
+ continue
+ }
+ DEBUG.Println(NET, "startIncomingComms: got msg from store")
+ case ibMsg, ok := <-ibound:
+ if !ok {
+ DEBUG.Println(NET, "startIncomingComms: ibound complete")
+ ibound = nil
+ continue
+ }
+ DEBUG.Println(NET, "startIncomingComms: got msg on ibound")
+ // If the inbound comms routine encounters any issues it will send us an error.
+ if ibMsg.err != nil {
+ output <- incomingComms{err: ibMsg.err}
+ continue // Usually the channel will be closed immediately after sending an error, but it is safer not to assume this
+ }
+ msg = ibMsg.cp
+
+ c.persistInbound(msg)
+ c.UpdateLastReceived() // Notify keepalive logic that we recently received a packet
+ }
+
+ switch m := msg.(type) {
+ case *packets.PingrespPacket:
+ DEBUG.Println(NET, "startIncomingComms: received pingresp")
+ c.pingRespReceived()
+ case *packets.SubackPacket:
+ DEBUG.Println(NET, "startIncomingComms: received suback, id:", m.MessageID)
+ token := c.getToken(m.MessageID)
+
+ if t, ok := token.(*SubscribeToken); ok {
+ DEBUG.Println(NET, "startIncomingComms: granted qoss", m.ReturnCodes)
+ for i, qos := range m.ReturnCodes {
+ t.subResult[t.subs[i]] = qos
+ }
+ }
+
+ token.flowComplete()
+ c.freeID(m.MessageID)
+ case *packets.UnsubackPacket:
+ DEBUG.Println(NET, "startIncomingComms: received unsuback, id:", m.MessageID)
+ c.getToken(m.MessageID).flowComplete()
+ c.freeID(m.MessageID)
+ case *packets.PublishPacket:
+ DEBUG.Println(NET, "startIncomingComms: received publish, msgId:", m.MessageID)
+ output <- incomingComms{incomingPub: m}
+ case *packets.PubackPacket:
+ DEBUG.Println(NET, "startIncomingComms: received puback, id:", m.MessageID)
+ c.getToken(m.MessageID).flowComplete()
+ c.freeID(m.MessageID)
+ case *packets.PubrecPacket:
+ DEBUG.Println(NET, "startIncomingComms: received pubrec, id:", m.MessageID)
+ prel := packets.NewControlPacket(packets.Pubrel).(*packets.PubrelPacket)
+ prel.MessageID = m.MessageID
+ output <- incomingComms{outbound: &PacketAndToken{p: prel, t: nil}}
+ case *packets.PubrelPacket:
+ DEBUG.Println(NET, "startIncomingComms: received pubrel, id:", m.MessageID)
+ pc := packets.NewControlPacket(packets.Pubcomp).(*packets.PubcompPacket)
+ pc.MessageID = m.MessageID
+ c.persistOutbound(pc)
+ output <- incomingComms{outbound: &PacketAndToken{p: pc, t: nil}}
+ case *packets.PubcompPacket:
+ DEBUG.Println(NET, "startIncomingComms: received pubcomp, id:", m.MessageID)
+ c.getToken(m.MessageID).flowComplete()
+ c.freeID(m.MessageID)
+ }
+ }
+ }()
+ return output
+}
+
+// startOutgoingComms initiates a goroutine to transmit outgoing packets.
+// Pass in an open network connection and channels for outbound messages (including those triggered
+// directly from incoming comms).
+// Returns a channel that will receive details of any errors (closed when the goroutine exits)
+// This function will only terminate when all input channels are closed.
+func startOutgoingComms(conn net.Conn,
+ c commsFns,
+ oboundp <-chan *PacketAndToken,
+ obound <-chan *PacketAndToken,
+ oboundFromIncoming <-chan *PacketAndToken,
+) <-chan error {
+ errChan := make(chan error)
+ DEBUG.Println(NET, "outgoing started")
+
+ go func() {
+ for {
+ DEBUG.Println(NET, "outgoing waiting for an outbound message")
+
+ // This goroutine will only exit when all of the input channels we receive on have been closed. This approach is taken to avoid any
+ // deadlocks (if the connection goes down there are limited options as to what we can do with anything waiting on us and
+ // throwing away the packets seems the best option)
+ if oboundp == nil && obound == nil && oboundFromIncoming == nil {
+ DEBUG.Println(NET, "outgoing comms stopping")
+ close(errChan)
+ return
+ }
+
+ select {
+ case pub, ok := <-obound:
+ if !ok {
+ obound = nil
+ continue
+ }
+ msg := pub.p.(*packets.PublishPacket)
+ DEBUG.Println(NET, "obound msg to write", msg.MessageID)
+
+ writeTimeout := c.getWriteTimeOut()
+ if writeTimeout > 0 {
+ if err := conn.SetWriteDeadline(time.Now().Add(writeTimeout)); err != nil {
+ ERROR.Println(NET, "SetWriteDeadline ", err)
+ }
+ }
+
+ if err := msg.Write(conn); err != nil {
+ ERROR.Println(NET, "outgoing obound reporting error ", err)
+ pub.t.setError(err)
+ // report error if it's not due to the connection being closed elsewhere
+ if !strings.Contains(err.Error(), closedNetConnErrorText) {
+ errChan <- err
+ }
+ continue
+ }
+
+ if writeTimeout > 0 {
+ // If we successfully wrote, we don't want the timeout to happen during an idle period
+ // so we reset it to infinite.
+ if err := conn.SetWriteDeadline(time.Time{}); err != nil {
+ ERROR.Println(NET, "SetWriteDeadline to 0 ", err)
+ }
+ }
+
+ if msg.Qos == 0 {
+ pub.t.flowComplete()
+ }
+ DEBUG.Println(NET, "obound wrote msg, id:", msg.MessageID)
+ case msg, ok := <-oboundp:
+ if !ok {
+ oboundp = nil
+ continue
+ }
+ DEBUG.Println(NET, "obound priority msg to write, type", reflect.TypeOf(msg.p))
+ if err := msg.p.Write(conn); err != nil {
+ ERROR.Println(NET, "outgoing oboundp reporting error ", err)
+ if msg.t != nil {
+ msg.t.setError(err)
+ }
+ errChan <- err
+ continue
+ }
+
+ if _, ok := msg.p.(*packets.DisconnectPacket); ok {
+ msg.t.(*DisconnectToken).flowComplete()
+ DEBUG.Println(NET, "outbound wrote disconnect, closing connection")
+ // As per the MQTT spec "After sending a DISCONNECT Packet the Client MUST close the Network Connection"
+ // Closing the connection will cause the goroutines to end in sequence (starting with incoming comms)
+ conn.Close()
+ }
+ case msg, ok := <-oboundFromIncoming: // message triggered by an inbound message (PubrecPacket or PubrelPacket)
+ if !ok {
+ oboundFromIncoming = nil
+ continue
+ }
+ DEBUG.Println(NET, "obound from incoming msg to write, type", reflect.TypeOf(msg.p), " ID ", msg.p.Details().MessageID)
+ if err := msg.p.Write(conn); err != nil {
+ ERROR.Println(NET, "outgoing oboundFromIncoming reporting error", err)
+ if msg.t != nil {
+ msg.t.setError(err)
+ }
+ errChan <- err
+ continue
+ }
+ }
+ c.UpdateLastSent() // Record that a packet has been sent (for keepalive routine)
+ }
+ }()
+ return errChan
+}
+
+// commsFns provide access to the client state (messageids, requesting disconnection and updating timing)
+type commsFns interface {
+ getToken(id uint16) tokenCompletor // Retrieve the token for the specified messageid (if none then a dummy token must be returned)
+ freeID(id uint16) // Release the specified messageid (clearing out of any persistent store)
+ UpdateLastReceived() // Must be called whenever a packet is received
+ UpdateLastSent() // Must be called whenever a packet is successfully sent
+ getWriteTimeOut() time.Duration // Return the writetimeout (or 0 if none)
+ persistOutbound(m packets.ControlPacket) // add the packet to the outbound store
+ persistInbound(m packets.ControlPacket) // add the packet to the inbound store
+ pingRespReceived() // Called when a ping response is received
+}
+
+// startComms initiates goroutines that handle communications over the network connection
+// Messages will be stored (via commsFns) and deleted from the store as necessary
+// It returns two channels:
+// packets.PublishPacket - Will receive publish packets received over the network.
+// Closed when incoming comms routines exit (on shutdown or if network link closed)
+// error - Any errors will be sent on this channel. The channel is closed when all comms routines have shut down
+//
+// Note: The comms routines monitoring oboundp and obound will not shut down until those channels are both closed. Any messages received between the
+// connection being closed and those channels being closed will generate errors (and nothing will be sent). That way the chance of a deadlock is
+// minimised.
+func startComms(conn net.Conn, // Network connection (must be active)
+ c commsFns, // getters and setters to enable us to cleanly interact with client
+ inboundFromStore <-chan packets.ControlPacket, // Inbound packets from the persistence store (should be closed relatively soon after startup)
+ oboundp <-chan *PacketAndToken,
+ obound <-chan *PacketAndToken) (
+ <-chan *packets.PublishPacket, // Publish packets received over the network
+ <-chan error, // Any errors (should generally trigger a disconnect)
+) {
+ // Start inbound comms handler; this needs to be able to transmit messages so we start a go routine to add these to the priority outbound channel
+ ibound := startIncomingComms(conn, c, inboundFromStore)
+ outboundFromIncoming := make(chan *PacketAndToken) // Will accept outgoing messages triggered by startIncomingComms (e.g. acknowledgements)
+
+ // Start the outgoing handler. It is important to note that output from startIncomingComms is fed into startOutgoingComms (for ACK's)
+ oboundErr := startOutgoingComms(conn, c, oboundp, obound, outboundFromIncoming)
+ DEBUG.Println(NET, "startComms started")
+
+ // Run up go routines to handle the output from the above comms functions - these are handled in separate
+ // go routines because they can interact (e.g. ibound triggers an ACK to obound which triggers an error)
+ var wg sync.WaitGroup
+ wg.Add(2)
+
+ outPublish := make(chan *packets.PublishPacket)
+ outError := make(chan error)
+
+ // Any messages received get passed to the appropriate channel
+ go func() {
+ for ic := range ibound {
+ if ic.err != nil {
+ outError <- ic.err
+ continue
+ }
+ if ic.outbound != nil {
+ outboundFromIncoming <- ic.outbound
+ continue
+ }
+ if ic.incomingPub != nil {
+ outPublish <- ic.incomingPub
+ continue
+ }
+ ERROR.Println(STR, "startComms received empty incomingComms msg")
+ }
+ // Close channels that will not be written to again (allowing other routines to exit)
+ close(outboundFromIncoming)
+ close(outPublish)
+ wg.Done()
+ }()
+
+ // Any errors will be passed out to our caller
+ go func() {
+ for err := range oboundErr {
+ outError <- err
+ }
+ wg.Done()
+ }()
+
+ // outError is used by both routines so can only be closed when they are both complete
+ go func() {
+ wg.Wait()
+ close(outError)
+ DEBUG.Println(NET, "startComms closing outError")
+ }()
+
+ return outPublish, outError
+}
+
+// ackFunc acknowledges a packet
+// WARNING the function returned must not be called if the comms routine is shutting down or not running
+// (it needs outgoing comms in order to send the acknowledgement). Currently this is only called from
+// matchAndDispatch which will be shutdown before the comms are
+func ackFunc(oboundP chan *PacketAndToken, persist Store, packet *packets.PublishPacket) func() {
+ return func() {
+ switch packet.Qos {
+ case 2:
+ pr := packets.NewControlPacket(packets.Pubrec).(*packets.PubrecPacket)
+ pr.MessageID = packet.MessageID
+ DEBUG.Println(NET, "putting pubrec msg on obound")
+ oboundP <- &PacketAndToken{p: pr, t: nil}
+ DEBUG.Println(NET, "done putting pubrec msg on obound")
+ case 1:
+ pa := packets.NewControlPacket(packets.Puback).(*packets.PubackPacket)
+ pa.MessageID = packet.MessageID
+ DEBUG.Println(NET, "putting puback msg on obound")
+ persistOutbound(persist, pa)
+ oboundP <- &PacketAndToken{p: pa, t: nil}
+ DEBUG.Println(NET, "done putting puback msg on obound")
+ case 0:
+ // do nothing, since there is no need to send an ack packet back
+ }
+ }
+}
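The shutdown ordering in startComms (outError has two writers, so it may only be closed after both have finished) is a standard fan-in pattern. A stripped-down sketch of the same idea, independent of this library:

package main

import (
    "errors"
    "fmt"
    "sync"
)

// merge fans two error streams into one channel and closes the merged channel
// only after both producers are done, mirroring how startComms treats outError.
func merge(a, b <-chan error) <-chan error {
    out := make(chan error)
    var wg sync.WaitGroup
    wg.Add(2)
    forward := func(c <-chan error) {
        defer wg.Done()
        for err := range c {
            out <- err
        }
    }
    go forward(a)
    go forward(b)
    go func() {
        wg.Wait()
        close(out) // safe: both writers have exited
    }()
    return out
}

func main() {
    a := make(chan error, 1)
    b := make(chan error, 1)
    a <- errors.New("incoming comms error")
    close(a)
    close(b)
    for err := range merge(a, b) {
        fmt.Println(err)
    }
}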
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/netconn.go b/vendor/github.com/eclipse/paho.mqtt.golang/netconn.go
new file mode 100644
index 0000000..0cb6cd1
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/netconn.go
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Seth Hoenig
+ * Allan Stockdill-Mander
+ * Mike Robertson
+ */
+
+package mqtt
+
+import (
+ "crypto/tls"
+ "errors"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "time"
+
+ "golang.org/x/net/proxy"
+)
+
+//
+// This just establishes the network connection; once established the type of connection should be irrelevant
+//
+
+// openConnection opens a network connection using the protocol indicated in the URL.
+// Does not carry out any MQTT specific handshakes.
+func openConnection(uri *url.URL, tlsc *tls.Config, timeout time.Duration, headers http.Header, websocketOptions *WebsocketOptions) (net.Conn, error) {
+ switch uri.Scheme {
+ case "ws":
+ conn, err := NewWebsocket(uri.String(), nil, timeout, headers, websocketOptions)
+ return conn, err
+ case "wss":
+ conn, err := NewWebsocket(uri.String(), tlsc, timeout, headers, websocketOptions)
+ return conn, err
+ case "mqtt", "tcp":
+ allProxy := os.Getenv("all_proxy")
+ if len(allProxy) == 0 {
+ conn, err := net.DialTimeout("tcp", uri.Host, timeout)
+ if err != nil {
+ return nil, err
+ }
+ return conn, nil
+ }
+ proxyDialer := proxy.FromEnvironment()
+
+ conn, err := proxyDialer.Dial("tcp", uri.Host)
+ if err != nil {
+ return nil, err
+ }
+ return conn, nil
+ case "unix":
+ conn, err := net.DialTimeout("unix", uri.Host, timeout)
+ if err != nil {
+ return nil, err
+ }
+ return conn, nil
+ case "ssl", "tls", "mqtts", "mqtt+ssl", "tcps":
+ allProxy := os.Getenv("all_proxy")
+ if len(allProxy) == 0 {
+ conn, err := tls.DialWithDialer(&net.Dialer{Timeout: timeout}, "tcp", uri.Host, tlsc)
+ if err != nil {
+ return nil, err
+ }
+ return conn, nil
+ }
+ proxyDialer := proxy.FromEnvironment()
+
+ conn, err := proxyDialer.Dial("tcp", uri.Host)
+ if err != nil {
+ return nil, err
+ }
+
+ tlsConn := tls.Client(conn, tlsc)
+
+ err = tlsConn.Handshake()
+ if err != nil {
+ _ = conn.Close()
+ return nil, err
+ }
+
+ return tlsConn, nil
+ }
+ return nil, errors.New("unknown protocol")
+}
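openConnection dispatches purely on the URL scheme, so switching transports comes down to the broker URL given to the client options. A sketch assuming the usual AddBroker/SetTLSConfig options; the host is a placeholder and InsecureSkipVerify is for illustration only:

package main

import (
    "crypto/tls"
    "log"

    mqtt "github.com/eclipse/paho.mqtt.golang"
)

func main() {
    opts := mqtt.NewClientOptions().SetClientID("client-tls") // placeholder client id

    // Plain TCP ("tcp://"), TLS ("ssl://") and websocket ("ws://"/"wss://")
    // brokers differ only in the URL scheme handled by openConnection.
    opts.AddBroker("ssl://broker.example.com:8883") // placeholder host
    opts.SetTLSConfig(&tls.Config{InsecureSkipVerify: true})

    client := mqtt.NewClient(opts)
    if token := client.Connect(); token.Wait() && token.Error() != nil {
        log.Fatal(token.Error())
    }
    client.Disconnect(250)
}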
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/notice.html b/vendor/github.com/eclipse/paho.mqtt.golang/notice.html
new file mode 100644
index 0000000..f19c483
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/notice.html
@@ -0,0 +1,108 @@
+
+
+
+
+
+Eclipse Foundation Software User Agreement
+
+
+
+Eclipse Foundation Software User Agreement
+February 1, 2011
+
+Usage Of Content
+
+THE ECLIPSE FOUNDATION MAKES AVAILABLE SOFTWARE, DOCUMENTATION, INFORMATION AND/OR OTHER MATERIALS FOR OPEN SOURCE PROJECTS
+ (COLLECTIVELY "CONTENT"). USE OF THE CONTENT IS GOVERNED BY THE TERMS AND CONDITIONS OF THIS AGREEMENT AND/OR THE TERMS AND
+ CONDITIONS OF LICENSE AGREEMENTS OR NOTICES INDICATED OR REFERENCED BELOW. BY USING THE CONTENT, YOU AGREE THAT YOUR USE
+ OF THE CONTENT IS GOVERNED BY THIS AGREEMENT AND/OR THE TERMS AND CONDITIONS OF ANY APPLICABLE LICENSE AGREEMENTS OR
+ NOTICES INDICATED OR REFERENCED BELOW. IF YOU DO NOT AGREE TO THE TERMS AND CONDITIONS OF THIS AGREEMENT AND THE TERMS AND
+ CONDITIONS OF ANY APPLICABLE LICENSE AGREEMENTS OR NOTICES INDICATED OR REFERENCED BELOW, THEN YOU MAY NOT USE THE CONTENT.
+
+Applicable Licenses
+
+Unless otherwise indicated, all Content made available by the Eclipse Foundation is provided to you under the terms and conditions of the Eclipse Public License Version 1.0
+ ("EPL"). A copy of the EPL is provided with this Content and is also available at http://www.eclipse.org/legal/epl-v10.html.
+ For purposes of the EPL, "Program" will mean the Content.
+
+Content includes, but is not limited to, source code, object code, documentation and other files maintained in the Eclipse Foundation source code
+ repository ("Repository") in software modules ("Modules") and made available as downloadable archives ("Downloads").
+
+
+ - Content may be structured and packaged into modules to facilitate delivering, extending, and upgrading the Content. Typical modules may include plug-ins ("Plug-ins"), plug-in fragments ("Fragments"), and features ("Features").
+ - Each Plug-in or Fragment may be packaged as a sub-directory or JAR (Java™ ARchive) in a directory named "plugins".
+ - A Feature is a bundle of one or more Plug-ins and/or Fragments and associated material. Each Feature may be packaged as a sub-directory in a directory named "features". Within a Feature, files named "feature.xml" may contain a list of the names and version numbers of the Plug-ins
+ and/or Fragments associated with that Feature.
+ - Features may also include other Features ("Included Features"). Within a Feature, files named "feature.xml" may contain a list of the names and version numbers of Included Features.
+
+
+The terms and conditions governing Plug-ins and Fragments should be contained in files named "about.html" ("Abouts"). The terms and conditions governing Features and
+Included Features should be contained in files named "license.html" ("Feature Licenses"). Abouts and Feature Licenses may be located in any directory of a Download or Module
+including, but not limited to the following locations:
+
+
+ - The top-level (root) directory
+ - Plug-in and Fragment directories
+ - Inside Plug-ins and Fragments packaged as JARs
+ - Sub-directories of the directory named "src" of certain Plug-ins
+ - Feature directories
+
+
+Note: if a Feature made available by the Eclipse Foundation is installed using the Provisioning Technology (as defined below), you must agree to a license ("Feature Update License") during the
+installation process. If the Feature contains Included Features, the Feature Update License should either provide you with the terms and conditions governing the Included Features or
+inform you where you can locate them. Feature Update Licenses may be found in the "license" property of files named "feature.properties" found within a Feature.
+Such Abouts, Feature Licenses, and Feature Update Licenses contain the terms and conditions (or references to such terms and conditions) that govern your use of the associated Content in
+that directory.
+
+THE ABOUTS, FEATURE LICENSES, AND FEATURE UPDATE LICENSES MAY REFER TO THE EPL OR OTHER LICENSE AGREEMENTS, NOTICES OR TERMS AND CONDITIONS. SOME OF THESE
+OTHER LICENSE AGREEMENTS MAY INCLUDE (BUT ARE NOT LIMITED TO):
+
+
+
+IT IS YOUR OBLIGATION TO READ AND ACCEPT ALL SUCH TERMS AND CONDITIONS PRIOR TO USE OF THE CONTENT. If no About, Feature License, or Feature Update License is provided, please
+contact the Eclipse Foundation to determine what terms and conditions govern that particular Content.
+
+
+Use of Provisioning Technology
+
+The Eclipse Foundation makes available provisioning software, examples of which include, but are not limited to, p2 and the Eclipse
+ Update Manager ("Provisioning Technology") for the purpose of allowing users to install software, documentation, information and/or
+ other materials (collectively "Installable Software"). This capability is provided with the intent of allowing such users to
+ install, extend and update Eclipse-based products. Information about packaging Installable Software is available at http://eclipse.org/equinox/p2/repository_packaging.html
+ ("Specification").
+
+You may use Provisioning Technology to allow other parties to install Installable Software. You shall be responsible for enabling the
+ applicable license agreements relating to the Installable Software to be presented to, and accepted by, the users of the Provisioning Technology
+ in accordance with the Specification. By using Provisioning Technology in such a manner and making it available in accordance with the
+ Specification, you further acknowledge your agreement to, and the acquisition of all necessary rights to permit the following:
+
+
+ - A series of actions may occur ("Provisioning Process") in which a user may execute the Provisioning Technology
+ on a machine ("Target Machine") with the intent of installing, extending or updating the functionality of an Eclipse-based
+ product.
+ - During the Provisioning Process, the Provisioning Technology may cause third party Installable Software or a portion thereof to be
+ accessed and copied to the Target Machine.
+ - Pursuant to the Specification, you will provide to the user the terms and conditions that govern the use of the Installable
+ Software ("Installable Software Agreement") and such Installable Software Agreement shall be accessed from the Target
+ Machine in accordance with the Specification. Such Installable Software Agreement must inform the user of the terms and conditions that govern
+ the Installable Software and must solicit acceptance by the end user in the manner prescribed in such Installable Software Agreement. Upon such
+ indication of agreement by the user, the provisioning Technology will complete installation of the Installable Software.
+
+
+Cryptography
+
+Content may contain encryption software. The country in which you are currently may have restrictions on the import, possession, and use, and/or re-export to
+ another country, of encryption software. BEFORE using any encryption software, please check the country's laws, regulations and policies concerning the import,
+ possession, or use, and re-export of encryption software, to see if this is permitted.
+
+Java and all Java-based trademarks are trademarks of Oracle Corporation in the United States, other countries, or both.
+
+
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/oops.go b/vendor/github.com/eclipse/paho.mqtt.golang/oops.go
new file mode 100644
index 0000000..39630d7
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/oops.go
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Seth Hoenig
+ * Allan Stockdill-Mander
+ * Mike Robertson
+ */
+
+package mqtt
+
+func chkerr(e error) {
+ if e != nil {
+ panic(e)
+ }
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/options.go b/vendor/github.com/eclipse/paho.mqtt.golang/options.go
new file mode 100644
index 0000000..4a1420c
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/options.go
@@ -0,0 +1,403 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Seth Hoenig
+ * Allan Stockdill-Mander
+ * Mike Robertson
+ * Måns Ansgariusson
+ */
+
+// Portions copyright © 2018 TIBCO Software Inc.
+
+package mqtt
+
+import (
+ "crypto/tls"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+)
+
+// CredentialsProvider allows the username and password to be updated
+// before reconnecting. It should return the current username and password.
+type CredentialsProvider func() (username string, password string)
+
+// MessageHandler is a callback type which can be set to be
+// executed upon the arrival of messages published to topics
+// to which the client is subscribed.
+type MessageHandler func(Client, Message)
+
+// ConnectionLostHandler is a callback type which can be set to be
+// executed upon an unintended disconnection from the MQTT broker.
+// Disconnects caused by calling Disconnect or ForceDisconnect will
+// not cause an OnConnectionLost callback to execute.
+type ConnectionLostHandler func(Client, error)
+
+// OnConnectHandler is a callback that is called when the client
+// state changes from unconnected/disconnected to connected. Both
+// at initial connection and on reconnection
+type OnConnectHandler func(Client)
+
+// ReconnectHandler is invoked prior to reconnecting after
+// the initial connection is lost
+type ReconnectHandler func(Client, *ClientOptions)
+
+// ConnectionAttemptHandler is invoked prior to making the initial connection.
+type ConnectionAttemptHandler func(broker *url.URL, tlsCfg *tls.Config) *tls.Config
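+
+// Example (illustrative sketch, not part of the upstream sources): handlers are plain
+// functions matching the types above and are registered via the ClientOptions setters
+// defined below.
+//
+//	var onMessage mqtt.MessageHandler = func(c mqtt.Client, m mqtt.Message) {
+//		fmt.Printf("%s: %s\n", m.Topic(), m.Payload())
+//	}
+//	var onLost mqtt.ConnectionLostHandler = func(c mqtt.Client, err error) {
+//		log.Println("connection lost:", err)
+//	}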
+
+// ClientOptions contains configurable options for a Client. Note that these should be set using the
+// relevant methods (e.g. AddBroker) rather than directly. See those functions for information on usage.
+type ClientOptions struct {
+ Servers []*url.URL
+ ClientID string
+ Username string
+ Password string
+ CredentialsProvider CredentialsProvider
+ CleanSession bool
+ Order bool
+ WillEnabled bool
+ WillTopic string
+ WillPayload []byte
+ WillQos byte
+ WillRetained bool
+ ProtocolVersion uint
+ protocolVersionExplicit bool
+ TLSConfig *tls.Config
+ KeepAlive int64
+ PingTimeout time.Duration
+ ConnectTimeout time.Duration
+ MaxReconnectInterval time.Duration
+ AutoReconnect bool
+ ConnectRetryInterval time.Duration
+ ConnectRetry bool
+ Store Store
+ DefaultPublishHandler MessageHandler
+ OnConnect OnConnectHandler
+ OnConnectionLost ConnectionLostHandler
+ OnReconnecting ReconnectHandler
+ OnConnectAttempt ConnectionAttemptHandler
+ WriteTimeout time.Duration
+ MessageChannelDepth uint
+ ResumeSubs bool
+ HTTPHeaders http.Header
+ WebsocketOptions *WebsocketOptions
+}
+
+// NewClientOptions will create a new ClientOptions type with some
+// default values.
+// Port: 1883
+// CleanSession: True
+// Order: True (note: it is recommended that this be set to FALSE unless order is important)
+// KeepAlive: 30 (seconds)
+// ConnectTimeout: 30 (seconds)
+// MaxReconnectInterval: 10 (minutes)
+// AutoReconnect: True
+func NewClientOptions() *ClientOptions {
+ o := &ClientOptions{
+ Servers: nil,
+ ClientID: "",
+ Username: "",
+ Password: "",
+ CleanSession: true,
+ Order: true,
+ WillEnabled: false,
+ WillTopic: "",
+ WillPayload: nil,
+ WillQos: 0,
+ WillRetained: false,
+ ProtocolVersion: 0,
+ protocolVersionExplicit: false,
+ KeepAlive: 30,
+ PingTimeout: 10 * time.Second,
+ ConnectTimeout: 30 * time.Second,
+ MaxReconnectInterval: 10 * time.Minute,
+ AutoReconnect: true,
+ ConnectRetryInterval: 30 * time.Second,
+ ConnectRetry: false,
+ Store: nil,
+ OnConnect: nil,
+ OnConnectionLost: DefaultConnectionLostHandler,
+ OnConnectAttempt: nil,
+ WriteTimeout: 0, // 0 represents timeout disabled
+ ResumeSubs: false,
+ HTTPHeaders: make(map[string][]string),
+ WebsocketOptions: &WebsocketOptions{},
+ }
+ return o
+}
+
+// AddBroker adds a broker URI to the list of brokers to be used. The format should be
+// scheme://host:port
+// Where "scheme" is one of "tcp", "ssl", or "ws", "host" is the ip-address (or hostname)
+// and "port" is the port on which the broker is accepting connections.
+//
+// If the host is omitted it defaults to "127.0.0.1", and if the scheme is omitted it defaults to "tcp://".
+//
+// An example broker URI would look like: tcp://foobar.com:1883
+func (o *ClientOptions) AddBroker(server string) *ClientOptions {
+ if len(server) > 0 && server[0] == ':' {
+ server = "127.0.0.1" + server
+ }
+ if !strings.Contains(server, "://") {
+ server = "tcp://" + server
+ }
+ brokerURI, err := url.Parse(server)
+ if err != nil {
+ ERROR.Println(CLI, "Failed to parse broker address:", server, err)
+ return o
+ }
+ o.Servers = append(o.Servers, brokerURI)
+ return o
+}
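+
+// Example (illustrative sketch, not part of the upstream sources): from a consumer's
+// point of view the options are normally built by chaining the setters; the broker
+// address and client id below are assumptions made for the example.
+//
+//	opts := mqtt.NewClientOptions().
+//		AddBroker("tcp://broker.example.com:1883").
+//		SetClientID("example-client").
+//		SetKeepAlive(30 * time.Second)
+//	client := mqtt.NewClient(opts)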
+
+// SetResumeSubs will enable resuming of stored (un)subscribe messages when connecting
+// but not reconnecting if CleanSession is false. Otherwise these messages are discarded.
+func (o *ClientOptions) SetResumeSubs(resume bool) *ClientOptions {
+ o.ResumeSubs = resume
+ return o
+}
+
+// SetClientID will set the client id to be used by this client when
+// connecting to the MQTT broker. According to the MQTT v3.1 specification,
+// a client id must be no longer than 23 characters.
+func (o *ClientOptions) SetClientID(id string) *ClientOptions {
+ o.ClientID = id
+ return o
+}
+
+// SetUsername will set the username to be used by this client when connecting
+// to the MQTT broker. Note: without the use of SSL/TLS, this information will
+// be sent in plaintext across the wire.
+func (o *ClientOptions) SetUsername(u string) *ClientOptions {
+ o.Username = u
+ return o
+}
+
+// SetPassword will set the password to be used by this client when connecting
+// to the MQTT broker. Note: without the use of SSL/TLS, this information will
+// be sent in plaintext across the wire.
+func (o *ClientOptions) SetPassword(p string) *ClientOptions {
+ o.Password = p
+ return o
+}
+
+// SetCredentialsProvider will set a method to be called by this client when
+// connecting to the MQTT broker, providing the current username and password.
+// Note: without the use of SSL/TLS, this information will be sent
+// in plaintext across the wire.
+func (o *ClientOptions) SetCredentialsProvider(p CredentialsProvider) *ClientOptions {
+ o.CredentialsProvider = p
+ return o
+}
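+
+// Example (illustrative sketch, not part of the upstream sources): a CredentialsProvider
+// is just a closure returning the latest credentials; fetchFreshToken below is a
+// hypothetical helper, not part of this library.
+//
+//	opts.SetCredentialsProvider(func() (string, string) {
+//		return "device-123", fetchFreshToken()
+//	})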
+
+// SetCleanSession will set the "clean session" flag in the connect message
+// when this client connects to an MQTT broker. By setting this flag, you are
+// indicating that no messages saved by the broker for this client should be
+// delivered. Any messages that this client intended to send before its previous
+// disconnect, but did not, will likewise not be sent upon connecting to the
+// broker.
+func (o *ClientOptions) SetCleanSession(clean bool) *ClientOptions {
+ o.CleanSession = clean
+ return o
+}
+
+// SetOrderMatters will set the message routing to guarantee order within
+// each QoS level. By default, this value is true. If set to false (recommended),
+// this flag indicates that messages can be delivered asynchronously
+// from the client to the application and possibly arrive out of order.
+// Specifically, the message handler is called in its own go routine.
+// Note that setting this to true does not guarantee in-order delivery
+// (this is subject to broker settings like "max_inflight_messages=1" in mosquitto)
+// and if true then handlers must not block.
+func (o *ClientOptions) SetOrderMatters(order bool) *ClientOptions {
+ o.Order = order
+ return o
+}
+
+// SetTLSConfig will set an SSL/TLS configuration to be used when connecting
+// to an MQTT broker. Please read the official Go documentation for more
+// information.
+func (o *ClientOptions) SetTLSConfig(t *tls.Config) *ClientOptions {
+ o.TLSConfig = t
+ return o
+}
+
+// SetStore will set the implementation of the Store interface
+// used to provide message persistence in cases where QoS levels
+// QoS_ONE or QoS_TWO are used. If no store is provided, then the
+// client will use MemoryStore by default.
+func (o *ClientOptions) SetStore(s Store) *ClientOptions {
+ o.Store = s
+ return o
+}
+
+// SetKeepAlive will set the amount of time (in seconds) that the client
+// should wait before sending a PING request to the broker. This will
+// allow the client to know that the connection to the server has not been
+// lost.
+func (o *ClientOptions) SetKeepAlive(k time.Duration) *ClientOptions {
+ o.KeepAlive = int64(k / time.Second)
+ return o
+}
+
+// SetPingTimeout will set the amount of time (in seconds) that the client
+// will wait after sending a PING request to the broker, before deciding
+// that the connection has been lost. Default is 10 seconds.
+func (o *ClientOptions) SetPingTimeout(k time.Duration) *ClientOptions {
+ o.PingTimeout = k
+ return o
+}
+
+// SetProtocolVersion sets the MQTT version to be used to connect to the
+// broker. Legitimate values are currently 3 - MQTT 3.1 or 4 - MQTT 3.1.1
+func (o *ClientOptions) SetProtocolVersion(pv uint) *ClientOptions {
+ if (pv >= 3 && pv <= 4) || (pv > 0x80) {
+ o.ProtocolVersion = pv
+ o.protocolVersionExplicit = true
+ }
+ return o
+}
+
+// UnsetWill will cause any set will message to be disregarded.
+func (o *ClientOptions) UnsetWill() *ClientOptions {
+ o.WillEnabled = false
+ return o
+}
+
+// SetWill accepts a string will message to be set. When the client connects,
+// it will give this will message to the broker, which will then publish the
+// provided payload (the will) to any clients that are subscribed to the provided
+// topic.
+func (o *ClientOptions) SetWill(topic string, payload string, qos byte, retained bool) *ClientOptions {
+ o.SetBinaryWill(topic, []byte(payload), qos, retained)
+ return o
+}
+
+// SetBinaryWill accepts a []byte will message to be set. When the client connects,
+// it will give this will message to the broker, which will then publish the
+// provided payload (the will) to any clients that are subscribed to the provided
+// topic.
+func (o *ClientOptions) SetBinaryWill(topic string, payload []byte, qos byte, retained bool) *ClientOptions {
+ o.WillEnabled = true
+ o.WillTopic = topic
+ o.WillPayload = payload
+ o.WillQos = qos
+ o.WillRetained = retained
+ return o
+}
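+
+// Example (illustrative sketch, not part of the upstream sources): registering a
+// last-will message before connecting; the broker publishes it if the client
+// disconnects ungracefully. The topic and payload are assumptions made for the example.
+//
+//	opts.SetWill("devices/example/status", "offline", 1, true)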
+
+// SetDefaultPublishHandler sets the MessageHandler that will be called when a message
+// is received that does not match any known subscriptions.
+//
+// If OrderMatters is true (the default) then defaultHandler must not block or
+// call functions within this package that may block (e.g. Publish) other than in
+// a new go routine.
+// defaultHandler must be safe for concurrent use by multiple goroutines.
+func (o *ClientOptions) SetDefaultPublishHandler(defaultHandler MessageHandler) *ClientOptions {
+ o.DefaultPublishHandler = defaultHandler
+ return o
+}
+
+// SetOnConnectHandler sets the function to be called when the client is connected. Both
+// at initial connection time and upon automatic reconnect.
+func (o *ClientOptions) SetOnConnectHandler(onConn OnConnectHandler) *ClientOptions {
+ o.OnConnect = onConn
+ return o
+}
+
+// SetConnectionLostHandler will set the OnConnectionLost callback to be executed
+// in the case where the client unexpectedly loses connection with the MQTT broker.
+func (o *ClientOptions) SetConnectionLostHandler(onLost ConnectionLostHandler) *ClientOptions {
+ o.OnConnectionLost = onLost
+ return o
+}
+
+// SetReconnectingHandler sets the OnReconnecting callback to be executed prior
+// to the client attempting a reconnect to the MQTT broker.
+func (o *ClientOptions) SetReconnectingHandler(cb ReconnectHandler) *ClientOptions {
+ o.OnReconnecting = cb
+ return o
+}
+
+// SetConnectionAttemptHandler sets the ConnectionAttemptHandler callback to be executed prior
+// to each attempt to connect to an MQTT broker. Returns the *tls.Config that will be used when establishing
+// the connection (a copy of the tls.Config from ClientOptions will be passed in along with the broker URL).
+// This allows connection specific changes to be made to the *tls.Config.
+func (o *ClientOptions) SetConnectionAttemptHandler(onConnectAttempt ConnectionAttemptHandler) *ClientOptions {
+ o.OnConnectAttempt = onConnectAttempt
+ return o
+}
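+
+// Example (illustrative sketch, not part of the upstream sources): a handler that
+// adjusts the TLS configuration per broker; the returned *tls.Config is used for
+// this particular connection attempt.
+//
+//	opts.SetConnectionAttemptHandler(func(broker *url.URL, tlsCfg *tls.Config) *tls.Config {
+//		if tlsCfg == nil {
+//			tlsCfg = &tls.Config{}
+//		}
+//		tlsCfg.ServerName = broker.Hostname()
+//		return tlsCfg
+//	})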
+
+// SetWriteTimeout puts a limit on how long an MQTT publish should block until it unblocks with a
+// timeout error. A duration of 0 never times out (the default).
+func (o *ClientOptions) SetWriteTimeout(t time.Duration) *ClientOptions {
+ o.WriteTimeout = t
+ return o
+}
+
+// SetConnectTimeout limits how long the client will wait when trying to open a connection
+// to an MQTT server before timing out. A duration of 0 never times out.
+// Default 30 seconds. Currently only operational on TCP/TLS connections.
+func (o *ClientOptions) SetConnectTimeout(t time.Duration) *ClientOptions {
+ o.ConnectTimeout = t
+ return o
+}
+
+// SetMaxReconnectInterval sets the maximum time that will be waited between reconnection attempts
+// when connection is lost
+func (o *ClientOptions) SetMaxReconnectInterval(t time.Duration) *ClientOptions {
+ o.MaxReconnectInterval = t
+ return o
+}
+
+// SetAutoReconnect sets whether the automatic reconnection logic should be used
+// when the connection is lost. Even if disabled, the ConnectionLostHandler is
+// still called.
+func (o *ClientOptions) SetAutoReconnect(a bool) *ClientOptions {
+ o.AutoReconnect = a
+ return o
+}
+
+// SetConnectRetryInterval sets the time that will be waited between connection attempts
+// when initially connecting if ConnectRetry is TRUE
+func (o *ClientOptions) SetConnectRetryInterval(t time.Duration) *ClientOptions {
+ o.ConnectRetryInterval = t
+ return o
+}
+
+// SetConnectRetry sets whether the connect function will automatically retry the connection
+// in the event of a failure (when true the token returned by the Connect function will
+// not complete until the connection is up or it is cancelled).
+// If ConnectRetry is true then subscriptions should be requested in the OnConnect handler.
+// Setting this to true permits messages to be published before the connection is established.
+func (o *ClientOptions) SetConnectRetry(a bool) *ClientOptions {
+ o.ConnectRetry = a
+ return o
+}
+
+// SetMessageChannelDepth DEPRECATED: the value set here no longer has any effect; the function
+// remains only so that the API is not altered.
+func (o *ClientOptions) SetMessageChannelDepth(s uint) *ClientOptions {
+ o.MessageChannelDepth = s
+ return o
+}
+
+// SetHTTPHeaders sets the additional HTTP headers that will be sent in the WebSocket
+// opening handshake.
+func (o *ClientOptions) SetHTTPHeaders(h http.Header) *ClientOptions {
+ o.HTTPHeaders = h
+ return o
+}
+
+// SetWebsocketOptions sets the additional websocket options used in a WebSocket connection
+func (o *ClientOptions) SetWebsocketOptions(w *WebsocketOptions) *ClientOptions {
+ o.WebsocketOptions = w
+ return o
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/options_reader.go b/vendor/github.com/eclipse/paho.mqtt.golang/options_reader.go
new file mode 100644
index 0000000..7ff240f
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/options_reader.go
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Seth Hoenig
+ * Allan Stockdill-Mander
+ * Mike Robertson
+ */
+
+package mqtt
+
+import (
+ "crypto/tls"
+ "net/http"
+ "net/url"
+ "time"
+)
+
+// ClientOptionsReader provides an interface for reading ClientOptions after the client has been initialized.
+type ClientOptionsReader struct {
+ options *ClientOptions
+}
+
+// Servers returns a slice of the servers defined in the clientoptions
+func (r *ClientOptionsReader) Servers() []*url.URL {
+ s := make([]*url.URL, len(r.options.Servers))
+
+ for i, u := range r.options.Servers {
+ nu := *u
+ s[i] = &nu
+ }
+
+ return s
+}
+
+// ResumeSubs returns true if resuming stored (un)sub is enabled
+func (r *ClientOptionsReader) ResumeSubs() bool {
+ s := r.options.ResumeSubs
+ return s
+}
+
+// ClientID returns the set client id
+func (r *ClientOptionsReader) ClientID() string {
+ s := r.options.ClientID
+ return s
+}
+
+// Username returns the set username
+func (r *ClientOptionsReader) Username() string {
+ s := r.options.Username
+ return s
+}
+
+// Password returns the set password
+func (r *ClientOptionsReader) Password() string {
+ s := r.options.Password
+ return s
+}
+
+// CleanSession returns whether Cleansession is set
+func (r *ClientOptionsReader) CleanSession() bool {
+ s := r.options.CleanSession
+ return s
+}
+
+// Order returns whether in-order delivery of messages is requested within each QoS level
+func (r *ClientOptionsReader) Order() bool {
+ s := r.options.Order
+ return s
+}
+
+// WillEnabled returns whether a will message has been configured
+func (r *ClientOptionsReader) WillEnabled() bool {
+ s := r.options.WillEnabled
+ return s
+}
+
+// WillTopic returns the topic the will message will be published to
+func (r *ClientOptionsReader) WillTopic() string {
+ s := r.options.WillTopic
+ return s
+}
+
+// WillPayload returns the payload of the configured will message
+func (r *ClientOptionsReader) WillPayload() []byte {
+ s := r.options.WillPayload
+ return s
+}
+
+// WillQos returns the QoS level the will message will be published with
+func (r *ClientOptionsReader) WillQos() byte {
+ s := r.options.WillQos
+ return s
+}
+
+// WillRetained returns whether the will message will be published as retained
+func (r *ClientOptionsReader) WillRetained() bool {
+ s := r.options.WillRetained
+ return s
+}
+
+// ProtocolVersion returns the configured MQTT protocol version
+func (r *ClientOptionsReader) ProtocolVersion() uint {
+ s := r.options.ProtocolVersion
+ return s
+}
+
+// TLSConfig returns the TLS configuration used when connecting to the broker
+func (r *ClientOptionsReader) TLSConfig() *tls.Config {
+ s := r.options.TLSConfig
+ return s
+}
+
+// KeepAlive returns the configured keepalive interval
+func (r *ClientOptionsReader) KeepAlive() time.Duration {
+ s := time.Duration(r.options.KeepAlive * int64(time.Second))
+ return s
+}
+
+// PingTimeout returns how long the client waits for a ping response before disconnecting
+func (r *ClientOptionsReader) PingTimeout() time.Duration {
+ s := r.options.PingTimeout
+ return s
+}
+
+// ConnectTimeout returns how long the client waits when opening a connection to the broker
+func (r *ClientOptionsReader) ConnectTimeout() time.Duration {
+ s := r.options.ConnectTimeout
+ return s
+}
+
+// MaxReconnectInterval returns the maximum time waited between reconnection attempts
+func (r *ClientOptionsReader) MaxReconnectInterval() time.Duration {
+ s := r.options.MaxReconnectInterval
+ return s
+}
+
+// AutoReconnect returns whether automatic reconnection is enabled
+func (r *ClientOptionsReader) AutoReconnect() bool {
+ s := r.options.AutoReconnect
+ return s
+}
+
+// ConnectRetryInterval returns the delay between retries on the initial connection (if ConnectRetry true)
+func (r *ClientOptionsReader) ConnectRetryInterval() time.Duration {
+ s := r.options.ConnectRetryInterval
+ return s
+}
+
+// ConnectRetry returns whether the initial connection request will be retried until connection established
+func (r *ClientOptionsReader) ConnectRetry() bool {
+ s := r.options.ConnectRetry
+ return s
+}
+
+// WriteTimeout returns the maximum time a publish may block before timing out
+func (r *ClientOptionsReader) WriteTimeout() time.Duration {
+ s := r.options.WriteTimeout
+ return s
+}
+
+// MessageChannelDepth DEPRECATED returns the configured message channel depth (no longer used)
+func (r *ClientOptionsReader) MessageChannelDepth() uint {
+ s := r.options.MessageChannelDepth
+ return s
+}
+
+// HTTPHeaders returns the additional HTTP headers sent in the WebSocket opening handshake
+func (r *ClientOptionsReader) HTTPHeaders() http.Header {
+ h := r.options.HTTPHeaders
+ return h
+}
+
+// WebsocketOptions returns the currently configured WebSocket options
+func (r *ClientOptionsReader) WebsocketOptions() *WebsocketOptions {
+ s := r.options.WebsocketOptions
+ return s
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/connack.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/connack.go
new file mode 100644
index 0000000..1a0dc95
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/connack.go
@@ -0,0 +1,52 @@
+package packets
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+)
+
+// ConnackPacket is an internal representation of the fields of the
+// Connack MQTT packet
+type ConnackPacket struct {
+ FixedHeader
+ SessionPresent bool
+ ReturnCode byte
+}
+
+func (ca *ConnackPacket) String() string {
+ return fmt.Sprintf("%s sessionpresent: %t returncode: %d", ca.FixedHeader, ca.SessionPresent, ca.ReturnCode)
+}
+
+func (ca *ConnackPacket) Write(w io.Writer) error {
+ var body bytes.Buffer
+ var err error
+
+ body.WriteByte(boolToByte(ca.SessionPresent))
+ body.WriteByte(ca.ReturnCode)
+ ca.FixedHeader.RemainingLength = 2
+ packet := ca.FixedHeader.pack()
+ packet.Write(body.Bytes())
+ _, err = packet.WriteTo(w)
+
+ return err
+}
+
+// Unpack decodes the details of a ControlPacket after the fixed
+// header has been read
+func (ca *ConnackPacket) Unpack(b io.Reader) error {
+ flags, err := decodeByte(b)
+ if err != nil {
+ return err
+ }
+ ca.SessionPresent = 1&flags > 0
+ ca.ReturnCode, err = decodeByte(b)
+
+ return err
+}
+
+// Details returns a Details struct containing the Qos and
+// MessageID of this ControlPacket
+func (ca *ConnackPacket) Details() Details {
+ return Details{Qos: 0, MessageID: 0}
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/connect.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/connect.go
new file mode 100644
index 0000000..2184703
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/connect.go
@@ -0,0 +1,155 @@
+package packets
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+)
+
+// ConnectPacket is an internal representation of the fields of the
+// Connect MQTT packet
+type ConnectPacket struct {
+ FixedHeader
+ ProtocolName string
+ ProtocolVersion byte
+ CleanSession bool
+ WillFlag bool
+ WillQos byte
+ WillRetain bool
+ UsernameFlag bool
+ PasswordFlag bool
+ ReservedBit byte
+ Keepalive uint16
+
+ ClientIdentifier string
+ WillTopic string
+ WillMessage []byte
+ Username string
+ Password []byte
+}
+
+func (c *ConnectPacket) String() string {
+ var password string
+ if len(c.Password) > 0 {
+ password = "<redacted>"
+ }
+ return fmt.Sprintf("%s protocolversion: %d protocolname: %s cleansession: %t willflag: %t WillQos: %d WillRetain: %t Usernameflag: %t Passwordflag: %t keepalive: %d clientId: %s willtopic: %s willmessage: %s Username: %s Password: %s", c.FixedHeader, c.ProtocolVersion, c.ProtocolName, c.CleanSession, c.WillFlag, c.WillQos, c.WillRetain, c.UsernameFlag, c.PasswordFlag, c.Keepalive, c.ClientIdentifier, c.WillTopic, c.WillMessage, c.Username, password)
+}
+
+func (c *ConnectPacket) Write(w io.Writer) error {
+ var body bytes.Buffer
+ var err error
+
+ body.Write(encodeString(c.ProtocolName))
+ body.WriteByte(c.ProtocolVersion)
+ body.WriteByte(boolToByte(c.CleanSession)<<1 | boolToByte(c.WillFlag)<<2 | c.WillQos<<3 | boolToByte(c.WillRetain)<<5 | boolToByte(c.PasswordFlag)<<6 | boolToByte(c.UsernameFlag)<<7)
+ body.Write(encodeUint16(c.Keepalive))
+ body.Write(encodeString(c.ClientIdentifier))
+ if c.WillFlag {
+ body.Write(encodeString(c.WillTopic))
+ body.Write(encodeBytes(c.WillMessage))
+ }
+ if c.UsernameFlag {
+ body.Write(encodeString(c.Username))
+ }
+ if c.PasswordFlag {
+ body.Write(encodeBytes(c.Password))
+ }
+ c.FixedHeader.RemainingLength = body.Len()
+ packet := c.FixedHeader.pack()
+ packet.Write(body.Bytes())
+ _, err = packet.WriteTo(w)
+
+ return err
+}
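+
+// Worked illustration (not part of the upstream sources): for a clean-session connect
+// carrying a username and password but no will message, the connect-flags byte written
+// above is boolToByte(CleanSession)<<1 | boolToByte(PasswordFlag)<<6 | boolToByte(UsernameFlag)<<7
+// = 0x02 | 0x40 | 0x80 = 0xC2 (0b11000010).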
+
+// Unpack decodes the details of a ControlPacket after the fixed
+// header has been read
+func (c *ConnectPacket) Unpack(b io.Reader) error {
+ var err error
+ c.ProtocolName, err = decodeString(b)
+ if err != nil {
+ return err
+ }
+ c.ProtocolVersion, err = decodeByte(b)
+ if err != nil {
+ return err
+ }
+ options, err := decodeByte(b)
+ if err != nil {
+ return err
+ }
+ c.ReservedBit = 1 & options
+ c.CleanSession = 1&(options>>1) > 0
+ c.WillFlag = 1&(options>>2) > 0
+ c.WillQos = 3 & (options >> 3)
+ c.WillRetain = 1&(options>>5) > 0
+ c.PasswordFlag = 1&(options>>6) > 0
+ c.UsernameFlag = 1&(options>>7) > 0
+ c.Keepalive, err = decodeUint16(b)
+ if err != nil {
+ return err
+ }
+ c.ClientIdentifier, err = decodeString(b)
+ if err != nil {
+ return err
+ }
+ if c.WillFlag {
+ c.WillTopic, err = decodeString(b)
+ if err != nil {
+ return err
+ }
+ c.WillMessage, err = decodeBytes(b)
+ if err != nil {
+ return err
+ }
+ }
+ if c.UsernameFlag {
+ c.Username, err = decodeString(b)
+ if err != nil {
+ return err
+ }
+ }
+ if c.PasswordFlag {
+ c.Password, err = decodeBytes(b)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Validate performs validation of the fields of a Connect packet
+func (c *ConnectPacket) Validate() byte {
+ if c.PasswordFlag && !c.UsernameFlag {
+ return ErrRefusedBadUsernameOrPassword
+ }
+ if c.ReservedBit != 0 {
+ // Bad reserved bit
+ return ErrProtocolViolation
+ }
+ if (c.ProtocolName == "MQIsdp" && c.ProtocolVersion != 3) || (c.ProtocolName == "MQTT" && c.ProtocolVersion != 4) {
+ // Mismatched or unsupported protocol version
+ return ErrRefusedBadProtocolVersion
+ }
+ if c.ProtocolName != "MQIsdp" && c.ProtocolName != "MQTT" {
+ // Bad protocol name
+ return ErrProtocolViolation
+ }
+ if len(c.ClientIdentifier) > 65535 || len(c.Username) > 65535 || len(c.Password) > 65535 {
+ // Bad size field
+ return ErrProtocolViolation
+ }
+ if len(c.ClientIdentifier) == 0 && !c.CleanSession {
+ // Bad client identifier
+ return ErrRefusedIDRejected
+ }
+ return Accepted
+}
+
+// Details returns a Details struct containing the Qos and
+// MessageID of this ControlPacket
+func (c *ConnectPacket) Details() Details {
+ return Details{Qos: 0, MessageID: 0}
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/disconnect.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/disconnect.go
new file mode 100644
index 0000000..a24a84f
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/disconnect.go
@@ -0,0 +1,34 @@
+package packets
+
+import (
+ "io"
+)
+
+// DisconnectPacket is an internal representation of the fields of the
+// Disconnect MQTT packet
+type DisconnectPacket struct {
+ FixedHeader
+}
+
+func (d *DisconnectPacket) String() string {
+ return d.FixedHeader.String()
+}
+
+func (d *DisconnectPacket) Write(w io.Writer) error {
+ packet := d.FixedHeader.pack()
+ _, err := packet.WriteTo(w)
+
+ return err
+}
+
+// Unpack decodes the details of a ControlPacket after the fixed
+// header has been read
+func (d *DisconnectPacket) Unpack(b io.Reader) error {
+ return nil
+}
+
+// Details returns a Details struct containing the Qos and
+// MessageID of this ControlPacket
+func (d *DisconnectPacket) Details() Details {
+ return Details{Qos: 0, MessageID: 0}
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/packets.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/packets.go
new file mode 100644
index 0000000..3c734b3
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/packets.go
@@ -0,0 +1,356 @@
+package packets
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// ControlPacket defines the interface for structs intended to hold
+// decoded MQTT packets, either from being read or before being
+// written
+type ControlPacket interface {
+ Write(io.Writer) error
+ Unpack(io.Reader) error
+ String() string
+ Details() Details
+}
+
+// PacketNames maps the constants for each of the MQTT packet types
+// to a string representation of their name.
+var PacketNames = map[uint8]string{
+ 1: "CONNECT",
+ 2: "CONNACK",
+ 3: "PUBLISH",
+ 4: "PUBACK",
+ 5: "PUBREC",
+ 6: "PUBREL",
+ 7: "PUBCOMP",
+ 8: "SUBSCRIBE",
+ 9: "SUBACK",
+ 10: "UNSUBSCRIBE",
+ 11: "UNSUBACK",
+ 12: "PINGREQ",
+ 13: "PINGRESP",
+ 14: "DISCONNECT",
+}
+
+// Below are the constants assigned to each of the MQTT packet types
+const (
+ Connect = 1
+ Connack = 2
+ Publish = 3
+ Puback = 4
+ Pubrec = 5
+ Pubrel = 6
+ Pubcomp = 7
+ Subscribe = 8
+ Suback = 9
+ Unsubscribe = 10
+ Unsuback = 11
+ Pingreq = 12
+ Pingresp = 13
+ Disconnect = 14
+)
+
+// Below are the const definitions for error codes returned by
+// Connect()
+const (
+ Accepted = 0x00
+ ErrRefusedBadProtocolVersion = 0x01
+ ErrRefusedIDRejected = 0x02
+ ErrRefusedServerUnavailable = 0x03
+ ErrRefusedBadUsernameOrPassword = 0x04
+ ErrRefusedNotAuthorised = 0x05
+ ErrNetworkError = 0xFE
+ ErrProtocolViolation = 0xFF
+)
+
+// ConnackReturnCodes is a map of the error codes constants for Connect()
+// to a string representation of the error
+var ConnackReturnCodes = map[uint8]string{
+ 0: "Connection Accepted",
+ 1: "Connection Refused: Bad Protocol Version",
+ 2: "Connection Refused: Client Identifier Rejected",
+ 3: "Connection Refused: Server Unavailable",
+ 4: "Connection Refused: Username or Password in unknown format",
+ 5: "Connection Refused: Not Authorised",
+ 254: "Connection Error",
+ 255: "Connection Refused: Protocol Violation",
+}
+
+var (
+ ErrorRefusedBadProtocolVersion = errors.New("unacceptable protocol version")
+ ErrorRefusedIDRejected = errors.New("identifier rejected")
+ ErrorRefusedServerUnavailable = errors.New("server Unavailable")
+ ErrorRefusedBadUsernameOrPassword = errors.New("bad user name or password")
+ ErrorRefusedNotAuthorised = errors.New("not Authorized")
+ ErrorNetworkError = errors.New("network Error")
+ ErrorProtocolViolation = errors.New("protocol Violation")
+)
+
+// ConnErrors is a map of the errors codes constants for Connect()
+// to a Go error
+var ConnErrors = map[byte]error{
+ Accepted: nil,
+ ErrRefusedBadProtocolVersion: ErrorRefusedBadProtocolVersion,
+ ErrRefusedIDRejected: ErrorRefusedIDRejected,
+ ErrRefusedServerUnavailable: ErrorRefusedServerUnavailable,
+ ErrRefusedBadUsernameOrPassword: ErrorRefusedBadUsernameOrPassword,
+ ErrRefusedNotAuthorised: ErrorRefusedNotAuthorised,
+ ErrNetworkError: ErrorNetworkError,
+ ErrProtocolViolation: ErrorProtocolViolation,
+}
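+
+// Example (illustrative sketch, not part of the upstream sources): after decoding a
+// CONNACK, the return code maps directly to a Go error via ConnErrors.
+//
+//	ca := cp.(*packets.ConnackPacket) // cp previously obtained from packets.ReadPacket
+//	if err := packets.ConnErrors[ca.ReturnCode]; err != nil {
+//		return err
+//	}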
+
+// ReadPacket takes an instance of an io.Reader (such as net.Conn) and attempts
+// to read an MQTT packet from the stream. It returns a ControlPacket
+// representing the decoded MQTT packet and an error. One of these returns will
+// always be nil, a nil ControlPacket indicating an error occurred.
+func ReadPacket(r io.Reader) (ControlPacket, error) {
+ var fh FixedHeader
+ b := make([]byte, 1)
+
+ _, err := io.ReadFull(r, b)
+ if err != nil {
+ return nil, err
+ }
+
+ err = fh.unpack(b[0], r)
+ if err != nil {
+ return nil, err
+ }
+
+ cp, err := NewControlPacketWithHeader(fh)
+ if err != nil {
+ return nil, err
+ }
+
+ packetBytes := make([]byte, fh.RemainingLength)
+ n, err := io.ReadFull(r, packetBytes)
+ if err != nil {
+ return nil, err
+ }
+ if n != fh.RemainingLength {
+ return nil, errors.New("failed to read expected data")
+ }
+
+ err = cp.Unpack(bytes.NewBuffer(packetBytes))
+ return cp, err
+}
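+
+// Example (illustrative sketch, not part of the upstream sources): ReadPacket is
+// typically driven in a loop over a net.Conn, with a type switch to recover the
+// concrete packet; handle below is a hypothetical application callback.
+//
+//	cp, err := packets.ReadPacket(conn)
+//	if err != nil {
+//		return err
+//	}
+//	switch p := cp.(type) {
+//	case *packets.PublishPacket:
+//		handle(p.TopicName, p.Payload)
+//	case *packets.PingreqPacket:
+//		err = packets.NewControlPacket(packets.Pingresp).Write(conn)
+//	}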
+
+// NewControlPacket is used to create a new ControlPacket of the type specified
+// by packetType, this is usually done by reference to the packet type constants
+// defined in packets.go. The newly created ControlPacket is empty and a pointer
+// is returned.
+func NewControlPacket(packetType byte) ControlPacket {
+ switch packetType {
+ case Connect:
+ return &ConnectPacket{FixedHeader: FixedHeader{MessageType: Connect}}
+ case Connack:
+ return &ConnackPacket{FixedHeader: FixedHeader{MessageType: Connack}}
+ case Disconnect:
+ return &DisconnectPacket{FixedHeader: FixedHeader{MessageType: Disconnect}}
+ case Publish:
+ return &PublishPacket{FixedHeader: FixedHeader{MessageType: Publish}}
+ case Puback:
+ return &PubackPacket{FixedHeader: FixedHeader{MessageType: Puback}}
+ case Pubrec:
+ return &PubrecPacket{FixedHeader: FixedHeader{MessageType: Pubrec}}
+ case Pubrel:
+ return &PubrelPacket{FixedHeader: FixedHeader{MessageType: Pubrel, Qos: 1}}
+ case Pubcomp:
+ return &PubcompPacket{FixedHeader: FixedHeader{MessageType: Pubcomp}}
+ case Subscribe:
+ return &SubscribePacket{FixedHeader: FixedHeader{MessageType: Subscribe, Qos: 1}}
+ case Suback:
+ return &SubackPacket{FixedHeader: FixedHeader{MessageType: Suback}}
+ case Unsubscribe:
+ return &UnsubscribePacket{FixedHeader: FixedHeader{MessageType: Unsubscribe, Qos: 1}}
+ case Unsuback:
+ return &UnsubackPacket{FixedHeader: FixedHeader{MessageType: Unsuback}}
+ case Pingreq:
+ return &PingreqPacket{FixedHeader: FixedHeader{MessageType: Pingreq}}
+ case Pingresp:
+ return &PingrespPacket{FixedHeader: FixedHeader{MessageType: Pingresp}}
+ }
+ return nil
+}
+
+// NewControlPacketWithHeader is used to create a new ControlPacket of the type
+// specified within the FixedHeader that is passed to the function.
+// The newly created ControlPacket is empty and a pointer is returned.
+func NewControlPacketWithHeader(fh FixedHeader) (ControlPacket, error) {
+ switch fh.MessageType {
+ case Connect:
+ return &ConnectPacket{FixedHeader: fh}, nil
+ case Connack:
+ return &ConnackPacket{FixedHeader: fh}, nil
+ case Disconnect:
+ return &DisconnectPacket{FixedHeader: fh}, nil
+ case Publish:
+ return &PublishPacket{FixedHeader: fh}, nil
+ case Puback:
+ return &PubackPacket{FixedHeader: fh}, nil
+ case Pubrec:
+ return &PubrecPacket{FixedHeader: fh}, nil
+ case Pubrel:
+ return &PubrelPacket{FixedHeader: fh}, nil
+ case Pubcomp:
+ return &PubcompPacket{FixedHeader: fh}, nil
+ case Subscribe:
+ return &SubscribePacket{FixedHeader: fh}, nil
+ case Suback:
+ return &SubackPacket{FixedHeader: fh}, nil
+ case Unsubscribe:
+ return &UnsubscribePacket{FixedHeader: fh}, nil
+ case Unsuback:
+ return &UnsubackPacket{FixedHeader: fh}, nil
+ case Pingreq:
+ return &PingreqPacket{FixedHeader: fh}, nil
+ case Pingresp:
+ return &PingrespPacket{FixedHeader: fh}, nil
+ }
+ return nil, fmt.Errorf("unsupported packet type 0x%x", fh.MessageType)
+}
+
+// Details struct returned by the Details() function called on
+// ControlPackets to present details of the Qos and MessageID
+// of the ControlPacket
+type Details struct {
+ Qos byte
+ MessageID uint16
+}
+
+// FixedHeader is a struct to hold the decoded information from
+// the fixed header of an MQTT ControlPacket
+type FixedHeader struct {
+ MessageType byte
+ Dup bool
+ Qos byte
+ Retain bool
+ RemainingLength int
+}
+
+func (fh FixedHeader) String() string {
+ return fmt.Sprintf("%s: dup: %t qos: %d retain: %t rLength: %d", PacketNames[fh.MessageType], fh.Dup, fh.Qos, fh.Retain, fh.RemainingLength)
+}
+
+func boolToByte(b bool) byte {
+ switch b {
+ case true:
+ return 1
+ default:
+ return 0
+ }
+}
+
+func (fh *FixedHeader) pack() bytes.Buffer {
+ var header bytes.Buffer
+ header.WriteByte(fh.MessageType<<4 | boolToByte(fh.Dup)<<3 | fh.Qos<<1 | boolToByte(fh.Retain))
+ header.Write(encodeLength(fh.RemainingLength))
+ return header
+}
+
+func (fh *FixedHeader) unpack(typeAndFlags byte, r io.Reader) error {
+ fh.MessageType = typeAndFlags >> 4
+ fh.Dup = (typeAndFlags>>3)&0x01 > 0
+ fh.Qos = (typeAndFlags >> 1) & 0x03
+ fh.Retain = typeAndFlags&0x01 > 0
+
+ var err error
+ fh.RemainingLength, err = decodeLength(r)
+ return err
+}
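+
+// Worked illustration (not part of the upstream sources): a first header byte of
+// 0x3B (0b00111011) unpacks to MessageType 3 (PUBLISH), Dup=true, Qos=1, Retain=true.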
+
+func decodeByte(b io.Reader) (byte, error) {
+ num := make([]byte, 1)
+ _, err := b.Read(num)
+ if err != nil {
+ return 0, err
+ }
+
+ return num[0], nil
+}
+
+func decodeUint16(b io.Reader) (uint16, error) {
+ num := make([]byte, 2)
+ _, err := b.Read(num)
+ if err != nil {
+ return 0, err
+ }
+ return binary.BigEndian.Uint16(num), nil
+}
+
+func encodeUint16(num uint16) []byte {
+ bytesResult := make([]byte, 2)
+ binary.BigEndian.PutUint16(bytesResult, num)
+ return bytesResult
+}
+
+func encodeString(field string) []byte {
+ return encodeBytes([]byte(field))
+}
+
+func decodeString(b io.Reader) (string, error) {
+ buf, err := decodeBytes(b)
+ return string(buf), err
+}
+
+func decodeBytes(b io.Reader) ([]byte, error) {
+ fieldLength, err := decodeUint16(b)
+ if err != nil {
+ return nil, err
+ }
+
+ field := make([]byte, fieldLength)
+ _, err = b.Read(field)
+ if err != nil {
+ return nil, err
+ }
+
+ return field, nil
+}
+
+func encodeBytes(field []byte) []byte {
+ fieldLength := make([]byte, 2)
+ binary.BigEndian.PutUint16(fieldLength, uint16(len(field)))
+ return append(fieldLength, field...)
+}
+
+func encodeLength(length int) []byte {
+ var encLength []byte
+ for {
+ digit := byte(length % 128)
+ length /= 128
+ if length > 0 {
+ digit |= 0x80
+ }
+ encLength = append(encLength, digit)
+ if length == 0 {
+ break
+ }
+ }
+ return encLength
+}
+
+func decodeLength(r io.Reader) (int, error) {
+ var rLength uint32
+ var multiplier uint32
+ b := make([]byte, 1)
+ for multiplier < 27 { // cap at 4 encoded bytes so a malformed stream with the continuation bit always set cannot loop forever
+ _, err := io.ReadFull(r, b)
+ if err != nil {
+ return 0, err
+ }
+
+ digit := b[0]
+ rLength |= uint32(digit&127) << multiplier
+ if (digit & 128) == 0 {
+ break
+ }
+ multiplier += 7
+ }
+ return int(rLength), nil
+}
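+
+// Worked illustration (not part of the upstream sources): the remaining length uses
+// MQTT's variable-length encoding, 7 bits per byte with the top bit as a continuation
+// flag. encodeLength(321) yields [0xC1, 0x02] (321%128 = 65, 65|0x80 = 0xC1, then
+// 321/128 = 2), and decodeLength reverses it: 65 + 2<<7 = 321.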
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingreq.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingreq.go
new file mode 100644
index 0000000..4fa54b2
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingreq.go
@@ -0,0 +1,34 @@
+package packets
+
+import (
+ "io"
+)
+
+// PingreqPacket is an internal representation of the fields of the
+// Pingreq MQTT packet
+type PingreqPacket struct {
+ FixedHeader
+}
+
+func (pr *PingreqPacket) String() string {
+ return pr.FixedHeader.String()
+}
+
+func (pr *PingreqPacket) Write(w io.Writer) error {
+ packet := pr.FixedHeader.pack()
+ _, err := packet.WriteTo(w)
+
+ return err
+}
+
+// Unpack decodes the details of a ControlPacket after the fixed
+// header has been read
+func (pr *PingreqPacket) Unpack(b io.Reader) error {
+ return nil
+}
+
+// Details returns a Details struct containing the Qos and
+// MessageID of this ControlPacket
+func (pr *PingreqPacket) Details() Details {
+ return Details{Qos: 0, MessageID: 0}
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingresp.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingresp.go
new file mode 100644
index 0000000..61388ca
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingresp.go
@@ -0,0 +1,34 @@
+package packets
+
+import (
+ "io"
+)
+
+// PingrespPacket is an internal representation of the fields of the
+// Pingresp MQTT packet
+type PingrespPacket struct {
+ FixedHeader
+}
+
+func (pr *PingrespPacket) String() string {
+ return pr.FixedHeader.String()
+}
+
+func (pr *PingrespPacket) Write(w io.Writer) error {
+ packet := pr.FixedHeader.pack()
+ _, err := packet.WriteTo(w)
+
+ return err
+}
+
+// Unpack decodes the details of a ControlPacket after the fixed
+// header has been read
+func (pr *PingrespPacket) Unpack(b io.Reader) error {
+ return nil
+}
+
+// Details returns a Details struct containing the Qos and
+// MessageID of this ControlPacket
+func (pr *PingrespPacket) Details() Details {
+ return Details{Qos: 0, MessageID: 0}
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/puback.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/puback.go
new file mode 100644
index 0000000..5e6c381
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/puback.go
@@ -0,0 +1,42 @@
+package packets
+
+import (
+ "fmt"
+ "io"
+)
+
+// PubackPacket is an internal representation of the fields of the
+// Puback MQTT packet
+type PubackPacket struct {
+ FixedHeader
+ MessageID uint16
+}
+
+func (pa *PubackPacket) String() string {
+ return fmt.Sprintf("%s MessageID: %d", pa.FixedHeader, pa.MessageID)
+}
+
+func (pa *PubackPacket) Write(w io.Writer) error {
+ var err error
+ pa.FixedHeader.RemainingLength = 2
+ packet := pa.FixedHeader.pack()
+ packet.Write(encodeUint16(pa.MessageID))
+ _, err = packet.WriteTo(w)
+
+ return err
+}
+
+// Unpack decodes the details of a ControlPacket after the fixed
+// header has been read
+func (pa *PubackPacket) Unpack(b io.Reader) error {
+ var err error
+ pa.MessageID, err = decodeUint16(b)
+
+ return err
+}
+
+// Details returns a Details struct containing the Qos and
+// MessageID of this ControlPacket
+func (pa *PubackPacket) Details() Details {
+ return Details{Qos: pa.Qos, MessageID: pa.MessageID}
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubcomp.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubcomp.go
new file mode 100644
index 0000000..47e043d
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubcomp.go
@@ -0,0 +1,42 @@
+package packets
+
+import (
+ "fmt"
+ "io"
+)
+
+// PubcompPacket is an internal representation of the fields of the
+// Pubcomp MQTT packet
+type PubcompPacket struct {
+ FixedHeader
+ MessageID uint16
+}
+
+func (pc *PubcompPacket) String() string {
+ return fmt.Sprintf("%s MessageID: %d", pc.FixedHeader, pc.MessageID)
+}
+
+func (pc *PubcompPacket) Write(w io.Writer) error {
+ var err error
+ pc.FixedHeader.RemainingLength = 2
+ packet := pc.FixedHeader.pack()
+ packet.Write(encodeUint16(pc.MessageID))
+ _, err = packet.WriteTo(w)
+
+ return err
+}
+
+// Unpack decodes the details of a ControlPacket after the fixed
+// header has been read
+func (pc *PubcompPacket) Unpack(b io.Reader) error {
+ var err error
+ pc.MessageID, err = decodeUint16(b)
+
+ return err
+}
+
+// Details returns a Details struct containing the Qos and
+// MessageID of this ControlPacket
+func (pc *PubcompPacket) Details() Details {
+ return Details{Qos: pc.Qos, MessageID: pc.MessageID}
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/publish.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/publish.go
new file mode 100644
index 0000000..cdd76b1
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/publish.go
@@ -0,0 +1,83 @@
+package packets
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+)
+
+// PublishPacket is an internal representation of the fields of the
+// Publish MQTT packet
+type PublishPacket struct {
+ FixedHeader
+ TopicName string
+ MessageID uint16
+ Payload []byte
+}
+
+func (p *PublishPacket) String() string {
+ return fmt.Sprintf("%s topicName: %s MessageID: %d payload: %s", p.FixedHeader, p.TopicName, p.MessageID, string(p.Payload))
+}
+
+func (p *PublishPacket) Write(w io.Writer) error {
+ var body bytes.Buffer
+ var err error
+
+ body.Write(encodeString(p.TopicName))
+ if p.Qos > 0 {
+ body.Write(encodeUint16(p.MessageID))
+ }
+ p.FixedHeader.RemainingLength = body.Len() + len(p.Payload)
+ packet := p.FixedHeader.pack()
+ packet.Write(body.Bytes())
+ packet.Write(p.Payload)
+ _, err = w.Write(packet.Bytes())
+
+ return err
+}
+
+// Unpack decodes the details of a ControlPacket after the fixed
+// header has been read
+func (p *PublishPacket) Unpack(b io.Reader) error {
+ var payloadLength = p.FixedHeader.RemainingLength
+ var err error
+ p.TopicName, err = decodeString(b)
+ if err != nil {
+ return err
+ }
+
+ if p.Qos > 0 {
+ p.MessageID, err = decodeUint16(b)
+ if err != nil {
+ return err
+ }
+ payloadLength -= len(p.TopicName) + 4
+ } else {
+ payloadLength -= len(p.TopicName) + 2
+ }
+ if payloadLength < 0 {
+ return fmt.Errorf("error unpacking publish, payload length < 0")
+ }
+ p.Payload = make([]byte, payloadLength)
+ _, err = b.Read(p.Payload)
+
+ return err
+}
+
+// Copy creates a new PublishPacket with the same topic and payload
+// but an empty fixed header, useful for when you want to deliver
+// a message with different properties such as Qos but the same
+// content
+func (p *PublishPacket) Copy() *PublishPacket {
+ newP := NewControlPacket(Publish).(*PublishPacket)
+ newP.TopicName = p.TopicName
+ newP.Payload = p.Payload
+
+ return newP
+}
+
+// Details returns a Details struct containing the Qos and
+// MessageID of this ControlPacket
+func (p *PublishPacket) Details() Details {
+ return Details{Qos: p.Qos, MessageID: p.MessageID}
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrec.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrec.go
new file mode 100644
index 0000000..f0a72c5
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrec.go
@@ -0,0 +1,42 @@
+package packets
+
+import (
+ "fmt"
+ "io"
+)
+
+// PubrecPacket is an internal representation of the fields of the
+// Pubrec MQTT packet
+type PubrecPacket struct {
+ FixedHeader
+ MessageID uint16
+}
+
+func (pr *PubrecPacket) String() string {
+ return fmt.Sprintf("%s MessageID: %d", pr.FixedHeader, pr.MessageID)
+}
+
+func (pr *PubrecPacket) Write(w io.Writer) error {
+ var err error
+ pr.FixedHeader.RemainingLength = 2
+ packet := pr.FixedHeader.pack()
+ packet.Write(encodeUint16(pr.MessageID))
+ _, err = packet.WriteTo(w)
+
+ return err
+}
+
+// Unpack decodes the details of a ControlPacket after the fixed
+// header has been read
+func (pr *PubrecPacket) Unpack(b io.Reader) error {
+ var err error
+ pr.MessageID, err = decodeUint16(b)
+
+ return err
+}
+
+// Details returns a Details struct containing the Qos and
+// MessageID of this ControlPacket
+func (pr *PubrecPacket) Details() Details {
+ return Details{Qos: pr.Qos, MessageID: pr.MessageID}
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrel.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrel.go
new file mode 100644
index 0000000..af29a82
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrel.go
@@ -0,0 +1,42 @@
+package packets
+
+import (
+ "fmt"
+ "io"
+)
+
+// PubrelPacket is an internal representation of the fields of the
+// Pubrel MQTT packet
+type PubrelPacket struct {
+ FixedHeader
+ MessageID uint16
+}
+
+func (pr *PubrelPacket) String() string {
+ return fmt.Sprintf("%s MessageID: %d", pr.FixedHeader, pr.MessageID)
+}
+
+func (pr *PubrelPacket) Write(w io.Writer) error {
+ var err error
+ pr.FixedHeader.RemainingLength = 2
+ packet := pr.FixedHeader.pack()
+ packet.Write(encodeUint16(pr.MessageID))
+ _, err = packet.WriteTo(w)
+
+ return err
+}
+
+// Unpack decodes the details of a ControlPacket after the fixed
+// header has been read
+func (pr *PubrelPacket) Unpack(b io.Reader) error {
+ var err error
+ pr.MessageID, err = decodeUint16(b)
+
+ return err
+}
+
+// Details returns a Details struct containing the Qos and
+// MessageID of this ControlPacket
+func (pr *PubrelPacket) Details() Details {
+ return Details{Qos: pr.Qos, MessageID: pr.MessageID}
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/suback.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/suback.go
new file mode 100644
index 0000000..cdea660
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/suback.go
@@ -0,0 +1,57 @@
+package packets
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+)
+
+// SubackPacket is an internal representation of the fields of the
+// Suback MQTT packet
+type SubackPacket struct {
+ FixedHeader
+ MessageID uint16
+ ReturnCodes []byte
+}
+
+func (sa *SubackPacket) String() string {
+ return fmt.Sprintf("%s MessageID: %d", sa.FixedHeader, sa.MessageID)
+}
+
+func (sa *SubackPacket) Write(w io.Writer) error {
+ var body bytes.Buffer
+ var err error
+ body.Write(encodeUint16(sa.MessageID))
+ body.Write(sa.ReturnCodes)
+ sa.FixedHeader.RemainingLength = body.Len()
+ packet := sa.FixedHeader.pack()
+ packet.Write(body.Bytes())
+ _, err = packet.WriteTo(w)
+
+ return err
+}
+
+// Unpack decodes the details of a ControlPacket after the fixed
+// header has been read
+func (sa *SubackPacket) Unpack(b io.Reader) error {
+ var qosBuffer bytes.Buffer
+ var err error
+ sa.MessageID, err = decodeUint16(b)
+ if err != nil {
+ return err
+ }
+
+ _, err = qosBuffer.ReadFrom(b)
+ if err != nil {
+ return err
+ }
+ sa.ReturnCodes = qosBuffer.Bytes()
+
+ return nil
+}
+
+// Details returns a Details struct containing the Qos and
+// MessageID of this ControlPacket
+func (sa *SubackPacket) Details() Details {
+ return Details{Qos: 0, MessageID: sa.MessageID}
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/subscribe.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/subscribe.go
new file mode 100644
index 0000000..daef901
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/subscribe.go
@@ -0,0 +1,69 @@
+package packets
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+)
+
+// SubscribePacket is an internal representation of the fields of the
+// Subscribe MQTT packet
+type SubscribePacket struct {
+ FixedHeader
+ MessageID uint16
+ Topics []string
+ Qoss []byte
+}
+
+func (s *SubscribePacket) String() string {
+ return fmt.Sprintf("%s MessageID: %d topics: %s", s.FixedHeader, s.MessageID, s.Topics)
+}
+
+func (s *SubscribePacket) Write(w io.Writer) error {
+ var body bytes.Buffer
+ var err error
+
+ body.Write(encodeUint16(s.MessageID))
+ for i, topic := range s.Topics {
+ body.Write(encodeString(topic))
+ body.WriteByte(s.Qoss[i])
+ }
+ s.FixedHeader.RemainingLength = body.Len()
+ packet := s.FixedHeader.pack()
+ packet.Write(body.Bytes())
+ _, err = packet.WriteTo(w)
+
+ return err
+}
+
+// Unpack decodes the details of a ControlPacket after the fixed
+// header has been read
+func (s *SubscribePacket) Unpack(b io.Reader) error {
+ var err error
+ s.MessageID, err = decodeUint16(b)
+ if err != nil {
+ return err
+ }
+ payloadLength := s.FixedHeader.RemainingLength - 2
+ for payloadLength > 0 {
+ topic, err := decodeString(b)
+ if err != nil {
+ return err
+ }
+ s.Topics = append(s.Topics, topic)
+ qos, err := decodeByte(b)
+ if err != nil {
+ return err
+ }
+ s.Qoss = append(s.Qoss, qos)
+ payloadLength -= 2 + len(topic) + 1 // 2 bytes of string length, plus string, plus 1 byte for Qos
+ }
+
+ return nil
+}
+
+// Details returns a Details struct containing the Qos and
+// MessageID of this ControlPacket
+func (s *SubscribePacket) Details() Details {
+ return Details{Qos: 1, MessageID: s.MessageID}
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsuback.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsuback.go
new file mode 100644
index 0000000..bdf5fd3
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsuback.go
@@ -0,0 +1,42 @@
+package packets
+
+import (
+ "fmt"
+ "io"
+)
+
+// UnsubackPacket is an internal representation of the fields of the
+// Unsuback MQTT packet
+type UnsubackPacket struct {
+ FixedHeader
+ MessageID uint16
+}
+
+func (ua *UnsubackPacket) String() string {
+ return fmt.Sprintf("%s MessageID: %d", ua.FixedHeader, ua.MessageID)
+}
+
+func (ua *UnsubackPacket) Write(w io.Writer) error {
+ var err error
+ ua.FixedHeader.RemainingLength = 2
+ packet := ua.FixedHeader.pack()
+ packet.Write(encodeUint16(ua.MessageID))
+ _, err = packet.WriteTo(w)
+
+ return err
+}
+
+// Unpack decodes the details of a ControlPacket after the fixed
+// header has been read
+func (ua *UnsubackPacket) Unpack(b io.Reader) error {
+ var err error
+ ua.MessageID, err = decodeUint16(b)
+
+ return err
+}
+
+// Details returns a Details struct containing the Qos and
+// MessageID of this ControlPacket
+func (ua *UnsubackPacket) Details() Details {
+ return Details{Qos: 0, MessageID: ua.MessageID}
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsubscribe.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsubscribe.go
new file mode 100644
index 0000000..9ccb850
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsubscribe.go
@@ -0,0 +1,56 @@
+package packets
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+)
+
+// UnsubscribePacket is an internal representation of the fields of the
+// Unsubscribe MQTT packet
+type UnsubscribePacket struct {
+ FixedHeader
+ MessageID uint16
+ Topics []string
+}
+
+func (u *UnsubscribePacket) String() string {
+ return fmt.Sprintf("%s MessageID: %d", u.FixedHeader, u.MessageID)
+}
+
+func (u *UnsubscribePacket) Write(w io.Writer) error {
+ var body bytes.Buffer
+ var err error
+ body.Write(encodeUint16(u.MessageID))
+ for _, topic := range u.Topics {
+ body.Write(encodeString(topic))
+ }
+ u.FixedHeader.RemainingLength = body.Len()
+ packet := u.FixedHeader.pack()
+ packet.Write(body.Bytes())
+ _, err = packet.WriteTo(w)
+
+ return err
+}
+
+// Unpack decodes the details of a ControlPacket after the fixed
+// header has been read
+func (u *UnsubscribePacket) Unpack(b io.Reader) error {
+ var err error
+ u.MessageID, err = decodeUint16(b)
+ if err != nil {
+ return err
+ }
+
+ for topic, err := decodeString(b); err == nil && topic != ""; topic, err = decodeString(b) {
+ u.Topics = append(u.Topics, topic)
+ }
+
+ return err
+}
+
+// Details returns a Details struct containing the Qos and
+// MessageID of this ControlPacket
+func (u *UnsubscribePacket) Details() Details {
+ return Details{Qos: 1, MessageID: u.MessageID}
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/ping.go b/vendor/github.com/eclipse/paho.mqtt.golang/ping.go
new file mode 100644
index 0000000..50421f4
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/ping.go
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Seth Hoenig
+ * Allan Stockdill-Mander
+ * Mike Robertson
+ */
+
+package mqtt
+
+import (
+ "errors"
+ "io"
+ "sync/atomic"
+ "time"
+
+ "github.com/eclipse/paho.mqtt.golang/packets"
+)
+
+// keepalive - Send ping when connection unused for set period
+// connection passed in to avoid race condition on shutdown
+func keepalive(c *client, conn io.Writer) {
+ defer c.workers.Done()
+ DEBUG.Println(PNG, "keepalive starting")
+ var checkInterval int64
+ var pingSent time.Time
+
+ if c.options.KeepAlive > 10 {
+ checkInterval = 5
+ } else {
+ checkInterval = c.options.KeepAlive / 2
+ }
+
+ intervalTicker := time.NewTicker(time.Duration(checkInterval * int64(time.Second)))
+ defer intervalTicker.Stop()
+
+ for {
+ select {
+ case <-c.stop:
+ DEBUG.Println(PNG, "keepalive stopped")
+ return
+ case <-intervalTicker.C:
+ lastSent := c.lastSent.Load().(time.Time)
+ lastReceived := c.lastReceived.Load().(time.Time)
+
+ DEBUG.Println(PNG, "ping check", time.Since(lastSent).Seconds())
+ if time.Since(lastSent) >= time.Duration(c.options.KeepAlive*int64(time.Second)) || time.Since(lastReceived) >= time.Duration(c.options.KeepAlive*int64(time.Second)) {
+ if atomic.LoadInt32(&c.pingOutstanding) == 0 {
+ DEBUG.Println(PNG, "keepalive sending ping")
+ ping := packets.NewControlPacket(packets.Pingreq).(*packets.PingreqPacket)
+ // We don't want to wait behind large messages being sent, the Write call
+ // will block until it is able to send the packet.
+ atomic.StoreInt32(&c.pingOutstanding, 1)
+ if err := ping.Write(conn); err != nil {
+ ERROR.Println(PNG, err)
+ }
+ c.lastSent.Store(time.Now())
+ pingSent = time.Now()
+ }
+ }
+ if atomic.LoadInt32(&c.pingOutstanding) > 0 && time.Since(pingSent) >= c.options.PingTimeout {
+ CRITICAL.Println(PNG, "pingresp not received, disconnecting")
+ c.internalConnLost(errors.New("pingresp not received, disconnecting")) // no harm in calling this if the connection is already down (or shutdown is in progress)
+ return
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/router.go b/vendor/github.com/eclipse/paho.mqtt.golang/router.go
new file mode 100644
index 0000000..4737d87
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/router.go
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Seth Hoenig
+ * Allan Stockdill-Mander
+ * Mike Robertson
+ */
+
+package mqtt
+
+import (
+ "container/list"
+ "strings"
+ "sync"
+
+ "github.com/eclipse/paho.mqtt.golang/packets"
+)
+
+// route is a type which associates MQTT Topic strings with a
+// callback to be executed upon the arrival of a message associated
+// with a subscription to that topic.
+type route struct {
+ topic string
+ callback MessageHandler
+}
+
+// match takes a slice of strings which represent the route being tested having been split on '/'
+// separators, and a slice of strings representing the topic string in the published message, similarly
+// split.
+// The function determines if the topic string matches the route according to the MQTT topic rules
+// and returns a boolean of the outcome
+func match(route []string, topic []string) bool {
+ if len(route) == 0 {
+ return len(topic) == 0
+ }
+
+ if len(topic) == 0 {
+ return route[0] == "#"
+ }
+
+ if route[0] == "#" {
+ return true
+ }
+
+ if (route[0] == "+") || (route[0] == topic[0]) {
+ return match(route[1:], topic[1:])
+ }
+ return false
+}
+
+func routeIncludesTopic(route, topic string) bool {
+ return match(routeSplit(route), strings.Split(topic, "/"))
+}
+
+// routeSplit removes the "$share" prefix and share name when splitting the route so that
+// shared subscription routes correctly match the topic
+func routeSplit(route string) []string {
+ var result []string
+ if strings.HasPrefix(route, "$share") {
+ result = strings.Split(route, "/")[2:]
+ } else {
+ result = strings.Split(route, "/")
+ }
+ return result
+}
+
+// match takes the topic string of the published message and does a basic compare to the
+// string of the current Route; if they match it returns true
+func (r *route) match(topic string) bool {
+ return r.topic == topic || routeIncludesTopic(r.topic, topic)
+}
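
Editorial note: the recursion above encodes the MQTT wildcard rules: '+' consumes exactly one level, '#' consumes the rest (including zero levels), and routeSplit strips a $share/<group> prefix first. A minimal sketch of the outcomes, written as a hypothetical same-package example test since routeIncludesTopic is unexported.

package mqtt

import "fmt"

func Example_routeMatching() {
	fmt.Println(routeIncludesTopic("sensors/+/temp", "sensors/kitchen/temp"))               // '+' matches one level
	fmt.Println(routeIncludesTopic("sensors/#", "sensors"))                                 // '#' matches the absence of a level
	fmt.Println(routeIncludesTopic("$share/group1/sensors/+/temp", "sensors/kitchen/temp")) // shared-subscription prefix stripped
	fmt.Println(routeIncludesTopic("sensors/+/temp", "sensors/kitchen/humidity"))
	// Output:
	// true
	// true
	// true
	// false
}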
+
+type router struct {
+ sync.RWMutex
+ routes *list.List
+ defaultHandler MessageHandler
+ messages chan *packets.PublishPacket
+}
+
+// newRouter returns a new instance of a Router
+func newRouter() *router {
+ router := &router{routes: list.New(), messages: make(chan *packets.PublishPacket)}
+ return router
+}
+
+// addRoute takes a topic string and MessageHandler callback. It looks in the current list of
+// routes to see if there is already a matching Route. If there is it replaces the current
+// callback with the new one. If not, it adds a new entry to the list of Routes.
+func (r *router) addRoute(topic string, callback MessageHandler) {
+ r.Lock()
+ defer r.Unlock()
+ for e := r.routes.Front(); e != nil; e = e.Next() {
+ if e.Value.(*route).topic == topic {
+ r := e.Value.(*route)
+ r.callback = callback
+ return
+ }
+ }
+ r.routes.PushBack(&route{topic: topic, callback: callback})
+}
+
+// deleteRoute takes a route string, looks for a matching Route in the list of Routes. If
+// found it removes the Route from the list.
+func (r *router) deleteRoute(topic string) {
+ r.Lock()
+ defer r.Unlock()
+ for e := r.routes.Front(); e != nil; e = e.Next() {
+ if e.Value.(*route).topic == topic {
+ r.routes.Remove(e)
+ return
+ }
+ }
+}
+
+// setDefaultHandler assigns a default callback that will be called if no matching Route
+// is found for an incoming Publish.
+func (r *router) setDefaultHandler(handler MessageHandler) {
+ r.Lock()
+ defer r.Unlock()
+ r.defaultHandler = handler
+}
+
+// matchAndDispatch takes a channel of Message pointers as input and starts a go routine that
+// takes messages off the channel, matches them against the internal route list and calls the
+// associated callback (or the defaultHandler, if one exists and no other route matched). It
+// returns a channel of outbound ACK packets and exits once the incoming messages channel is closed.
+func (r *router) matchAndDispatch(messages <-chan *packets.PublishPacket, order bool, client *client) <-chan *PacketAndToken {
+ var wg sync.WaitGroup
+ ackOutChan := make(chan *PacketAndToken) // Channel returned to caller; closed when messages channel closed
+ var ackInChan chan *PacketAndToken // ACKs generated by ackFunc get put onto this channel
+
+ stopAckCopy := make(chan struct{}) // Closure requests stop of go routine copying ackInChan to ackOutChan
+ ackCopyStopped := make(chan struct{}) // Closure indicates that it is safe to close ackOutChan
+ goRoutinesDone := make(chan struct{}) // closed on wg.Done()
+ if order {
+ ackInChan = ackOutChan // When order = true no go routines are used so safe to use one channel and close when done
+ } else {
+ // When order = false ACK messages are sent in go routines so ackInChan cannot be closed until all goroutines done
+ ackInChan = make(chan *PacketAndToken)
+ go func() { // go routine to copy from ackInChan to ackOutChan until stopped
+ for {
+ select {
+ case a := <-ackInChan:
+ ackOutChan <- a
+ case <-stopAckCopy:
+ close(ackCopyStopped) // Signal main go routine that it is safe to close ackOutChan
+ for {
+ select {
+ case <-ackInChan: // drain ackInChan to ensure all goRoutines can complete cleanly (ACK dropped)
+ DEBUG.Println(ROU, "matchAndDispatch received acknowledgment after processing stopped (ACK dropped).")
+ case <-goRoutinesDone:
+ close(ackInChan) // Nothing further should be sent (a panic is probably better than silent failure)
+ DEBUG.Println(ROU, "matchAndDispatch order=false copy goroutine exiting.")
+ return
+ }
+ }
+ }
+ }
+ }()
+ }
+
+ go func() { // Main go routine handling inbound messages
+ for message := range messages {
+ // DEBUG.Println(ROU, "matchAndDispatch received message")
+ sent := false
+ r.RLock()
+ m := messageFromPublish(message, ackFunc(ackInChan, client.persist, message))
+ var handlers []MessageHandler
+ for e := r.routes.Front(); e != nil; e = e.Next() {
+ if e.Value.(*route).match(message.TopicName) {
+ if order {
+ handlers = append(handlers, e.Value.(*route).callback)
+ } else {
+ hd := e.Value.(*route).callback
+ wg.Add(1)
+ go func() {
+ hd(client, m)
+ m.Ack()
+ wg.Done()
+ }()
+ }
+ sent = true
+ }
+ }
+ if !sent {
+ if r.defaultHandler != nil {
+ if order {
+ handlers = append(handlers, r.defaultHandler)
+ } else {
+ wg.Add(1)
+ go func() {
+ r.defaultHandler(client, m)
+ m.Ack()
+ wg.Done()
+ }()
+ }
+ } else {
+ DEBUG.Println(ROU, "matchAndDispatch received message and no handler was available. Message will NOT be acknowledged.")
+ }
+ }
+ r.RUnlock()
+ for _, handler := range handlers {
+ handler(client, m)
+ m.Ack()
+ }
+ // DEBUG.Println(ROU, "matchAndDispatch handled message")
+ }
+ if order {
+ close(ackOutChan)
+ } else { // Ensure that nothing further will be written to ackOutChan before closing it
+ close(stopAckCopy)
+ <-ackCopyStopped
+ close(ackOutChan)
+ go func() {
+ wg.Wait() // Note: If this remains running then the user has handlers that are not returning
+ close(goRoutinesDone)
+ }()
+ }
+ DEBUG.Println(ROU, "matchAndDispatch exiting")
+ }()
+ return ackOutChan
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/store.go b/vendor/github.com/eclipse/paho.mqtt.golang/store.go
new file mode 100644
index 0000000..24a76b7
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/store.go
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Seth Hoenig
+ * Allan Stockdill-Mander
+ * Mike Robertson
+ */
+
+package mqtt
+
+import (
+ "fmt"
+ "strconv"
+
+ "github.com/eclipse/paho.mqtt.golang/packets"
+)
+
+const (
+ inboundPrefix = "i."
+ outboundPrefix = "o."
+)
+
+// Store is an interface which can be used to provide implementations
+// for message persistence.
+// Because we may have to store distinct messages with the same
+// message ID, we need a unique key for each message. This is
+// possible by prepending "i." or "o." to each message id
+type Store interface {
+ Open()
+ Put(key string, message packets.ControlPacket)
+ Get(key string) packets.ControlPacket
+ All() []string
+ Del(key string)
+ Close()
+ Reset()
+}
+
+// A key MUST have the form "X.[messageid]"
+// where X is 'i' or 'o'
+func mIDFromKey(key string) uint16 {
+ s := key[2:]
+ i, err := strconv.Atoi(s)
+ chkerr(err)
+ return uint16(i)
+}
+
+// Return true if key prefix is outbound
+func isKeyOutbound(key string) bool {
+ return key[:2] == outboundPrefix
+}
+
+// Return true if key prefix is inbound
+func isKeyInbound(key string) bool {
+ return key[:2] == inboundPrefix
+}
+
+// Return a string of the form "i.[id]"
+func inboundKeyFromMID(id uint16) string {
+ return fmt.Sprintf("%s%d", inboundPrefix, id)
+}
+
+// Return a string of the form "o.[id]"
+func outboundKeyFromMID(id uint16) string {
+ return fmt.Sprintf("%s%d", outboundPrefix, id)
+}
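
Editorial note: the helpers above fix the key convention the rest of the store code relies on: inbound messages are stored under "i.<messageid>" and outbound messages under "o.<messageid>", and the ID is recovered by stripping the two-character prefix. A standalone sketch of the same convention using only the standard library (it does not call the unexported helpers).

package main

import (
	"fmt"
	"strconv"
)

func main() {
	id := uint16(42)
	inbound := fmt.Sprintf("i.%d", id)  // key for a message received from the broker
	outbound := fmt.Sprintf("o.%d", id) // key for a message sent to the broker
	fmt.Println(inbound, outbound)      // i.42 o.42

	// Recovering the message ID mirrors mIDFromKey: drop the 2-char prefix and parse.
	n, err := strconv.Atoi(outbound[2:])
	if err != nil {
		panic(err)
	}
	fmt.Println(uint16(n)) // 42
}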
+
+// persistOutbound governs which outgoing messages are persisted
+func persistOutbound(s Store, m packets.ControlPacket) {
+ switch m.Details().Qos {
+ case 0:
+ switch m.(type) {
+ case *packets.PubackPacket, *packets.PubcompPacket:
+ // Sending puback. delete matching publish
+ // from ibound
+ s.Del(inboundKeyFromMID(m.Details().MessageID))
+ }
+ case 1:
+ switch m.(type) {
+ case *packets.PublishPacket, *packets.PubrelPacket, *packets.SubscribePacket, *packets.UnsubscribePacket:
+ // Sending publish. store in obound
+ // until puback received
+ s.Put(outboundKeyFromMID(m.Details().MessageID), m)
+ default:
+ ERROR.Println(STR, "Asked to persist an invalid message type")
+ }
+ case 2:
+ switch m.(type) {
+ case *packets.PublishPacket:
+ // Sending publish. store in obound
+ // until pubrel received
+ s.Put(outboundKeyFromMID(m.Details().MessageID), m)
+ default:
+ ERROR.Println(STR, "Asked to persist an invalid message type")
+ }
+ }
+}
+
+// persistInbound governs which incoming messages are persisted
+func persistInbound(s Store, m packets.ControlPacket) {
+ switch m.Details().Qos {
+ case 0:
+ switch m.(type) {
+ case *packets.PubackPacket, *packets.SubackPacket, *packets.UnsubackPacket, *packets.PubcompPacket:
+ // Received a puback. delete matching publish
+ // from obound
+ s.Del(outboundKeyFromMID(m.Details().MessageID))
+ case *packets.PublishPacket, *packets.PubrecPacket, *packets.PingrespPacket, *packets.ConnackPacket:
+ default:
+ ERROR.Println(STR, "Asked to persist an invalid message type")
+ }
+ case 1:
+ switch m.(type) {
+ case *packets.PublishPacket, *packets.PubrelPacket:
+ // Received a publish. store it in ibound
+ // until puback sent
+ s.Put(inboundKeyFromMID(m.Details().MessageID), m)
+ default:
+ ERROR.Println(STR, "Asked to persist an invalid message type")
+ }
+ case 2:
+ switch m.(type) {
+ case *packets.PublishPacket:
+ // Received a publish. store it in ibound
+ // until pubrel received
+ s.Put(inboundKeyFromMID(m.Details().MessageID), m)
+ default:
+ ERROR.Println(STR, "Asked to persist an invalid message type")
+ }
+ }
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/token.go b/vendor/github.com/eclipse/paho.mqtt.golang/token.go
new file mode 100644
index 0000000..dade215
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/token.go
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2014 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Allan Stockdill-Mander
+ */
+
+package mqtt
+
+import (
+ "sync"
+ "time"
+
+ "github.com/eclipse/paho.mqtt.golang/packets"
+)
+
+// PacketAndToken is a struct that contains both a ControlPacket and a
+// Token. This struct is passed via channels between the client interface
+// code and the underlying code responsible for sending and receiving
+// MQTT messages.
+type PacketAndToken struct {
+ p packets.ControlPacket
+ t tokenCompletor
+}
+
+// Token defines the interface for the tokens used to indicate when
+// actions have completed.
+type Token interface {
+ // Wait will wait indefinitely for the Token to complete, i.e. for the Publish
+ // to be sent and its receipt confirmed by the broker.
+ Wait() bool
+
+ // WaitTimeout takes a time.Duration to wait for the flow associated with the
+ // Token to complete, returns true if it returned before the timeout or
+ // returns false if the timeout occurred. In the case of a timeout the Token
+ // does not have an error set in case the caller wishes to wait again.
+ WaitTimeout(time.Duration) bool
+
+ // Done returns a channel that is closed when the flow associated
+ // with the Token completes. Clients should call Error after the
+ // channel is closed to check if the flow completed successfully.
+ //
+ // Done is provided for use in select statements. Simple use cases may
+ // use Wait or WaitTimeout.
+ Done() <-chan struct{}
+
+ Error() error
+}
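
Editorial note: a short sketch of how a caller might consume a Token, contrasting the channel-based Done form with the bounded WaitTimeout form. The token would typically come from a call such as client.Publish (not shown in this hunk), and the 5-second bound is an arbitrary choice for illustration.

package example

import (
	"log"
	"time"

	mqtt "github.com/eclipse/paho.mqtt.golang"
)

// waitForToken demonstrates the two completion styles offered by the Token interface.
func waitForToken(token mqtt.Token) {
	select {
	case <-token.Done():
		if err := token.Error(); err != nil {
			log.Println("flow failed:", err)
		} else {
			log.Println("flow completed")
		}
	case <-time.After(5 * time.Second):
		// On a timeout the token carries no error; the caller may wait again.
		log.Println("still pending after 5s")
	}

	// Blocking alternative with an upper bound on the wait.
	if token.WaitTimeout(5*time.Second) && token.Error() == nil {
		log.Println("confirmed within the timeout")
	}
}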
+
+type TokenErrorSetter interface {
+ setError(error)
+}
+
+type tokenCompletor interface {
+ Token
+ TokenErrorSetter
+ flowComplete()
+}
+
+type baseToken struct {
+ m sync.RWMutex
+ complete chan struct{}
+ err error
+}
+
+// Wait implements the Token Wait method.
+func (b *baseToken) Wait() bool {
+ <-b.complete
+ return true
+}
+
+// WaitTimeout implements the Token WaitTimeout method.
+func (b *baseToken) WaitTimeout(d time.Duration) bool {
+ timer := time.NewTimer(d)
+ select {
+ case <-b.complete:
+ if !timer.Stop() {
+ <-timer.C
+ }
+ return true
+ case <-timer.C:
+ }
+
+ return false
+}
+
+// Done implements the Token Done method.
+func (b *baseToken) Done() <-chan struct{} {
+ return b.complete
+}
+
+func (b *baseToken) flowComplete() {
+ select {
+ case <-b.complete:
+ default:
+ close(b.complete)
+ }
+}
+
+func (b *baseToken) Error() error {
+ b.m.RLock()
+ defer b.m.RUnlock()
+ return b.err
+}
+
+func (b *baseToken) setError(e error) {
+ b.m.Lock()
+ b.err = e
+ b.flowComplete()
+ b.m.Unlock()
+}
+
+func newToken(tType byte) tokenCompletor {
+ switch tType {
+ case packets.Connect:
+ return &ConnectToken{baseToken: baseToken{complete: make(chan struct{})}}
+ case packets.Subscribe:
+ return &SubscribeToken{baseToken: baseToken{complete: make(chan struct{})}, subResult: make(map[string]byte)}
+ case packets.Publish:
+ return &PublishToken{baseToken: baseToken{complete: make(chan struct{})}}
+ case packets.Unsubscribe:
+ return &UnsubscribeToken{baseToken: baseToken{complete: make(chan struct{})}}
+ case packets.Disconnect:
+ return &DisconnectToken{baseToken: baseToken{complete: make(chan struct{})}}
+ }
+ return nil
+}
+
+// ConnectToken is an extension of Token containing the extra fields
+// required to provide information about calls to Connect()
+type ConnectToken struct {
+ baseToken
+ returnCode byte
+ sessionPresent bool
+}
+
+// ReturnCode returns the acknowledgement code in the connack sent
+// in response to a Connect()
+func (c *ConnectToken) ReturnCode() byte {
+ c.m.RLock()
+ defer c.m.RUnlock()
+ return c.returnCode
+}
+
+// SessionPresent returns a bool representing the value of the
+// session present field in the connack sent in response to a Connect()
+func (c *ConnectToken) SessionPresent() bool {
+ c.m.RLock()
+ defer c.m.RUnlock()
+ return c.sessionPresent
+}
+
+// PublishToken is an extension of Token containing the extra fields
+// required to provide information about calls to Publish()
+type PublishToken struct {
+ baseToken
+ messageID uint16
+}
+
+// MessageID returns the MQTT message ID that was assigned to the
+// Publish packet when it was sent to the broker
+func (p *PublishToken) MessageID() uint16 {
+ return p.messageID
+}
+
+// SubscribeToken is an extension of Token containing the extra fields
+// required to provide information about calls to Subscribe()
+type SubscribeToken struct {
+ baseToken
+ subs []string
+ subResult map[string]byte
+ messageID uint16
+}
+
+// Result returns a map of topics that were subscribed to along with
+// the matching return code from the broker. This is either the Qos
+// value of the subscription or an error code.
+func (s *SubscribeToken) Result() map[string]byte {
+ s.m.RLock()
+ defer s.m.RUnlock()
+ return s.subResult
+}
+
+// UnsubscribeToken is an extension of Token containing the extra fields
+// required to provide information about calls to Unsubscribe()
+type UnsubscribeToken struct {
+ baseToken
+ messageID uint16
+}
+
+// DisconnectToken is an extension of Token containing the extra fields
+// required to provide information about calls to Disconnect()
+type DisconnectToken struct {
+ baseToken
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/topic.go b/vendor/github.com/eclipse/paho.mqtt.golang/topic.go
new file mode 100644
index 0000000..6604e68
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/topic.go
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2014 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Seth Hoenig
+ * Allan Stockdill-Mander
+ * Mike Robertson
+ */
+
+package mqtt
+
+import (
+ "errors"
+ "strings"
+)
+
+// ErrInvalidQos is the error returned when a packet is to be sent
+// with an invalid Qos value
+var ErrInvalidQos = errors.New("invalid QoS")
+
+// ErrInvalidTopicEmptyString is the error returned when a topic string
+// is passed in that is 0 length
+var ErrInvalidTopicEmptyString = errors.New("invalid Topic; empty string")
+
+// ErrInvalidTopicMultilevel is the error returned when a topic string
+// is passed in that has the multi level wildcard in any position but
+// the last
+var ErrInvalidTopicMultilevel = errors.New("invalid Topic; multi-level wildcard must be last level")
+
+// Topic Names and Topic Filters
+// The MQTT v3.1.1 spec clarifies a number of ambiguities with regard
+// to the validity of Topic strings.
+// - A Topic must be between 1 and 65535 bytes.
+// - A Topic is case sensitive.
+// - A Topic may contain whitespace.
+// - A Topic containing a leading forward slash is different from a Topic without one.
+// - A Topic may be "/" (two levels, both empty string).
+// - A Topic must be UTF-8 encoded.
+// - A Topic may contain any number of levels.
+// - A Topic may contain an empty level (two forward slashes in a row).
+// - A TopicName may not contain a wildcard.
+// - A TopicFilter may only have a # (multi-level) wildcard as the last level.
+// - A TopicFilter may contain any number of + (single-level) wildcards.
+// - A TopicFilter with a # will match the absence of a level
+// Example: a subscription to "foo/#" will match messages published to "foo".
+
+func validateSubscribeMap(subs map[string]byte) ([]string, []byte, error) {
+ if len(subs) == 0 {
+ return nil, nil, errors.New("invalid subscription; subscribe map must not be empty")
+ }
+
+ var topics []string
+ var qoss []byte
+ for topic, qos := range subs {
+ if err := validateTopicAndQos(topic, qos); err != nil {
+ return nil, nil, err
+ }
+ topics = append(topics, topic)
+ qoss = append(qoss, qos)
+ }
+
+ return topics, qoss, nil
+}
+
+func validateTopicAndQos(topic string, qos byte) error {
+ if len(topic) == 0 {
+ return ErrInvalidTopicEmptyString
+ }
+
+ levels := strings.Split(topic, "/")
+ for i, level := range levels {
+ if level == "#" && i != len(levels)-1 {
+ return ErrInvalidTopicMultilevel
+ }
+ }
+
+ if qos > 2 {
+ return ErrInvalidQos
+ }
+ return nil
+}
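
Editorial note: a minimal sketch showing how the rules listed above play out for a few filters, written as a hypothetical same-package example test since validateTopicAndQos is unexported; error values print their message text.

package mqtt

import "fmt"

func Example_topicValidation() {
	fmt.Println(validateTopicAndQos("foo/+/bar/#", 1)) // '#' is the last level, '+' may appear anywhere
	fmt.Println(validateTopicAndQos("foo/#/bar", 1))   // '#' not in the last position
	fmt.Println(validateTopicAndQos("", 0))            // empty topic string
	fmt.Println(validateTopicAndQos("foo/bar", 3))     // QoS out of range
	// Output:
	// <nil>
	// invalid Topic; multi-level wildcard must be last level
	// invalid Topic; empty string
	// invalid QoS
}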
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/trace.go b/vendor/github.com/eclipse/paho.mqtt.golang/trace.go
new file mode 100644
index 0000000..904a664
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/trace.go
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Seth Hoenig
+ * Allan Stockdill-Mander
+ * Mike Robertson
+ */
+
+package mqtt
+
+type (
+ // Logger is the interface that allows callers to provide this package with
+ // any logging object that implements the methods defined in it.
+ Logger interface {
+ Println(v ...interface{})
+ Printf(format string, v ...interface{})
+ }
+
+ // NOOPLogger implements a logger that performs no operations. It allows
+ // unwanted log output to be discarded efficiently.
+ NOOPLogger struct{}
+)
+
+func (NOOPLogger) Println(v ...interface{}) {}
+func (NOOPLogger) Printf(format string, v ...interface{}) {}
+
+// Internal levels of library output that are initialised to not print
+// anything but can be overridden by the programmer
+var (
+ ERROR Logger = NOOPLogger{}
+ CRITICAL Logger = NOOPLogger{}
+ WARN Logger = NOOPLogger{}
+ DEBUG Logger = NOOPLogger{}
+)
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/websocket.go b/vendor/github.com/eclipse/paho.mqtt.golang/websocket.go
new file mode 100644
index 0000000..9fa41ce
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/websocket.go
@@ -0,0 +1,119 @@
+package mqtt
+
+import (
+ "crypto/tls"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "sync"
+ "time"
+
+ "github.com/gorilla/websocket"
+)
+
+// WebsocketOptions are config options for a websocket dialer
+type WebsocketOptions struct {
+ ReadBufferSize int
+ WriteBufferSize int
+ Proxy ProxyFunction
+}
+
+type ProxyFunction func(req *http.Request) (*url.URL, error)
+
+// NewWebsocket opens a websocket connection to the given host and returns a net.Conn compatible interface, using the gorilla/websocket package
+func NewWebsocket(host string, tlsc *tls.Config, timeout time.Duration, requestHeader http.Header, options *WebsocketOptions) (net.Conn, error) {
+ if timeout == 0 {
+ timeout = 10 * time.Second
+ }
+
+ if options == nil {
+ // Apply default options
+ options = &WebsocketOptions{}
+ }
+ if options.Proxy == nil {
+ options.Proxy = http.ProxyFromEnvironment
+ }
+ dialer := &websocket.Dialer{
+ Proxy: options.Proxy,
+ HandshakeTimeout: timeout,
+ EnableCompression: false,
+ TLSClientConfig: tlsc,
+ Subprotocols: []string{"mqtt"},
+ ReadBufferSize: options.ReadBufferSize,
+ WriteBufferSize: options.WriteBufferSize,
+ }
+
+ ws, resp, err := dialer.Dial(host, requestHeader)
+
+ if err != nil {
+ if resp != nil {
+ WARN.Println(CLI, fmt.Sprintf("Websocket handshake failure. StatusCode: %d. Body: %s", resp.StatusCode, resp.Body))
+ }
+ return nil, err
+ }
+
+ wrapper := &websocketConnector{
+ Conn: ws,
+ }
+ return wrapper, err
+}
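
Editorial note: a standalone sketch of dialing a broker over websockets with the defaults applied above (nil options fall back to http.ProxyFromEnvironment; a zero timeout becomes 10 seconds). broker.example.com and the /mqtt path are placeholder values.

package main

import (
	"fmt"
	"time"

	mqtt "github.com/eclipse/paho.mqtt.golang"
)

func main() {
	conn, err := mqtt.NewWebsocket("ws://broker.example.com:8080/mqtt", nil, 5*time.Second, nil, nil)
	if err != nil {
		fmt.Println("websocket dial failed:", err)
		return
	}
	defer conn.Close()
	fmt.Println("connected via", conn.RemoteAddr()) // conn satisfies net.Conn
}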
+
+// websocketConnector is a websocket wrapper that satisfies the net.Conn interface, making it a
+// drop-in replacement for the golang.org/x/net/websocket package.
+// Implementation guide taken from https://github.com/gorilla/websocket/issues/282
+type websocketConnector struct {
+ *websocket.Conn
+ r io.Reader
+ rio sync.Mutex
+ wio sync.Mutex
+}
+
+// SetDeadline sets both the read and write deadlines
+func (c *websocketConnector) SetDeadline(t time.Time) error {
+ if err := c.SetReadDeadline(t); err != nil {
+ return err
+ }
+ err := c.SetWriteDeadline(t)
+ return err
+}
+
+// Write writes data to the websocket
+func (c *websocketConnector) Write(p []byte) (int, error) {
+ c.wio.Lock()
+ defer c.wio.Unlock()
+
+ err := c.WriteMessage(websocket.BinaryMessage, p)
+ if err != nil {
+ return 0, err
+ }
+ return len(p), nil
+}
+
+// Read reads the current websocket frame
+func (c *websocketConnector) Read(p []byte) (int, error) {
+ c.rio.Lock()
+ defer c.rio.Unlock()
+ for {
+ if c.r == nil {
+ // Advance to next message.
+ var err error
+ _, c.r, err = c.NextReader()
+ if err != nil {
+ return 0, err
+ }
+ }
+ n, err := c.r.Read(p)
+ if err == io.EOF {
+ // At end of message.
+ c.r = nil
+ if n > 0 {
+ return n, nil
+ }
+ // No data read, continue to next message.
+ continue
+ }
+ return n, err
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS
new file mode 100644
index 0000000..15167cd
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS
new file mode 100644
index 0000000..1c4577e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
new file mode 100644
index 0000000..0f64693
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/LICENSE
@@ -0,0 +1,28 @@
+Copyright 2010 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/golang/protobuf/proto/buffer.go b/vendor/github.com/golang/protobuf/proto/buffer.go
new file mode 100644
index 0000000..e810e6f
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/buffer.go
@@ -0,0 +1,324 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "errors"
+ "fmt"
+
+ "google.golang.org/protobuf/encoding/prototext"
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ WireVarint = 0
+ WireFixed32 = 5
+ WireFixed64 = 1
+ WireBytes = 2
+ WireStartGroup = 3
+ WireEndGroup = 4
+)
+
+// EncodeVarint returns the varint encoded bytes of v.
+func EncodeVarint(v uint64) []byte {
+ return protowire.AppendVarint(nil, v)
+}
+
+// SizeVarint returns the length of the varint encoded bytes of v.
+// This is equal to len(EncodeVarint(v)).
+func SizeVarint(v uint64) int {
+ return protowire.SizeVarint(v)
+}
+
+// DecodeVarint parses a varint encoded integer from b,
+// returning the integer value and the length of the varint.
+// It returns (0, 0) if there is a parse error.
+func DecodeVarint(b []byte) (uint64, int) {
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, 0
+ }
+ return v, n
+}
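
Editorial note: a standalone round trip through the package-level varint helpers shown above; 300 encodes to the two bytes 0xAC 0x02 (seven payload bits per byte, continuation bit on all but the last).

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	b := proto.EncodeVarint(300)
	fmt.Printf("% x\n", b) // ac 02

	v, n := proto.DecodeVarint(b)
	fmt.Println(v, n)                  // 300 2
	fmt.Println(proto.SizeVarint(300)) // 2
}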
+
+// Buffer is a buffer for encoding and decoding the protobuf wire format.
+// It may be reused between invocations to reduce memory usage.
+type Buffer struct {
+ buf []byte
+ idx int
+ deterministic bool
+}
+
+// NewBuffer allocates a new Buffer initialized with buf,
+// where the contents of buf are considered the unread portion of the buffer.
+func NewBuffer(buf []byte) *Buffer {
+ return &Buffer{buf: buf}
+}
+
+// SetDeterministic specifies whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+// - Repeated serialization of a message will return the same bytes.
+// - Different processes of the same binary (which may be executing on
+// different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexicographical order. This is an implementation detail and
+// subject to change.
+func (b *Buffer) SetDeterministic(deterministic bool) {
+ b.deterministic = deterministic
+}
+
+// SetBuf sets buf as the internal buffer,
+// where the contents of buf are considered the unread portion of the buffer.
+func (b *Buffer) SetBuf(buf []byte) {
+ b.buf = buf
+ b.idx = 0
+}
+
+// Reset clears the internal buffer of all written and unread data.
+func (b *Buffer) Reset() {
+ b.buf = b.buf[:0]
+ b.idx = 0
+}
+
+// Bytes returns the internal buffer.
+func (b *Buffer) Bytes() []byte {
+ return b.buf
+}
+
+// Unread returns the unread portion of the buffer.
+func (b *Buffer) Unread() []byte {
+ return b.buf[b.idx:]
+}
+
+// Marshal appends the wire-format encoding of m to the buffer.
+func (b *Buffer) Marshal(m Message) error {
+ var err error
+ b.buf, err = marshalAppend(b.buf, m, b.deterministic)
+ return err
+}
+
+// Unmarshal parses the wire-format message in the buffer and
+// places the decoded results in m.
+// It does not reset m before unmarshaling.
+func (b *Buffer) Unmarshal(m Message) error {
+ err := UnmarshalMerge(b.Unread(), m)
+ b.idx = len(b.buf)
+ return err
+}
+
+type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields }
+
+func (m *unknownFields) String() string { panic("not implemented") }
+func (m *unknownFields) Reset() { panic("not implemented") }
+func (m *unknownFields) ProtoMessage() { panic("not implemented") }
+
+// DebugPrint dumps the encoded bytes of b with a header and footer including s
+// to stdout. This is only intended for debugging.
+func (*Buffer) DebugPrint(s string, b []byte) {
+ m := MessageReflect(new(unknownFields))
+ m.SetUnknown(b)
+ b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface())
+ fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s)
+}
+
+// EncodeVarint appends an unsigned varint encoding to the buffer.
+func (b *Buffer) EncodeVarint(v uint64) error {
+ b.buf = protowire.AppendVarint(b.buf, v)
+ return nil
+}
+
+// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer.
+func (b *Buffer) EncodeZigzag32(v uint64) error {
+ return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+}
+
+// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer.
+func (b *Buffer) EncodeZigzag64(v uint64) error {
+ return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63))))
+}
+
+// EncodeFixed32 appends a 32-bit little-endian integer to the buffer.
+func (b *Buffer) EncodeFixed32(v uint64) error {
+ b.buf = protowire.AppendFixed32(b.buf, uint32(v))
+ return nil
+}
+
+// EncodeFixed64 appends a 64-bit little-endian integer to the buffer.
+func (b *Buffer) EncodeFixed64(v uint64) error {
+ b.buf = protowire.AppendFixed64(b.buf, uint64(v))
+ return nil
+}
+
+// EncodeRawBytes appends length-prefixed raw bytes to the buffer.
+func (b *Buffer) EncodeRawBytes(v []byte) error {
+ b.buf = protowire.AppendBytes(b.buf, v)
+ return nil
+}
+
+// EncodeStringBytes appends the length-prefixed bytes of a string to the buffer.
+// It does not validate whether v contains valid UTF-8.
+func (b *Buffer) EncodeStringBytes(v string) error {
+ b.buf = protowire.AppendString(b.buf, v)
+ return nil
+}
+
+// EncodeMessage appends a length-prefixed encoded message to the buffer.
+func (b *Buffer) EncodeMessage(m Message) error {
+ var err error
+ b.buf = protowire.AppendVarint(b.buf, uint64(Size(m)))
+ b.buf, err = marshalAppend(b.buf, m, b.deterministic)
+ return err
+}
+
+// DecodeVarint consumes an encoded unsigned varint from the buffer.
+func (b *Buffer) DecodeVarint() (uint64, error) {
+ v, n := protowire.ConsumeVarint(b.buf[b.idx:])
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ b.idx += n
+ return uint64(v), nil
+}
+
+// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer.
+func (b *Buffer) DecodeZigzag32() (uint64, error) {
+ v, err := b.DecodeVarint()
+ if err != nil {
+ return 0, err
+ }
+ return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil
+}
+
+// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer.
+func (b *Buffer) DecodeZigzag64() (uint64, error) {
+ v, err := b.DecodeVarint()
+ if err != nil {
+ return 0, err
+ }
+ return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil
+}
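
Editorial note: a standalone sketch of the zig-zag round trip through a Buffer; signed values near zero map to small unsigned varints (-1 becomes 1, 2 becomes 4), so -1 costs one byte instead of ten.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	b := proto.NewBuffer(nil)
	_ = b.EncodeZigzag64(uint64(int64(-1))) // zig-zag value 1
	_ = b.EncodeZigzag64(uint64(int64(2)))  // zig-zag value 4
	fmt.Printf("% x\n", b.Bytes())          // 01 04

	v1, _ := b.DecodeZigzag64()
	v2, _ := b.DecodeZigzag64()
	fmt.Println(int64(v1), int64(v2)) // -1 2
}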
+
+// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer.
+func (b *Buffer) DecodeFixed32() (uint64, error) {
+ v, n := protowire.ConsumeFixed32(b.buf[b.idx:])
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ b.idx += n
+ return uint64(v), nil
+}
+
+// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer.
+func (b *Buffer) DecodeFixed64() (uint64, error) {
+ v, n := protowire.ConsumeFixed64(b.buf[b.idx:])
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ b.idx += n
+ return uint64(v), nil
+}
+
+// DecodeRawBytes consumes length-prefixed raw bytes from the buffer.
+// If alloc is specified, it returns a copy of the raw bytes
+// rather than a sub-slice of the buffer.
+func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) {
+ v, n := protowire.ConsumeBytes(b.buf[b.idx:])
+ if n < 0 {
+ return nil, protowire.ParseError(n)
+ }
+ b.idx += n
+ if alloc {
+ v = append([]byte(nil), v...)
+ }
+ return v, nil
+}
+
+// DecodeStringBytes consumes length-prefixed raw bytes from the buffer.
+// It does not validate whether the raw bytes contain valid UTF-8.
+func (b *Buffer) DecodeStringBytes() (string, error) {
+ v, n := protowire.ConsumeString(b.buf[b.idx:])
+ if n < 0 {
+ return "", protowire.ParseError(n)
+ }
+ b.idx += n
+ return v, nil
+}
+
+// DecodeMessage consumes a length-prefixed message from the buffer.
+// It does not reset m before unmarshaling.
+func (b *Buffer) DecodeMessage(m Message) error {
+ v, err := b.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ return UnmarshalMerge(v, m)
+}
+
+// DecodeGroup consumes a message group from the buffer.
+// It assumes that the start group marker has already been consumed and
+// consumes all bytes until (and including) the end group marker.
+// It does not reset m before unmarshaling.
+func (b *Buffer) DecodeGroup(m Message) error {
+ v, n, err := consumeGroup(b.buf[b.idx:])
+ if err != nil {
+ return err
+ }
+ b.idx += n
+ return UnmarshalMerge(v, m)
+}
+
+// consumeGroup parses b until it finds an end group marker, returning
+// the raw bytes of the message (excluding the end group marker) and
+// the total length of the message (including the end group marker).
+func consumeGroup(b []byte) ([]byte, int, error) {
+ b0 := b
+ depth := 1 // assume this follows a start group marker
+ for {
+ _, wtyp, tagLen := protowire.ConsumeTag(b)
+ if tagLen < 0 {
+ return nil, 0, protowire.ParseError(tagLen)
+ }
+ b = b[tagLen:]
+
+ var valLen int
+ switch wtyp {
+ case protowire.VarintType:
+ _, valLen = protowire.ConsumeVarint(b)
+ case protowire.Fixed32Type:
+ _, valLen = protowire.ConsumeFixed32(b)
+ case protowire.Fixed64Type:
+ _, valLen = protowire.ConsumeFixed64(b)
+ case protowire.BytesType:
+ _, valLen = protowire.ConsumeBytes(b)
+ case protowire.StartGroupType:
+ depth++
+ case protowire.EndGroupType:
+ depth--
+ default:
+ return nil, 0, errors.New("proto: cannot parse reserved wire type")
+ }
+ if valLen < 0 {
+ return nil, 0, protowire.ParseError(valLen)
+ }
+ b = b[valLen:]
+
+ if depth == 0 {
+ return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil
+ }
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/defaults.go b/vendor/github.com/golang/protobuf/proto/defaults.go
new file mode 100644
index 0000000..d399bf0
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/defaults.go
@@ -0,0 +1,63 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// SetDefaults sets unpopulated scalar fields to their default values.
+// Fields within a oneof are not set even if they have a default value.
+// SetDefaults is recursively called upon any populated message fields.
+func SetDefaults(m Message) {
+ if m != nil {
+ setDefaults(MessageReflect(m))
+ }
+}
+
+func setDefaults(m protoreflect.Message) {
+ fds := m.Descriptor().Fields()
+ for i := 0; i < fds.Len(); i++ {
+ fd := fds.Get(i)
+ if !m.Has(fd) {
+ if fd.HasDefault() && fd.ContainingOneof() == nil {
+ v := fd.Default()
+ if fd.Kind() == protoreflect.BytesKind {
+ v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes
+ }
+ m.Set(fd, v)
+ }
+ continue
+ }
+ }
+
+ m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ switch {
+ // Handle singular message.
+ case fd.Cardinality() != protoreflect.Repeated:
+ if fd.Message() != nil {
+ setDefaults(m.Get(fd).Message())
+ }
+ // Handle list of messages.
+ case fd.IsList():
+ if fd.Message() != nil {
+ ls := m.Get(fd).List()
+ for i := 0; i < ls.Len(); i++ {
+ setDefaults(ls.Get(i).Message())
+ }
+ }
+ // Handle map of messages.
+ case fd.IsMap():
+ if fd.MapValue().Message() != nil {
+ ms := m.Get(fd).Map()
+ ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
+ setDefaults(v.Message())
+ return true
+ })
+ }
+ }
+ return true
+ })
+}
diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go
new file mode 100644
index 0000000..e8db57e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/deprecated.go
@@ -0,0 +1,113 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strconv"
+
+ protoV2 "google.golang.org/protobuf/proto"
+)
+
+var (
+ // Deprecated: No longer returned.
+ ErrNil = errors.New("proto: Marshal called with nil")
+
+ // Deprecated: No longer returned.
+ ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+
+ // Deprecated: No longer returned.
+ ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+)
+
+// Deprecated: Do not use.
+type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
+
+// Deprecated: Do not use.
+func GetStats() Stats { return Stats{} }
+
+// Deprecated: Do not use.
+func MarshalMessageSet(interface{}) ([]byte, error) {
+ return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: Do not use.
+func UnmarshalMessageSet([]byte, interface{}) error {
+ return errors.New("proto: not implemented")
+}
+
+// Deprecated: Do not use.
+func MarshalMessageSetJSON(interface{}) ([]byte, error) {
+ return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: Do not use.
+func UnmarshalMessageSetJSON([]byte, interface{}) error {
+ return errors.New("proto: not implemented")
+}
+
+// Deprecated: Do not use.
+func RegisterMessageSetType(Message, int32, string) {}
+
+// Deprecated: Do not use.
+func EnumName(m map[int32]string, v int32) string {
+ s, ok := m[v]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(v))
+}
+
+// Deprecated: Do not use.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+ if data[0] == '"' {
+ // New style: enums are strings.
+ var repr string
+ if err := json.Unmarshal(data, &repr); err != nil {
+ return -1, err
+ }
+ val, ok := m[repr]
+ if !ok {
+ return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+ }
+ return val, nil
+ }
+ // Old style: enums are ints.
+ var val int32
+ if err := json.Unmarshal(data, &val); err != nil {
+ return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+ }
+ return val, nil
+}
+
+// Deprecated: Do not use; this type existed for internal use only.
+type InternalMessageInfo struct{}
+
+// Deprecated: Do not use; this method existed for internal use only.
+func (*InternalMessageInfo) DiscardUnknown(m Message) {
+ DiscardUnknown(m)
+}
+
+// Deprecated: Do not use; this method existed for internal use only.
+func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) {
+ return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m))
+}
+
+// Deprecated: Do not use; this method existed for internal use only.
+func (*InternalMessageInfo) Merge(dst, src Message) {
+ protoV2.Merge(MessageV2(dst), MessageV2(src))
+}
+
+// Deprecated: Do not use; this method existed for internal use only.
+func (*InternalMessageInfo) Size(m Message) int {
+ return protoV2.Size(MessageV2(m))
+}
+
+// Deprecated: Do not use; this method existed for internal use only.
+func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error {
+ return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m))
+}
diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go
new file mode 100644
index 0000000..2187e87
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/discard.go
@@ -0,0 +1,58 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// DiscardUnknown recursively discards all unknown fields from this message
+// and all embedded messages.
+//
+// When unmarshaling a message with unrecognized fields, the tags and values
+// of such fields are preserved in the Message. This allows a later call to
+// marshal to be able to produce a message that continues to have those
+// unrecognized fields. To avoid this, DiscardUnknown is used to
+// explicitly clear the unknown fields after unmarshaling.
+func DiscardUnknown(m Message) {
+ if m != nil {
+ discardUnknown(MessageReflect(m))
+ }
+}
+
+func discardUnknown(m protoreflect.Message) {
+ m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool {
+ switch {
+ // Handle singular message.
+ case fd.Cardinality() != protoreflect.Repeated:
+ if fd.Message() != nil {
+ discardUnknown(m.Get(fd).Message())
+ }
+ // Handle list of messages.
+ case fd.IsList():
+ if fd.Message() != nil {
+ ls := m.Get(fd).List()
+ for i := 0; i < ls.Len(); i++ {
+ discardUnknown(ls.Get(i).Message())
+ }
+ }
+ // Handle map of messages.
+ case fd.IsMap():
+ if fd.MapValue().Message() != nil {
+ ms := m.Get(fd).Map()
+ ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
+ discardUnknown(v.Message())
+ return true
+ })
+ }
+ }
+ return true
+ })
+
+ // Discard unknown fields.
+ if len(m.GetUnknown()) > 0 {
+ m.SetUnknown(nil)
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
new file mode 100644
index 0000000..42fc120
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -0,0 +1,356 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/runtime/protoiface"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+type (
+ // ExtensionDesc represents an extension descriptor and
+ // is used to interact with an extension field in a message.
+ //
+ // Variables of this type are generated in code by protoc-gen-go.
+ ExtensionDesc = protoimpl.ExtensionInfo
+
+ // ExtensionRange represents a range of message extensions.
+ // Used in code generated by protoc-gen-go.
+ ExtensionRange = protoiface.ExtensionRangeV1
+
+ // Deprecated: Do not use; this is an internal type.
+ Extension = protoimpl.ExtensionFieldV1
+
+ // Deprecated: Do not use; this is an internal type.
+ XXX_InternalExtensions = protoimpl.ExtensionFields
+)
+
+// ErrMissingExtension is the error returned by GetExtension when the extension is not present.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+var errNotExtendable = errors.New("proto: not an extendable proto.Message")
+
+// HasExtension reports whether the extension field is present in m
+// either as an explicitly populated field or as an unknown field.
+func HasExtension(m Message, xt *ExtensionDesc) (has bool) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return false
+ }
+
+ // Check whether any populated known field matches the field number.
+ xtd := xt.TypeDescriptor()
+ if isValidExtension(mr.Descriptor(), xtd) {
+ has = mr.Has(xtd)
+ } else {
+ mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+ has = int32(fd.Number()) == xt.Field
+ return !has
+ })
+ }
+
+ // Check whether any unknown field matches the field number.
+ for b := mr.GetUnknown(); !has && len(b) > 0; {
+ num, _, n := protowire.ConsumeField(b)
+ has = int32(num) == xt.Field
+ b = b[n:]
+ }
+ return has
+}
+
+// ClearExtension removes the extension field from m
+// either as an explicitly populated field or as an unknown field.
+func ClearExtension(m Message, xt *ExtensionDesc) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return
+ }
+
+ xtd := xt.TypeDescriptor()
+ if isValidExtension(mr.Descriptor(), xtd) {
+ mr.Clear(xtd)
+ } else {
+ mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+ if int32(fd.Number()) == xt.Field {
+ mr.Clear(fd)
+ return false
+ }
+ return true
+ })
+ }
+ clearUnknown(mr, fieldNum(xt.Field))
+}
+
+// ClearAllExtensions clears all extensions from m.
+// This includes populated fields and unknown fields in the extension range.
+func ClearAllExtensions(m Message) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return
+ }
+
+ mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+ if fd.IsExtension() {
+ mr.Clear(fd)
+ }
+ return true
+ })
+ clearUnknown(mr, mr.Descriptor().ExtensionRanges())
+}
+
+// GetExtension retrieves a proto2 extended field from m.
+//
+// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
+// then GetExtension parses the encoded field and returns a Go value of the specified type.
+// If the field is not present, then the default value is returned (if one is specified),
+// otherwise ErrMissingExtension is reported.
+//
+// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil),
+// then GetExtension returns the raw encoded bytes for the extension field.
+func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
+ return nil, errNotExtendable
+ }
+
+ // Retrieve the unknown fields for this extension field.
+ var bo protoreflect.RawFields
+ for bi := mr.GetUnknown(); len(bi) > 0; {
+ num, _, n := protowire.ConsumeField(bi)
+ if int32(num) == xt.Field {
+ bo = append(bo, bi[:n]...)
+ }
+ bi = bi[n:]
+ }
+
+ // For type incomplete descriptors, only retrieve the unknown fields.
+ if xt.ExtensionType == nil {
+ return []byte(bo), nil
+ }
+
+ // If the extension field only exists as unknown fields, unmarshal it.
+ // This is rarely done since proto.Unmarshal eagerly unmarshals extensions.
+ xtd := xt.TypeDescriptor()
+ if !isValidExtension(mr.Descriptor(), xtd) {
+ return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
+ }
+ if !mr.Has(xtd) && len(bo) > 0 {
+ m2 := mr.New()
+ if err := (proto.UnmarshalOptions{
+ Resolver: extensionResolver{xt},
+ }.Unmarshal(bo, m2.Interface())); err != nil {
+ return nil, err
+ }
+ if m2.Has(xtd) {
+ mr.Set(xtd, m2.Get(xtd))
+ clearUnknown(mr, fieldNum(xt.Field))
+ }
+ }
+
+ // Check whether the message has the extension field set or a default.
+ var pv protoreflect.Value
+ switch {
+ case mr.Has(xtd):
+ pv = mr.Get(xtd)
+ case xtd.HasDefault():
+ pv = xtd.Default()
+ default:
+ return nil, ErrMissingExtension
+ }
+
+ v := xt.InterfaceOf(pv)
+ rv := reflect.ValueOf(v)
+ if isScalarKind(rv.Kind()) {
+ rv2 := reflect.New(rv.Type())
+ rv2.Elem().Set(rv)
+ v = rv2.Interface()
+ }
+ return v, nil
+}
+
+// extensionResolver is a custom extension resolver that stores a single
+// extension type that takes precedence over the global registry.
+type extensionResolver struct{ xt protoreflect.ExtensionType }
+
+func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
+ if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field {
+ return r.xt, nil
+ }
+ return protoregistry.GlobalTypes.FindExtensionByName(field)
+}
+
+func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
+ if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field {
+ return r.xt, nil
+ }
+ return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
+}
+
+// GetExtensions returns a list of the extension values present in m,
+// corresponding with the provided list of extension descriptors, xts.
+// If an extension is missing in m, the corresponding value is nil.
+func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return nil, errNotExtendable
+ }
+
+ vs := make([]interface{}, len(xts))
+ for i, xt := range xts {
+ v, err := GetExtension(m, xt)
+ if err != nil {
+ if err == ErrMissingExtension {
+ continue
+ }
+ return vs, err
+ }
+ vs[i] = v
+ }
+ return vs, nil
+}
+
+// SetExtension sets an extension field in m to the provided value.
+func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
+ return errNotExtendable
+ }
+
+ rv := reflect.ValueOf(v)
+ if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) {
+ return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType)
+ }
+ if rv.Kind() == reflect.Ptr {
+ if rv.IsNil() {
+ return fmt.Errorf("proto: SetExtension called with nil value of type %T", v)
+ }
+ if isScalarKind(rv.Elem().Kind()) {
+ v = rv.Elem().Interface()
+ }
+ }
+
+ xtd := xt.TypeDescriptor()
+ if !isValidExtension(mr.Descriptor(), xtd) {
+ return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
+ }
+ mr.Set(xtd, xt.ValueOf(v))
+ clearUnknown(mr, fieldNum(xt.Field))
+ return nil
+}
+
+// SetRawExtension inserts b into the unknown fields of m.
+//
+// Deprecated: Use Message.ProtoReflect.SetUnknown instead.
+func SetRawExtension(m Message, fnum int32, b []byte) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return
+ }
+
+ // Verify that the raw field is valid.
+ for b0 := b; len(b0) > 0; {
+ num, _, n := protowire.ConsumeField(b0)
+ if int32(num) != fnum {
+ panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum))
+ }
+ b0 = b0[n:]
+ }
+
+ ClearExtension(m, &ExtensionDesc{Field: fnum})
+ mr.SetUnknown(append(mr.GetUnknown(), b...))
+}
+
+// ExtensionDescs returns a list of extension descriptors found in m,
+// containing descriptors for both populated extension fields in m and
+// also unknown fields of m that are in the extension range.
+// For the latter case, a type-incomplete descriptor is provided where only
+// the ExtensionDesc.Field field is populated.
+// The order of the extension descriptors is undefined.
+func ExtensionDescs(m Message) ([]*ExtensionDesc, error) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
+ return nil, errNotExtendable
+ }
+
+ // Collect a set of known extension descriptors.
+ extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc)
+ mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ if fd.IsExtension() {
+ xt := fd.(protoreflect.ExtensionTypeDescriptor)
+ if xd, ok := xt.Type().(*ExtensionDesc); ok {
+ extDescs[fd.Number()] = xd
+ }
+ }
+ return true
+ })
+
+ // Collect a set of unknown extension descriptors.
+ extRanges := mr.Descriptor().ExtensionRanges()
+ for b := mr.GetUnknown(); len(b) > 0; {
+ num, _, n := protowire.ConsumeField(b)
+ if extRanges.Has(num) && extDescs[num] == nil {
+ extDescs[num] = nil
+ }
+ b = b[n:]
+ }
+
+ // Transpose the set of descriptors into a list.
+ var xts []*ExtensionDesc
+ for num, xt := range extDescs {
+ if xt == nil {
+ xt = &ExtensionDesc{Field: int32(num)}
+ }
+ xts = append(xts, xt)
+ }
+ return xts, nil
+}
+
+// isValidExtension reports whether xtd is a valid extension descriptor for md.
+func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool {
+ return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number())
+}
+
+// isScalarKind reports whether k is a protobuf scalar kind (except bytes).
+// This function exists for historical reasons since the representation of
+// scalars differs between v1 and v2, where v1 uses *T and v2 uses T.
+func isScalarKind(k reflect.Kind) bool {
+ switch k {
+ case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+ return true
+ default:
+ return false
+ }
+}
+
+// clearUnknown removes unknown fields from m where remover.Has reports true.
+func clearUnknown(m protoreflect.Message, remover interface {
+ Has(protoreflect.FieldNumber) bool
+}) {
+ var bo protoreflect.RawFields
+ for bi := m.GetUnknown(); len(bi) > 0; {
+ num, _, n := protowire.ConsumeField(bi)
+ if !remover.Has(num) {
+ bo = append(bo, bi[:n]...)
+ }
+ bi = bi[n:]
+ }
+ if bi := m.GetUnknown(); len(bi) != len(bo) {
+ m.SetUnknown(bo)
+ }
+}
+
+type fieldNum protoreflect.FieldNumber
+
+func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool {
+ return protoreflect.FieldNumber(n1) == n2
+}
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 0000000..dcdc220
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,306 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+// StructProperties represents protocol buffer type information for a
+// generated protobuf message in the open-struct API.
+//
+// Deprecated: Do not use.
+type StructProperties struct {
+ // Prop are the properties for each field.
+ //
+ // Fields belonging to a oneof are stored in OneofTypes instead, with a
+ // single Properties representing the parent oneof held here.
+ //
+ // The order of Prop matches the order of fields in the Go struct.
+ // Struct fields that are not related to protobufs have a "XXX_" prefix
+ // in the Properties.Name and must be ignored by the user.
+ Prop []*Properties
+
+ // OneofTypes contains information about the oneof fields in this message.
+ // It is keyed by the protobuf field name.
+ OneofTypes map[string]*OneofProperties
+}
+
+// Properties represents the type information for a protobuf message field.
+//
+// Deprecated: Do not use.
+type Properties struct {
+ // Name is a placeholder name with little meaningful semantic value.
+ // If the name has an "XXX_" prefix, the entire Properties must be ignored.
+ Name string
+ // OrigName is the protobuf field name or oneof name.
+ OrigName string
+ // JSONName is the JSON name for the protobuf field.
+ JSONName string
+ // Enum is a placeholder name for enums.
+ // For historical reasons, this is neither the Go name for the enum,
+ // nor the protobuf name for the enum.
+ Enum string // Deprecated: Do not use.
+ // Weak contains the full name of the weakly referenced message.
+ Weak string
+ // Wire is a string representation of the wire type.
+ Wire string
+ // WireType is the protobuf wire type for the field.
+ WireType int
+ // Tag is the protobuf field number.
+ Tag int
+ // Required reports whether this is a required field.
+ Required bool
+	// Optional reports whether this is an optional field.
+ Optional bool
+ // Repeated reports whether this is a repeated field.
+ Repeated bool
+ // Packed reports whether this is a packed repeated field of scalars.
+ Packed bool
+ // Proto3 reports whether this field operates under the proto3 syntax.
+ Proto3 bool
+ // Oneof reports whether this field belongs within a oneof.
+ Oneof bool
+
+ // Default is the default value in string form.
+ Default string
+ // HasDefault reports whether the field has a default value.
+ HasDefault bool
+
+ // MapKeyProp is the properties for the key field for a map field.
+ MapKeyProp *Properties
+ // MapValProp is the properties for the value field for a map field.
+ MapValProp *Properties
+}
+
+// OneofProperties represents the type information for a protobuf oneof.
+//
+// Deprecated: Do not use.
+type OneofProperties struct {
+ // Type is a pointer to the generated wrapper type for the field value.
+ // This is nil for messages that are not in the open-struct API.
+ Type reflect.Type
+ // Field is the index into StructProperties.Prop for the containing oneof.
+ Field int
+ // Prop is the properties for the field.
+ Prop *Properties
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+ s := p.Wire
+ s += "," + strconv.Itoa(p.Tag)
+ if p.Required {
+ s += ",req"
+ }
+ if p.Optional {
+ s += ",opt"
+ }
+ if p.Repeated {
+ s += ",rep"
+ }
+ if p.Packed {
+ s += ",packed"
+ }
+ s += ",name=" + p.OrigName
+ if p.JSONName != "" {
+ s += ",json=" + p.JSONName
+ }
+ if len(p.Enum) > 0 {
+ s += ",enum=" + p.Enum
+ }
+ if len(p.Weak) > 0 {
+ s += ",weak=" + p.Weak
+ }
+ if p.Proto3 {
+ s += ",proto3"
+ }
+ if p.Oneof {
+ s += ",oneof"
+ }
+ if p.HasDefault {
+ s += ",def=" + p.Default
+ }
+ return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(tag string) {
+ // For example: "bytes,49,opt,name=foo,def=hello!"
+ for len(tag) > 0 {
+ i := strings.IndexByte(tag, ',')
+ if i < 0 {
+ i = len(tag)
+ }
+ switch s := tag[:i]; {
+ case strings.HasPrefix(s, "name="):
+ p.OrigName = s[len("name="):]
+ case strings.HasPrefix(s, "json="):
+ p.JSONName = s[len("json="):]
+ case strings.HasPrefix(s, "enum="):
+ p.Enum = s[len("enum="):]
+ case strings.HasPrefix(s, "weak="):
+ p.Weak = s[len("weak="):]
+ case strings.Trim(s, "0123456789") == "":
+ n, _ := strconv.ParseUint(s, 10, 32)
+ p.Tag = int(n)
+ case s == "opt":
+ p.Optional = true
+ case s == "req":
+ p.Required = true
+ case s == "rep":
+ p.Repeated = true
+ case s == "varint" || s == "zigzag32" || s == "zigzag64":
+ p.Wire = s
+ p.WireType = WireVarint
+ case s == "fixed32":
+ p.Wire = s
+ p.WireType = WireFixed32
+ case s == "fixed64":
+ p.Wire = s
+ p.WireType = WireFixed64
+ case s == "bytes":
+ p.Wire = s
+ p.WireType = WireBytes
+ case s == "group":
+ p.Wire = s
+ p.WireType = WireStartGroup
+ case s == "packed":
+ p.Packed = true
+ case s == "proto3":
+ p.Proto3 = true
+ case s == "oneof":
+ p.Oneof = true
+ case strings.HasPrefix(s, "def="):
+ // The default tag is special in that everything afterwards is the
+ // default regardless of the presence of commas.
+ p.HasDefault = true
+ p.Default, i = tag[len("def="):], len(tag)
+ }
+ tag = strings.TrimPrefix(tag[i:], ",")
+ }
+}
+
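+// Illustrative use of Parse (a sketch), using the tag format shown above:
+//
+//	var p Properties
+//	p.Parse("bytes,3,opt,name=foo,json=foo,proto3")
+//	// p.Wire == "bytes", p.Tag == 3, p.Optional == true,
+//	// p.OrigName == "foo", p.JSONName == "foo", p.Proto3 == true
+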
+// Init populates the properties from a protocol buffer struct tag.
+//
+// Deprecated: Do not use.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+ p.Name = name
+ p.OrigName = name
+ if tag == "" {
+ return
+ }
+ p.Parse(tag)
+
+ if typ != nil && typ.Kind() == reflect.Map {
+ p.MapKeyProp = new(Properties)
+ p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil)
+ p.MapValProp = new(Properties)
+ p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil)
+ }
+}
+
+var propertiesCache sync.Map // map[reflect.Type]*StructProperties
+
+// GetProperties returns the list of properties for the type represented by t,
+// which must be a generated protocol buffer message in the open-struct API,
+// where protobuf message fields are represented by exported Go struct fields.
+//
+// Deprecated: Use protobuf reflection instead.
+func GetProperties(t reflect.Type) *StructProperties {
+ if p, ok := propertiesCache.Load(t); ok {
+ return p.(*StructProperties)
+ }
+ p, _ := propertiesCache.LoadOrStore(t, newProperties(t))
+ return p.(*StructProperties)
+}
+
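+// Illustrative use of GetProperties (a sketch; pb.MyMessage is a hypothetical
+// generated message in the open-struct API):
+//
+//	sp := proto.GetProperties(reflect.TypeOf(pb.MyMessage{}))
+//	for _, p := range sp.Prop {
+//		if !strings.HasPrefix(p.Name, "XXX_") {
+//			fmt.Println(p.Name, p.String())
+//		}
+//	}
+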
+func newProperties(t reflect.Type) *StructProperties {
+ if t.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
+ }
+
+ var hasOneof bool
+ prop := new(StructProperties)
+
+ // Construct a list of properties for each field in the struct.
+ for i := 0; i < t.NumField(); i++ {
+ p := new(Properties)
+ f := t.Field(i)
+ tagField := f.Tag.Get("protobuf")
+ p.Init(f.Type, f.Name, tagField, &f)
+
+ tagOneof := f.Tag.Get("protobuf_oneof")
+ if tagOneof != "" {
+ hasOneof = true
+ p.OrigName = tagOneof
+ }
+
+ // Rename unrelated struct fields with the "XXX_" prefix since so much
+ // user code simply checks for this to exclude special fields.
+ if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") {
+ p.Name = "XXX_" + p.Name
+ p.OrigName = "XXX_" + p.OrigName
+ } else if p.Weak != "" {
+ p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field
+ }
+
+ prop.Prop = append(prop.Prop, p)
+ }
+
+ // Construct a mapping of oneof field names to properties.
+ if hasOneof {
+ var oneofWrappers []interface{}
+ if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok {
+ oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{})
+ }
+ if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok {
+ oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{})
+ }
+ if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok {
+ if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok {
+ oneofWrappers = m.ProtoMessageInfo().OneofWrappers
+ }
+ }
+
+ prop.OneofTypes = make(map[string]*OneofProperties)
+ for _, wrapper := range oneofWrappers {
+ p := &OneofProperties{
+ Type: reflect.ValueOf(wrapper).Type(), // *T
+ Prop: new(Properties),
+ }
+ f := p.Type.Elem().Field(0)
+ p.Prop.Name = f.Name
+ p.Prop.Parse(f.Tag.Get("protobuf"))
+
+ // Determine the struct field that contains this oneof.
+ // Each wrapper is assignable to exactly one parent field.
+ var foundOneof bool
+ for i := 0; i < t.NumField() && !foundOneof; i++ {
+ if p.Type.AssignableTo(t.Field(i).Type) {
+ p.Field = i
+ foundOneof = true
+ }
+ }
+ if !foundOneof {
+ panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
+ }
+ prop.OneofTypes[p.Prop.OrigName] = p
+ }
+ }
+
+ return prop
+}
+
+func (sp *StructProperties) Len() int { return len(sp.Prop) }
+func (sp *StructProperties) Less(i, j int) bool { return false }
+func (sp *StructProperties) Swap(i, j int) { return }
diff --git a/vendor/github.com/golang/protobuf/proto/proto.go b/vendor/github.com/golang/protobuf/proto/proto.go
new file mode 100644
index 0000000..5aee89c
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/proto.go
@@ -0,0 +1,167 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package proto provides functionality for handling protocol buffer messages.
+// In particular, it provides marshaling and unmarshaling between a protobuf
+// message and the binary wire format.
+//
+// See https://developers.google.com/protocol-buffers/docs/gotutorial for
+// more information.
+//
+// Deprecated: Use the "google.golang.org/protobuf/proto" package instead.
+package proto
+
+import (
+ protoV2 "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ ProtoPackageIsVersion1 = true
+ ProtoPackageIsVersion2 = true
+ ProtoPackageIsVersion3 = true
+ ProtoPackageIsVersion4 = true
+)
+
+// GeneratedEnum is any enum type generated by protoc-gen-go
+// which is a named int32 kind.
+// This type exists for documentation purposes.
+type GeneratedEnum interface{}
+
+// GeneratedMessage is any message type generated by protoc-gen-go
+// which is a pointer to a named struct kind.
+// This type exists for documentation purposes.
+type GeneratedMessage interface{}
+
+// Message is a protocol buffer message.
+//
+// This is the v1 version of the message interface and is only marginally
+// better than an empty interface, as it lacks any methods to programmatically
+// interact with the contents of the message.
+//
+// A v2 message is declared in "google.golang.org/protobuf/proto".Message and
+// exposes protobuf reflection as a first-class feature of the interface.
+//
+// To convert a v1 message to a v2 message, use the MessageV2 function.
+// To convert a v2 message to a v1 message, use the MessageV1 function.
+type Message = protoiface.MessageV1
+
+// MessageV1 converts either a v1 or v2 message to a v1 message.
+// It returns nil if m is nil.
+func MessageV1(m GeneratedMessage) protoiface.MessageV1 {
+ return protoimpl.X.ProtoMessageV1Of(m)
+}
+
+// MessageV2 converts either a v1 or v2 message to a v2 message.
+// It returns nil if m is nil.
+func MessageV2(m GeneratedMessage) protoV2.Message {
+ return protoimpl.X.ProtoMessageV2Of(m)
+}
+
+// MessageReflect returns a reflective view for a message.
+// It returns nil if m is nil.
+func MessageReflect(m Message) protoreflect.Message {
+ return protoimpl.X.MessageOf(m)
+}
+
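+// Illustrative conversions between the v1 and v2 APIs (a sketch; *pb.MyMessage
+// is a hypothetical generated message type):
+//
+//	m1 := &pb.MyMessage{}                     // v1 message
+//	m2 := proto.MessageV2(m1)                 // view it as a v2 message
+//	m1 = proto.MessageV1(m2).(*pb.MyMessage)  // and back again
+//	mr := proto.MessageReflect(m1)            // reflective view
+//	_ = mr
+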
+// Marshaler is implemented by messages that can marshal themselves.
+// This interface is used by the following functions: Size, Marshal,
+// Buffer.Marshal, and Buffer.EncodeMessage.
+//
+// Deprecated: Do not implement.
+type Marshaler interface {
+ // Marshal formats the encoded bytes of the message.
+ // It should be deterministic and emit valid protobuf wire data.
+ // The caller takes ownership of the returned buffer.
+ Marshal() ([]byte, error)
+}
+
+// Unmarshaler is implemented by messages that can unmarshal themselves.
+// This interface is used by the following functions: Unmarshal, UnmarshalMerge,
+// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup.
+//
+// Deprecated: Do not implement.
+type Unmarshaler interface {
+ // Unmarshal parses the encoded bytes of the protobuf wire input.
+	// The provided buffer is only valid for the duration of the method call.
+ // It should not reset the receiver message.
+ Unmarshal([]byte) error
+}
+
+// Merger is implemented by messages that can merge themselves.
+// This interface is used by the following functions: Clone and Merge.
+//
+// Deprecated: Do not implement.
+type Merger interface {
+ // Merge merges the contents of src into the receiver message.
+ // It clones all data structures in src such that it aliases no mutable
+ // memory referenced by src.
+ Merge(src Message)
+}
+
+// RequiredNotSetError is an error type returned when
+// marshaling or unmarshaling a message with missing required fields.
+type RequiredNotSetError struct {
+ err error
+}
+
+func (e *RequiredNotSetError) Error() string {
+ if e.err != nil {
+ return e.err.Error()
+ }
+ return "proto: required field not set"
+}
+func (e *RequiredNotSetError) RequiredNotSet() bool {
+ return true
+}
+
+func checkRequiredNotSet(m protoV2.Message) error {
+ if err := protoV2.CheckInitialized(m); err != nil {
+ return &RequiredNotSetError{err: err}
+ }
+ return nil
+}
+
+// Clone returns a deep copy of src.
+func Clone(src Message) Message {
+ return MessageV1(protoV2.Clone(MessageV2(src)))
+}
+
+// Merge merges src into dst, which must be messages of the same type.
+//
+// Populated scalar fields in src are copied to dst, while populated
+// singular messages in src are merged into dst by recursively calling Merge.
+// The elements of every list field in src are appended to the corresponding
+// list fields in dst. The entries of every map field in src are copied into
+// the corresponding map field in dst, possibly replacing existing entries.
+// The unknown fields of src are appended to the unknown fields of dst.
+func Merge(dst, src Message) {
+ protoV2.Merge(MessageV2(dst), MessageV2(src))
+}
+
+// Equal reports whether two messages are equal.
+// If two messages marshal to the same bytes under deterministic serialization,
+// then Equal is guaranteed to report true.
+//
+// Two messages are equal if they are the same protobuf message type,
+// have the same set of populated known and extension field values,
+// and the same set of unknown fields values.
+//
+// Scalar values are compared with the equivalent of the == operator in Go,
+// except bytes values which are compared using bytes.Equal and
+// floating point values which specially treat NaNs as equal.
+// Message values are compared by recursively calling Equal.
+// Lists are equal if each element value is also equal.
+// Maps are equal if they have the same set of keys, where the pair of values
+// for each key is also equal.
+func Equal(x, y Message) bool {
+ return protoV2.Equal(MessageV2(x), MessageV2(y))
+}
+
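+// Illustrative use of Clone, Merge, and Equal (a sketch; src and patch are
+// values of a hypothetical generated type *pb.MyMessage):
+//
+//	dst := proto.Clone(src).(*pb.MyMessage) // deep copy of src
+//	proto.Merge(dst, patch)                 // merge patch into the copy
+//	if proto.Equal(dst, src) {
+//		// patch did not change anything
+//	}
+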
+func isMessageSet(md protoreflect.MessageDescriptor) bool {
+ ms, ok := md.(interface{ IsMessageSet() bool })
+ return ok && ms.IsMessageSet()
+}
diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go
new file mode 100644
index 0000000..066b432
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/registry.go
@@ -0,0 +1,317 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io/ioutil"
+ "reflect"
+ "strings"
+ "sync"
+
+ "google.golang.org/protobuf/reflect/protodesc"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+// filePath is the path to the proto source file.
+type filePath = string // e.g., "google/protobuf/descriptor.proto"
+
+// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto.
+type fileDescGZIP = []byte
+
+var fileCache sync.Map // map[filePath]fileDescGZIP
+
+// RegisterFile is called from generated code to register the compressed
+// FileDescriptorProto with the file path for a proto source file.
+//
+// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead.
+func RegisterFile(s filePath, d fileDescGZIP) {
+ // Decompress the descriptor.
+ zr, err := gzip.NewReader(bytes.NewReader(d))
+ if err != nil {
+ panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
+ }
+ b, err := ioutil.ReadAll(zr)
+ if err != nil {
+ panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
+ }
+
+ // Construct a protoreflect.FileDescriptor from the raw descriptor.
+ // Note that DescBuilder.Build automatically registers the constructed
+ // file descriptor with the v2 registry.
+ protoimpl.DescBuilder{RawDescriptor: b}.Build()
+
+ // Locally cache the raw descriptor form for the file.
+ fileCache.Store(s, d)
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto given the file path
+// for a proto source file. It returns nil if not found.
+//
+// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead.
+func FileDescriptor(s filePath) fileDescGZIP {
+ if v, ok := fileCache.Load(s); ok {
+ return v.(fileDescGZIP)
+ }
+
+ // Find the descriptor in the v2 registry.
+ var b []byte
+ if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil {
+ b, _ = Marshal(protodesc.ToFileDescriptorProto(fd))
+ }
+
+ // Locally cache the raw descriptor form for the file.
+ if len(b) > 0 {
+ v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b))
+ return v.(fileDescGZIP)
+ }
+ return nil
+}
+
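+// Illustrative use of FileDescriptor (a sketch):
+//
+//	gz := proto.FileDescriptor("google/protobuf/descriptor.proto")
+//	// gz holds the gzip-compressed FileDescriptorProto for that file, or nil
+//	// if the file has not been registered.
+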
+// enumName is the name of an enum. For historical reasons, the enum name is
+// neither the full Go name nor the full protobuf name of the enum.
+// The name is the dot-separated combination of just the proto package in which
+// the enum is declared, followed by the Go type name of the generated enum.
+type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum"
+
+// enumsByName maps enum values by name to their numeric counterpart.
+type enumsByName = map[string]int32
+
+// enumsByNumber maps enum values by number to their name counterpart.
+type enumsByNumber = map[int32]string
+
+var enumCache sync.Map // map[enumName]enumsByName
+var numFilesCache sync.Map // map[protoreflect.FullName]int
+
+// RegisterEnum is called from the generated code to register the mapping of
+// enum value names to enum numbers for the enum identified by s.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead.
+func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) {
+ if _, ok := enumCache.Load(s); ok {
+ panic("proto: duplicate enum registered: " + s)
+ }
+ enumCache.Store(s, m)
+
+ // This does not forward registration to the v2 registry since this API
+ // lacks sufficient information to construct a complete v2 enum descriptor.
+}
+
+// EnumValueMap returns the mapping from enum value names to enum numbers for
+// the enum of the given name. It returns nil if not found.
+//
+// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead.
+func EnumValueMap(s enumName) enumsByName {
+ if v, ok := enumCache.Load(s); ok {
+ return v.(enumsByName)
+ }
+
+ // Check whether the cache is stale. If the number of files in the current
+ // package differs, then it means that some enums may have been recently
+ // registered upstream that we do not know about.
+ var protoPkg protoreflect.FullName
+ if i := strings.LastIndexByte(s, '.'); i >= 0 {
+ protoPkg = protoreflect.FullName(s[:i])
+ }
+ v, _ := numFilesCache.Load(protoPkg)
+ numFiles, _ := v.(int)
+ if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles {
+ return nil // cache is up-to-date; was not found earlier
+ }
+
+ // Update the enum cache for all enums declared in the given proto package.
+ numFiles = 0
+ protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool {
+ walkEnums(fd, func(ed protoreflect.EnumDescriptor) {
+ name := protoimpl.X.LegacyEnumName(ed)
+ if _, ok := enumCache.Load(name); !ok {
+ m := make(enumsByName)
+ evs := ed.Values()
+ for i := evs.Len() - 1; i >= 0; i-- {
+ ev := evs.Get(i)
+ m[string(ev.Name())] = int32(ev.Number())
+ }
+ enumCache.LoadOrStore(name, m)
+ }
+ })
+ numFiles++
+ return true
+ })
+ numFilesCache.Store(protoPkg, numFiles)
+
+ // Check cache again for enum map.
+ if v, ok := enumCache.Load(s); ok {
+ return v.(enumsByName)
+ }
+ return nil
+}
+
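+// Illustrative use of EnumValueMap (a sketch; the name follows the
+// "proto.package.GoEnumName" convention described above):
+//
+//	if m := proto.EnumValueMap("my.proto.package.GoMessage_GoEnum"); m != nil {
+//		_ = m["SOME_VALUE"] // enum value name -> number
+//	}
+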
+// walkEnums recursively walks all enums declared in d.
+func walkEnums(d interface {
+ Enums() protoreflect.EnumDescriptors
+ Messages() protoreflect.MessageDescriptors
+}, f func(protoreflect.EnumDescriptor)) {
+ eds := d.Enums()
+ for i := eds.Len() - 1; i >= 0; i-- {
+ f(eds.Get(i))
+ }
+ mds := d.Messages()
+ for i := mds.Len() - 1; i >= 0; i-- {
+ walkEnums(mds.Get(i), f)
+ }
+}
+
+// messageName is the full name of protobuf message.
+type messageName = string
+
+var messageTypeCache sync.Map // map[messageName]reflect.Type
+
+// RegisterType is called from generated code to register the message Go type
+// for a message of the given name.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead.
+func RegisterType(m Message, s messageName) {
+ mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s))
+ if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil {
+ panic(err)
+ }
+ messageTypeCache.Store(s, reflect.TypeOf(m))
+}
+
+// RegisterMapType is called from generated code to register the Go map type
+// for a protobuf message representing a map entry.
+//
+// Deprecated: Do not use.
+func RegisterMapType(m interface{}, s messageName) {
+ t := reflect.TypeOf(m)
+ if t.Kind() != reflect.Map {
+ panic(fmt.Sprintf("invalid map kind: %v", t))
+ }
+ if _, ok := messageTypeCache.Load(s); ok {
+ panic(fmt.Errorf("proto: duplicate proto message registered: %s", s))
+ }
+ messageTypeCache.Store(s, t)
+}
+
+// MessageType returns the message type for a named message.
+// It returns nil if not found.
+//
+// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead.
+func MessageType(s messageName) reflect.Type {
+ if v, ok := messageTypeCache.Load(s); ok {
+ return v.(reflect.Type)
+ }
+
+ // Derive the message type from the v2 registry.
+ var t reflect.Type
+ if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil {
+ t = messageGoType(mt)
+ }
+
+ // If we could not get a concrete type, it is possible that it is a
+ // pseudo-message for a map entry.
+ if t == nil {
+ d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s))
+ if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() {
+ kt := goTypeForField(md.Fields().ByNumber(1))
+ vt := goTypeForField(md.Fields().ByNumber(2))
+ t = reflect.MapOf(kt, vt)
+ }
+ }
+
+ // Locally cache the message type for the given name.
+ if t != nil {
+ v, _ := messageTypeCache.LoadOrStore(s, t)
+ return v.(reflect.Type)
+ }
+ return nil
+}
+
+func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type {
+ switch k := fd.Kind(); k {
+ case protoreflect.EnumKind:
+ if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil {
+ return enumGoType(et)
+ }
+ return reflect.TypeOf(protoreflect.EnumNumber(0))
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil {
+ return messageGoType(mt)
+ }
+ return reflect.TypeOf((*protoreflect.Message)(nil)).Elem()
+ default:
+ return reflect.TypeOf(fd.Default().Interface())
+ }
+}
+
+func enumGoType(et protoreflect.EnumType) reflect.Type {
+ return reflect.TypeOf(et.New(0))
+}
+
+func messageGoType(mt protoreflect.MessageType) reflect.Type {
+ return reflect.TypeOf(MessageV1(mt.Zero().Interface()))
+}
+
+// MessageName returns the full protobuf name for the given message type.
+//
+// Deprecated: Use protoreflect.MessageDescriptor.FullName instead.
+func MessageName(m Message) messageName {
+ if m == nil {
+ return ""
+ }
+ if m, ok := m.(interface{ XXX_MessageName() messageName }); ok {
+ return m.XXX_MessageName()
+ }
+ return messageName(protoimpl.X.MessageDescriptorOf(m).FullName())
+}
+
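+// Illustrative use of MessageType and MessageName (a sketch; the full name is
+// hypothetical and the message must have been registered):
+//
+//	if t := proto.MessageType("my.proto.package.MyMessage"); t != nil {
+//		m := reflect.New(t.Elem()).Interface().(proto.Message)
+//		_ = proto.MessageName(m) // "my.proto.package.MyMessage"
+//	}
+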
+// RegisterExtension is called from the generated code to register
+// the extension descriptor.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead.
+func RegisterExtension(d *ExtensionDesc) {
+ if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil {
+ panic(err)
+ }
+}
+
+type extensionsByNumber = map[int32]*ExtensionDesc
+
+var extensionCache sync.Map // map[messageName]extensionsByNumber
+
+// RegisteredExtensions returns a map of the registered extensions for the
+// provided protobuf message, indexed by the extension field number.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead.
+func RegisteredExtensions(m Message) extensionsByNumber {
+ // Check whether the cache is stale. If the number of extensions for
+ // the given message differs, then it means that some extensions were
+ // recently registered upstream that we do not know about.
+ s := MessageName(m)
+ v, _ := extensionCache.Load(s)
+ xs, _ := v.(extensionsByNumber)
+ if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) {
+ return xs // cache is up-to-date
+ }
+
+ // Cache is stale, re-compute the extensions map.
+ xs = make(extensionsByNumber)
+ protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool {
+ if xd, ok := xt.(*ExtensionDesc); ok {
+ xs[int32(xt.TypeDescriptor().Number())] = xd
+ } else {
+ // TODO: This implies that the protoreflect.ExtensionType is a
+ // custom type not generated by protoc-gen-go. We could try and
+ // convert the type to an ExtensionDesc.
+ }
+ return true
+ })
+ extensionCache.Store(s, xs)
+ return xs
+}
diff --git a/vendor/github.com/golang/protobuf/proto/text_decode.go b/vendor/github.com/golang/protobuf/proto/text_decode.go
new file mode 100644
index 0000000..47eb3e4
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_decode.go
@@ -0,0 +1,801 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/encoding/prototext"
+ protoV2 "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+const wrapTextUnmarshalV2 = false
+
+// ParseError is returned by UnmarshalText.
+type ParseError struct {
+ Message string
+
+ // Deprecated: Do not use.
+ Line, Offset int
+}
+
+func (e *ParseError) Error() string {
+ if wrapTextUnmarshalV2 {
+ return e.Message
+ }
+ if e.Line == 1 {
+ return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message)
+ }
+ return fmt.Sprintf("line %d: %v", e.Line, e.Message)
+}
+
+// UnmarshalText parses a proto text formatted string into m.
+func UnmarshalText(s string, m Message) error {
+ if u, ok := m.(encoding.TextUnmarshaler); ok {
+ return u.UnmarshalText([]byte(s))
+ }
+
+ m.Reset()
+ mi := MessageV2(m)
+
+ if wrapTextUnmarshalV2 {
+ err := prototext.UnmarshalOptions{
+ AllowPartial: true,
+ }.Unmarshal([]byte(s), mi)
+ if err != nil {
+ return &ParseError{Message: err.Error()}
+ }
+ return checkRequiredNotSet(mi)
+ } else {
+ if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil {
+ return err
+ }
+ return checkRequiredNotSet(mi)
+ }
+}
+
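+// Illustrative use of UnmarshalText (a sketch; *pb.MyMessage is a hypothetical
+// generated message with a string field named "name"):
+//
+//	m := &pb.MyMessage{}
+//	if err := proto.UnmarshalText(`name: "foo"`, m); err != nil {
+//		// handle parse error
+//	}
+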
+type textParser struct {
+ s string // remaining input
+ done bool // whether the parsing is finished (success or error)
+ backed bool // whether back() was called
+ offset, line int
+ cur token
+}
+
+type token struct {
+ value string
+ err *ParseError
+ line int // line number
+ offset int // byte number from start of input, not start of line
+ unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func newTextParser(s string) *textParser {
+ p := new(textParser)
+ p.s = s
+ p.line = 1
+ p.cur.line = 1
+ return p
+}
+
+func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) {
+ md := m.Descriptor()
+ fds := md.Fields()
+
+ // A struct is a sequence of "name: value", terminated by one of
+ // '>' or '}', or the end of the input. A name may also be
+ // "[extension]" or "[type/url]".
+ //
+ // The whole struct can also be an expanded Any message, like:
+ // [type/url] < ... struct contents ... >
+ seen := make(map[protoreflect.FieldNumber]bool)
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ if tok.value == "[" {
+ if err := p.unmarshalExtensionOrAny(m, seen); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // This is a normal, non-extension field.
+ name := protoreflect.Name(tok.value)
+ fd := fds.ByName(name)
+ switch {
+ case fd == nil:
+ gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name))))
+ if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name {
+ fd = gd
+ }
+ case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name:
+ fd = nil
+ case fd.IsWeak() && fd.Message().IsPlaceholder():
+ fd = nil
+ }
+ if fd == nil {
+ typeName := string(md.FullName())
+ if m, ok := m.Interface().(Message); ok {
+ t := reflect.TypeOf(m)
+ if t.Kind() == reflect.Ptr {
+ typeName = t.Elem().String()
+ }
+ }
+ return p.errorf("unknown field name %q in %v", name, typeName)
+ }
+ if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil {
+ return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name())
+ }
+ if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] {
+ return p.errorf("non-repeated field %q was repeated", fd.Name())
+ }
+ seen[fd.Number()] = true
+
+ // Consume any colon.
+ if err := p.checkForColon(fd); err != nil {
+ return err
+ }
+
+ // Parse into the field.
+ v := m.Get(fd)
+ if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
+ v = m.Mutable(fd)
+ }
+ if v, err = p.unmarshalValue(v, fd); err != nil {
+ return err
+ }
+ m.Set(fd, v)
+
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error {
+ name, err := p.consumeExtensionOrAnyName()
+ if err != nil {
+ return err
+ }
+
+ // If it contains a slash, it's an Any type URL.
+ if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ // consume an optional colon
+ if tok.value == ":" {
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ }
+
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+
+ mt, err := protoregistry.GlobalTypes.FindMessageByURL(name)
+ if err != nil {
+ return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):])
+ }
+ m2 := mt.New()
+ if err := p.unmarshalMessage(m2, terminator); err != nil {
+ return err
+ }
+ b, err := protoV2.Marshal(m2.Interface())
+ if err != nil {
+ return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err)
+ }
+
+ urlFD := m.Descriptor().Fields().ByName("type_url")
+ valFD := m.Descriptor().Fields().ByName("value")
+ if seen[urlFD.Number()] {
+ return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name())
+ }
+ if seen[valFD.Number()] {
+ return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name())
+ }
+ m.Set(urlFD, protoreflect.ValueOfString(name))
+ m.Set(valFD, protoreflect.ValueOfBytes(b))
+ seen[urlFD.Number()] = true
+ seen[valFD.Number()] = true
+ return nil
+ }
+
+ xname := protoreflect.FullName(name)
+ xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
+ if xt == nil && isMessageSet(m.Descriptor()) {
+ xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
+ }
+ if xt == nil {
+ return p.errorf("unrecognized extension %q", name)
+ }
+ fd := xt.TypeDescriptor()
+ if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
+ return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName())
+ }
+
+ if err := p.checkForColon(fd); err != nil {
+ return err
+ }
+
+ v := m.Get(fd)
+ if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
+ v = m.Mutable(fd)
+ }
+ v, err = p.unmarshalValue(v, fd)
+ if err != nil {
+ return err
+ }
+ m.Set(fd, v)
+ return p.consumeOptionalSeparator()
+}
+
+func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return v, tok.err
+ }
+ if tok.value == "" {
+ return v, p.errorf("unexpected EOF")
+ }
+
+ switch {
+ case fd.IsList():
+ lv := v.List()
+ var err error
+ if tok.value == "[" {
+ // Repeated field with list notation, like [1,2,3].
+ for {
+ vv := lv.NewElement()
+ vv, err = p.unmarshalSingularValue(vv, fd)
+ if err != nil {
+ return v, err
+ }
+ lv.Append(vv)
+
+ tok := p.next()
+ if tok.err != nil {
+ return v, tok.err
+ }
+ if tok.value == "]" {
+ break
+ }
+ if tok.value != "," {
+ return v, p.errorf("Expected ']' or ',' found %q", tok.value)
+ }
+ }
+ return v, nil
+ }
+
+ // One value of the repeated field.
+ p.back()
+ vv := lv.NewElement()
+ vv, err = p.unmarshalSingularValue(vv, fd)
+ if err != nil {
+ return v, err
+ }
+ lv.Append(vv)
+ return v, nil
+ case fd.IsMap():
+ // The map entry should be this sequence of tokens:
+ // < key : KEY value : VALUE >
+ // However, implementations may omit key or value, and technically
+ // we should support them in any order.
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return v, p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+
+ keyFD := fd.MapKey()
+ valFD := fd.MapValue()
+
+ mv := v.Map()
+ kv := keyFD.Default()
+ vv := mv.NewValue()
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return v, tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ var err error
+ switch tok.value {
+ case "key":
+ if err := p.consumeToken(":"); err != nil {
+ return v, err
+ }
+ if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil {
+ return v, err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return v, err
+ }
+ case "value":
+ if err := p.checkForColon(valFD); err != nil {
+ return v, err
+ }
+ if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil {
+ return v, err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return v, err
+ }
+ default:
+ p.back()
+ return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+ }
+ }
+ mv.Set(kv.MapKey(), vv)
+ return v, nil
+ default:
+ p.back()
+ return p.unmarshalSingularValue(v, fd)
+ }
+}
+
+func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return v, tok.err
+ }
+ if tok.value == "" {
+ return v, p.errorf("unexpected EOF")
+ }
+
+ switch fd.Kind() {
+ case protoreflect.BoolKind:
+ switch tok.value {
+ case "true", "1", "t", "True":
+ return protoreflect.ValueOfBool(true), nil
+ case "false", "0", "f", "False":
+ return protoreflect.ValueOfBool(false), nil
+ }
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ return protoreflect.ValueOfInt32(int32(x)), nil
+ }
+
+		// The C++ parser accepts large positive hex numbers that use
+ // two's complement arithmetic to represent negative numbers.
+ // This feature is here for backwards compatibility with C++.
+ if strings.HasPrefix(tok.value, "0x") {
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil
+ }
+ }
+ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+ if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+ return protoreflect.ValueOfInt64(int64(x)), nil
+ }
+
+		// The C++ parser accepts large positive hex numbers that use
+ // two's complement arithmetic to represent negative numbers.
+ // This feature is here for backwards compatibility with C++.
+ if strings.HasPrefix(tok.value, "0x") {
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil
+ }
+ }
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ return protoreflect.ValueOfUint32(uint32(x)), nil
+ }
+ case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ return protoreflect.ValueOfUint64(uint64(x)), nil
+ }
+ case protoreflect.FloatKind:
+ // Ignore 'f' for compatibility with output generated by C++,
+ // but don't remove 'f' when the value is "-inf" or "inf".
+ v := tok.value
+ if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
+ v = v[:len(v)-len("f")]
+ }
+ if x, err := strconv.ParseFloat(v, 32); err == nil {
+ return protoreflect.ValueOfFloat32(float32(x)), nil
+ }
+ case protoreflect.DoubleKind:
+ // Ignore 'f' for compatibility with output generated by C++,
+ // but don't remove 'f' when the value is "-inf" or "inf".
+ v := tok.value
+ if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
+ v = v[:len(v)-len("f")]
+ }
+ if x, err := strconv.ParseFloat(v, 64); err == nil {
+ return protoreflect.ValueOfFloat64(float64(x)), nil
+ }
+ case protoreflect.StringKind:
+ if isQuote(tok.value[0]) {
+ return protoreflect.ValueOfString(tok.unquoted), nil
+ }
+ case protoreflect.BytesKind:
+ if isQuote(tok.value[0]) {
+ return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil
+ }
+ case protoreflect.EnumKind:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil
+ }
+ vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value))
+ if vd != nil {
+ return protoreflect.ValueOfEnum(vd.Number()), nil
+ }
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ var terminator string
+ switch tok.value {
+ case "{":
+ terminator = "}"
+ case "<":
+ terminator = ">"
+ default:
+ return v, p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ err := p.unmarshalMessage(v.Message(), terminator)
+ return v, err
+ default:
+ panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
+ }
+ return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value)
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ":" {
+ if fd.Message() == nil {
+ return p.errorf("expected ':', found %q", tok.value)
+ }
+ p.back()
+ }
+ return nil
+}
+
+// consumeExtensionOrAnyName consumes an extension name or an Any type URL and
+// the following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtensionOrAnyName() (string, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return "", tok.err
+ }
+
+	// If the extension name or type URL is quoted, it's a single token.
+ if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+ name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+ if err != nil {
+ return "", err
+ }
+ return name, p.consumeToken("]")
+ }
+
+ // Consume everything up to "]"
+ var parts []string
+ for tok.value != "]" {
+ parts = append(parts, tok.value)
+ tok = p.next()
+ if tok.err != nil {
+ return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+ }
+ if p.done && tok.value != "]" {
+ return "", p.errorf("unclosed type_url or extension name")
+ }
+ }
+ return strings.Join(parts, ""), nil
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in unmarshalMessage to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+ pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
+func (p *textParser) skipWhitespace() {
+ i := 0
+ for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+ if p.s[i] == '#' {
+ // comment; skip to end of line or input
+ for i < len(p.s) && p.s[i] != '\n' {
+ i++
+ }
+ if i == len(p.s) {
+ break
+ }
+ }
+ if p.s[i] == '\n' {
+ p.line++
+ }
+ i++
+ }
+ p.offset += i
+ p.s = p.s[i:len(p.s)]
+ if len(p.s) == 0 {
+ p.done = true
+ }
+}
+
+func (p *textParser) advance() {
+ // Skip whitespace
+ p.skipWhitespace()
+ if p.done {
+ return
+ }
+
+ // Start of non-whitespace
+ p.cur.err = nil
+ p.cur.offset, p.cur.line = p.offset, p.line
+ p.cur.unquoted = ""
+ switch p.s[0] {
+ case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+ // Single symbol
+ p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+ case '"', '\'':
+ // Quoted string
+ i := 1
+ for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+ if p.s[i] == '\\' && i+1 < len(p.s) {
+ // skip escaped char
+ i++
+ }
+ i++
+ }
+ if i >= len(p.s) || p.s[i] != p.s[0] {
+ p.errorf("unmatched quote")
+ return
+ }
+ unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+ if err != nil {
+ p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+ return
+ }
+ p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+ p.cur.unquoted = unq
+ default:
+ i := 0
+ for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+ i++
+ }
+ if i == 0 {
+ p.errorf("unexpected byte %#x", p.s[0])
+ return
+ }
+ p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+ }
+ p.offset += len(p.cur.value)
+}
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+ if p.backed || p.done {
+ p.backed = false
+ return &p.cur
+ }
+ p.advance()
+ if p.done {
+ p.cur.value = ""
+ } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+ // Look for multiple quoted strings separated by whitespace,
+ // and concatenate them.
+ cat := p.cur
+ for {
+ p.skipWhitespace()
+ if p.done || !isQuote(p.s[0]) {
+ break
+ }
+ p.advance()
+ if p.cur.err != nil {
+ return &p.cur
+ }
+ cat.value += " " + p.cur.value
+ cat.unquoted += p.cur.unquoted
+ }
+ p.done = false // parser may have seen EOF, but we want to return cat
+ p.cur = cat
+ }
+ return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != s {
+ p.back()
+ return p.errorf("expected %q, found %q", s, tok.value)
+ }
+ return nil
+}
+
+var errBadUTF8 = errors.New("proto: bad UTF-8")
+
+func unquoteC(s string, quote rune) (string, error) {
+ // This is based on C++'s tokenizer.cc.
+ // Despite its name, this is *not* parsing C syntax.
+ // For instance, "\0" is an invalid quoted string.
+
+ // Avoid allocation in trivial cases.
+ simple := true
+ for _, r := range s {
+ if r == '\\' || r == quote {
+ simple = false
+ break
+ }
+ }
+ if simple {
+ return s, nil
+ }
+
+ buf := make([]byte, 0, 3*len(s)/2)
+ for len(s) > 0 {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", errBadUTF8
+ }
+ s = s[n:]
+ if r != '\\' {
+ if r < utf8.RuneSelf {
+ buf = append(buf, byte(r))
+ } else {
+ buf = append(buf, string(r)...)
+ }
+ continue
+ }
+
+ ch, tail, err := unescape(s)
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, ch...)
+ s = tail
+ }
+ return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", "", errBadUTF8
+ }
+ s = s[n:]
+ switch r {
+ case 'a':
+ return "\a", s, nil
+ case 'b':
+ return "\b", s, nil
+ case 'f':
+ return "\f", s, nil
+ case 'n':
+ return "\n", s, nil
+ case 'r':
+ return "\r", s, nil
+ case 't':
+ return "\t", s, nil
+ case 'v':
+ return "\v", s, nil
+ case '?':
+ return "?", s, nil // trigraph workaround
+ case '\'', '"', '\\':
+ return string(r), s, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ if len(s) < 2 {
+ return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+ }
+ ss := string(r) + s[:2]
+ s = s[2:]
+ i, err := strconv.ParseUint(ss, 8, 8)
+ if err != nil {
+ return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
+ }
+ return string([]byte{byte(i)}), s, nil
+ case 'x', 'X', 'u', 'U':
+ var n int
+ switch r {
+ case 'x', 'X':
+ n = 2
+ case 'u':
+ n = 4
+ case 'U':
+ n = 8
+ }
+ if len(s) < n {
+ return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
+ }
+ ss := s[:n]
+ s = s[n:]
+ i, err := strconv.ParseUint(ss, 16, 64)
+ if err != nil {
+ return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
+ }
+ if r == 'x' || r == 'X' {
+ return string([]byte{byte(i)}), s, nil
+ }
+ if i > utf8.MaxRune {
+ return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
+ }
+ return string(rune(i)), s, nil
+ }
+ return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
+func isIdentOrNumberChar(c byte) bool {
+ switch {
+ case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+ return true
+ case '0' <= c && c <= '9':
+ return true
+ }
+ switch c {
+ case '-', '+', '.', '_':
+ return true
+ }
+ return false
+}
+
+func isWhitespace(c byte) bool {
+ switch c {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+func isQuote(c byte) bool {
+ switch c {
+ case '"', '\'':
+ return true
+ }
+ return false
+}
diff --git a/vendor/github.com/golang/protobuf/proto/text_encode.go b/vendor/github.com/golang/protobuf/proto/text_encode.go
new file mode 100644
index 0000000..a31134e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_encode.go
@@ -0,0 +1,560 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "bytes"
+ "encoding"
+ "fmt"
+ "io"
+ "math"
+ "sort"
+ "strings"
+
+ "google.golang.org/protobuf/encoding/prototext"
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+const wrapTextMarshalV2 = false
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+ Compact bool // use compact text format (one line)
+ ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes the proto text format of m to w.
+func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error {
+ b, err := tm.marshal(m)
+ if len(b) > 0 {
+ if _, err := w.Write(b); err != nil {
+ return err
+ }
+ }
+ return err
+}
+
+// Text returns a proto text formatted string of m.
+func (tm *TextMarshaler) Text(m Message) string {
+ b, _ := tm.marshal(m)
+ return string(b)
+}
+
+func (tm *TextMarshaler) marshal(m Message) ([]byte, error) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return []byte(""), nil
+ }
+
+ if wrapTextMarshalV2 {
+ if m, ok := m.(encoding.TextMarshaler); ok {
+ return m.MarshalText()
+ }
+
+ opts := prototext.MarshalOptions{
+ AllowPartial: true,
+ EmitUnknown: true,
+ }
+ if !tm.Compact {
+ opts.Indent = " "
+ }
+ if !tm.ExpandAny {
+ opts.Resolver = (*protoregistry.Types)(nil)
+ }
+ return opts.Marshal(mr.Interface())
+ } else {
+ w := &textWriter{
+ compact: tm.Compact,
+ expandAny: tm.ExpandAny,
+ complete: true,
+ }
+
+ if m, ok := m.(encoding.TextMarshaler); ok {
+ b, err := m.MarshalText()
+ if err != nil {
+ return nil, err
+ }
+ w.Write(b)
+ return w.buf, nil
+ }
+
+ err := w.writeMessage(mr)
+ return w.buf, err
+ }
+}
+
+var (
+ defaultTextMarshaler = TextMarshaler{}
+ compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// MarshalText writes the proto text format of m to w.
+func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) }
+
+// MarshalTextString returns a proto text formatted string of m.
+func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) }
+
+// CompactText writes the compact proto text format of m to w.
+func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) }
+
+// CompactTextString returns a compact proto text formatted string of m.
+func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) }
+
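+// Illustrative use of the text format helpers (a sketch; m is any non-nil
+// generated message):
+//
+//	s := proto.MarshalTextString(m) // indented, multi-line form
+//	c := proto.CompactTextString(m) // single-line form
+//	_ = proto.MarshalText(os.Stdout, m)
+//	_, _ = s, c
+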
+var (
+ newline = []byte("\n")
+ endBraceNewline = []byte("}\n")
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ compact bool // same as TextMarshaler.Compact
+ expandAny bool // same as TextMarshaler.ExpandAny
+ complete bool // whether the current position is a complete line
+ indent int // indentation level; never negative
+ buf []byte
+}
+
+func (w *textWriter) Write(p []byte) (n int, _ error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.buf = append(w.buf, p...)
+ w.complete = false
+ return len(p), nil
+ }
+
+ frags := bytes.SplitN(p, newline, newlines+1)
+ if w.compact {
+ for i, frag := range frags {
+ if i > 0 {
+ w.buf = append(w.buf, ' ')
+ n++
+ }
+ w.buf = append(w.buf, frag...)
+ n += len(frag)
+ }
+ return n, nil
+ }
+
+ for i, frag := range frags {
+ if w.complete {
+ w.writeIndent()
+ }
+ w.buf = append(w.buf, frag...)
+ n += len(frag)
+ if i+1 < len(frags) {
+ w.buf = append(w.buf, '\n')
+ n++
+ }
+ }
+ w.complete = len(frags[len(frags)-1]) == 0
+ return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+ if w.compact && c == '\n' {
+ c = ' '
+ }
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.buf = append(w.buf, c)
+ w.complete = c == '\n'
+ return nil
+}
+
+func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.complete = false
+
+ if fd.Kind() != protoreflect.GroupKind {
+ w.buf = append(w.buf, fd.Name()...)
+ w.WriteByte(':')
+ } else {
+ // Use message type name for group field name.
+ w.buf = append(w.buf, fd.Message().Name()...)
+ }
+
+ if !w.compact {
+ w.WriteByte(' ')
+ }
+}
+
+func requiresQuotes(u string) bool {
+	// When the type URL contains any character outside [0-9A-Za-z._/], it must be quoted.
+ for _, ch := range u {
+ switch {
+ case ch == '.' || ch == '/' || ch == '_':
+ continue
+ case '0' <= ch && ch <= '9':
+ continue
+ case 'A' <= ch && ch <= 'Z':
+ continue
+ case 'a' <= ch && ch <= 'z':
+ continue
+ default:
+ return true
+ }
+ }
+ return false
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if the value cannot be unmarshaled (e.g. because the
+// message type is not linked in).
+//
+// It returns (true, error) when the value was written in expanded format or an
+// error was encountered.
+func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) {
+ md := m.Descriptor()
+ fdURL := md.Fields().ByName("type_url")
+ fdVal := md.Fields().ByName("value")
+
+ url := m.Get(fdURL).String()
+ mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
+ if err != nil {
+ return false, nil
+ }
+
+ b := m.Get(fdVal).Bytes()
+ m2 := mt.New()
+ if err := proto.Unmarshal(b, m2.Interface()); err != nil {
+ return false, nil
+ }
+ w.Write([]byte("["))
+ if requiresQuotes(url) {
+ w.writeQuotedString(url)
+ } else {
+ w.Write([]byte(url))
+ }
+ if w.compact {
+ w.Write([]byte("]:<"))
+ } else {
+ w.Write([]byte("]: <\n"))
+ w.indent++
+ }
+ if err := w.writeMessage(m2); err != nil {
+ return true, err
+ }
+ if w.compact {
+ w.Write([]byte("> "))
+ } else {
+ w.indent--
+ w.Write([]byte(">\n"))
+ }
+ return true, nil
+}
+
+func (w *textWriter) writeMessage(m protoreflect.Message) error {
+ md := m.Descriptor()
+ if w.expandAny && md.FullName() == "google.protobuf.Any" {
+ if canExpand, err := w.writeProto3Any(m); canExpand {
+ return err
+ }
+ }
+
+ fds := md.Fields()
+ for i := 0; i < fds.Len(); {
+ fd := fds.Get(i)
+ if od := fd.ContainingOneof(); od != nil {
+ fd = m.WhichOneof(od)
+ i += od.Fields().Len()
+ } else {
+ i++
+ }
+ if fd == nil || !m.Has(fd) {
+ continue
+ }
+
+ switch {
+ case fd.IsList():
+ lv := m.Get(fd).List()
+ for j := 0; j < lv.Len(); j++ {
+ w.writeName(fd)
+ v := lv.Get(j)
+ if err := w.writeSingularValue(v, fd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ }
+ case fd.IsMap():
+ kfd := fd.MapKey()
+ vfd := fd.MapValue()
+ mv := m.Get(fd).Map()
+
+ type entry struct{ key, val protoreflect.Value }
+ var entries []entry
+ mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
+ entries = append(entries, entry{k.Value(), v})
+ return true
+ })
+ sort.Slice(entries, func(i, j int) bool {
+ switch kfd.Kind() {
+ case protoreflect.BoolKind:
+ return !entries[i].key.Bool() && entries[j].key.Bool()
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+ return entries[i].key.Int() < entries[j].key.Int()
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+ return entries[i].key.Uint() < entries[j].key.Uint()
+ case protoreflect.StringKind:
+ return entries[i].key.String() < entries[j].key.String()
+ default:
+ panic("invalid kind")
+ }
+ })
+ for _, entry := range entries {
+ w.writeName(fd)
+ w.WriteByte('<')
+ if !w.compact {
+ w.WriteByte('\n')
+ }
+ w.indent++
+ w.writeName(kfd)
+ if err := w.writeSingularValue(entry.key, kfd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ w.writeName(vfd)
+ if err := w.writeSingularValue(entry.val, vfd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ w.indent--
+ w.WriteByte('>')
+ w.WriteByte('\n')
+ }
+ default:
+ w.writeName(fd)
+ if err := w.writeSingularValue(m.Get(fd), fd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ }
+ }
+
+ if b := m.GetUnknown(); len(b) > 0 {
+ w.writeUnknownFields(b)
+ }
+ return w.writeExtensions(m)
+}
+
+func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
+ switch fd.Kind() {
+ case protoreflect.FloatKind, protoreflect.DoubleKind:
+ switch vf := v.Float(); {
+ case math.IsInf(vf, +1):
+ w.Write(posInf)
+ case math.IsInf(vf, -1):
+ w.Write(negInf)
+ case math.IsNaN(vf):
+ w.Write(nan)
+ default:
+ fmt.Fprint(w, v.Interface())
+ }
+ case protoreflect.StringKind:
+ // NOTE: This does not validate UTF-8 for historical reasons.
+ w.writeQuotedString(string(v.String()))
+ case protoreflect.BytesKind:
+ w.writeQuotedString(string(v.Bytes()))
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ var bra, ket byte = '<', '>'
+ if fd.Kind() == protoreflect.GroupKind {
+ bra, ket = '{', '}'
+ }
+ w.WriteByte(bra)
+ if !w.compact {
+ w.WriteByte('\n')
+ }
+ w.indent++
+ m := v.Message()
+ if m2, ok := m.Interface().(encoding.TextMarshaler); ok {
+ b, err := m2.MarshalText()
+ if err != nil {
+ return err
+ }
+ w.Write(b)
+ } else {
+ w.writeMessage(m)
+ }
+ w.indent--
+ w.WriteByte(ket)
+ case protoreflect.EnumKind:
+ if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil {
+ fmt.Fprint(w, ev.Name())
+ } else {
+ fmt.Fprint(w, v.Enum())
+ }
+ default:
+ fmt.Fprint(w, v.Interface())
+ }
+ return nil
+}
+
+// writeQuotedString writes a quoted string in the protocol buffer text format.
+func (w *textWriter) writeQuotedString(s string) {
+ w.WriteByte('"')
+ for i := 0; i < len(s); i++ {
+ switch c := s[i]; c {
+ case '\n':
+ w.buf = append(w.buf, `\n`...)
+ case '\r':
+ w.buf = append(w.buf, `\r`...)
+ case '\t':
+ w.buf = append(w.buf, `\t`...)
+ case '"':
+ w.buf = append(w.buf, `\"`...)
+ case '\\':
+ w.buf = append(w.buf, `\\`...)
+ default:
+ if isPrint := c >= 0x20 && c < 0x7f; isPrint {
+ w.buf = append(w.buf, c)
+ } else {
+ w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...)
+ }
+ }
+ }
+ w.WriteByte('"')
+}
+
+func (w *textWriter) writeUnknownFields(b []byte) {
+ if !w.compact {
+ fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b))
+ }
+
+ for len(b) > 0 {
+ num, wtyp, n := protowire.ConsumeTag(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+
+ if wtyp == protowire.EndGroupType {
+ w.indent--
+ w.Write(endBraceNewline)
+ continue
+ }
+ fmt.Fprint(w, num)
+ if wtyp != protowire.StartGroupType {
+ w.WriteByte(':')
+ }
+ if !w.compact || wtyp == protowire.StartGroupType {
+ w.WriteByte(' ')
+ }
+ switch wtyp {
+ case protowire.VarintType:
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+ fmt.Fprint(w, v)
+ case protowire.Fixed32Type:
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+ fmt.Fprint(w, v)
+ case protowire.Fixed64Type:
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+ fmt.Fprint(w, v)
+ case protowire.BytesType:
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+ fmt.Fprintf(w, "%q", v)
+ case protowire.StartGroupType:
+ w.WriteByte('{')
+ w.indent++
+ default:
+ fmt.Fprintf(w, "/* unknown wire type %d */", wtyp)
+ }
+ w.WriteByte('\n')
+ }
+}
+
+// writeExtensions writes all the extensions in m.
+func (w *textWriter) writeExtensions(m protoreflect.Message) error {
+ md := m.Descriptor()
+ if md.ExtensionRanges().Len() == 0 {
+ return nil
+ }
+
+ type ext struct {
+ desc protoreflect.FieldDescriptor
+ val protoreflect.Value
+ }
+ var exts []ext
+ m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ if fd.IsExtension() {
+ exts = append(exts, ext{fd, v})
+ }
+ return true
+ })
+ sort.Slice(exts, func(i, j int) bool {
+ return exts[i].desc.Number() < exts[j].desc.Number()
+ })
+
+ for _, ext := range exts {
+ // For message set, use the name of the message as the extension name.
+ name := string(ext.desc.FullName())
+ if isMessageSet(ext.desc.ContainingMessage()) {
+ name = strings.TrimSuffix(name, ".message_set_extension")
+ }
+
+ if !ext.desc.IsList() {
+ if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil {
+ return err
+ }
+ } else {
+ lv := ext.val.List()
+ for i := 0; i < lv.Len(); i++ {
+ if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
+ fmt.Fprintf(w, "[%s]:", name)
+ if !w.compact {
+ w.WriteByte(' ')
+ }
+ if err := w.writeSingularValue(v, fd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ return nil
+}
+
+func (w *textWriter) writeIndent() {
+ if !w.complete {
+ return
+ }
+ for i := 0; i < w.indent*2; i++ {
+ w.buf = append(w.buf, ' ')
+ }
+ w.complete = false
+}
diff --git a/vendor/github.com/golang/protobuf/proto/wire.go b/vendor/github.com/golang/protobuf/proto/wire.go
new file mode 100644
index 0000000..d7c28da
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/wire.go
@@ -0,0 +1,78 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ protoV2 "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// Size returns the size in bytes of the wire-format encoding of m.
+func Size(m Message) int {
+ if m == nil {
+ return 0
+ }
+ mi := MessageV2(m)
+ return protoV2.Size(mi)
+}
+
+// Marshal returns the wire-format encoding of m.
+func Marshal(m Message) ([]byte, error) {
+ b, err := marshalAppend(nil, m, false)
+ if b == nil {
+ b = zeroBytes
+ }
+ return b, err
+}
+
+var zeroBytes = make([]byte, 0, 0)
+
+func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) {
+ if m == nil {
+ return nil, ErrNil
+ }
+ mi := MessageV2(m)
+ nbuf, err := protoV2.MarshalOptions{
+ Deterministic: deterministic,
+ AllowPartial: true,
+ }.MarshalAppend(buf, mi)
+ if err != nil {
+ return buf, err
+ }
+ if len(buf) == len(nbuf) {
+ if !mi.ProtoReflect().IsValid() {
+ return buf, ErrNil
+ }
+ }
+ return nbuf, checkRequiredNotSet(mi)
+}
+
+// Unmarshal parses a wire-format message in b and places the decoded results in m.
+//
+// Unmarshal resets m before starting to unmarshal, so any existing data in m is always
+// removed. Use UnmarshalMerge to preserve and append to existing data.
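+//
+// A minimal round-trip sketch (illustrative; pb.Foo stands for any generated
+// message type):
+//
+//    b, err := proto.Marshal(in)
+//    if err != nil {
+//        return err
+//    }
+//    out := &pb.Foo{}
+//    if err := proto.Unmarshal(b, out); err != nil {
+//        return err
+//    }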
+func Unmarshal(b []byte, m Message) error {
+ m.Reset()
+ return UnmarshalMerge(b, m)
+}
+
+// UnmarshalMerge parses a wire-format message in b and places the decoded results in m.
+func UnmarshalMerge(b []byte, m Message) error {
+ mi := MessageV2(m)
+ out, err := protoV2.UnmarshalOptions{
+ AllowPartial: true,
+ Merge: true,
+ }.UnmarshalState(protoiface.UnmarshalInput{
+ Buf: b,
+ Message: mi.ProtoReflect(),
+ })
+ if err != nil {
+ return err
+ }
+ if out.Flags&protoiface.UnmarshalInitialized > 0 {
+ return nil
+ }
+ return checkRequiredNotSet(mi)
+}
diff --git a/vendor/github.com/golang/protobuf/proto/wrappers.go b/vendor/github.com/golang/protobuf/proto/wrappers.go
new file mode 100644
index 0000000..398e348
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/wrappers.go
@@ -0,0 +1,34 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+// Bool stores v in a new bool value and returns a pointer to it.
+func Bool(v bool) *bool { return &v }
+
+// Int stores v in a new int32 value and returns a pointer to it.
+//
+// Deprecated: Use Int32 instead.
+func Int(v int) *int32 { return Int32(int32(v)) }
+
+// Int32 stores v in a new int32 value and returns a pointer to it.
+func Int32(v int32) *int32 { return &v }
+
+// Int64 stores v in a new int64 value and returns a pointer to it.
+func Int64(v int64) *int64 { return &v }
+
+// Uint32 stores v in a new uint32 value and returns a pointer to it.
+func Uint32(v uint32) *uint32 { return &v }
+
+// Uint64 stores v in a new uint64 value and returns a pointer to it.
+func Uint64(v uint64) *uint64 { return &v }
+
+// Float32 stores v in a new float32 value and returns a pointer to it.
+func Float32(v float32) *float32 { return &v }
+
+// Float64 stores v in a new float64 value and returns a pointer to it.
+func Float64(v float64) *float64 { return &v }
+
+// String stores v in a new string value and returns a pointer to it.
+func String(v string) *string { return &v }
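+
+// These helpers are convenient for populating optional (pointer-typed) fields
+// of generated messages, for example (illustrative; pb.Foo is a hypothetical
+// generated type):
+//
+//    msg := &pb.Foo{
+//        Name:  proto.String("example"),
+//        Count: proto.Int32(3),
+//    }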
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
new file mode 100644
index 0000000..a76f807
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
@@ -0,0 +1,64 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
+
+package timestamp
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
+)
+
+// Symbols defined in public import of google/protobuf/timestamp.proto.
+
+type Timestamp = timestamppb.Timestamp
+
+var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor
+
+var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{
+ 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37,
+ 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() }
+func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() {
+ if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes,
+ DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs,
+ }.Build()
+ File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File
+ file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil
+ file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil
+ file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/gorilla/websocket/.gitignore b/vendor/github.com/gorilla/websocket/.gitignore
new file mode 100644
index 0000000..cd3fcd1
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/.gitignore
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+.idea/
+*.iml
diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS
new file mode 100644
index 0000000..1931f40
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/AUTHORS
@@ -0,0 +1,9 @@
+# This is the official list of Gorilla WebSocket authors for copyright
+# purposes.
+#
+# Please keep the list sorted.
+
+Gary Burd
+Google LLC (https://opensource.google.com/)
+Joachim Bauch
+
diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE
new file mode 100644
index 0000000..9171c97
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md
new file mode 100644
index 0000000..19aa2e7
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/README.md
@@ -0,0 +1,64 @@
+# Gorilla WebSocket
+
+[GoDoc](https://godoc.org/github.com/gorilla/websocket)
+[CircleCI](https://circleci.com/gh/gorilla/websocket)
+
+Gorilla WebSocket is a [Go](http://golang.org/) implementation of the
+[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol.
+
+### Documentation
+
+* [API Reference](https://pkg.go.dev/github.com/gorilla/websocket?tab=doc)
+* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat)
+* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command)
+* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo)
+* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch)
+
+### Status
+
+The Gorilla WebSocket package provides a complete and tested implementation of
+the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The
+package API is stable.
+
+### Installation
+
+ go get github.com/gorilla/websocket
+
+### Protocol Compliance
+
+The Gorilla WebSocket package passes the server tests in the [Autobahn Test
+Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn
+subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn).
+
+### Gorilla WebSocket compared with other packages
+
+
+
+Notes:
+
+1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html).
+2. The application can get the type of a received data message by implementing
+ a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal)
+ function.
+3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries.
+ Read returns when the input buffer is full or a frame boundary is
+ encountered. Each call to Write sends a single frame message. The Gorilla
+ io.Reader and io.WriteCloser operate on a single WebSocket message.
+
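+A minimal client sketch (illustrative only; the endpoint URL is a placeholder
+and error handling is abbreviated):
+
+    c, _, err := websocket.DefaultDialer.Dial("ws://example.com/echo", nil)
+    if err != nil {
+        log.Fatal("dial:", err)
+    }
+    defer c.Close()
+    if err := c.WriteMessage(websocket.TextMessage, []byte("hello")); err != nil {
+        log.Fatal("write:", err)
+    }
+    _, msg, err := c.ReadMessage()
+    if err != nil {
+        log.Fatal("read:", err)
+    }
+    log.Printf("recv: %s", msg)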
diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go
new file mode 100644
index 0000000..962c06a
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/client.go
@@ -0,0 +1,395 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bytes"
+ "context"
+ "crypto/tls"
+ "errors"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/http/httptrace"
+ "net/url"
+ "strings"
+ "time"
+)
+
+// ErrBadHandshake is returned when the server response to the opening
+// handshake is invalid.
+var ErrBadHandshake = errors.New("websocket: bad handshake")
+
+var errInvalidCompression = errors.New("websocket: invalid compression negotiation")
+
+// NewClient creates a new client connection using the given net connection.
+// The URL u specifies the host and request URI. Use requestHeader to specify
+// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies
+// (Cookie). Use the response.Header to get the selected subprotocol
+// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
+//
+// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
+// non-nil *http.Response so that callers can handle redirects, authentication,
+// etc.
+//
+// Deprecated: Use Dialer instead.
+func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) {
+ d := Dialer{
+ ReadBufferSize: readBufSize,
+ WriteBufferSize: writeBufSize,
+ NetDial: func(net, addr string) (net.Conn, error) {
+ return netConn, nil
+ },
+ }
+ return d.Dial(u.String(), requestHeader)
+}
+
+// A Dialer contains options for connecting to a WebSocket server.
+type Dialer struct {
+ // NetDial specifies the dial function for creating TCP connections. If
+ // NetDial is nil, net.Dial is used.
+ NetDial func(network, addr string) (net.Conn, error)
+
+ // NetDialContext specifies the dial function for creating TCP connections. If
+ // NetDialContext is nil, net.DialContext is used.
+ NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error)
+
+ // Proxy specifies a function to return a proxy for a given
+ // Request. If the function returns a non-nil error, the
+ // request is aborted with the provided error.
+ // If Proxy is nil or returns a nil *URL, no proxy is used.
+ Proxy func(*http.Request) (*url.URL, error)
+
+ // TLSClientConfig specifies the TLS configuration to use with tls.Client.
+ // If nil, the default configuration is used.
+ TLSClientConfig *tls.Config
+
+ // HandshakeTimeout specifies the duration for the handshake to complete.
+ HandshakeTimeout time.Duration
+
+ // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer
+ // size is zero, then a useful default size is used. The I/O buffer sizes
+ // do not limit the size of the messages that can be sent or received.
+ ReadBufferSize, WriteBufferSize int
+
+ // WriteBufferPool is a pool of buffers for write operations. If the value
+ // is not set, then write buffers are allocated to the connection for the
+ // lifetime of the connection.
+ //
+ // A pool is most useful when the application has a modest volume of writes
+ // across a large number of connections.
+ //
+ // Applications should use a single pool for each unique value of
+ // WriteBufferSize.
+ WriteBufferPool BufferPool
+
+ // Subprotocols specifies the client's requested subprotocols.
+ Subprotocols []string
+
+ // EnableCompression specifies if the client should attempt to negotiate
+ // per message compression (RFC 7692). Setting this value to true does not
+ // guarantee that compression will be supported. Currently only "no context
+ // takeover" modes are supported.
+ EnableCompression bool
+
+ // Jar specifies the cookie jar.
+ // If Jar is nil, cookies are not sent in requests and ignored
+ // in responses.
+ Jar http.CookieJar
+}
+
+// Dial creates a new client connection by calling DialContext with a background context.
+func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
+ return d.DialContext(context.Background(), urlStr, requestHeader)
+}
+
+var errMalformedURL = errors.New("malformed ws or wss URL")
+
+func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
+ hostPort = u.Host
+ hostNoPort = u.Host
+ if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") {
+ hostNoPort = hostNoPort[:i]
+ } else {
+ switch u.Scheme {
+ case "wss":
+ hostPort += ":443"
+ case "https":
+ hostPort += ":443"
+ default:
+ hostPort += ":80"
+ }
+ }
+ return hostPort, hostNoPort
+}
+
+// DefaultDialer is a dialer with all fields set to the default values.
+var DefaultDialer = &Dialer{
+ Proxy: http.ProxyFromEnvironment,
+ HandshakeTimeout: 45 * time.Second,
+}
+
+// nilDialer is the dialer to use when the receiver is nil.
+var nilDialer = *DefaultDialer
+
+// DialContext creates a new client connection. Use requestHeader to specify the
+// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie).
+// Use the response.Header to get the selected subprotocol
+// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
+//
+// The context will be used in the request and in the Dialer.
+//
+// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
+// non-nil *http.Response so that callers can handle redirects, authentication,
+// etcetera. The response body may not contain the entire response and does not
+// need to be closed by the application.
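+//
+// A typical call goes through DefaultDialer (illustrative sketch; the URL is a
+// placeholder):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//    defer cancel()
+//    c, _, err := websocket.DefaultDialer.DialContext(ctx, "wss://example.com/ws", nil)
+//    if err != nil {
+//        return err
+//    }
+//    defer c.Close()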
+func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
+ if d == nil {
+ d = &nilDialer
+ }
+
+ challengeKey, err := generateChallengeKey()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ u, err := url.Parse(urlStr)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ switch u.Scheme {
+ case "ws":
+ u.Scheme = "http"
+ case "wss":
+ u.Scheme = "https"
+ default:
+ return nil, nil, errMalformedURL
+ }
+
+ if u.User != nil {
+ // User name and password are not allowed in websocket URIs.
+ return nil, nil, errMalformedURL
+ }
+
+ req := &http.Request{
+ Method: "GET",
+ URL: u,
+ Proto: "HTTP/1.1",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Header: make(http.Header),
+ Host: u.Host,
+ }
+ req = req.WithContext(ctx)
+
+ // Set the cookies present in the cookie jar of the dialer
+ if d.Jar != nil {
+ for _, cookie := range d.Jar.Cookies(u) {
+ req.AddCookie(cookie)
+ }
+ }
+
+ // Set the request headers using the capitalization for names and values in
+ // RFC examples. Although the capitalization shouldn't matter, there are
+ // servers that depend on it. The Header.Set method is not used because the
+ // method canonicalizes the header names.
+ req.Header["Upgrade"] = []string{"websocket"}
+ req.Header["Connection"] = []string{"Upgrade"}
+ req.Header["Sec-WebSocket-Key"] = []string{challengeKey}
+ req.Header["Sec-WebSocket-Version"] = []string{"13"}
+ if len(d.Subprotocols) > 0 {
+ req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")}
+ }
+ for k, vs := range requestHeader {
+ switch {
+ case k == "Host":
+ if len(vs) > 0 {
+ req.Host = vs[0]
+ }
+ case k == "Upgrade" ||
+ k == "Connection" ||
+ k == "Sec-Websocket-Key" ||
+ k == "Sec-Websocket-Version" ||
+ k == "Sec-Websocket-Extensions" ||
+ (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0):
+ return nil, nil, errors.New("websocket: duplicate header not allowed: " + k)
+ case k == "Sec-Websocket-Protocol":
+ req.Header["Sec-WebSocket-Protocol"] = vs
+ default:
+ req.Header[k] = vs
+ }
+ }
+
+ if d.EnableCompression {
+ req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"}
+ }
+
+ if d.HandshakeTimeout != 0 {
+ var cancel func()
+ ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout)
+ defer cancel()
+ }
+
+ // Get network dial function.
+ var netDial func(network, add string) (net.Conn, error)
+	var netDial func(network, addr string) (net.Conn, error)
+ if d.NetDialContext != nil {
+ netDial = func(network, addr string) (net.Conn, error) {
+ return d.NetDialContext(ctx, network, addr)
+ }
+ } else if d.NetDial != nil {
+ netDial = d.NetDial
+ } else {
+ netDialer := &net.Dialer{}
+ netDial = func(network, addr string) (net.Conn, error) {
+ return netDialer.DialContext(ctx, network, addr)
+ }
+ }
+
+ // If needed, wrap the dial function to set the connection deadline.
+ if deadline, ok := ctx.Deadline(); ok {
+ forwardDial := netDial
+ netDial = func(network, addr string) (net.Conn, error) {
+ c, err := forwardDial(network, addr)
+ if err != nil {
+ return nil, err
+ }
+ err = c.SetDeadline(deadline)
+ if err != nil {
+ c.Close()
+ return nil, err
+ }
+ return c, nil
+ }
+ }
+
+ // If needed, wrap the dial function to connect through a proxy.
+ if d.Proxy != nil {
+ proxyURL, err := d.Proxy(req)
+ if err != nil {
+ return nil, nil, err
+ }
+ if proxyURL != nil {
+ dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial))
+ if err != nil {
+ return nil, nil, err
+ }
+ netDial = dialer.Dial
+ }
+ }
+
+ hostPort, hostNoPort := hostPortNoPort(u)
+ trace := httptrace.ContextClientTrace(ctx)
+ if trace != nil && trace.GetConn != nil {
+ trace.GetConn(hostPort)
+ }
+
+ netConn, err := netDial("tcp", hostPort)
+ if trace != nil && trace.GotConn != nil {
+ trace.GotConn(httptrace.GotConnInfo{
+ Conn: netConn,
+ })
+ }
+ if err != nil {
+ return nil, nil, err
+ }
+
+ defer func() {
+ if netConn != nil {
+ netConn.Close()
+ }
+ }()
+
+ if u.Scheme == "https" {
+ cfg := cloneTLSConfig(d.TLSClientConfig)
+ if cfg.ServerName == "" {
+ cfg.ServerName = hostNoPort
+ }
+ tlsConn := tls.Client(netConn, cfg)
+ netConn = tlsConn
+
+ var err error
+ if trace != nil {
+ err = doHandshakeWithTrace(trace, tlsConn, cfg)
+ } else {
+ err = doHandshake(tlsConn, cfg)
+ }
+
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil)
+
+ if err := req.Write(netConn); err != nil {
+ return nil, nil, err
+ }
+
+ if trace != nil && trace.GotFirstResponseByte != nil {
+ if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 {
+ trace.GotFirstResponseByte()
+ }
+ }
+
+ resp, err := http.ReadResponse(conn.br, req)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if d.Jar != nil {
+ if rc := resp.Cookies(); len(rc) > 0 {
+ d.Jar.SetCookies(u, rc)
+ }
+ }
+
+ if resp.StatusCode != 101 ||
+ !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") ||
+ !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") ||
+ resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) {
+ // Before closing the network connection on return from this
+ // function, slurp up some of the response to aid application
+ // debugging.
+ buf := make([]byte, 1024)
+ n, _ := io.ReadFull(resp.Body, buf)
+ resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n]))
+ return nil, resp, ErrBadHandshake
+ }
+
+ for _, ext := range parseExtensions(resp.Header) {
+ if ext[""] != "permessage-deflate" {
+ continue
+ }
+ _, snct := ext["server_no_context_takeover"]
+ _, cnct := ext["client_no_context_takeover"]
+ if !snct || !cnct {
+ return nil, resp, errInvalidCompression
+ }
+ conn.newCompressionWriter = compressNoContextTakeover
+ conn.newDecompressionReader = decompressNoContextTakeover
+ break
+ }
+
+ resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
+ conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol")
+
+ netConn.SetDeadline(time.Time{})
+ netConn = nil // to avoid close in defer.
+ return conn, resp, nil
+}
+
+func doHandshake(tlsConn *tls.Conn, cfg *tls.Config) error {
+ if err := tlsConn.Handshake(); err != nil {
+ return err
+ }
+ if !cfg.InsecureSkipVerify {
+ if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/gorilla/websocket/client_clone.go b/vendor/github.com/gorilla/websocket/client_clone.go
new file mode 100644
index 0000000..4f0d943
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/client_clone.go
@@ -0,0 +1,16 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.8
+
+package websocket
+
+import "crypto/tls"
+
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+ if cfg == nil {
+ return &tls.Config{}
+ }
+ return cfg.Clone()
+}
diff --git a/vendor/github.com/gorilla/websocket/client_clone_legacy.go b/vendor/github.com/gorilla/websocket/client_clone_legacy.go
new file mode 100644
index 0000000..babb007
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/client_clone_legacy.go
@@ -0,0 +1,38 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.8
+
+package websocket
+
+import "crypto/tls"
+
+// cloneTLSConfig clones all public fields except the fields
+// SessionTicketsDisabled and SessionTicketKey. This avoids copying the
+// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a
+// config in active use.
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+ if cfg == nil {
+ return &tls.Config{}
+ }
+ return &tls.Config{
+ Rand: cfg.Rand,
+ Time: cfg.Time,
+ Certificates: cfg.Certificates,
+ NameToCertificate: cfg.NameToCertificate,
+ GetCertificate: cfg.GetCertificate,
+ RootCAs: cfg.RootCAs,
+ NextProtos: cfg.NextProtos,
+ ServerName: cfg.ServerName,
+ ClientAuth: cfg.ClientAuth,
+ ClientCAs: cfg.ClientCAs,
+ InsecureSkipVerify: cfg.InsecureSkipVerify,
+ CipherSuites: cfg.CipherSuites,
+ PreferServerCipherSuites: cfg.PreferServerCipherSuites,
+ ClientSessionCache: cfg.ClientSessionCache,
+ MinVersion: cfg.MinVersion,
+ MaxVersion: cfg.MaxVersion,
+ CurvePreferences: cfg.CurvePreferences,
+ }
+}
diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go
new file mode 100644
index 0000000..813ffb1
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/compression.go
@@ -0,0 +1,148 @@
+// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "compress/flate"
+ "errors"
+ "io"
+ "strings"
+ "sync"
+)
+
+const (
+ minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6
+ maxCompressionLevel = flate.BestCompression
+ defaultCompressionLevel = 1
+)
+
+var (
+ flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool
+ flateReaderPool = sync.Pool{New: func() interface{} {
+ return flate.NewReader(nil)
+ }}
+)
+
+func decompressNoContextTakeover(r io.Reader) io.ReadCloser {
+ const tail =
+		// Add four bytes as specified in RFC 7692
+ "\x00\x00\xff\xff" +
+ // Add final block to squelch unexpected EOF error from flate reader.
+ "\x01\x00\x00\xff\xff"
+
+ fr, _ := flateReaderPool.Get().(io.ReadCloser)
+ fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil)
+ return &flateReadWrapper{fr}
+}
+
+func isValidCompressionLevel(level int) bool {
+ return minCompressionLevel <= level && level <= maxCompressionLevel
+}
+
+func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser {
+ p := &flateWriterPools[level-minCompressionLevel]
+ tw := &truncWriter{w: w}
+ fw, _ := p.Get().(*flate.Writer)
+ if fw == nil {
+ fw, _ = flate.NewWriter(tw, level)
+ } else {
+ fw.Reset(tw)
+ }
+ return &flateWriteWrapper{fw: fw, tw: tw, p: p}
+}
+
+// truncWriter is an io.Writer that writes all but the last four bytes of the
+// stream to another io.Writer.
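+// The held-back tail is expected to be the flate sync marker (0x00 0x00 0xff
+// 0xff), which RFC 7692 requires to be removed from the compressed message
+// before it is framed.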
+type truncWriter struct {
+ w io.WriteCloser
+ n int
+ p [4]byte
+}
+
+func (w *truncWriter) Write(p []byte) (int, error) {
+ n := 0
+
+ // fill buffer first for simplicity.
+ if w.n < len(w.p) {
+ n = copy(w.p[w.n:], p)
+ p = p[n:]
+ w.n += n
+ if len(p) == 0 {
+ return n, nil
+ }
+ }
+
+ m := len(p)
+ if m > len(w.p) {
+ m = len(w.p)
+ }
+
+ if nn, err := w.w.Write(w.p[:m]); err != nil {
+ return n + nn, err
+ }
+
+ copy(w.p[:], w.p[m:])
+ copy(w.p[len(w.p)-m:], p[len(p)-m:])
+ nn, err := w.w.Write(p[:len(p)-m])
+ return n + nn, err
+}
+
+type flateWriteWrapper struct {
+ fw *flate.Writer
+ tw *truncWriter
+ p *sync.Pool
+}
+
+func (w *flateWriteWrapper) Write(p []byte) (int, error) {
+ if w.fw == nil {
+ return 0, errWriteClosed
+ }
+ return w.fw.Write(p)
+}
+
+func (w *flateWriteWrapper) Close() error {
+ if w.fw == nil {
+ return errWriteClosed
+ }
+ err1 := w.fw.Flush()
+ w.p.Put(w.fw)
+ w.fw = nil
+ if w.tw.p != [4]byte{0, 0, 0xff, 0xff} {
+ return errors.New("websocket: internal error, unexpected bytes at end of flate stream")
+ }
+ err2 := w.tw.w.Close()
+ if err1 != nil {
+ return err1
+ }
+ return err2
+}
+
+type flateReadWrapper struct {
+ fr io.ReadCloser
+}
+
+func (r *flateReadWrapper) Read(p []byte) (int, error) {
+ if r.fr == nil {
+ return 0, io.ErrClosedPipe
+ }
+ n, err := r.fr.Read(p)
+ if err == io.EOF {
+ // Preemptively place the reader back in the pool. This helps with
+ // scenarios where the application does not call NextReader() soon after
+ // this final read.
+ r.Close()
+ }
+ return n, err
+}
+
+func (r *flateReadWrapper) Close() error {
+ if r.fr == nil {
+ return io.ErrClosedPipe
+ }
+ err := r.fr.Close()
+ flateReaderPool.Put(r.fr)
+ r.fr = nil
+ return err
+}
diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go
new file mode 100644
index 0000000..ca46d2f
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/conn.go
@@ -0,0 +1,1201 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "encoding/binary"
+ "errors"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "net"
+ "strconv"
+ "sync"
+ "time"
+ "unicode/utf8"
+)
+
+const (
+ // Frame header byte 0 bits from Section 5.2 of RFC 6455
+ finalBit = 1 << 7
+ rsv1Bit = 1 << 6
+ rsv2Bit = 1 << 5
+ rsv3Bit = 1 << 4
+
+ // Frame header byte 1 bits from Section 5.2 of RFC 6455
+ maskBit = 1 << 7
+
+ maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask
+ maxControlFramePayloadSize = 125
+
+ writeWait = time.Second
+
+ defaultReadBufferSize = 4096
+ defaultWriteBufferSize = 4096
+
+ continuationFrame = 0
+ noFrame = -1
+)
+
+// Close codes defined in RFC 6455, section 11.7.
+const (
+ CloseNormalClosure = 1000
+ CloseGoingAway = 1001
+ CloseProtocolError = 1002
+ CloseUnsupportedData = 1003
+ CloseNoStatusReceived = 1005
+ CloseAbnormalClosure = 1006
+ CloseInvalidFramePayloadData = 1007
+ ClosePolicyViolation = 1008
+ CloseMessageTooBig = 1009
+ CloseMandatoryExtension = 1010
+ CloseInternalServerErr = 1011
+ CloseServiceRestart = 1012
+ CloseTryAgainLater = 1013
+ CloseTLSHandshake = 1015
+)
+
+// The message types are defined in RFC 6455, section 11.8.
+const (
+ // TextMessage denotes a text data message. The text message payload is
+ // interpreted as UTF-8 encoded text data.
+ TextMessage = 1
+
+ // BinaryMessage denotes a binary data message.
+ BinaryMessage = 2
+
+ // CloseMessage denotes a close control message. The optional message
+ // payload contains a numeric code and text. Use the FormatCloseMessage
+ // function to format a close message payload.
+ CloseMessage = 8
+
+ // PingMessage denotes a ping control message. The optional message payload
+ // is UTF-8 encoded text.
+ PingMessage = 9
+
+ // PongMessage denotes a pong control message. The optional message payload
+ // is UTF-8 encoded text.
+ PongMessage = 10
+)
+
+// ErrCloseSent is returned when the application writes a message to the
+// connection after sending a close message.
+var ErrCloseSent = errors.New("websocket: close sent")
+
+// ErrReadLimit is returned when reading a message that is larger than the
+// read limit set for the connection.
+var ErrReadLimit = errors.New("websocket: read limit exceeded")
+
+// netError satisfies the net.Error interface.
+type netError struct {
+ msg string
+ temporary bool
+ timeout bool
+}
+
+func (e *netError) Error() string { return e.msg }
+func (e *netError) Temporary() bool { return e.temporary }
+func (e *netError) Timeout() bool { return e.timeout }
+
+// CloseError represents a close message.
+type CloseError struct {
+ // Code is defined in RFC 6455, section 11.7.
+ Code int
+
+ // Text is the optional text payload.
+ Text string
+}
+
+func (e *CloseError) Error() string {
+ s := []byte("websocket: close ")
+ s = strconv.AppendInt(s, int64(e.Code), 10)
+ switch e.Code {
+ case CloseNormalClosure:
+ s = append(s, " (normal)"...)
+ case CloseGoingAway:
+ s = append(s, " (going away)"...)
+ case CloseProtocolError:
+ s = append(s, " (protocol error)"...)
+ case CloseUnsupportedData:
+ s = append(s, " (unsupported data)"...)
+ case CloseNoStatusReceived:
+ s = append(s, " (no status)"...)
+ case CloseAbnormalClosure:
+ s = append(s, " (abnormal closure)"...)
+ case CloseInvalidFramePayloadData:
+ s = append(s, " (invalid payload data)"...)
+ case ClosePolicyViolation:
+ s = append(s, " (policy violation)"...)
+ case CloseMessageTooBig:
+ s = append(s, " (message too big)"...)
+ case CloseMandatoryExtension:
+ s = append(s, " (mandatory extension missing)"...)
+ case CloseInternalServerErr:
+ s = append(s, " (internal server error)"...)
+ case CloseTLSHandshake:
+ s = append(s, " (TLS handshake error)"...)
+ }
+ if e.Text != "" {
+ s = append(s, ": "...)
+ s = append(s, e.Text...)
+ }
+ return string(s)
+}
+
+// IsCloseError returns a boolean indicating whether the error is a *CloseError
+// with one of the specified codes.
+func IsCloseError(err error, codes ...int) bool {
+ if e, ok := err.(*CloseError); ok {
+ for _, code := range codes {
+ if e.Code == code {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// IsUnexpectedCloseError returns a boolean indicating whether the error is a
+// *CloseError with a code not in the list of expected codes.
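+//
+// A typical read loop might use it as follows (illustrative sketch; c is a
+// *Conn and handle is a placeholder for application logic):
+//
+//    for {
+//        _, msg, err := c.ReadMessage()
+//        if err != nil {
+//            if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseNormalClosure) {
+//                log.Printf("unexpected close: %v", err)
+//            }
+//            break
+//        }
+//        handle(msg)
+//    }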
+func IsUnexpectedCloseError(err error, expectedCodes ...int) bool {
+ if e, ok := err.(*CloseError); ok {
+ for _, code := range expectedCodes {
+ if e.Code == code {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+var (
+ errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true, temporary: true}
+ errUnexpectedEOF = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()}
+ errBadWriteOpCode = errors.New("websocket: bad write message type")
+ errWriteClosed = errors.New("websocket: write closed")
+ errInvalidControlFrame = errors.New("websocket: invalid control frame")
+)
+
+func newMaskKey() [4]byte {
+ n := rand.Uint32()
+ return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)}
+}
+
+func hideTempErr(err error) error {
+ if e, ok := err.(net.Error); ok && e.Temporary() {
+ err = &netError{msg: e.Error(), timeout: e.Timeout()}
+ }
+ return err
+}
+
+func isControl(frameType int) bool {
+ return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage
+}
+
+func isData(frameType int) bool {
+ return frameType == TextMessage || frameType == BinaryMessage
+}
+
+var validReceivedCloseCodes = map[int]bool{
+ // see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number
+
+ CloseNormalClosure: true,
+ CloseGoingAway: true,
+ CloseProtocolError: true,
+ CloseUnsupportedData: true,
+ CloseNoStatusReceived: false,
+ CloseAbnormalClosure: false,
+ CloseInvalidFramePayloadData: true,
+ ClosePolicyViolation: true,
+ CloseMessageTooBig: true,
+ CloseMandatoryExtension: true,
+ CloseInternalServerErr: true,
+ CloseServiceRestart: true,
+ CloseTryAgainLater: true,
+ CloseTLSHandshake: false,
+}
+
+func isValidReceivedCloseCode(code int) bool {
+ return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999)
+}
+
+// BufferPool represents a pool of buffers. The *sync.Pool type satisfies this
+// interface. The type of the value stored in a pool is not specified.
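+//
+// For example (illustrative), a single *sync.Pool can be shared by all
+// connections that use the same WriteBufferSize:
+//
+//    var wsWriteBufferPool = &sync.Pool{}
+//
+//    d := &websocket.Dialer{
+//        WriteBufferSize: 4096,
+//        WriteBufferPool: wsWriteBufferPool,
+//    }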
+type BufferPool interface {
+ // Get gets a value from the pool or returns nil if the pool is empty.
+ Get() interface{}
+ // Put adds a value to the pool.
+ Put(interface{})
+}
+
+// writePoolData is the type added to the write buffer pool. This wrapper is
+// used to prevent applications from peeking at and depending on the values
+// added to the pool.
+type writePoolData struct{ buf []byte }
+
+// The Conn type represents a WebSocket connection.
+type Conn struct {
+ conn net.Conn
+ isServer bool
+ subprotocol string
+
+ // Write fields
+ mu chan struct{} // used as mutex to protect write to conn
+ writeBuf []byte // frame is constructed in this buffer.
+ writePool BufferPool
+ writeBufSize int
+ writeDeadline time.Time
+ writer io.WriteCloser // the current writer returned to the application
+ isWriting bool // for best-effort concurrent write detection
+
+ writeErrMu sync.Mutex
+ writeErr error
+
+ enableWriteCompression bool
+ compressionLevel int
+ newCompressionWriter func(io.WriteCloser, int) io.WriteCloser
+
+ // Read fields
+ reader io.ReadCloser // the current reader returned to the application
+ readErr error
+ br *bufio.Reader
+ // bytes remaining in current frame.
+	// use setReadRemaining to safely update this value and prevent overflow
+ readRemaining int64
+	readFinal bool // true once the final frame of the current message has been read.
+ readLength int64 // Message size.
+ readLimit int64 // Maximum message size.
+ readMaskPos int
+ readMaskKey [4]byte
+ handlePong func(string) error
+ handlePing func(string) error
+ handleClose func(int, string) error
+ readErrCount int
+ messageReader *messageReader // the current low-level reader
+
+ readDecompress bool // whether last read frame had RSV1 set
+ newDecompressionReader func(io.Reader) io.ReadCloser
+}
+
+func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, writeBufferPool BufferPool, br *bufio.Reader, writeBuf []byte) *Conn {
+
+ if br == nil {
+ if readBufferSize == 0 {
+ readBufferSize = defaultReadBufferSize
+ } else if readBufferSize < maxControlFramePayloadSize {
+ // must be large enough for control frame
+ readBufferSize = maxControlFramePayloadSize
+ }
+ br = bufio.NewReaderSize(conn, readBufferSize)
+ }
+
+ if writeBufferSize <= 0 {
+ writeBufferSize = defaultWriteBufferSize
+ }
+ writeBufferSize += maxFrameHeaderSize
+
+ if writeBuf == nil && writeBufferPool == nil {
+ writeBuf = make([]byte, writeBufferSize)
+ }
+
+ mu := make(chan struct{}, 1)
+ mu <- struct{}{}
+ c := &Conn{
+ isServer: isServer,
+ br: br,
+ conn: conn,
+ mu: mu,
+ readFinal: true,
+ writeBuf: writeBuf,
+ writePool: writeBufferPool,
+ writeBufSize: writeBufferSize,
+ enableWriteCompression: true,
+ compressionLevel: defaultCompressionLevel,
+ }
+ c.SetCloseHandler(nil)
+ c.SetPingHandler(nil)
+ c.SetPongHandler(nil)
+ return c
+}
+
+// setReadRemaining tracks the number of bytes remaining on the connection. If n
+// overflows, ErrReadLimit is returned.
+func (c *Conn) setReadRemaining(n int64) error {
+ if n < 0 {
+ return ErrReadLimit
+ }
+
+ c.readRemaining = n
+ return nil
+}
+
+// Subprotocol returns the negotiated protocol for the connection.
+func (c *Conn) Subprotocol() string {
+ return c.subprotocol
+}
+
+// Close closes the underlying network connection without sending or waiting
+// for a close message.
+func (c *Conn) Close() error {
+ return c.conn.Close()
+}
+
+// LocalAddr returns the local network address.
+func (c *Conn) LocalAddr() net.Addr {
+ return c.conn.LocalAddr()
+}
+
+// RemoteAddr returns the remote network address.
+func (c *Conn) RemoteAddr() net.Addr {
+ return c.conn.RemoteAddr()
+}
+
+// Write methods
+
+func (c *Conn) writeFatal(err error) error {
+ err = hideTempErr(err)
+ c.writeErrMu.Lock()
+ if c.writeErr == nil {
+ c.writeErr = err
+ }
+ c.writeErrMu.Unlock()
+ return err
+}
+
+func (c *Conn) read(n int) ([]byte, error) {
+ p, err := c.br.Peek(n)
+ if err == io.EOF {
+ err = errUnexpectedEOF
+ }
+ c.br.Discard(len(p))
+ return p, err
+}
+
+func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error {
+ <-c.mu
+ defer func() { c.mu <- struct{}{} }()
+
+ c.writeErrMu.Lock()
+ err := c.writeErr
+ c.writeErrMu.Unlock()
+ if err != nil {
+ return err
+ }
+
+ c.conn.SetWriteDeadline(deadline)
+ if len(buf1) == 0 {
+ _, err = c.conn.Write(buf0)
+ } else {
+ err = c.writeBufs(buf0, buf1)
+ }
+ if err != nil {
+ return c.writeFatal(err)
+ }
+ if frameType == CloseMessage {
+ c.writeFatal(ErrCloseSent)
+ }
+ return nil
+}
+
+// WriteControl writes a control message with the given deadline. The allowed
+// message types are CloseMessage, PingMessage and PongMessage.
+func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error {
+ if !isControl(messageType) {
+ return errBadWriteOpCode
+ }
+ if len(data) > maxControlFramePayloadSize {
+ return errInvalidControlFrame
+ }
+
+ b0 := byte(messageType) | finalBit
+ b1 := byte(len(data))
+ if !c.isServer {
+ b1 |= maskBit
+ }
+
+ buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize)
+ buf = append(buf, b0, b1)
+
+ if c.isServer {
+ buf = append(buf, data...)
+ } else {
+ key := newMaskKey()
+ buf = append(buf, key[:]...)
+ buf = append(buf, data...)
+ maskBytes(key, 0, buf[6:])
+ }
+
+ d := 1000 * time.Hour
+ if !deadline.IsZero() {
+ d = deadline.Sub(time.Now())
+ if d < 0 {
+ return errWriteTimeout
+ }
+ }
+
+ timer := time.NewTimer(d)
+ select {
+ case <-c.mu:
+ timer.Stop()
+ case <-timer.C:
+ return errWriteTimeout
+ }
+ defer func() { c.mu <- struct{}{} }()
+
+ c.writeErrMu.Lock()
+ err := c.writeErr
+ c.writeErrMu.Unlock()
+ if err != nil {
+ return err
+ }
+
+ c.conn.SetWriteDeadline(deadline)
+ _, err = c.conn.Write(buf)
+ if err != nil {
+ return c.writeFatal(err)
+ }
+ if messageType == CloseMessage {
+ c.writeFatal(ErrCloseSent)
+ }
+ return err
+}
+
+// beginMessage prepares a connection and message writer for a new message.
+func (c *Conn) beginMessage(mw *messageWriter, messageType int) error {
+ // Close previous writer if not already closed by the application. It's
+ // probably better to return an error in this situation, but we cannot
+ // change this without breaking existing applications.
+ if c.writer != nil {
+ c.writer.Close()
+ c.writer = nil
+ }
+
+ if !isControl(messageType) && !isData(messageType) {
+ return errBadWriteOpCode
+ }
+
+ c.writeErrMu.Lock()
+ err := c.writeErr
+ c.writeErrMu.Unlock()
+ if err != nil {
+ return err
+ }
+
+ mw.c = c
+ mw.frameType = messageType
+ mw.pos = maxFrameHeaderSize
+
+ if c.writeBuf == nil {
+ wpd, ok := c.writePool.Get().(writePoolData)
+ if ok {
+ c.writeBuf = wpd.buf
+ } else {
+ c.writeBuf = make([]byte, c.writeBufSize)
+ }
+ }
+ return nil
+}
+
+// NextWriter returns a writer for the next message to send. The writer's Close
+// method flushes the complete message to the network.
+//
+// There can be at most one open writer on a connection. NextWriter closes the
+// previous writer if the application has not already done so.
+//
+// All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and
+// PongMessage) are supported.
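+//
+// A sketch of sending a single message (illustrative; data is the application
+// payload):
+//
+//    w, err := c.NextWriter(websocket.TextMessage)
+//    if err != nil {
+//        return err
+//    }
+//    if _, err := w.Write(data); err != nil {
+//        return err
+//    }
+//    return w.Close()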
+func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) {
+ var mw messageWriter
+ if err := c.beginMessage(&mw, messageType); err != nil {
+ return nil, err
+ }
+ c.writer = &mw
+ if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) {
+ w := c.newCompressionWriter(c.writer, c.compressionLevel)
+ mw.compress = true
+ c.writer = w
+ }
+ return c.writer, nil
+}
+
+type messageWriter struct {
+ c *Conn
+ compress bool // whether next call to flushFrame should set RSV1
+ pos int // end of data in writeBuf.
+ frameType int // type of the current frame.
+ err error
+}
+
+func (w *messageWriter) endMessage(err error) error {
+ if w.err != nil {
+ return err
+ }
+ c := w.c
+ w.err = err
+ c.writer = nil
+ if c.writePool != nil {
+ c.writePool.Put(writePoolData{buf: c.writeBuf})
+ c.writeBuf = nil
+ }
+ return err
+}
+
+// flushFrame writes buffered data and extra as a frame to the network. The
+// final argument indicates that this is the last frame in the message.
+func (w *messageWriter) flushFrame(final bool, extra []byte) error {
+ c := w.c
+ length := w.pos - maxFrameHeaderSize + len(extra)
+
+ // Check for invalid control frames.
+ if isControl(w.frameType) &&
+ (!final || length > maxControlFramePayloadSize) {
+ return w.endMessage(errInvalidControlFrame)
+ }
+
+ b0 := byte(w.frameType)
+ if final {
+ b0 |= finalBit
+ }
+ if w.compress {
+ b0 |= rsv1Bit
+ }
+ w.compress = false
+
+ b1 := byte(0)
+ if !c.isServer {
+ b1 |= maskBit
+ }
+
+ // Assume that the frame starts at beginning of c.writeBuf.
+ framePos := 0
+ if c.isServer {
+ // Adjust up if mask not included in the header.
+ framePos = 4
+ }
+
+ switch {
+ case length >= 65536:
+ c.writeBuf[framePos] = b0
+ c.writeBuf[framePos+1] = b1 | 127
+ binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length))
+ case length > 125:
+ framePos += 6
+ c.writeBuf[framePos] = b0
+ c.writeBuf[framePos+1] = b1 | 126
+ binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length))
+ default:
+ framePos += 8
+ c.writeBuf[framePos] = b0
+ c.writeBuf[framePos+1] = b1 | byte(length)
+ }
+
+ if !c.isServer {
+ key := newMaskKey()
+ copy(c.writeBuf[maxFrameHeaderSize-4:], key[:])
+ maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos])
+ if len(extra) > 0 {
+ return w.endMessage(c.writeFatal(errors.New("websocket: internal error, extra used in client mode")))
+ }
+ }
+
+ // Write the buffers to the connection with best-effort detection of
+ // concurrent writes. See the concurrency section in the package
+ // documentation for more info.
+
+ if c.isWriting {
+ panic("concurrent write to websocket connection")
+ }
+ c.isWriting = true
+
+ err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra)
+
+ if !c.isWriting {
+ panic("concurrent write to websocket connection")
+ }
+ c.isWriting = false
+
+ if err != nil {
+ return w.endMessage(err)
+ }
+
+ if final {
+ w.endMessage(errWriteClosed)
+ return nil
+ }
+
+ // Setup for next frame.
+ w.pos = maxFrameHeaderSize
+ w.frameType = continuationFrame
+ return nil
+}
+
+func (w *messageWriter) ncopy(max int) (int, error) {
+ n := len(w.c.writeBuf) - w.pos
+ if n <= 0 {
+ if err := w.flushFrame(false, nil); err != nil {
+ return 0, err
+ }
+ n = len(w.c.writeBuf) - w.pos
+ }
+ if n > max {
+ n = max
+ }
+ return n, nil
+}
+
+func (w *messageWriter) Write(p []byte) (int, error) {
+ if w.err != nil {
+ return 0, w.err
+ }
+
+ if len(p) > 2*len(w.c.writeBuf) && w.c.isServer {
+ // Don't buffer large messages.
+ err := w.flushFrame(false, p)
+ if err != nil {
+ return 0, err
+ }
+ return len(p), nil
+ }
+
+ nn := len(p)
+ for len(p) > 0 {
+ n, err := w.ncopy(len(p))
+ if err != nil {
+ return 0, err
+ }
+ copy(w.c.writeBuf[w.pos:], p[:n])
+ w.pos += n
+ p = p[n:]
+ }
+ return nn, nil
+}
+
+func (w *messageWriter) WriteString(p string) (int, error) {
+ if w.err != nil {
+ return 0, w.err
+ }
+
+ nn := len(p)
+ for len(p) > 0 {
+ n, err := w.ncopy(len(p))
+ if err != nil {
+ return 0, err
+ }
+ copy(w.c.writeBuf[w.pos:], p[:n])
+ w.pos += n
+ p = p[n:]
+ }
+ return nn, nil
+}
+
+func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) {
+ if w.err != nil {
+ return 0, w.err
+ }
+ for {
+ if w.pos == len(w.c.writeBuf) {
+ err = w.flushFrame(false, nil)
+ if err != nil {
+ break
+ }
+ }
+ var n int
+ n, err = r.Read(w.c.writeBuf[w.pos:])
+ w.pos += n
+ nn += int64(n)
+ if err != nil {
+ if err == io.EOF {
+ err = nil
+ }
+ break
+ }
+ }
+ return nn, err
+}
+
+func (w *messageWriter) Close() error {
+ if w.err != nil {
+ return w.err
+ }
+ return w.flushFrame(true, nil)
+}
+
+// WritePreparedMessage writes a prepared message into the connection.
+func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error {
+ frameType, frameData, err := pm.frame(prepareKey{
+ isServer: c.isServer,
+ compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType),
+ compressionLevel: c.compressionLevel,
+ })
+ if err != nil {
+ return err
+ }
+ if c.isWriting {
+ panic("concurrent write to websocket connection")
+ }
+ c.isWriting = true
+ err = c.write(frameType, c.writeDeadline, frameData, nil)
+ if !c.isWriting {
+ panic("concurrent write to websocket connection")
+ }
+ c.isWriting = false
+ return err
+}
+
+// WriteMessage is a helper method for getting a writer using NextWriter,
+// writing the message and closing the writer.
+func (c *Conn) WriteMessage(messageType int, data []byte) error {
+
+ if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) {
+ // Fast path with no allocations and single frame.
+
+ var mw messageWriter
+ if err := c.beginMessage(&mw, messageType); err != nil {
+ return err
+ }
+ n := copy(c.writeBuf[mw.pos:], data)
+ mw.pos += n
+ data = data[n:]
+ return mw.flushFrame(true, data)
+ }
+
+ w, err := c.NextWriter(messageType)
+ if err != nil {
+ return err
+ }
+ if _, err = w.Write(data); err != nil {
+ return err
+ }
+ return w.Close()
+}
+
+// SetWriteDeadline sets the write deadline on the underlying network
+// connection. After a write has timed out, the websocket state is corrupt and
+// all future writes will return an error. A zero value for t means writes will
+// not time out.
+func (c *Conn) SetWriteDeadline(t time.Time) error {
+ c.writeDeadline = t
+ return nil
+}
+
+// Read methods
+
+func (c *Conn) advanceFrame() (int, error) {
+ // 1. Skip remainder of previous frame.
+
+ if c.readRemaining > 0 {
+ if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil {
+ return noFrame, err
+ }
+ }
+
+ // 2. Read and parse first two bytes of frame header.
+
+ p, err := c.read(2)
+ if err != nil {
+ return noFrame, err
+ }
+
+ final := p[0]&finalBit != 0
+ frameType := int(p[0] & 0xf)
+ mask := p[1]&maskBit != 0
+ c.setReadRemaining(int64(p[1] & 0x7f))
+
+ c.readDecompress = false
+ if c.newDecompressionReader != nil && (p[0]&rsv1Bit) != 0 {
+ c.readDecompress = true
+ p[0] &^= rsv1Bit
+ }
+
+ if rsv := p[0] & (rsv1Bit | rsv2Bit | rsv3Bit); rsv != 0 {
+ return noFrame, c.handleProtocolError("unexpected reserved bits 0x" + strconv.FormatInt(int64(rsv), 16))
+ }
+
+ switch frameType {
+ case CloseMessage, PingMessage, PongMessage:
+ if c.readRemaining > maxControlFramePayloadSize {
+ return noFrame, c.handleProtocolError("control frame length > 125")
+ }
+ if !final {
+ return noFrame, c.handleProtocolError("control frame not final")
+ }
+ case TextMessage, BinaryMessage:
+ if !c.readFinal {
+ return noFrame, c.handleProtocolError("message start before final message frame")
+ }
+ c.readFinal = final
+ case continuationFrame:
+ if c.readFinal {
+ return noFrame, c.handleProtocolError("continuation after final message frame")
+ }
+ c.readFinal = final
+ default:
+ return noFrame, c.handleProtocolError("unknown opcode " + strconv.Itoa(frameType))
+ }
+
+ // 3. Read and parse frame length as per
+ // https://tools.ietf.org/html/rfc6455#section-5.2
+ //
+ // The length of the "Payload data", in bytes: if 0-125, that is the payload
+ // length.
+ // - If 126, the following 2 bytes interpreted as a 16-bit unsigned
+ // integer are the payload length.
+ // - If 127, the following 8 bytes interpreted as
+ // a 64-bit unsigned integer (the most significant bit MUST be 0) are the
+ // payload length. Multibyte length quantities are expressed in network byte
+ // order.
+
+ switch c.readRemaining {
+ case 126:
+ p, err := c.read(2)
+ if err != nil {
+ return noFrame, err
+ }
+
+ if err := c.setReadRemaining(int64(binary.BigEndian.Uint16(p))); err != nil {
+ return noFrame, err
+ }
+ case 127:
+ p, err := c.read(8)
+ if err != nil {
+ return noFrame, err
+ }
+
+ if err := c.setReadRemaining(int64(binary.BigEndian.Uint64(p))); err != nil {
+ return noFrame, err
+ }
+ }
+
+ // 4. Handle frame masking.
+
+ if mask != c.isServer {
+ return noFrame, c.handleProtocolError("incorrect mask flag")
+ }
+
+ if mask {
+ c.readMaskPos = 0
+ p, err := c.read(len(c.readMaskKey))
+ if err != nil {
+ return noFrame, err
+ }
+ copy(c.readMaskKey[:], p)
+ }
+
+ // 5. For text and binary messages, enforce read limit and return.
+
+ if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage {
+
+ c.readLength += c.readRemaining
+ // Don't allow readLength to overflow in the presence of a large readRemaining
+ // counter.
+ if c.readLength < 0 {
+ return noFrame, ErrReadLimit
+ }
+
+ if c.readLimit > 0 && c.readLength > c.readLimit {
+ c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait))
+ return noFrame, ErrReadLimit
+ }
+
+ return frameType, nil
+ }
+
+ // 6. Read control frame payload.
+
+ var payload []byte
+ if c.readRemaining > 0 {
+ payload, err = c.read(int(c.readRemaining))
+ c.setReadRemaining(0)
+ if err != nil {
+ return noFrame, err
+ }
+ if c.isServer {
+ maskBytes(c.readMaskKey, 0, payload)
+ }
+ }
+
+ // 7. Process control frame payload.
+
+ switch frameType {
+ case PongMessage:
+ if err := c.handlePong(string(payload)); err != nil {
+ return noFrame, err
+ }
+ case PingMessage:
+ if err := c.handlePing(string(payload)); err != nil {
+ return noFrame, err
+ }
+ case CloseMessage:
+ closeCode := CloseNoStatusReceived
+ closeText := ""
+ if len(payload) >= 2 {
+ closeCode = int(binary.BigEndian.Uint16(payload))
+ if !isValidReceivedCloseCode(closeCode) {
+ return noFrame, c.handleProtocolError("invalid close code")
+ }
+ closeText = string(payload[2:])
+ if !utf8.ValidString(closeText) {
+ return noFrame, c.handleProtocolError("invalid utf8 payload in close frame")
+ }
+ }
+ if err := c.handleClose(closeCode, closeText); err != nil {
+ return noFrame, err
+ }
+ return noFrame, &CloseError{Code: closeCode, Text: closeText}
+ }
+
+ return frameType, nil
+}
+
+func (c *Conn) handleProtocolError(message string) error {
+ c.WriteControl(CloseMessage, FormatCloseMessage(CloseProtocolError, message), time.Now().Add(writeWait))
+ return errors.New("websocket: " + message)
+}
+
+// NextReader returns the next data message received from the peer. The
+// returned messageType is either TextMessage or BinaryMessage.
+//
+// There can be at most one open reader on a connection. NextReader discards
+// the previous message if the application has not already consumed it.
+//
+// Applications must break out of the application's read loop when this method
+// returns a non-nil error value. Errors returned from this method are
+// permanent. Once this method returns a non-nil error, all subsequent calls to
+// this method return the same error.
+func (c *Conn) NextReader() (messageType int, r io.Reader, err error) {
+ // Close previous reader, only relevant for decompression.
+ if c.reader != nil {
+ c.reader.Close()
+ c.reader = nil
+ }
+
+ c.messageReader = nil
+ c.readLength = 0
+
+ for c.readErr == nil {
+ frameType, err := c.advanceFrame()
+ if err != nil {
+ c.readErr = hideTempErr(err)
+ break
+ }
+
+ if frameType == TextMessage || frameType == BinaryMessage {
+ c.messageReader = &messageReader{c}
+ c.reader = c.messageReader
+ if c.readDecompress {
+ c.reader = c.newDecompressionReader(c.reader)
+ }
+ return frameType, c.reader, nil
+ }
+ }
+
+ // Applications that do not handle the error returned from this method spin
+ // in a tight loop on connection failure. To help application developers
+ // detect this error, panic on repeated reads to the failed connection.
+ c.readErrCount++
+ if c.readErrCount >= 1000 {
+ panic("repeated read on failed websocket connection")
+ }
+
+ return noFrame, nil, c.readErr
+}
+
+type messageReader struct{ c *Conn }
+
+func (r *messageReader) Read(b []byte) (int, error) {
+ c := r.c
+ if c.messageReader != r {
+ return 0, io.EOF
+ }
+
+ for c.readErr == nil {
+
+ if c.readRemaining > 0 {
+ if int64(len(b)) > c.readRemaining {
+ b = b[:c.readRemaining]
+ }
+ n, err := c.br.Read(b)
+ c.readErr = hideTempErr(err)
+ if c.isServer {
+ c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n])
+ }
+ rem := c.readRemaining
+ rem -= int64(n)
+ c.setReadRemaining(rem)
+ if c.readRemaining > 0 && c.readErr == io.EOF {
+ c.readErr = errUnexpectedEOF
+ }
+ return n, c.readErr
+ }
+
+ if c.readFinal {
+ c.messageReader = nil
+ return 0, io.EOF
+ }
+
+ frameType, err := c.advanceFrame()
+ switch {
+ case err != nil:
+ c.readErr = hideTempErr(err)
+ case frameType == TextMessage || frameType == BinaryMessage:
+ c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader")
+ }
+ }
+
+ err := c.readErr
+ if err == io.EOF && c.messageReader == r {
+ err = errUnexpectedEOF
+ }
+ return 0, err
+}
+
+func (r *messageReader) Close() error {
+ return nil
+}
+
+// ReadMessage is a helper method for getting a reader using NextReader and
+// reading from that reader to a buffer.
+func (c *Conn) ReadMessage() (messageType int, p []byte, err error) {
+ var r io.Reader
+ messageType, r, err = c.NextReader()
+ if err != nil {
+ return messageType, nil, err
+ }
+ p, err = ioutil.ReadAll(r)
+ return messageType, p, err
+}
+
+// SetReadDeadline sets the read deadline on the underlying network connection.
+// After a read has timed out, the websocket connection state is corrupt and
+// all future reads will return an error. A zero value for t means reads will
+// not time out.
+func (c *Conn) SetReadDeadline(t time.Time) error {
+ return c.conn.SetReadDeadline(t)
+}
+
+// SetReadLimit sets the maximum size in bytes for a message read from the peer. If a
+// message exceeds the limit, the connection sends a close message to the peer
+// and returns ErrReadLimit to the application.
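+//
+// For example, c.SetReadLimit(512) causes any subsequent message larger than
+// 512 bytes to fail the read with ErrReadLimit.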
+func (c *Conn) SetReadLimit(limit int64) {
+ c.readLimit = limit
+}
+
+// CloseHandler returns the current close handler.
+func (c *Conn) CloseHandler() func(code int, text string) error {
+ return c.handleClose
+}
+
+// SetCloseHandler sets the handler for close messages received from the peer.
+// The code argument to h is the received close code or CloseNoStatusReceived
+// if the close message is empty. The default close handler sends a close
+// message back to the peer.
+//
+// The handler function is called from the NextReader, ReadMessage and message
+// reader Read methods. The application must read the connection to process
+// close messages as described in the section on Control Messages above.
+//
+// The connection read methods return a CloseError when a close message is
+// received. Most applications should handle close messages as part of their
+// normal error handling. Applications should only set a close handler when the
+// application must perform some action before sending a close message back to
+// the peer.
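+//
+// A sketch of a custom handler that logs the peer's close code before echoing
+// a close message (log is the standard library logger; the one-second
+// deadline is an arbitrary choice):
+//
+//	c.SetCloseHandler(func(code int, text string) error {
+//		log.Printf("peer closed: %d %q", code, text)
+//		message := websocket.FormatCloseMessage(code, "")
+//		return c.WriteControl(websocket.CloseMessage, message, time.Now().Add(time.Second))
+//	})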
+func (c *Conn) SetCloseHandler(h func(code int, text string) error) {
+ if h == nil {
+ h = func(code int, text string) error {
+ message := FormatCloseMessage(code, "")
+ c.WriteControl(CloseMessage, message, time.Now().Add(writeWait))
+ return nil
+ }
+ }
+ c.handleClose = h
+}
+
+// PingHandler returns the current ping handler.
+func (c *Conn) PingHandler() func(appData string) error {
+ return c.handlePing
+}
+
+// SetPingHandler sets the handler for ping messages received from the peer.
+// The appData argument to h is the PING message application data. The default
+// ping handler sends a pong to the peer.
+//
+// The handler function is called from the NextReader, ReadMessage and message
+// reader Read methods. The application must read the connection to process
+// ping messages as described in the section on Control Messages above.
+func (c *Conn) SetPingHandler(h func(appData string) error) {
+ if h == nil {
+ h = func(message string) error {
+ err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait))
+ if err == ErrCloseSent {
+ return nil
+ } else if e, ok := err.(net.Error); ok && e.Temporary() {
+ return nil
+ }
+ return err
+ }
+ }
+ c.handlePing = h
+}
+
+// PongHandler returns the current pong handler.
+func (c *Conn) PongHandler() func(appData string) error {
+ return c.handlePong
+}
+
+// SetPongHandler sets the handler for pong messages received from the peer.
+// The appData argument to h is the PONG message application data. The default
+// pong handler does nothing.
+//
+// The handler function is called from the NextReader, ReadMessage and message
+// reader Read methods. The application must read the connection to process
+// pong messages as described in the section on Control Messages above.
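+//
+// A common keep-alive sketch extends the read deadline whenever a pong
+// arrives (pongWait is an assumed application-chosen duration, not part of
+// this package):
+//
+//	c.SetReadDeadline(time.Now().Add(pongWait))
+//	c.SetPongHandler(func(string) error {
+//		return c.SetReadDeadline(time.Now().Add(pongWait))
+//	})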
+func (c *Conn) SetPongHandler(h func(appData string) error) {
+ if h == nil {
+ h = func(string) error { return nil }
+ }
+ c.handlePong = h
+}
+
+// UnderlyingConn returns the internal net.Conn. This can be used to make
+// further modifications to connection-specific flags.
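+//
+// For example, an application might tune TCP options on the underlying
+// connection (a sketch; it only applies when the connection is a *net.TCPConn):
+//
+//	if tc, ok := c.UnderlyingConn().(*net.TCPConn); ok {
+//		tc.SetNoDelay(false)
+//	}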
+func (c *Conn) UnderlyingConn() net.Conn {
+ return c.conn
+}
+
+// EnableWriteCompression enables and disables write compression of
+// subsequent text and binary messages. This function is a noop if
+// compression was not negotiated with the peer.
+func (c *Conn) EnableWriteCompression(enable bool) {
+ c.enableWriteCompression = enable
+}
+
+// SetCompressionLevel sets the flate compression level for subsequent text and
+// binary messages. This function is a noop if compression was not negotiated
+// with the peer. See the compress/flate package for a description of
+// compression levels.
+func (c *Conn) SetCompressionLevel(level int) error {
+ if !isValidCompressionLevel(level) {
+ return errors.New("websocket: invalid compression level")
+ }
+ c.compressionLevel = level
+ return nil
+}
+
+// FormatCloseMessage formats closeCode and text as a WebSocket close message.
+// An empty message is returned for code CloseNoStatusReceived.
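+//
+// For example, a graceful shutdown can send a normal-closure message before
+// closing the connection (a sketch; the one-second deadline is an arbitrary
+// choice):
+//
+//	msg := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")
+//	c.WriteControl(websocket.CloseMessage, msg, time.Now().Add(time.Second))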
+func FormatCloseMessage(closeCode int, text string) []byte {
+ if closeCode == CloseNoStatusReceived {
+ // Return empty message because it's illegal to send
+ // CloseNoStatusReceived. Return non-nil value in case application
+ // checks for nil.
+ return []byte{}
+ }
+ buf := make([]byte, 2+len(text))
+ binary.BigEndian.PutUint16(buf, uint16(closeCode))
+ copy(buf[2:], text)
+ return buf
+}
diff --git a/vendor/github.com/gorilla/websocket/conn_write.go b/vendor/github.com/gorilla/websocket/conn_write.go
new file mode 100644
index 0000000..a509a21
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/conn_write.go
@@ -0,0 +1,15 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.8
+
+package websocket
+
+import "net"
+
+func (c *Conn) writeBufs(bufs ...[]byte) error {
+ b := net.Buffers(bufs)
+ _, err := b.WriteTo(c.conn)
+ return err
+}
diff --git a/vendor/github.com/gorilla/websocket/conn_write_legacy.go b/vendor/github.com/gorilla/websocket/conn_write_legacy.go
new file mode 100644
index 0000000..37edaff
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/conn_write_legacy.go
@@ -0,0 +1,18 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.8
+
+package websocket
+
+func (c *Conn) writeBufs(bufs ...[]byte) error {
+ for _, buf := range bufs {
+ if len(buf) > 0 {
+ if _, err := c.conn.Write(buf); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go
new file mode 100644
index 0000000..8db0cef
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/doc.go
@@ -0,0 +1,227 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package websocket implements the WebSocket protocol defined in RFC 6455.
+//
+// Overview
+//
+// The Conn type represents a WebSocket connection. A server application calls
+// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn:
+//
+// var upgrader = websocket.Upgrader{
+// ReadBufferSize: 1024,
+// WriteBufferSize: 1024,
+// }
+//
+// func handler(w http.ResponseWriter, r *http.Request) {
+// conn, err := upgrader.Upgrade(w, r, nil)
+// if err != nil {
+// log.Println(err)
+// return
+// }
+// ... Use conn to send and receive messages.
+// }
+//
+// Call the connection's WriteMessage and ReadMessage methods to send and
+// receive messages as a slice of bytes. This snippet of code shows how to echo
+// messages using these methods:
+//
+// for {
+// messageType, p, err := conn.ReadMessage()
+// if err != nil {
+// log.Println(err)
+// return
+// }
+// if err := conn.WriteMessage(messageType, p); err != nil {
+// log.Println(err)
+// return
+// }
+// }
+//
+// In the above snippet, p is a []byte and messageType is an int with value
+// websocket.BinaryMessage or websocket.TextMessage.
+//
+// An application can also send and receive messages using the io.WriteCloser
+// and io.Reader interfaces. To send a message, call the connection NextWriter
+// method to get an io.WriteCloser, write the message to the writer and close
+// the writer when done. To receive a message, call the connection NextReader
+// method to get an io.Reader and read until io.EOF is returned. This snippet
+// shows how to echo messages using the NextWriter and NextReader methods:
+//
+// for {
+// messageType, r, err := conn.NextReader()
+// if err != nil {
+// return err
+// }
+// w, err := conn.NextWriter(messageType)
+// if err != nil {
+// return err
+// }
+// if _, err := io.Copy(w, r); err != nil {
+// return err
+// }
+// if err := w.Close(); err != nil {
+// return err
+// }
+// }
+//
+// Data Messages
+//
+// The WebSocket protocol distinguishes between text and binary data messages.
+// Text messages are interpreted as UTF-8 encoded text. The interpretation of
+// binary messages is left to the application.
+//
+// This package uses the TextMessage and BinaryMessage integer constants to
+// identify the two data message types. The ReadMessage and NextReader methods
+// return the type of the received message. The messageType argument to the
+// WriteMessage and NextWriter methods specifies the type of a sent message.
+//
+// It is the application's responsibility to ensure that text messages are
+// valid UTF-8 encoded text.
+//
+// Control Messages
+//
+// The WebSocket protocol defines three types of control messages: close, ping
+// and pong. Call the connection WriteControl, WriteMessage or NextWriter
+// methods to send a control message to the peer.
+//
+// Connections handle received close messages by calling the handler function
+// set with the SetCloseHandler method and by returning a *CloseError from the
+// NextReader, ReadMessage or the message Read method. The default close
+// handler sends a close message to the peer.
+//
+// Connections handle received ping messages by calling the handler function
+// set with the SetPingHandler method. The default ping handler sends a pong
+// message to the peer.
+//
+// Connections handle received pong messages by calling the handler function
+// set with the SetPongHandler method. The default pong handler does nothing.
+// If an application sends ping messages, then the application should set a
+// pong handler to receive the corresponding pong.
+//
+// The control message handler functions are called from the NextReader,
+// ReadMessage and message reader Read methods. The default close and ping
+// handlers can block these methods for a short time when the handler writes to
+// the connection.
+//
+// The application must read the connection to process close, ping and pong
+// messages sent from the peer. If the application is not otherwise interested
+// in messages from the peer, then the application should start a goroutine to
+// read and discard messages from the peer. A simple example is:
+//
+// func readLoop(c *websocket.Conn) {
+// for {
+// if _, _, err := c.NextReader(); err != nil {
+// c.Close()
+// break
+// }
+// }
+// }
+//
+// Concurrency
+//
+// Connections support one concurrent reader and one concurrent writer.
+//
+// Applications are responsible for ensuring that no more than one goroutine
+// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage,
+// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and
+// that no more than one goroutine calls the read methods (NextReader,
+// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler)
+// concurrently.
+//
+// The Close and WriteControl methods can be called concurrently with all other
+// methods.
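+//
+// For example, an application that writes from several goroutines can guard
+// the write methods with its own mutex (a sketch; the mutex is an application
+// concern, not part of this package):
+//
+//	var writeMu sync.Mutex
+//
+//	func send(c *websocket.Conn, data []byte) error {
+//		writeMu.Lock()
+//		defer writeMu.Unlock()
+//		return c.WriteMessage(websocket.TextMessage, data)
+//	}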
+//
+// Origin Considerations
+//
+// Web browsers allow Javascript applications to open a WebSocket connection to
+// any host. It's up to the server to enforce an origin policy using the Origin
+// request header sent by the browser.
+//
+// The Upgrader calls the function specified in the CheckOrigin field to check
+// the origin. If the CheckOrigin function returns false, then the Upgrade
+// method fails the WebSocket handshake with HTTP status 403.
+//
+// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail
+// the handshake if the Origin request header is present and the Origin host is
+// not equal to the Host request header.
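+//
+// For example, an Upgrader can restrict upgrades to a single known origin
+// (a sketch; https://example.com is a placeholder, and this variant also
+// rejects requests that send no Origin header):
+//
+//	var upgrader = websocket.Upgrader{
+//		CheckOrigin: func(r *http.Request) bool {
+//			return r.Header.Get("Origin") == "https://example.com"
+//		},
+//	}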
+//
+// The deprecated package-level Upgrade function does not perform origin
+// checking. The application is responsible for checking the Origin header
+// before calling the Upgrade function.
+//
+// Buffers
+//
+// Connections buffer network input and output to reduce the number
+// of system calls when reading or writing messages.
+//
+// Write buffers are also used for constructing WebSocket frames. See RFC 6455,
+// Section 5 for a discussion of message framing. A WebSocket frame header is
+// written to the network each time a write buffer is flushed to the network.
+// Decreasing the size of the write buffer can increase the amount of framing
+// overhead on the connection.
+//
+// The buffer sizes in bytes are specified by the ReadBufferSize and
+// WriteBufferSize fields in the Dialer and Upgrader. The Dialer uses a default
+// size of 4096 when a buffer size field is set to zero. The Upgrader reuses
+// buffers created by the HTTP server when a buffer size field is set to zero.
+// The HTTP server buffers have a size of 4096 at the time of this writing.
+//
+// The buffer sizes do not limit the size of a message that can be read or
+// written by a connection.
+//
+// Buffers are held for the lifetime of the connection by default. If the
+// Dialer or Upgrader WriteBufferPool field is set, then a connection holds the
+// write buffer only when writing a message.
+//
+// Applications should tune the buffer sizes to balance memory use and
+// performance. Increasing the buffer size uses more memory, but can reduce the
+// number of system calls to read or write the network. In the case of writing,
+// increasing the buffer size can reduce the number of frame headers written to
+// the network.
+//
+// Some guidelines for setting buffer parameters are:
+//
+// Limit the buffer sizes to the maximum expected message size. Buffers larger
+// than the largest message do not provide any benefit.
+//
+// Depending on the distribution of message sizes, setting the buffer size to
+// a value less than the maximum expected message size can greatly reduce memory
+// use with a small impact on performance. Here's an example: If 99% of the
+// messages are smaller than 256 bytes and the maximum message size is 512
+// bytes, then a buffer size of 256 bytes will result in 1.01 more system calls
+// than a buffer size of 512 bytes. The memory savings is 50%.
+//
+// A write buffer pool is useful when the application has a modest number of
+// writes over a large number of connections. When buffers are pooled, a larger
+// buffer size has a reduced impact on total memory use and has the benefit of
+// reducing system calls and frame overhead.
+//
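+// For example, an Upgrader can share write buffers across connections by
+// setting WriteBufferPool (a sketch; a *sync.Pool satisfies the BufferPool
+// interface):
+//
+//	var upgrader = websocket.Upgrader{
+//		WriteBufferPool: &sync.Pool{},
+//	}
+//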
+// Compression EXPERIMENTAL
+//
+// Per message compression extensions (RFC 7692) are experimentally supported
+// by this package in a limited capacity. Setting the EnableCompression option
+// to true in Dialer or Upgrader will attempt to negotiate per message deflate
+// support.
+//
+// var upgrader = websocket.Upgrader{
+// EnableCompression: true,
+// }
+//
+// If compression was successfully negotiated with the connection's peer, any
+// message received in compressed form will be automatically decompressed.
+// All Read methods will return uncompressed bytes.
+//
+// Per message compression of messages written to a connection can be enabled
+// or disabled by calling the corresponding Conn method:
+//
+// conn.EnableWriteCompression(false)
+//
+// Currently this package does not support compression with "context takeover".
+// This means that messages must be compressed and decompressed in isolation,
+// without retaining sliding window or dictionary state across messages. For
+// more details refer to RFC 7692.
+//
+// Use of compression is experimental and may result in decreased performance.
+package websocket
diff --git a/vendor/github.com/gorilla/websocket/join.go b/vendor/github.com/gorilla/websocket/join.go
new file mode 100644
index 0000000..c64f8c8
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/join.go
@@ -0,0 +1,42 @@
+// Copyright 2019 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "io"
+ "strings"
+)
+
+// JoinMessages concatenates received messages to create a single io.Reader.
+// The string term is appended to each message. The returned reader does not
+// support concurrent calls to the Read method.
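+//
+// A usage sketch that treats each received message as one line (handle is an
+// assumed application function, and messages are assumed not to contain
+// newlines):
+//
+//	r := websocket.JoinMessages(c, "\n")
+//	s := bufio.NewScanner(r)
+//	for s.Scan() {
+//		handle(s.Text())
+//	}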
+func JoinMessages(c *Conn, term string) io.Reader {
+ return &joinReader{c: c, term: term}
+}
+
+type joinReader struct {
+ c *Conn
+ term string
+ r io.Reader
+}
+
+func (r *joinReader) Read(p []byte) (int, error) {
+ if r.r == nil {
+ var err error
+ _, r.r, err = r.c.NextReader()
+ if err != nil {
+ return 0, err
+ }
+ if r.term != "" {
+ r.r = io.MultiReader(r.r, strings.NewReader(r.term))
+ }
+ }
+ n, err := r.r.Read(p)
+ if err == io.EOF {
+ err = nil
+ r.r = nil
+ }
+ return n, err
+}
diff --git a/vendor/github.com/gorilla/websocket/json.go b/vendor/github.com/gorilla/websocket/json.go
new file mode 100644
index 0000000..dc2c1f6
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/json.go
@@ -0,0 +1,60 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "encoding/json"
+ "io"
+)
+
+// WriteJSON writes the JSON encoding of v as a message.
+//
+// Deprecated: Use c.WriteJSON instead.
+func WriteJSON(c *Conn, v interface{}) error {
+ return c.WriteJSON(v)
+}
+
+// WriteJSON writes the JSON encoding of v as a message.
+//
+// See the documentation for encoding/json Marshal for details about the
+// conversion of Go values to JSON.
+func (c *Conn) WriteJSON(v interface{}) error {
+ w, err := c.NextWriter(TextMessage)
+ if err != nil {
+ return err
+ }
+ err1 := json.NewEncoder(w).Encode(v)
+ err2 := w.Close()
+ if err1 != nil {
+ return err1
+ }
+ return err2
+}
+
+// ReadJSON reads the next JSON-encoded message from the connection and stores
+// it in the value pointed to by v.
+//
+// Deprecated: Use c.ReadJSON instead.
+func ReadJSON(c *Conn, v interface{}) error {
+ return c.ReadJSON(v)
+}
+
+// ReadJSON reads the next JSON-encoded message from the connection and stores
+// it in the value pointed to by v.
+//
+// See the documentation for the encoding/json Unmarshal function for details
+// about the conversion of JSON to a Go value.
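+//
+// A minimal sketch (Message is an assumed application-defined struct):
+//
+//	var msg Message
+//	if err := c.ReadJSON(&msg); err != nil {
+//		return err
+//	}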
+func (c *Conn) ReadJSON(v interface{}) error {
+ _, r, err := c.NextReader()
+ if err != nil {
+ return err
+ }
+ err = json.NewDecoder(r).Decode(v)
+ if err == io.EOF {
+ // One value is expected in the message.
+ err = io.ErrUnexpectedEOF
+ }
+ return err
+}
diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go
new file mode 100644
index 0000000..577fce9
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/mask.go
@@ -0,0 +1,54 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
+// this source code is governed by a BSD-style license that can be found in the
+// LICENSE file.
+
+// +build !appengine
+
+package websocket
+
+import "unsafe"
+
+const wordSize = int(unsafe.Sizeof(uintptr(0)))
+
+func maskBytes(key [4]byte, pos int, b []byte) int {
+ // Mask one byte at a time for small buffers.
+ if len(b) < 2*wordSize {
+ for i := range b {
+ b[i] ^= key[pos&3]
+ pos++
+ }
+ return pos & 3
+ }
+
+ // Mask one byte at a time to word boundary.
+ if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 {
+ n = wordSize - n
+ for i := range b[:n] {
+ b[i] ^= key[pos&3]
+ pos++
+ }
+ b = b[n:]
+ }
+
+ // Create aligned word size key.
+ var k [wordSize]byte
+ for i := range k {
+ k[i] = key[(pos+i)&3]
+ }
+ kw := *(*uintptr)(unsafe.Pointer(&k))
+
+ // Mask one word at a time.
+ n := (len(b) / wordSize) * wordSize
+ for i := 0; i < n; i += wordSize {
+ *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw
+ }
+
+ // Mask one byte at a time for remaining bytes.
+ b = b[n:]
+ for i := range b {
+ b[i] ^= key[pos&3]
+ pos++
+ }
+
+ return pos & 3
+}
diff --git a/vendor/github.com/gorilla/websocket/mask_safe.go b/vendor/github.com/gorilla/websocket/mask_safe.go
new file mode 100644
index 0000000..2aac060
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/mask_safe.go
@@ -0,0 +1,15 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
+// this source code is governed by a BSD-style license that can be found in the
+// LICENSE file.
+
+// +build appengine
+
+package websocket
+
+func maskBytes(key [4]byte, pos int, b []byte) int {
+ for i := range b {
+ b[i] ^= key[pos&3]
+ pos++
+ }
+ return pos & 3
+}
diff --git a/vendor/github.com/gorilla/websocket/prepared.go b/vendor/github.com/gorilla/websocket/prepared.go
new file mode 100644
index 0000000..c854225
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/prepared.go
@@ -0,0 +1,102 @@
+// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bytes"
+ "net"
+ "sync"
+ "time"
+)
+
+// PreparedMessage caches on the wire representations of a message payload.
+// Use PreparedMessage to efficiently send a message payload to multiple
+// connections. PreparedMessage is especially useful when compression is used
+// because the CPU and memory expensive compression operation can be executed
+// once for a given set of compression options.
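+//
+// A broadcast sketch (conns is an assumed application-maintained slice of
+// *Conn, and data is the assumed message payload):
+//
+//	pm, err := websocket.NewPreparedMessage(websocket.TextMessage, data)
+//	if err != nil {
+//		return err
+//	}
+//	for _, c := range conns {
+//		c.WritePreparedMessage(pm)
+//	}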
+type PreparedMessage struct {
+ messageType int
+ data []byte
+ mu sync.Mutex
+ frames map[prepareKey]*preparedFrame
+}
+
+// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage.
+type prepareKey struct {
+ isServer bool
+ compress bool
+ compressionLevel int
+}
+
+// preparedFrame contains data in wire representation.
+type preparedFrame struct {
+ once sync.Once
+ data []byte
+}
+
+// NewPreparedMessage returns an initialized PreparedMessage. You can then
+// send it to a connection using the WritePreparedMessage method. A valid wire
+// representation is calculated lazily, only once for a given set of
+// connection options.
+func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) {
+ pm := &PreparedMessage{
+ messageType: messageType,
+ frames: make(map[prepareKey]*preparedFrame),
+ data: data,
+ }
+
+ // Prepare a plain server frame.
+ _, frameData, err := pm.frame(prepareKey{isServer: true, compress: false})
+ if err != nil {
+ return nil, err
+ }
+
+ // To protect against caller modifying the data argument, remember the data
+ // copied to the plain server frame.
+ pm.data = frameData[len(frameData)-len(data):]
+ return pm, nil
+}
+
+func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) {
+ pm.mu.Lock()
+ frame, ok := pm.frames[key]
+ if !ok {
+ frame = &preparedFrame{}
+ pm.frames[key] = frame
+ }
+ pm.mu.Unlock()
+
+ var err error
+ frame.once.Do(func() {
+ // Prepare a frame using a 'fake' connection.
+ // TODO: Refactor code in conn.go to allow more direct construction of
+ // the frame.
+ mu := make(chan struct{}, 1)
+ mu <- struct{}{}
+ var nc prepareConn
+ c := &Conn{
+ conn: &nc,
+ mu: mu,
+ isServer: key.isServer,
+ compressionLevel: key.compressionLevel,
+ enableWriteCompression: true,
+ writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize),
+ }
+ if key.compress {
+ c.newCompressionWriter = compressNoContextTakeover
+ }
+ err = c.WriteMessage(pm.messageType, pm.data)
+ frame.data = nc.buf.Bytes()
+ })
+ return pm.messageType, frame.data, err
+}
+
+type prepareConn struct {
+ buf bytes.Buffer
+ net.Conn
+}
+
+func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) }
+func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil }
diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go
new file mode 100644
index 0000000..e87a8c9
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/proxy.go
@@ -0,0 +1,77 @@
+// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "encoding/base64"
+ "errors"
+ "net"
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+type netDialerFunc func(network, addr string) (net.Conn, error)
+
+func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) {
+ return fn(network, addr)
+}
+
+func init() {
+ proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) {
+ return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil
+ })
+}
+
+type httpProxyDialer struct {
+ proxyURL *url.URL
+ forwardDial func(network, addr string) (net.Conn, error)
+}
+
+func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) {
+ hostPort, _ := hostPortNoPort(hpd.proxyURL)
+ conn, err := hpd.forwardDial(network, hostPort)
+ if err != nil {
+ return nil, err
+ }
+
+ connectHeader := make(http.Header)
+ if user := hpd.proxyURL.User; user != nil {
+ proxyUser := user.Username()
+ if proxyPassword, passwordSet := user.Password(); passwordSet {
+ credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
+ connectHeader.Set("Proxy-Authorization", "Basic "+credential)
+ }
+ }
+
+ connectReq := &http.Request{
+ Method: "CONNECT",
+ URL: &url.URL{Opaque: addr},
+ Host: addr,
+ Header: connectHeader,
+ }
+
+ if err := connectReq.Write(conn); err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+ // Read response. It's OK to use and discard the buffered reader here because
+ // the remote server does not speak until spoken to.
+ br := bufio.NewReader(conn)
+ resp, err := http.ReadResponse(br, connectReq)
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+ if resp.StatusCode != 200 {
+ conn.Close()
+ f := strings.SplitN(resp.Status, " ", 2)
+ return nil, errors.New(f[1])
+ }
+ return conn, nil
+}
diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go
new file mode 100644
index 0000000..887d558
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/server.go
@@ -0,0 +1,363 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "errors"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+)
+
+// HandshakeError describes an error with the handshake from the peer.
+type HandshakeError struct {
+ message string
+}
+
+func (e HandshakeError) Error() string { return e.message }
+
+// Upgrader specifies parameters for upgrading an HTTP connection to a
+// WebSocket connection.
+type Upgrader struct {
+ // HandshakeTimeout specifies the duration for the handshake to complete.
+ HandshakeTimeout time.Duration
+
+ // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer
+ // size is zero, then buffers allocated by the HTTP server are used. The
+ // I/O buffer sizes do not limit the size of the messages that can be sent
+ // or received.
+ ReadBufferSize, WriteBufferSize int
+
+ // WriteBufferPool is a pool of buffers for write operations. If the value
+ // is not set, then write buffers are allocated to the connection for the
+ // lifetime of the connection.
+ //
+ // A pool is most useful when the application has a modest volume of writes
+ // across a large number of connections.
+ //
+ // Applications should use a single pool for each unique value of
+ // WriteBufferSize.
+ WriteBufferPool BufferPool
+
+ // Subprotocols specifies the server's supported protocols in order of
+ // preference. If this field is not nil, then the Upgrade method negotiates a
+ // subprotocol by selecting the first match in this list with a protocol
+ // requested by the client. If there's no match, then no protocol is
+ // negotiated (the Sec-Websocket-Protocol header is not included in the
+ // handshake response).
+ Subprotocols []string
+
+ // Error specifies the function for generating HTTP error responses. If Error
+ // is nil, then http.Error is used to generate the HTTP response.
+ Error func(w http.ResponseWriter, r *http.Request, status int, reason error)
+
+ // CheckOrigin returns true if the request Origin header is acceptable. If
+ // CheckOrigin is nil, then a safe default is used: return false if the
+ // Origin request header is present and the origin host is not equal to
+ // request Host header.
+ //
+ // A CheckOrigin function should carefully validate the request origin to
+ // prevent cross-site request forgery.
+ CheckOrigin func(r *http.Request) bool
+
+ // EnableCompression specifies whether the server should attempt to negotiate per
+ // message compression (RFC 7692). Setting this value to true does not
+ // guarantee that compression will be supported. Currently only "no context
+ // takeover" modes are supported.
+ EnableCompression bool
+}
+
+func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) {
+ err := HandshakeError{reason}
+ if u.Error != nil {
+ u.Error(w, r, status, err)
+ } else {
+ w.Header().Set("Sec-Websocket-Version", "13")
+ http.Error(w, http.StatusText(status), status)
+ }
+ return nil, err
+}
+
+// checkSameOrigin returns true if the origin is not set or is equal to the request host.
+func checkSameOrigin(r *http.Request) bool {
+ origin := r.Header["Origin"]
+ if len(origin) == 0 {
+ return true
+ }
+ u, err := url.Parse(origin[0])
+ if err != nil {
+ return false
+ }
+ return equalASCIIFold(u.Host, r.Host)
+}
+
+func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string {
+ if u.Subprotocols != nil {
+ clientProtocols := Subprotocols(r)
+ for _, serverProtocol := range u.Subprotocols {
+ for _, clientProtocol := range clientProtocols {
+ if clientProtocol == serverProtocol {
+ return clientProtocol
+ }
+ }
+ }
+ } else if responseHeader != nil {
+ return responseHeader.Get("Sec-Websocket-Protocol")
+ }
+ return ""
+}
+
+// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
+//
+// The responseHeader is included in the response to the client's upgrade
+// request. Use the responseHeader to specify cookies (Set-Cookie) and the
+// application negotiated subprotocol (Sec-WebSocket-Protocol).
+//
+// If the upgrade fails, then Upgrade replies to the client with an HTTP error
+// response.
+func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) {
+ const badHandshake = "websocket: the client is not using the websocket protocol: "
+
+ if !tokenListContainsValue(r.Header, "Connection", "upgrade") {
+ return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header")
+ }
+
+ if !tokenListContainsValue(r.Header, "Upgrade", "websocket") {
+ return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header")
+ }
+
+ if r.Method != "GET" {
+ return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET")
+ }
+
+ if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") {
+ return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header")
+ }
+
+ if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok {
+ return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported")
+ }
+
+ checkOrigin := u.CheckOrigin
+ if checkOrigin == nil {
+ checkOrigin = checkSameOrigin
+ }
+ if !checkOrigin(r) {
+ return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin")
+ }
+
+ challengeKey := r.Header.Get("Sec-Websocket-Key")
+ if challengeKey == "" {
+ return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header is missing or blank")
+ }
+
+ subprotocol := u.selectSubprotocol(r, responseHeader)
+
+ // Negotiate PMCE
+ var compress bool
+ if u.EnableCompression {
+ for _, ext := range parseExtensions(r.Header) {
+ if ext[""] != "permessage-deflate" {
+ continue
+ }
+ compress = true
+ break
+ }
+ }
+
+ h, ok := w.(http.Hijacker)
+ if !ok {
+ return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker")
+ }
+ var brw *bufio.ReadWriter
+ netConn, brw, err := h.Hijack()
+ if err != nil {
+ return u.returnError(w, r, http.StatusInternalServerError, err.Error())
+ }
+
+ if brw.Reader.Buffered() > 0 {
+ netConn.Close()
+ return nil, errors.New("websocket: client sent data before handshake is complete")
+ }
+
+ var br *bufio.Reader
+ if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 {
+ // Reuse hijacked buffered reader as connection reader.
+ br = brw.Reader
+ }
+
+ buf := bufioWriterBuffer(netConn, brw.Writer)
+
+ var writeBuf []byte
+ if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 {
+ // Reuse hijacked write buffer as connection buffer.
+ writeBuf = buf
+ }
+
+ c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf)
+ c.subprotocol = subprotocol
+
+ if compress {
+ c.newCompressionWriter = compressNoContextTakeover
+ c.newDecompressionReader = decompressNoContextTakeover
+ }
+
+ // Use larger of hijacked buffer and connection write buffer for header.
+ p := buf
+ if len(c.writeBuf) > len(p) {
+ p = c.writeBuf
+ }
+ p = p[:0]
+
+ p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...)
+ p = append(p, computeAcceptKey(challengeKey)...)
+ p = append(p, "\r\n"...)
+ if c.subprotocol != "" {
+ p = append(p, "Sec-WebSocket-Protocol: "...)
+ p = append(p, c.subprotocol...)
+ p = append(p, "\r\n"...)
+ }
+ if compress {
+ p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...)
+ }
+ for k, vs := range responseHeader {
+ if k == "Sec-Websocket-Protocol" {
+ continue
+ }
+ for _, v := range vs {
+ p = append(p, k...)
+ p = append(p, ": "...)
+ for i := 0; i < len(v); i++ {
+ b := v[i]
+ if b <= 31 {
+ // prevent response splitting.
+ b = ' '
+ }
+ p = append(p, b)
+ }
+ p = append(p, "\r\n"...)
+ }
+ }
+ p = append(p, "\r\n"...)
+
+ // Clear deadlines set by HTTP server.
+ netConn.SetDeadline(time.Time{})
+
+ if u.HandshakeTimeout > 0 {
+ netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout))
+ }
+ if _, err = netConn.Write(p); err != nil {
+ netConn.Close()
+ return nil, err
+ }
+ if u.HandshakeTimeout > 0 {
+ netConn.SetWriteDeadline(time.Time{})
+ }
+
+ return c, nil
+}
+
+// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
+//
+// Deprecated: Use websocket.Upgrader instead.
+//
+// Upgrade does not perform origin checking. The application is responsible for
+// checking the Origin header before calling Upgrade. An example implementation
+// of the same origin policy check is:
+//
+// if req.Header.Get("Origin") != "http://"+req.Host {
+// http.Error(w, "Origin not allowed", http.StatusForbidden)
+// return
+// }
+//
+// If the endpoint supports subprotocols, then the application is responsible
+// for negotiating the protocol used on the connection. Use the Subprotocols()
+// function to get the subprotocols requested by the client. Use the
+// Sec-Websocket-Protocol response header to specify the subprotocol selected
+// by the application.
+//
+// The responseHeader is included in the response to the client's upgrade
+// request. Use the responseHeader to specify cookies (Set-Cookie) and the
+// negotiated subprotocol (Sec-Websocket-Protocol).
+//
+// The connection buffers IO to the underlying network connection. The
+// readBufSize and writeBufSize parameters specify the size of the buffers to
+// use. Messages can be larger than the buffers.
+//
+// If the request is not a valid WebSocket handshake, then Upgrade returns an
+// error of type HandshakeError. Applications should handle this error by
+// replying to the client with an HTTP error response.
+func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) {
+ u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize}
+ u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) {
+ // don't return errors to maintain backwards compatibility
+ }
+ u.CheckOrigin = func(r *http.Request) bool {
+ // allow all connections by default
+ return true
+ }
+ return u.Upgrade(w, r, responseHeader)
+}
+
+// Subprotocols returns the subprotocols requested by the client in the
+// Sec-Websocket-Protocol header.
+func Subprotocols(r *http.Request) []string {
+ h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol"))
+ if h == "" {
+ return nil
+ }
+ protocols := strings.Split(h, ",")
+ for i := range protocols {
+ protocols[i] = strings.TrimSpace(protocols[i])
+ }
+ return protocols
+}
+
+// IsWebSocketUpgrade returns true if the client requested upgrade to the
+// WebSocket protocol.
+func IsWebSocketUpgrade(r *http.Request) bool {
+ return tokenListContainsValue(r.Header, "Connection", "upgrade") &&
+ tokenListContainsValue(r.Header, "Upgrade", "websocket")
+}
+
+// bufioReaderSize returns the size of a bufio.Reader.
+func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int {
+ // This code assumes that peek on a reset reader returns
+ // bufio.Reader.buf[:0].
+ // TODO: Use bufio.Reader.Size() after Go 1.10
+ br.Reset(originalReader)
+ if p, err := br.Peek(0); err == nil {
+ return cap(p)
+ }
+ return 0
+}
+
+// writeHook is an io.Writer that records the last slice passed to it via
+// io.Writer.Write.
+type writeHook struct {
+ p []byte
+}
+
+func (wh *writeHook) Write(p []byte) (int, error) {
+ wh.p = p
+ return len(p), nil
+}
+
+// bufioWriterBuffer grabs the buffer from a bufio.Writer.
+func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte {
+ // This code assumes that bufio.Writer.buf[:1] is passed to the
+ // bufio.Writer's underlying writer.
+ var wh writeHook
+ bw.Reset(&wh)
+ bw.WriteByte(0)
+ bw.Flush()
+
+ bw.Reset(originalWriter)
+
+ return wh.p[:cap(wh.p)]
+}
diff --git a/vendor/github.com/gorilla/websocket/trace.go b/vendor/github.com/gorilla/websocket/trace.go
new file mode 100644
index 0000000..834f122
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/trace.go
@@ -0,0 +1,19 @@
+// +build go1.8
+
+package websocket
+
+import (
+ "crypto/tls"
+ "net/http/httptrace"
+)
+
+func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error {
+ if trace.TLSHandshakeStart != nil {
+ trace.TLSHandshakeStart()
+ }
+ err := doHandshake(tlsConn, cfg)
+ if trace.TLSHandshakeDone != nil {
+ trace.TLSHandshakeDone(tlsConn.ConnectionState(), err)
+ }
+ return err
+}
diff --git a/vendor/github.com/gorilla/websocket/trace_17.go b/vendor/github.com/gorilla/websocket/trace_17.go
new file mode 100644
index 0000000..77d05a0
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/trace_17.go
@@ -0,0 +1,12 @@
+// +build !go1.8
+
+package websocket
+
+import (
+ "crypto/tls"
+ "net/http/httptrace"
+)
+
+func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error {
+ return doHandshake(tlsConn, cfg)
+}
diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go
new file mode 100644
index 0000000..7bf2f66
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/util.go
@@ -0,0 +1,283 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "crypto/rand"
+ "crypto/sha1"
+ "encoding/base64"
+ "io"
+ "net/http"
+ "strings"
+ "unicode/utf8"
+)
+
+var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
+
+func computeAcceptKey(challengeKey string) string {
+ h := sha1.New()
+ h.Write([]byte(challengeKey))
+ h.Write(keyGUID)
+ return base64.StdEncoding.EncodeToString(h.Sum(nil))
+}
+
+func generateChallengeKey() (string, error) {
+ p := make([]byte, 16)
+ if _, err := io.ReadFull(rand.Reader, p); err != nil {
+ return "", err
+ }
+ return base64.StdEncoding.EncodeToString(p), nil
+}
+
+// Token octets per RFC 2616.
+var isTokenOctet = [256]bool{
+ '!': true,
+ '#': true,
+ '$': true,
+ '%': true,
+ '&': true,
+ '\'': true,
+ '*': true,
+ '+': true,
+ '-': true,
+ '.': true,
+ '0': true,
+ '1': true,
+ '2': true,
+ '3': true,
+ '4': true,
+ '5': true,
+ '6': true,
+ '7': true,
+ '8': true,
+ '9': true,
+ 'A': true,
+ 'B': true,
+ 'C': true,
+ 'D': true,
+ 'E': true,
+ 'F': true,
+ 'G': true,
+ 'H': true,
+ 'I': true,
+ 'J': true,
+ 'K': true,
+ 'L': true,
+ 'M': true,
+ 'N': true,
+ 'O': true,
+ 'P': true,
+ 'Q': true,
+ 'R': true,
+ 'S': true,
+ 'T': true,
+ 'U': true,
+ 'W': true,
+ 'V': true,
+ 'X': true,
+ 'Y': true,
+ 'Z': true,
+ '^': true,
+ '_': true,
+ '`': true,
+ 'a': true,
+ 'b': true,
+ 'c': true,
+ 'd': true,
+ 'e': true,
+ 'f': true,
+ 'g': true,
+ 'h': true,
+ 'i': true,
+ 'j': true,
+ 'k': true,
+ 'l': true,
+ 'm': true,
+ 'n': true,
+ 'o': true,
+ 'p': true,
+ 'q': true,
+ 'r': true,
+ 's': true,
+ 't': true,
+ 'u': true,
+ 'v': true,
+ 'w': true,
+ 'x': true,
+ 'y': true,
+ 'z': true,
+ '|': true,
+ '~': true,
+}
+
+// skipSpace returns a slice of the string s with all leading RFC 2616 linear
+// whitespace removed.
+func skipSpace(s string) (rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if b := s[i]; b != ' ' && b != '\t' {
+ break
+ }
+ }
+ return s[i:]
+}
+
+// nextToken returns the leading RFC 2616 token of s and the string following
+// the token.
+func nextToken(s string) (token, rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if !isTokenOctet[s[i]] {
+ break
+ }
+ }
+ return s[:i], s[i:]
+}
+
+// nextTokenOrQuoted returns the leading token or quoted string per RFC 2616
+// and the string following the token or quoted string.
+func nextTokenOrQuoted(s string) (value string, rest string) {
+ if !strings.HasPrefix(s, "\"") {
+ return nextToken(s)
+ }
+ s = s[1:]
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case '"':
+ return s[:i], s[i+1:]
+ case '\\':
+ p := make([]byte, len(s)-1)
+ j := copy(p, s[:i])
+ escape := true
+ for i = i + 1; i < len(s); i++ {
+ b := s[i]
+ switch {
+ case escape:
+ escape = false
+ p[j] = b
+ j++
+ case b == '\\':
+ escape = true
+ case b == '"':
+ return string(p[:j]), s[i+1:]
+ default:
+ p[j] = b
+ j++
+ }
+ }
+ return "", ""
+ }
+ }
+ return "", ""
+}
+
+// equalASCIIFold returns true if s is equal to t with ASCII case folding as
+// defined in RFC 4790.
+func equalASCIIFold(s, t string) bool {
+ for s != "" && t != "" {
+ sr, size := utf8.DecodeRuneInString(s)
+ s = s[size:]
+ tr, size := utf8.DecodeRuneInString(t)
+ t = t[size:]
+ if sr == tr {
+ continue
+ }
+ if 'A' <= sr && sr <= 'Z' {
+ sr = sr + 'a' - 'A'
+ }
+ if 'A' <= tr && tr <= 'Z' {
+ tr = tr + 'a' - 'A'
+ }
+ if sr != tr {
+ return false
+ }
+ }
+ return s == t
+}
+
+// tokenListContainsValue returns true if the 1#token header with the given
+// name contains a token equal to value with ASCII case folding.
+func tokenListContainsValue(header http.Header, name string, value string) bool {
+headers:
+ for _, s := range header[name] {
+ for {
+ var t string
+ t, s = nextToken(skipSpace(s))
+ if t == "" {
+ continue headers
+ }
+ s = skipSpace(s)
+ if s != "" && s[0] != ',' {
+ continue headers
+ }
+ if equalASCIIFold(t, value) {
+ return true
+ }
+ if s == "" {
+ continue headers
+ }
+ s = s[1:]
+ }
+ }
+ return false
+}
+
+// parseExtensions parses WebSocket extensions from a header.
+func parseExtensions(header http.Header) []map[string]string {
+ // From RFC 6455:
+ //
+ // Sec-WebSocket-Extensions = extension-list
+ // extension-list = 1#extension
+ // extension = extension-token *( ";" extension-param )
+ // extension-token = registered-token
+ // registered-token = token
+ // extension-param = token [ "=" (token | quoted-string) ]
+ // ;When using the quoted-string syntax variant, the value
+ // ;after quoted-string unescaping MUST conform to the
+ // ;'token' ABNF.
+
+ var result []map[string]string
+headers:
+ for _, s := range header["Sec-Websocket-Extensions"] {
+ for {
+ var t string
+ t, s = nextToken(skipSpace(s))
+ if t == "" {
+ continue headers
+ }
+ ext := map[string]string{"": t}
+ for {
+ s = skipSpace(s)
+ if !strings.HasPrefix(s, ";") {
+ break
+ }
+ var k string
+ k, s = nextToken(skipSpace(s[1:]))
+ if k == "" {
+ continue headers
+ }
+ s = skipSpace(s)
+ var v string
+ if strings.HasPrefix(s, "=") {
+ v, s = nextTokenOrQuoted(skipSpace(s[1:]))
+ s = skipSpace(s)
+ }
+ if s != "" && s[0] != ',' && s[0] != ';' {
+ continue headers
+ }
+ ext[k] = v
+ }
+ if s != "" && s[0] != ',' {
+ continue headers
+ }
+ result = append(result, ext)
+ if s == "" {
+ continue headers
+ }
+ s = s[1:]
+ }
+ }
+ return result
+}
diff --git a/vendor/github.com/gorilla/websocket/x_net_proxy.go b/vendor/github.com/gorilla/websocket/x_net_proxy.go
new file mode 100644
index 0000000..2e668f6
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/x_net_proxy.go
@@ -0,0 +1,473 @@
+// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.
+//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy
+
+// Package proxy provides support for a variety of protocols to proxy network
+// data.
+//
+
+package websocket
+
+import (
+ "errors"
+ "io"
+ "net"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+type proxy_direct struct{}
+
+// Direct is a direct proxy: one that makes network connections directly.
+var proxy_Direct = proxy_direct{}
+
+func (proxy_direct) Dial(network, addr string) (net.Conn, error) {
+ return net.Dial(network, addr)
+}
+
+// A PerHost directs connections to a default Dialer unless the host name
+// requested matches one of a number of exceptions.
+type proxy_PerHost struct {
+ def, bypass proxy_Dialer
+
+ bypassNetworks []*net.IPNet
+ bypassIPs []net.IP
+ bypassZones []string
+ bypassHosts []string
+}
+
+// NewPerHost returns a PerHost Dialer that directs connections to either
+// defaultDialer or bypass, depending on whether the connection matches one of
+// the configured rules.
+func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost {
+ return &proxy_PerHost{
+ def: defaultDialer,
+ bypass: bypass,
+ }
+}
+
+// Dial connects to the address addr on the given network through either
+// defaultDialer or bypass.
+func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) {
+ host, _, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, err
+ }
+
+ return p.dialerForRequest(host).Dial(network, addr)
+}
+
+func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer {
+ if ip := net.ParseIP(host); ip != nil {
+ for _, net := range p.bypassNetworks {
+ if net.Contains(ip) {
+ return p.bypass
+ }
+ }
+ for _, bypassIP := range p.bypassIPs {
+ if bypassIP.Equal(ip) {
+ return p.bypass
+ }
+ }
+ return p.def
+ }
+
+ for _, zone := range p.bypassZones {
+ if strings.HasSuffix(host, zone) {
+ return p.bypass
+ }
+ if host == zone[1:] {
+ // For a zone ".example.com", we match "example.com"
+ // too.
+ return p.bypass
+ }
+ }
+ for _, bypassHost := range p.bypassHosts {
+ if bypassHost == host {
+ return p.bypass
+ }
+ }
+ return p.def
+}
+
+// AddFromString parses a string that contains comma-separated values
+// specifying hosts that should use the bypass proxy. Each value is either an
+// IP address, a CIDR range, a zone (*.example.com) or a host name
+// (localhost). A best effort is made to parse the string and errors are
+// ignored.
+func (p *proxy_PerHost) AddFromString(s string) {
+ hosts := strings.Split(s, ",")
+ for _, host := range hosts {
+ host = strings.TrimSpace(host)
+ if len(host) == 0 {
+ continue
+ }
+ if strings.Contains(host, "/") {
+ // We assume that it's a CIDR address like 127.0.0.0/8
+ if _, net, err := net.ParseCIDR(host); err == nil {
+ p.AddNetwork(net)
+ }
+ continue
+ }
+ if ip := net.ParseIP(host); ip != nil {
+ p.AddIP(ip)
+ continue
+ }
+ if strings.HasPrefix(host, "*.") {
+ p.AddZone(host[1:])
+ continue
+ }
+ p.AddHost(host)
+ }
+}
+
+// AddIP specifies an IP address that will use the bypass proxy. Note that
+// this will only take effect if a literal IP address is dialed. A connection
+// to a named host will never match an IP.
+func (p *proxy_PerHost) AddIP(ip net.IP) {
+ p.bypassIPs = append(p.bypassIPs, ip)
+}
+
+// AddNetwork specifies an IP range that will use the bypass proxy. Note that
+// this will only take effect if a literal IP address is dialed. A connection
+// to a named host will never match.
+func (p *proxy_PerHost) AddNetwork(net *net.IPNet) {
+ p.bypassNetworks = append(p.bypassNetworks, net)
+}
+
+// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
+// "example.com" matches "example.com" and all of its subdomains.
+func (p *proxy_PerHost) AddZone(zone string) {
+ if strings.HasSuffix(zone, ".") {
+ zone = zone[:len(zone)-1]
+ }
+ if !strings.HasPrefix(zone, ".") {
+ zone = "." + zone
+ }
+ p.bypassZones = append(p.bypassZones, zone)
+}
+
+// AddHost specifies a host name that will use the bypass proxy.
+func (p *proxy_PerHost) AddHost(host string) {
+ if strings.HasSuffix(host, ".") {
+ host = host[:len(host)-1]
+ }
+ p.bypassHosts = append(p.bypassHosts, host)
+}
+
+// A Dialer is a means to establish a connection.
+type proxy_Dialer interface {
+ // Dial connects to the given address via the proxy.
+ Dial(network, addr string) (c net.Conn, err error)
+}
+
+// Auth contains authentication parameters that specific Dialers may require.
+type proxy_Auth struct {
+ User, Password string
+}
+
+// FromEnvironment returns the dialer specified by the proxy related variables in
+// the environment.
+func proxy_FromEnvironment() proxy_Dialer {
+ allProxy := proxy_allProxyEnv.Get()
+ if len(allProxy) == 0 {
+ return proxy_Direct
+ }
+
+ proxyURL, err := url.Parse(allProxy)
+ if err != nil {
+ return proxy_Direct
+ }
+ proxy, err := proxy_FromURL(proxyURL, proxy_Direct)
+ if err != nil {
+ return proxy_Direct
+ }
+
+ noProxy := proxy_noProxyEnv.Get()
+ if len(noProxy) == 0 {
+ return proxy
+ }
+
+ perHost := proxy_NewPerHost(proxy, proxy_Direct)
+ perHost.AddFromString(noProxy)
+ return perHost
+}
+
+// proxySchemes is a map from URL schemes to a function that creates a Dialer
+// from a URL with such a scheme.
+var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)
+
+// RegisterDialerType takes a URL scheme and a function to generate Dialers from
+// a URL with that scheme and a forwarding Dialer. Registered schemes are used
+// by FromURL.
+func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) {
+ if proxy_proxySchemes == nil {
+ proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error))
+ }
+ proxy_proxySchemes[scheme] = f
+}
+
+// FromURL returns a Dialer given a URL specification and an underlying
+// Dialer for it to make network requests.
+func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) {
+ var auth *proxy_Auth
+ if u.User != nil {
+ auth = new(proxy_Auth)
+ auth.User = u.User.Username()
+ if p, ok := u.User.Password(); ok {
+ auth.Password = p
+ }
+ }
+
+ switch u.Scheme {
+ case "socks5":
+ return proxy_SOCKS5("tcp", u.Host, auth, forward)
+ }
+
+ // If the scheme doesn't match any of the built-in schemes, see if it
+ // was registered by another package.
+ if proxy_proxySchemes != nil {
+ if f, ok := proxy_proxySchemes[u.Scheme]; ok {
+ return f(u, forward)
+ }
+ }
+
+ return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
+}
+
+var (
+ proxy_allProxyEnv = &proxy_envOnce{
+ names: []string{"ALL_PROXY", "all_proxy"},
+ }
+ proxy_noProxyEnv = &proxy_envOnce{
+ names: []string{"NO_PROXY", "no_proxy"},
+ }
+)
+
+// envOnce looks up an environment variable (optionally by multiple
+// names) once. It mitigates expensive lookups on some platforms
+// (e.g. Windows).
+// (Borrowed from net/http/transport.go)
+type proxy_envOnce struct {
+ names []string
+ once sync.Once
+ val string
+}
+
+func (e *proxy_envOnce) Get() string {
+ e.once.Do(e.init)
+ return e.val
+}
+
+func (e *proxy_envOnce) init() {
+ for _, n := range e.names {
+ e.val = os.Getenv(n)
+ if e.val != "" {
+ return
+ }
+ }
+}
+
+// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address
+// with an optional username and password. See RFC 1928 and RFC 1929.
+func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) {
+ s := &proxy_socks5{
+ network: network,
+ addr: addr,
+ forward: forward,
+ }
+ if auth != nil {
+ s.user = auth.User
+ s.password = auth.Password
+ }
+
+ return s, nil
+}
+
+type proxy_socks5 struct {
+ user, password string
+ network, addr string
+ forward proxy_Dialer
+}
+
+const proxy_socks5Version = 5
+
+const (
+ proxy_socks5AuthNone = 0
+ proxy_socks5AuthPassword = 2
+)
+
+const proxy_socks5Connect = 1
+
+const (
+ proxy_socks5IP4 = 1
+ proxy_socks5Domain = 3
+ proxy_socks5IP6 = 4
+)
+
+var proxy_socks5Errors = []string{
+ "",
+ "general failure",
+ "connection forbidden",
+ "network unreachable",
+ "host unreachable",
+ "connection refused",
+ "TTL expired",
+ "command not supported",
+ "address type not supported",
+}
+
+// Dial connects to the address addr on the given network via the SOCKS5 proxy.
+func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) {
+ switch network {
+ case "tcp", "tcp6", "tcp4":
+ default:
+ return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network)
+ }
+
+ conn, err := s.forward.Dial(s.network, s.addr)
+ if err != nil {
+ return nil, err
+ }
+ if err := s.connect(conn, addr); err != nil {
+ conn.Close()
+ return nil, err
+ }
+ return conn, nil
+}
+
+// connect takes an existing connection to a socks5 proxy server,
+// and commands the server to extend that connection to target,
+// which must be a canonical address with a host and port.
+func (s *proxy_socks5) connect(conn net.Conn, target string) error {
+ host, portStr, err := net.SplitHostPort(target)
+ if err != nil {
+ return err
+ }
+
+ port, err := strconv.Atoi(portStr)
+ if err != nil {
+ return errors.New("proxy: failed to parse port number: " + portStr)
+ }
+ if port < 1 || port > 0xffff {
+ return errors.New("proxy: port number out of range: " + portStr)
+ }
+
+ // the size here is just an estimate
+ buf := make([]byte, 0, 6+len(host))
+
+ buf = append(buf, proxy_socks5Version)
+ if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 {
+ buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword)
+ } else {
+ buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone)
+ }
+
+ if _, err := conn.Write(buf); err != nil {
+ return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ if _, err := io.ReadFull(conn, buf[:2]); err != nil {
+ return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+ if buf[0] != 5 {
+ return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0])))
+ }
+ if buf[1] == 0xff {
+ return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication")
+ }
+
+ // See RFC 1929
+ if buf[1] == proxy_socks5AuthPassword {
+ buf = buf[:0]
+ buf = append(buf, 1 /* password protocol version */)
+ buf = append(buf, uint8(len(s.user)))
+ buf = append(buf, s.user...)
+ buf = append(buf, uint8(len(s.password)))
+ buf = append(buf, s.password...)
+
+ if _, err := conn.Write(buf); err != nil {
+ return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ if _, err := io.ReadFull(conn, buf[:2]); err != nil {
+ return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ if buf[1] != 0 {
+ return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password")
+ }
+ }
+
+ buf = buf[:0]
+ buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */)
+
+ if ip := net.ParseIP(host); ip != nil {
+ if ip4 := ip.To4(); ip4 != nil {
+ buf = append(buf, proxy_socks5IP4)
+ ip = ip4
+ } else {
+ buf = append(buf, proxy_socks5IP6)
+ }
+ buf = append(buf, ip...)
+ } else {
+ if len(host) > 255 {
+ return errors.New("proxy: destination host name too long: " + host)
+ }
+ buf = append(buf, proxy_socks5Domain)
+ buf = append(buf, byte(len(host)))
+ buf = append(buf, host...)
+ }
+ buf = append(buf, byte(port>>8), byte(port))
+
+ if _, err := conn.Write(buf); err != nil {
+ return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ if _, err := io.ReadFull(conn, buf[:4]); err != nil {
+ return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ failure := "unknown error"
+ if int(buf[1]) < len(proxy_socks5Errors) {
+ failure = proxy_socks5Errors[buf[1]]
+ }
+
+ if len(failure) > 0 {
+ return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure)
+ }
+
+ bytesToDiscard := 0
+ switch buf[3] {
+ case proxy_socks5IP4:
+ bytesToDiscard = net.IPv4len
+ case proxy_socks5IP6:
+ bytesToDiscard = net.IPv6len
+ case proxy_socks5Domain:
+ _, err := io.ReadFull(conn, buf[:1])
+ if err != nil {
+ return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+ bytesToDiscard = int(buf[0])
+ default:
+ return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr)
+ }
+
+ if cap(buf) < bytesToDiscard {
+ buf = make([]byte, bytesToDiscard)
+ } else {
+ buf = buf[:bytesToDiscard]
+ }
+ if _, err := io.ReadFull(conn, buf); err != nil {
+ return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ // Also need to discard the port number
+ if _, err := io.ReadFull(conn, buf[:2]); err != nil {
+ return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/mattn/go-pointer/LICENSE b/vendor/github.com/mattn/go-pointer/LICENSE
new file mode 100644
index 0000000..5794edd
--- /dev/null
+++ b/vendor/github.com/mattn/go-pointer/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2019 Yasuhiro Matsumoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/mattn/go-pointer/README.md b/vendor/github.com/mattn/go-pointer/README.md
new file mode 100644
index 0000000..c74eee2
--- /dev/null
+++ b/vendor/github.com/mattn/go-pointer/README.md
@@ -0,0 +1,29 @@
+# go-pointer
+
+Utility for cgo
+
+## Usage
+
+https://github.com/golang/proposal/blob/master/design/12416-cgo-pointers.md
+
+Since Go 1.6, a Go pointer can no longer be passed to C code directly through cgo; this package works around that by handing C an opaque index pointer and keeping the real Go value in a lookup table.
+
+```go
+var s string
+C.pass_pointer(pointer.Save(&s))
+v := *(pointer.Restore(C.get_from_pointer()).(*string))
+```
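+
+For reference, here is a slightly fuller sketch of the round trip using only this package's own API (no C side involved; in real use the pointer returned by `Save` would be handed to your cgo code):
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/mattn/go-pointer"
+)
+
+type session struct{ id int }
+
+func main() {
+ // Save stores the Go value in an internal table and returns an
+ // opaque C pointer that is safe to pass across the cgo boundary.
+ ptr := pointer.Save(&session{id: 42})
+
+ // Restore looks the value up again; the caller asserts the type.
+ s := pointer.Restore(ptr).(*session)
+ fmt.Println(s.id)
+
+ // Unref releases the table entry and the one-byte C allocation.
+ pointer.Unref(ptr)
+}
+```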
+
+## Installation
+
+```
+go get github.com/mattn/go-pointer
+```
+
+## License
+
+MIT
+
+## Author
+
+Yasuhiro Matsumoto (a.k.a mattn)
diff --git a/vendor/github.com/mattn/go-pointer/doc.go b/vendor/github.com/mattn/go-pointer/doc.go
new file mode 100644
index 0000000..c27bd8c
--- /dev/null
+++ b/vendor/github.com/mattn/go-pointer/doc.go
@@ -0,0 +1 @@
+package pointer
diff --git a/vendor/github.com/mattn/go-pointer/pointer.go b/vendor/github.com/mattn/go-pointer/pointer.go
new file mode 100644
index 0000000..08a9853
--- /dev/null
+++ b/vendor/github.com/mattn/go-pointer/pointer.go
@@ -0,0 +1,57 @@
+package pointer
+
+// #include <stdlib.h>
+import "C"
+import (
+ "sync"
+ "unsafe"
+)
+
+var (
+ mutex sync.RWMutex
+ store = map[unsafe.Pointer]interface{}{}
+)
+
+func Save(v interface{}) unsafe.Pointer {
+ if v == nil {
+ return nil
+ }
+
+ // Generate a real, but fake, C pointer.
+ // This pointer will not store any data; it is only used as an index.
+ // Since Go doesn't allow casting a dangling pointer to unsafe.Pointer, we really do allocate one byte.
+ // The indexing is needed because Go doesn't allow C code to store pointers to Go data.
+ var ptr unsafe.Pointer = C.malloc(C.size_t(1))
+ if ptr == nil {
+ panic("can't allocate 'cgo-pointer hack index pointer': ptr == nil")
+ }
+
+ mutex.Lock()
+ store[ptr] = v
+ mutex.Unlock()
+
+ return ptr
+}
+
+func Restore(ptr unsafe.Pointer) (v interface{}) {
+ if ptr == nil {
+ return nil
+ }
+
+ mutex.RLock()
+ v = store[ptr]
+ mutex.RUnlock()
+ return
+}
+
+func Unref(ptr unsafe.Pointer) {
+ if ptr == nil {
+ return
+ }
+
+ mutex.Lock()
+ delete(store, ptr)
+ mutex.Unlock()
+
+ C.free(ptr)
+}
diff --git a/vendor/github.com/mattn/go-tflite/.gitignore b/vendor/github.com/mattn/go-tflite/.gitignore
new file mode 100644
index 0000000..5641773
--- /dev/null
+++ b/vendor/github.com/mattn/go-tflite/.gitignore
@@ -0,0 +1,19 @@
+tmp
+*.exe
+*.dll
+*.bmp
+*.jpg
+*.png
+*.mp4
+#_example/grace_hopper.bmp
+#_example/grace_hopper.png
+#_example/main.exe
+#_example/notebook.png
+#_example/output_graph.tflite
+#_example/output_labels.txt
+#_example/webcam/aaa.mp4
+#_example/webcam/bbb.mp4
+#_example/webcam/libtensorflowlite_c.dll
+#_example/webcam/mobilenet_quant_v1_224.tflite
+#_example/webcam/webcam.exe
+.ipynb_checkpoints
diff --git a/vendor/github.com/mattn/go-tflite/LICENSE b/vendor/github.com/mattn/go-tflite/LICENSE
new file mode 100644
index 0000000..5794edd
--- /dev/null
+++ b/vendor/github.com/mattn/go-tflite/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2019 Yasuhiro Matsumoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/mattn/go-tflite/Makefile.tflite b/vendor/github.com/mattn/go-tflite/Makefile.tflite
new file mode 100644
index 0000000..f55059b
--- /dev/null
+++ b/vendor/github.com/mattn/go-tflite/Makefile.tflite
@@ -0,0 +1,49 @@
+SRCS = \
+ c_api.cc \
+ c_api_experimental.cc
+
+OBJS = $(subst .cc,.o,$(subst .cxx,.o,$(subst .cpp,.o,$(SRCS))))
+
+TENSORFLOW_ROOT = $(shell go env GOPATH)/src/github.com/tensorflow/tensorflow
+CXXFLAGS := -fPIC -DTF_COMPILE_LIBRARY -I$(TENSORFLOW_ROOT) \
+ -I$(TENSORFLOW_ROOT)/tensorflow/lite/tools/make/downloads/flatbuffers/include \
+ -I$(TENSORFLOW_ROOT)/tensorflow/lite/tools/make/downloads/absl
+TARGET = libtensorflowlite_c
+ifeq ($(OS),Windows_NT)
+OS_ARCH = windows_x86_64
+TARGET_SHARED := $(TARGET).dll
+else
+ifeq ($(shell uname -s),Darwin)
+CXXFLAGS := -std=c++11 $(CXXFLAGS)
+OS_ARCH = osx_$(shell uname -m)
+else
+ifeq ($(shell uname -m),x86_64)
+OS_ARCH = linux_x86_64
+else
+ifeq ($(shell uname -m),armv6l)
+OS_ARCH = linux_armv6l
+else
+OS_ARCH = rpi_armv7l
+endif
+endif
+endif
+TARGET_SHARED := $(TARGET).so
+endif
+LDFLAGS += -L$(TENSORFLOW_ROOT)/tensorflow/lite/tools/make/gen/$(OS_ARCH)/lib
+LIBS = -ltensorflow-lite
+
+.SUFFIXES: .cpp .cxx .o
+
+all : $(TARGET_SHARED)
+
+$(TARGET_SHARED) : $(OBJS)
+ g++ -shared -o $@ $(OBJS) $(LDFLAGS) $(LIBS)
+
+.cxx.o :
+ g++ -std=c++14 -c $(CXXFLAGS) -I. $< -o $@
+
+.cpp.o :
+ g++ -std=c++14 -c $(CXXFLAGS) -I. $< -o $@
+
+clean :
+ rm -f *.o $(TARGET_SHARED)
diff --git a/vendor/github.com/mattn/go-tflite/README.md b/vendor/github.com/mattn/go-tflite/README.md
new file mode 100644
index 0000000..71bc744
--- /dev/null
+++ b/vendor/github.com/mattn/go-tflite/README.md
@@ -0,0 +1,96 @@
+# go-tflite
+
+Go binding for TensorFlow Lite
+
+
+
+## Usage
+
+```go
+model := tflite.NewModelFromFile("sin_model.tflite")
+if model == nil {
+ log.Fatal("cannot load model")
+}
+defer model.Delete()
+
+options := tflite.NewInterpreterOptions()
+defer options.Delete()
+
+interpreter := tflite.NewInterpreter(model, options)
+defer interpreter.Delete()
+
+interpreter.AllocateTensors()
+
+v := float64(1.2) * math.Pi / 180.0
+input := interpreter.GetInputTensor(0)
+input.Float32s()[0] = float32(v)
+interpreter.Invoke()
+got := float64(interpreter.GetOutputTensor(0).Float32s()[0])
+```
+
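+If something goes wrong while loading or running a model, the interpreter options can forward TensorFlow Lite's error messages to Go. A minimal sketch using the `SetErrorReporter` and `Status` APIs from this binding (logging is just an example choice):
+
+```go
+options := tflite.NewInterpreterOptions()
+defer options.Delete()
+options.SetNumThread(4)
+options.SetErrorReporter(func(msg string, user_data interface{}) {
+ log.Println("tflite:", msg)
+}, nil)
+
+interpreter := tflite.NewInterpreter(model, options)
+defer interpreter.Delete()
+if interpreter.AllocateTensors() != tflite.OK {
+ log.Fatal("failed to allocate tensors")
+}
+if interpreter.Invoke() != tflite.OK {
+ log.Fatal("invoke failed")
+}
+```
+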
+See `_example` for more examples
+
+## Requirements
+
+* TensorFlow Lite - This release requires 2.2.0-rc3
+
+## Tensorflow Installation
+
+You must install the TensorFlow Lite C API. Assuming the source is under /source/directory/tensorflow:
+
+```
+$ cd /source/directory/tensorflow
+$ bazel build --config opt --config monolithic tensorflow:libtensorflow_c.so
+```
+
+Or to just compile the tensorflow lite libraries:
+```
+$ cd /some/path/tensorflow
+$ bazel build --config opt --config monolithic //tensorflow/lite:libtensorflowlite.so
+$ bazel build --config opt --config monolithic //tensorflow/lite/c:libtensorflowlite_c.so
+```
+
+In order for Go to find the headers, you must set the CGO_CFLAGS environment variable to point at the TensorFlow source tree.
+If your libraries are not installed in a standard location, you must also give the Go linker the path to the shared libraries
+with the CGO_LDFLAGS environment variable.
+
+```
+$ export CGO_CFLAGS=-I/source/directory/tensorflow
+$ export CGO_LDFLAGS=-L/path/to/tensorflow/libraries
+```
+
+If you don't love bazel, you can try `Makefile.tflite`.
+Put this file as `Makefile` in `tensorflow/lite/c`, and run `make`.
+Sorry, this has not been tested on Linux or Mac.
+
+Then run `go build` on some of the examples.
+
+## Edge TPU
+To be able to compile and use the EdgeTPU delegate, you need to install the libraries from here:
+https://github.com/google-coral/edgetpu
+
+There is also a deb package here:
+https://coral.withgoogle.com/docs/accelerator/get-started/#1-install-the-edge-tpu-runtime
+
+The libraries should be installed in a system-wide library path such as `/usr/local/lib`.
+The include files should be installed somewhere that is accessible from your CGO include path.
+
+For x86:
+```
+cd /tmp && git clone https://github.com/google-coral/edgetpu.git && \
+cp edgetpu/libedgetpu/direct/k8/libedgetpu.so.1.0 /usr/local/lib/libedgetpu.so.1.0 && \
+ln -rs /usr/local/lib/libedgetpu.so.1.0 /usr/local/lib/libedgetpu.so.1 && \
+ln -rs /usr/local/lib/libedgetpu.so.1.0 /usr/local/lib/libedgetpu.so && \
+mkdir -p /usr/local/include/libedgetpu && \
+cp edgetpu/libedgetpu/edgetpu.h /usr/local/include/edgetpu.h && \
+cp edgetpu/libedgetpu/edgetpu_c.h /usr/local/include/edgetpu_c.h && \
+rm -Rf edgetpu
+```
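+
+With the runtime and headers in place, the delegate from the `delegates/edgetpu` package in this repository can be attached to the interpreter options. A rough sketch (model and interpreter setup as in the Usage section above):
+
+```go
+devices, err := edgetpu.DeviceList()
+if err != nil || len(devices) == 0 {
+ log.Fatal("no Edge TPU devices found")
+}
+
+delegate := edgetpu.New(devices[0])
+if delegate == nil {
+ log.Fatal("cannot create Edge TPU delegate")
+}
+defer delegate.Delete()
+
+options := tflite.NewInterpreterOptions()
+defer options.Delete()
+options.AddDelegate(delegate)
+```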
+
+
+## License
+MIT
+
+## Author
+Yasuhiro Matsumoto (a.k.a. mattn)
+
diff --git a/vendor/github.com/mattn/go-tflite/callback.go b/vendor/github.com/mattn/go-tflite/callback.go
new file mode 100644
index 0000000..8d5f749
--- /dev/null
+++ b/vendor/github.com/mattn/go-tflite/callback.go
@@ -0,0 +1,19 @@
+package tflite
+
+import "C"
+import (
+ "unsafe"
+
+ "github.com/mattn/go-pointer"
+)
+
+type callbackInfo struct {
+ user_data interface{}
+ f func(msg string, user_data interface{})
+}
+
+//export _go_error_reporter
+func _go_error_reporter(user_data unsafe.Pointer, msg *C.char) {
+ cb := pointer.Restore(user_data).(*callbackInfo)
+ cb.f(C.GoString(msg), cb.user_data)
+}
diff --git a/vendor/github.com/mattn/go-tflite/delegates/delegates.go b/vendor/github.com/mattn/go-tflite/delegates/delegates.go
new file mode 100644
index 0000000..79362e3
--- /dev/null
+++ b/vendor/github.com/mattn/go-tflite/delegates/delegates.go
@@ -0,0 +1,14 @@
+package delegates
+
+import (
+ "unsafe"
+)
+
+type ModifyGraphWithDelegater interface {
+ ModifyGraphWithDelegate(Delegater)
+}
+
+type Delegater interface {
+ Delete()
+ Ptr() unsafe.Pointer
+}
diff --git a/vendor/github.com/mattn/go-tflite/delegates/edgetpu/edgetpu.go b/vendor/github.com/mattn/go-tflite/delegates/edgetpu/edgetpu.go
new file mode 100644
index 0000000..363dace
--- /dev/null
+++ b/vendor/github.com/mattn/go-tflite/delegates/edgetpu/edgetpu.go
@@ -0,0 +1,104 @@
+// +build !windows
+
+package edgetpu
+
+/*
+#ifndef GO_EDGETPU_H
+#include "edgetpu.go.h"
+#include <stdlib.h>
+#endif
+#cgo LDFLAGS: -ledgetpu
+
+*/
+import "C"
+import (
+ "fmt"
+ "unsafe"
+
+ "github.com/mattn/go-tflite/delegates"
+)
+
+const (
+ // The Device Types
+ TypeApexPCI DeviceType = C.EDGETPU_APEX_PCI
+ TypeApexUSB DeviceType = C.EDGETPU_APEX_USB
+)
+
+type DeviceType uint32
+
+type Device struct {
+ Type DeviceType
+ Path string
+}
+
+// There are no options
+type DelegateOptions struct {
+}
+
+// Delegate is the tflite delegate
+type Delegate struct {
+ d *C.TfLiteDelegate
+}
+
+func New(device Device) delegates.Delegater {
+ var d *C.TfLiteDelegate
+ d = C.edgetpu_create_delegate(uint32(device.Type), C.CString(device.Path), nil, 0)
+ if d == nil {
+ return nil
+ }
+ return &Delegate{
+ d: d,
+ }
+}
+
+// Delete the delegate
+func (d *Delegate) Delete() {
+ C.edgetpu_free_delegate(d.d)
+}
+
+// Ptr returns a pointer to the underlying TfLiteDelegate
+func (d *Delegate) Ptr() unsafe.Pointer {
+ return unsafe.Pointer(d.d)
+}
+
+// Version fetches the EdgeTPU runtime version information
+func Version() (string, error) {
+ version := C.edgetpu_version()
+ if version == nil {
+ return "", fmt.Errorf("could not get version")
+ }
+ return C.GoString(version), nil
+}
+
+// Verbosity sets the edgetpu verbosity
+func Verbosity(v int) {
+ C.edgetpu_verbosity(C.int(v))
+}
+
+// DeviceList fetches a list of devices
+func DeviceList() ([]Device, error) {
+ // Fetch the list of devices
+ var numDevices C.size_t
+ cDevices := C.edgetpu_list_devices(&numDevices)
+
+ if cDevices == nil {
+ return nil, nil
+ }
+
+ // Cast the result to a Go slice
+ deviceSlice := (*[1024]C.struct_edgetpu_device)(unsafe.Pointer(cDevices))[:numDevices:numDevices]
+
+ // Convert the list to go struct
+ var devices []Device
+ for i := C.size_t(0); i < numDevices; i++ {
+ devices = append(devices, Device{
+ Type: DeviceType(deviceSlice[i]._type),
+ Path: C.GoString(deviceSlice[i].path),
+ })
+ }
+
+ // Free the list
+ C.edgetpu_free_devices(cDevices)
+
+ return devices, nil
+}
diff --git a/vendor/github.com/mattn/go-tflite/delegates/edgetpu/edgetpu.go.h b/vendor/github.com/mattn/go-tflite/delegates/edgetpu/edgetpu.go.h
new file mode 100644
index 0000000..565fdc7
--- /dev/null
+++ b/vendor/github.com/mattn/go-tflite/delegates/edgetpu/edgetpu.go.h
@@ -0,0 +1,11 @@
+#ifndef GO_EDGETPU_H
+#define GO_EDGETPU_H
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <tensorflow/lite/c/c_api.h>
+#include <edgetpu_c.h>
+
+#endif
diff --git a/vendor/github.com/mattn/go-tflite/tflite.go b/vendor/github.com/mattn/go-tflite/tflite.go
new file mode 100644
index 0000000..c062247
--- /dev/null
+++ b/vendor/github.com/mattn/go-tflite/tflite.go
@@ -0,0 +1,258 @@
+package tflite
+
+/*
+#ifndef GO_TFLITE_H
+#include "tflite.go.h"
+#endif
+#cgo LDFLAGS: -ltensorflowlite_c
+#cgo android LDFLAGS: -ldl
+#cgo linux,!android LDFLAGS: -ldl -lrt
+*/
+import "C"
+import (
+ "reflect"
+ "unsafe"
+
+ "github.com/mattn/go-pointer"
+ "github.com/mattn/go-tflite/delegates"
+)
+
+//go:generate stringer -type TensorType,Status -output type_string.go .
+
+// Model is TfLiteModel.
+type Model struct {
+ m *C.TfLiteModel
+}
+
+// NewModel create new Model from buffer.
+func NewModel(model_data []byte) *Model {
+ m := C.TfLiteModelCreate(C.CBytes(model_data), C.size_t(len(model_data)))
+ if m == nil {
+ return nil
+ }
+ return &Model{m: m}
+}
+
+// NewModelFromFile create new Model from file data.
+func NewModelFromFile(model_path string) *Model {
+ ptr := C.CString(model_path)
+ defer C.free(unsafe.Pointer(ptr))
+
+ m := C.TfLiteModelCreateFromFile(ptr)
+ if m == nil {
+ return nil
+ }
+ return &Model{m: m}
+}
+
+// Delete delete instance of model.
+func (m *Model) Delete() {
+ if m != nil {
+ C.TfLiteModelDelete(m.m)
+ }
+}
+
+// InterpreterOptions implement TfLiteInterpreterOptions.
+type InterpreterOptions struct {
+ o *C.TfLiteInterpreterOptions
+}
+
+// NewInterpreterOptions create new InterpreterOptions.
+func NewInterpreterOptions() *InterpreterOptions {
+ o := C.TfLiteInterpreterOptionsCreate()
+ if o == nil {
+ return nil
+ }
+ return &InterpreterOptions{o: o}
+}
+
+// SetNumThread set number of threads.
+func (o *InterpreterOptions) SetNumThread(num_threads int) {
+ C.TfLiteInterpreterOptionsSetNumThreads(o.o, C.int32_t(num_threads))
+}
+
+// SetErrorReporter sets the error reporter callback and its user data.
+func (o *InterpreterOptions) SetErrorReporter(f func(string, interface{}), user_data interface{}) {
+ C._TfLiteInterpreterOptionsSetErrorReporter(o.o, pointer.Save(&callbackInfo{
+ user_data: user_data,
+ f: f,
+ }))
+}
+
+func (o *InterpreterOptions) AddDelegate(d delegates.Delegater) {
+ C.TfLiteInterpreterOptionsAddDelegate(o.o, (*C.TfLiteDelegate)(d.Ptr()))
+}
+
+// Delete delete instance of InterpreterOptions.
+func (o *InterpreterOptions) Delete() {
+ if o != nil {
+ C.TfLiteInterpreterOptionsDelete(o.o)
+ }
+}
+
+// Interpreter implement TfLiteInterpreter.
+type Interpreter struct {
+ i *C.TfLiteInterpreter
+}
+
+// NewInterpreter create new Interpreter.
+func NewInterpreter(model *Model, options *InterpreterOptions) *Interpreter {
+ var o *C.TfLiteInterpreterOptions
+ if options != nil {
+ o = options.o
+ }
+ i := C.TfLiteInterpreterCreate(model.m, o)
+ if i == nil {
+ return nil
+ }
+ return &Interpreter{i: i}
+}
+
+// Delete delete instance of Interpreter.
+func (i *Interpreter) Delete() {
+ if i != nil {
+ C.TfLiteInterpreterDelete(i.i)
+ }
+}
+
+// Tensor implement TfLiteTensor.
+type Tensor struct {
+ t *C.TfLiteTensor
+}
+
+// GetInputTensorCount return number of input tensors.
+func (i *Interpreter) GetInputTensorCount() int {
+ return int(C.TfLiteInterpreterGetInputTensorCount(i.i))
+}
+
+// GetInputTensor return input tensor specified by index.
+func (i *Interpreter) GetInputTensor(index int) *Tensor {
+ t := C.TfLiteInterpreterGetInputTensor(i.i, C.int32_t(index))
+ if t == nil {
+ return nil
+ }
+ return &Tensor{t: t}
+}
+
+// Status implements TfLiteStatus.
+type Status int
+
+const (
+ OK Status = iota
+ Error
+)
+
+// ResizeInputTensor resize the tensor specified by index with dims.
+func (i *Interpreter) ResizeInputTensor(index int, dims []int32) Status {
+ s := C.TfLiteInterpreterResizeInputTensor(i.i, C.int32_t(index), (*C.int32_t)(unsafe.Pointer(&dims[0])), C.int32_t(len(dims)))
+ return Status(s)
+}
+
+// AllocateTensors allocates tensors for the interpreter.
+func (i *Interpreter) AllocateTensors() Status {
+ if i != nil {
+ s := C.TfLiteInterpreterAllocateTensors(i.i)
+ return Status(s)
+ }
+ return Error
+}
+
+// Invoke invoke the task.
+func (i *Interpreter) Invoke() Status {
+ s := C.TfLiteInterpreterInvoke(i.i)
+ return Status(s)
+}
+
+// GetOutputTensorCount return number of output tensors.
+func (i *Interpreter) GetOutputTensorCount() int {
+ return int(C.TfLiteInterpreterGetOutputTensorCount(i.i))
+}
+
+// GetOutputTensor return output tensor specified by index.
+func (i *Interpreter) GetOutputTensor(index int) *Tensor {
+ t := C.TfLiteInterpreterGetOutputTensor(i.i, C.int32_t(index))
+ if t == nil {
+ return nil
+ }
+ return &Tensor{t: t}
+}
+
+// TensorType is types of the tensor.
+type TensorType int
+
+const (
+ NoType TensorType = 0
+ Float32 TensorType = 1
+ Int32 TensorType = 2
+ UInt8 TensorType = 3
+ Int64 TensorType = 4
+ String TensorType = 5
+ Bool TensorType = 6
+ Int16 TensorType = 7
+ Complex64 TensorType = 8
+ Int8 TensorType = 9
+)
+
+// Type return TensorType.
+func (t *Tensor) Type() TensorType {
+ return TensorType(C.TfLiteTensorType(t.t))
+}
+
+// NumDims return number of dimensions.
+func (t *Tensor) NumDims() int {
+ return int(C.TfLiteTensorNumDims(t.t))
+}
+
+// Dim return dimension of the element specified by index.
+func (t *Tensor) Dim(index int) int {
+ return int(C.TfLiteTensorDim(t.t, C.int32_t(index)))
+}
+
+// Shape return shape of the tensor.
+func (t *Tensor) Shape() []int {
+ shape := make([]int, t.NumDims())
+ for i := 0; i < t.NumDims(); i++ {
+ shape[i] = t.Dim(i)
+ }
+ return shape
+}
+
+// ByteSize return byte size of the tensor.
+func (t *Tensor) ByteSize() uint {
+ return uint(C.TfLiteTensorByteSize(t.t))
+}
+
+// Data return pointer of buffer.
+func (t *Tensor) Data() unsafe.Pointer {
+ return C.TfLiteTensorData(t.t)
+}
+
+// Name return name of the tensor.
+func (t *Tensor) Name() string {
+ return C.GoString(C.TfLiteTensorName(t.t))
+}
+
+// QuantizationParams implement TfLiteQuantizationParams.
+type QuantizationParams struct {
+ Scale float64
+ ZeroPoint int
+}
+
+// QuantizationParams return quantization parameters of the tensor.
+func (t *Tensor) QuantizationParams() QuantizationParams {
+ q := C.TfLiteTensorQuantizationParams(t.t)
+ return QuantizationParams{
+ Scale: float64(q.scale),
+ ZeroPoint: int(q.zero_point),
+ }
+}
+
+// CopyFromBuffer write buffer to the tensor.
+func (t *Tensor) CopyFromBuffer(b interface{}) Status {
+ return Status(C.TfLiteTensorCopyFromBuffer(t.t, unsafe.Pointer(reflect.ValueOf(b).Pointer()), C.size_t(t.ByteSize())))
+}
+
+// CopyToBuffer write buffer from the tensor.
+func (t *Tensor) CopyToBuffer(b interface{}) Status {
+ return Status(C.TfLiteTensorCopyToBuffer(t.t, unsafe.Pointer(reflect.ValueOf(b).Pointer()), C.size_t(t.ByteSize())))
+}
diff --git a/vendor/github.com/mattn/go-tflite/tflite.go.h b/vendor/github.com/mattn/go-tflite/tflite.go.h
new file mode 100644
index 0000000..a2570c3
--- /dev/null
+++ b/vendor/github.com/mattn/go-tflite/tflite.go.h
@@ -0,0 +1,24 @@
+#ifndef GO_TFLITE_H
+#define GO_TFLITE_H
+
+#define _GNU_SOURCE
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <tensorflow/lite/c/c_api.h>
+
+extern void _go_error_reporter(void*, char*);
+
+static void
+_error_reporter(void *user_data, const char* format, va_list args) {
+ char *ptr;
+ if (asprintf(&ptr, format, args)) {}
+ _go_error_reporter(user_data, ptr);
+ free(ptr);
+}
+
+static void
+_TfLiteInterpreterOptionsSetErrorReporter(TfLiteInterpreterOptions* options, void* user_data) {
+ TfLiteInterpreterOptionsSetErrorReporter(options, _error_reporter, user_data);
+}
+#endif
diff --git a/vendor/github.com/mattn/go-tflite/tflite_experimental.go b/vendor/github.com/mattn/go-tflite/tflite_experimental.go
new file mode 100644
index 0000000..efc0e84
--- /dev/null
+++ b/vendor/github.com/mattn/go-tflite/tflite_experimental.go
@@ -0,0 +1,388 @@
+package tflite
+
+/*
+#ifndef GO_TFLITE_EXPERIMENTAL_H
+#include "tflite_experimental.go.h"
+#endif
+
+typedef void* (*f_tflite_registration_init)(TfLiteContext* context, const char* buffer, size_t length);
+void* _tflite_registration_init(TfLiteContext* context, char* buffer, size_t length);
+
+typedef void (*f_tflite_registration_free)(TfLiteContext* context, void* buffer);
+void _tflite_registration_free(TfLiteContext* context, void* buffer);
+
+typedef TfLiteStatus (*f_tflite_registration_prepare)(TfLiteContext* context, TfLiteNode* node);
+TfLiteStatus _tflite_registration_prepare(TfLiteContext* context, TfLiteNode* node);
+
+typedef TfLiteStatus (*f_tflite_registration_invoke)(TfLiteContext* context, TfLiteNode* node);
+TfLiteStatus _tflite_registration_invoke(TfLiteContext* context, TfLiteNode* node);
+
+typedef const char* (*f_tflite_registration_profiling_string)(const TfLiteContext* context, const TfLiteNode* node);
+char* _tflite_registration_profiling_string(TfLiteContext* context, TfLiteNode* node);
+
+static TfLiteRegistration*
+_make_registration(void* o_init, void* o_free, void* o_prepare, void* o_invoke, void* o_profiling_string) {
+ TfLiteRegistration* r = (TfLiteRegistration*)malloc(sizeof(TfLiteRegistration));
+ r->init = (f_tflite_registration_init) o_init;
+ r->free = (f_tflite_registration_free) o_free;
+ r->prepare = (f_tflite_registration_prepare) o_prepare;
+ r->invoke = (f_tflite_registration_invoke) o_invoke;
+ r->profiling_string = (f_tflite_registration_profiling_string) o_profiling_string;
+ return r;
+}
+
+static void look_context(TfLiteContext *context) {
+ context->tensors;
+ TfLiteIntArray *plan = NULL;
+ context->GetExecutionPlan(context, &plan);
+ if (plan == NULL) return;
+ int i;
+ for (i = 0; i < plan->size; i++) {
+ TfLiteNode *node = NULL;
+ TfLiteRegistration *reg = NULL;
+ context->GetNodeAndRegistration(context, i, &node, &reg);
+ printf("%s\n", reg->custom_name);
+ }
+}
+
+static void writeToTensorAsVector(TfLiteTensor *tensor, char *bytes, size_t size, int nelem) {
+ static TfLiteIntArray dummy;
+ TfLiteIntArray* new_shape = (TfLiteIntArray*)malloc(sizeof(dummy) + sizeof(dummy.data[0]) * 1);
+ if (new_shape) {
+ new_shape->size = 1;
+ new_shape->data[0] = nelem;
+ memcpy(new_shape->data, tensor->dims->data, tensor->dims->size * sizeof(int));
+ }
+
+ // TfLiteTensorDataFree
+ if (tensor->allocation_type == kTfLiteDynamic && tensor->data.raw) {
+ free(tensor->data.raw);
+ }
+ tensor->data.raw = NULL;
+
+ if (tensor->dims) free(tensor->dims);
+ if (tensor->quantization.type == kTfLiteAffineQuantization) {
+ TfLiteAffineQuantization* q_params =
+ (TfLiteAffineQuantization*)(tensor->quantization.params);
+ if (q_params->scale) {
+ free(q_params->scale);
+ q_params->scale = NULL;
+ }
+ if (q_params->zero_point) {
+ free(q_params->zero_point);
+ q_params->zero_point = NULL;
+ }
+ free(q_params);
+ }
+ tensor->dims = new_shape;
+ tensor->data.raw = bytes;
+ tensor->bytes = size;
+ tensor->allocation_type = kTfLiteMmapRo;
+
+ tensor->quantization.type = kTfLiteNoQuantization;
+ tensor->quantization.params = NULL;
+}
+*/
+import "C"
+import (
+ "bytes"
+ "encoding/binary"
+ "io"
+ "unsafe"
+)
+
+const sizeof_int32_t = 4
+
+// ResetVariableTensors resets variable tensors.
+func (i *Interpreter) ResetVariableTensors() Status {
+ return Status(C.TfLiteInterpreterResetVariableTensors(i.i))
+}
+
+/*
+type Registration interface {
+}
+
+func (o *InterpreterOptions) AddCustomOp(name string, reg *Registration, minVersion, maxVersion int) {
+ ptr := C.CString(name)
+ defer C.free(unsafe.Pointer(ptr))
+ r := C._make_registration()
+ C.TfLiteInterpreterOptionsAddCustomOp(o.o, ptr, r, C.int(minVersion), C.int(maxVersion))
+}
+
+type registration struct {
+ ccxt *C.TfLiteContext
+}
+
+//export _tflite_registration_init
+func _tflite_registration_init(ccxt *C.TfLiteContext, buffer *C.char, length C.size_t) unsafe.Pointer {
+ println("registration.init")
+ C.look_context(ccxt)
+
+ //var executionPlan *TfLiteIntArray
+ //status := ccxt.GetExecutionPlan(ccxt, &executionPlan)
+ //if status != C.kTfLiteOk {
+ //return nil
+ //}
+ //var registration *C.TfLiteRegistration
+ //var node *C.TfLiteNode
+ //for i := 0; i < executionPlan.size; i++ {
+ //ccxt.GetNodeAndRegistration(ccxt, 0, &node, ®istration)
+ //}
+
+ println(buffer, length)
+ return nil
+}
+
+//export _tflite_registration_free
+func _tflite_registration_free(ccxt *C.TfLiteContext, buffer unsafe.Pointer) {
+ println("registration.free")
+}
+
+//export _tflite_registration_prepare
+func _tflite_registration_prepare(ccxt *C.TfLiteContext, node *C.TfLiteNode) C.TfLiteStatus {
+ println("registration.prepare")
+ return C.kTfLiteOk
+}
+
+//export _tflite_registration_invoke
+func _tflite_registration_invoke(ccxt *C.TfLiteContext, node *C.TfLiteNode) C.TfLiteStatus {
+ println("registration.invoke")
+ return C.kTfLiteOk
+}
+
+//export _tflite_registration_profiling_string
+func _tflite_registration_profiling_string(ccxt *C.TfLiteContext, node *C.TfLiteNode) *C.char {
+ println("registration.profiling_string")
+ return nil
+}
+*/
+
+// ExpRegistration describes the registration structure for a custom op.
+type ExpRegistration struct {
+ Init unsafe.Pointer
+ Free unsafe.Pointer
+ Prepare unsafe.Pointer
+ Invoke unsafe.Pointer
+ ProfilingString unsafe.Pointer
+}
+
+type BuiltinOperator int
+
+const (
+ BuiltinOperator_ADD BuiltinOperator = 0
+ BuiltinOperator_AVERAGE_POOL_2D BuiltinOperator = 1
+ BuiltinOperator_CONCATENATION BuiltinOperator = 2
+ BuiltinOperator_CONV_2D BuiltinOperator = 3
+ BuiltinOperator_DEPTHWISE_CONV_2D BuiltinOperator = 4
+ BuiltinOperator_DEQUANTIZE BuiltinOperator = 6
+ BuiltinOperator_EMBEDDING_LOOKUP BuiltinOperator = 7
+ BuiltinOperator_FLOOR BuiltinOperator = 8
+ BuiltinOperator_FULLY_CONNECTED BuiltinOperator = 9
+ BuiltinOperator_HASHTABLE_LOOKUP BuiltinOperator = 10
+ BuiltinOperator_L2_NORMALIZATION BuiltinOperator = 11
+ BuiltinOperator_L2_POOL_2D BuiltinOperator = 12
+ BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION BuiltinOperator = 13
+ BuiltinOperator_LOGISTIC BuiltinOperator = 14
+ BuiltinOperator_LSH_PROJECTION BuiltinOperator = 15
+ BuiltinOperator_LSTM BuiltinOperator = 16
+ BuiltinOperator_MAX_POOL_2D BuiltinOperator = 17
+ BuiltinOperator_MUL BuiltinOperator = 18
+ BuiltinOperator_RELU BuiltinOperator = 19
+ BuiltinOperator_RELU_N1_TO_1 BuiltinOperator = 20
+ BuiltinOperator_RELU6 BuiltinOperator = 21
+ BuiltinOperator_RESHAPE BuiltinOperator = 22
+ BuiltinOperator_RESIZE_BILINEAR BuiltinOperator = 23
+ BuiltinOperator_RNN BuiltinOperator = 24
+ BuiltinOperator_SOFTMAX BuiltinOperator = 25
+ BuiltinOperator_SPACE_TO_DEPTH BuiltinOperator = 26
+ BuiltinOperator_SVDF BuiltinOperator = 27
+ BuiltinOperator_TANH BuiltinOperator = 28
+ BuiltinOperator_CONCAT_EMBEDDINGS BuiltinOperator = 29
+ BuiltinOperator_SKIP_GRAM BuiltinOperator = 30
+ BuiltinOperator_CALL BuiltinOperator = 31
+ BuiltinOperator_CUSTOM BuiltinOperator = 32
+ BuiltinOperator_EMBEDDING_LOOKUP_SPARSE BuiltinOperator = 33
+ BuiltinOperator_PAD BuiltinOperator = 34
+ BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN BuiltinOperator = 35
+ BuiltinOperator_GATHER BuiltinOperator = 36
+ BuiltinOperator_BATCH_TO_SPACE_ND BuiltinOperator = 37
+ BuiltinOperator_SPACE_TO_BATCH_ND BuiltinOperator = 38
+ BuiltinOperator_TRANSPOSE BuiltinOperator = 39
+ BuiltinOperator_MEAN BuiltinOperator = 40
+ BuiltinOperator_SUB BuiltinOperator = 41
+ BuiltinOperator_DIV BuiltinOperator = 42
+ BuiltinOperator_SQUEEZE BuiltinOperator = 43
+ BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM BuiltinOperator = 44
+ BuiltinOperator_STRIDED_SLICE BuiltinOperator = 45
+ BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN BuiltinOperator = 46
+ BuiltinOperator_EXP BuiltinOperator = 47
+ BuiltinOperator_TOPK_V2 BuiltinOperator = 48
+ BuiltinOperator_SPLIT BuiltinOperator = 49
+ BuiltinOperator_LOG_SOFTMAX BuiltinOperator = 50
+ BuiltinOperator_DELEGATE BuiltinOperator = 51
+ BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM BuiltinOperator = 52
+ BuiltinOperator_CAST BuiltinOperator = 53
+ BuiltinOperator_PRELU BuiltinOperator = 54
+ BuiltinOperator_MAXIMUM BuiltinOperator = 55
+ BuiltinOperator_ARG_MAX BuiltinOperator = 56
+ BuiltinOperator_MINIMUM BuiltinOperator = 57
+ BuiltinOperator_LESS BuiltinOperator = 58
+ BuiltinOperator_NEG BuiltinOperator = 59
+ BuiltinOperator_PADV2 BuiltinOperator = 60
+ BuiltinOperator_GREATER BuiltinOperator = 61
+ BuiltinOperator_GREATER_EQUAL BuiltinOperator = 62
+ BuiltinOperator_LESS_EQUAL BuiltinOperator = 63
+ BuiltinOperator_SELECT BuiltinOperator = 64
+ BuiltinOperator_SLICE BuiltinOperator = 65
+ BuiltinOperator_SIN BuiltinOperator = 66
+ BuiltinOperator_TRANSPOSE_CONV BuiltinOperator = 67
+ BuiltinOperator_SPARSE_TO_DENSE BuiltinOperator = 68
+ BuiltinOperator_TILE BuiltinOperator = 69
+ BuiltinOperator_EXPAND_DIMS BuiltinOperator = 70
+ BuiltinOperator_EQUAL BuiltinOperator = 71
+ BuiltinOperator_NOT_EQUAL BuiltinOperator = 72
+ BuiltinOperator_LOG BuiltinOperator = 73
+ BuiltinOperator_SUM BuiltinOperator = 74
+ BuiltinOperator_SQRT BuiltinOperator = 75
+ BuiltinOperator_RSQRT BuiltinOperator = 76
+ BuiltinOperator_SHAPE BuiltinOperator = 77
+ BuiltinOperator_POW BuiltinOperator = 78
+ BuiltinOperator_ARG_MIN BuiltinOperator = 79
+ BuiltinOperator_FAKE_QUANT BuiltinOperator = 80
+ BuiltinOperator_REDUCE_PROD BuiltinOperator = 81
+ BuiltinOperator_REDUCE_MAX BuiltinOperator = 82
+ BuiltinOperator_PACK BuiltinOperator = 83
+ BuiltinOperator_LOGICAL_OR BuiltinOperator = 84
+ BuiltinOperator_ONE_HOT BuiltinOperator = 85
+ BuiltinOperator_LOGICAL_AND BuiltinOperator = 86
+ BuiltinOperator_LOGICAL_NOT BuiltinOperator = 87
+ BuiltinOperator_UNPACK BuiltinOperator = 88
+ BuiltinOperator_REDUCE_MIN BuiltinOperator = 89
+ BuiltinOperator_FLOOR_DIV BuiltinOperator = 90
+ BuiltinOperator_REDUCE_ANY BuiltinOperator = 91
+ BuiltinOperator_SQUARE BuiltinOperator = 92
+ BuiltinOperator_ZEROS_LIKE BuiltinOperator = 93
+ BuiltinOperator_FILL BuiltinOperator = 94
+ BuiltinOperator_FLOOR_MOD BuiltinOperator = 95
+ BuiltinOperator_RANGE BuiltinOperator = 96
+ BuiltinOperator_RESIZE_NEAREST_NEIGHBOR BuiltinOperator = 97
+ BuiltinOperator_LEAKY_RELU BuiltinOperator = 98
+ BuiltinOperator_SQUARED_DIFFERENCE BuiltinOperator = 99
+ BuiltinOperator_MIRROR_PAD BuiltinOperator = 100
+ BuiltinOperator_ABS BuiltinOperator = 101
+ BuiltinOperator_SPLIT_V BuiltinOperator = 102
+ BuiltinOperator_UNIQUE BuiltinOperator = 103
+ BuiltinOperator_CEIL BuiltinOperator = 104
+ BuiltinOperator_REVERSE_V2 BuiltinOperator = 105
+ BuiltinOperator_ADD_N BuiltinOperator = 106
+ BuiltinOperator_GATHER_ND BuiltinOperator = 107
+ BuiltinOperator_COS BuiltinOperator = 108
+ BuiltinOperator_WHERE BuiltinOperator = 109
+ BuiltinOperator_RANK BuiltinOperator = 110
+ BuiltinOperator_ELU BuiltinOperator = 111
+ BuiltinOperator_REVERSE_SEQUENCE BuiltinOperator = 112
+ BuiltinOperator_MATRIX_DIAG BuiltinOperator = 113
+ BuiltinOperator_QUANTIZE BuiltinOperator = 114
+ BuiltinOperator_MATRIX_SET_DIAG BuiltinOperator = 115
+ BuiltinOperator_MIN BuiltinOperator = BuiltinOperator_ADD
+ BuiltinOperator_MAX BuiltinOperator = BuiltinOperator_MATRIX_SET_DIAG
+)
+
+// ExpAddBuiltinOp adds a builtin op specified by code and registration. The current implementation is a work in progress.
+func (o *InterpreterOptions) ExpAddBuiltinOp(op BuiltinOperator, reg *ExpRegistration, minVersion, maxVersion int) {
+ r := C._make_registration(
+ reg.Init,
+ reg.Free,
+ reg.Prepare,
+ reg.Invoke,
+ reg.ProfilingString,
+ )
+ C.TfLiteInterpreterOptionsAddBuiltinOp(o.o, C.TfLiteBuiltinOperator(op), r, C.int(minVersion), C.int(maxVersion))
+}
+
+// ExpAddCustomOp adds a custom op specified by name and registration. The current implementation is a work in progress.
+func (o *InterpreterOptions) ExpAddCustomOp(name string, reg *ExpRegistration, minVersion, maxVersion int) {
+ ptr := C.CString(name)
+ defer C.free(unsafe.Pointer(ptr))
+ r := C._make_registration(
+ reg.Init,
+ reg.Free,
+ reg.Prepare,
+ reg.Invoke,
+ reg.ProfilingString,
+ )
+ C.TfLiteInterpreterOptionsAddCustomOp(o.o, ptr, r, C.int(minVersion), C.int(maxVersion))
+}
+
+// SetUseNNAPI enable or disable the NN API for the interpreter (true to enable).
+func (o *InterpreterOptions) SetUseNNAPI(enable bool) {
+ C.TfLiteInterpreterOptionsSetUseNNAPI(o.o, C.bool(enable))
+}
+
+// DynamicBuffer is a buffer that holds multiple strings.
+type DynamicBuffer struct {
+ data bytes.Buffer
+ offset []int
+}
+
+// AddString appends a string to the dynamic buffer.
+func (d *DynamicBuffer) AddString(s string) {
+ b := []byte(s)
+ d.data.Write(b)
+ if len(d.offset) == 0 {
+ d.offset = append(d.offset, len(b))
+ } else {
+ d.offset = append(d.offset, d.offset[len(d.offset)-1]+len(b))
+ }
+}
+
+// WriteToTensorAsVector writes the buffer into the tensor as a string vector.
+func (d *DynamicBuffer) WriteToTensorAsVector(t *Tensor) {
+ var out bytes.Buffer
+
+ b := make([]byte, 4)
+
+ // Allocate sufficient memory to tensor buffer.
+ num_strings := len(d.offset)
+
+ // Write the number of strings.
+ binary.LittleEndian.PutUint32(b, uint32(num_strings))
+ out.Write(b)
+
+ if num_strings > 0 {
+
+ // Set offset of strings.
+ start := sizeof_int32_t + sizeof_int32_t*(num_strings+1)
+ offset := start
+
+ binary.LittleEndian.PutUint32(b, uint32(offset))
+ out.Write(b)
+
+ for i := 0; i < len(d.offset); i++ {
+ offset := start + d.offset[i]
+ binary.LittleEndian.PutUint32(b, uint32(offset))
+ out.Write(b)
+ }
+
+ // Copy data of strings.
+ io.Copy(&out, &d.data)
+ }
+
+ b = out.Bytes()
+ C.writeToTensorAsVector(t.t, (*C.char)(unsafe.Pointer(&b[0])), C.size_t(len(b)), C.int(len(d.offset)))
+}
+
+// GetString returns string in the string buffer.
+func (t *Tensor) GetString(index int) string {
+ if t.Type() != String {
+ return ""
+ }
+ ptr := uintptr(t.Data())
+ count := int(*(*C.int32_t)(unsafe.Pointer(ptr)))
+ if index >= count {
+ return ""
+ }
+ offset1 := int(*(*C.int32_t)(unsafe.Pointer(ptr + uintptr(4*(index+1)))))
+ offset2 := int(*(*C.int32_t)(unsafe.Pointer(ptr + uintptr(4*(index+2)))))
+ return string((*((*[1<<31 - 1]uint8)(unsafe.Pointer(ptr))))[offset1:offset2])
+}
diff --git a/vendor/github.com/mattn/go-tflite/tflite_experimental.go.h b/vendor/github.com/mattn/go-tflite/tflite_experimental.go.h
new file mode 100644
index 0000000..6bedd70
--- /dev/null
+++ b/vendor/github.com/mattn/go-tflite/tflite_experimental.go.h
@@ -0,0 +1,10 @@
+#ifndef GO_TFLITE_EXPERIMENTAL_H
+#define GO_TFLITE_EXPERIMENTAL_H
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <tensorflow/lite/c/c_api.h>
+#include <tensorflow/lite/c/c_api_experimental.h>
+#endif
diff --git a/vendor/github.com/mattn/go-tflite/tflite_type.go b/vendor/github.com/mattn/go-tflite/tflite_type.go
new file mode 100644
index 0000000..c6fb0d2
--- /dev/null
+++ b/vendor/github.com/mattn/go-tflite/tflite_type.go
@@ -0,0 +1,198 @@
+package tflite
+
+/*
+#ifndef GO_TFLITE_H
+#include "tflite.go.h"
+#endif
+*/
+import "C"
+import "errors"
+
+var (
+ // ErrTypeMismatch is type mismatch.
+ ErrTypeMismatch = errors.New("type mismatch")
+ // ErrBadTensor is bad tensor.
+ ErrBadTensor = errors.New("bad tensor")
+)
+
+// SetInt32s sets int32s.
+func (t *Tensor) SetInt32s(v []int32) error {
+ if t.Type() != Int32 {
+ return ErrTypeMismatch
+ }
+ ptr := C.TfLiteTensorData(t.t)
+ if ptr == nil {
+ return ErrBadTensor
+ }
+ n := t.ByteSize() / 4
+ to := (*((*[1<<29 - 1]int32)(ptr)))[:n]
+ copy(to, v)
+ return nil
+}
+
+// Int32s returns int32s.
+func (t *Tensor) Int32s() []int32 {
+ if t.Type() != Int32 {
+ return nil
+ }
+ ptr := C.TfLiteTensorData(t.t)
+ if ptr == nil {
+ return nil
+ }
+ n := t.ByteSize() / 4
+ return (*((*[1<<29 - 1]int32)(ptr)))[:n]
+}
+
+// SetFloat32s sets float32s.
+func (t *Tensor) SetFloat32s(v []float32) error {
+ if t.Type() != Float32 {
+ return ErrTypeMismatch
+ }
+ ptr := C.TfLiteTensorData(t.t)
+ if ptr == nil {
+ return ErrBadTensor
+ }
+ n := t.ByteSize() / 4
+ to := (*((*[1<<29 - 1]float32)(ptr)))[:n]
+ copy(to, v)
+ return nil
+}
+
+// Float32s returns float32s.
+func (t *Tensor) Float32s() []float32 {
+ if t.Type() != Float32 {
+ return nil
+ }
+ ptr := C.TfLiteTensorData(t.t)
+ if ptr == nil {
+ return nil
+ }
+ n := t.ByteSize() / 4
+ return (*((*[1<<29 - 1]float32)(ptr)))[:n]
+}
+
+// Float32At returns float32 value located in the dimension.
+func (t *Tensor) Float32At(at ...int) float32 {
+ pos := 0
+ for i := 0; i < t.NumDims(); i++ {
+ pos = pos*t.Dim(i) + at[i]
+ }
+ return t.Float32s()[pos]
+}
+
+// SetUint8s sets uint8s.
+func (t *Tensor) SetUint8s(v []uint8) error {
+ if t.Type() != UInt8 {
+ return ErrTypeMismatch
+ }
+ ptr := C.TfLiteTensorData(t.t)
+ if ptr == nil {
+ return ErrBadTensor
+ }
+ n := t.ByteSize()
+ to := (*((*[1<<29 - 1]uint8)(ptr)))[:n]
+ copy(to, v)
+ return nil
+}
+
+// UInt8s returns uint8s.
+func (t *Tensor) UInt8s() []uint8 {
+ if t.Type() != UInt8 {
+ return nil
+ }
+ ptr := C.TfLiteTensorData(t.t)
+ if ptr == nil {
+ return nil
+ }
+ n := t.ByteSize()
+ return (*((*[1<<29 - 1]uint8)(ptr)))[:n]
+}
+
+// SetInt64s sets int64s.
+func (t *Tensor) SetInt64s(v []int64) error {
+ if t.Type() != Int64 {
+ return ErrTypeMismatch
+ }
+ ptr := C.TfLiteTensorData(t.t)
+ if ptr == nil {
+ return ErrBadTensor
+ }
+ n := t.ByteSize() / 8
+ to := (*((*[1<<28 - 1]int64)(ptr)))[:n]
+ copy(to, v)
+ return nil
+}
+
+// Int64s returns int64s.
+func (t *Tensor) Int64s() []int64 {
+ if t.Type() != Int64 {
+ return nil
+ }
+ ptr := C.TfLiteTensorData(t.t)
+ if ptr == nil {
+ return nil
+ }
+ n := t.ByteSize() / 8
+ return (*((*[1<<28 - 1]int64)(ptr)))[:n]
+}
+
+// SetInt16s sets int16s.
+func (t *Tensor) SetInt16s(v []int16) error {
+ if t.Type() != Int16 {
+ return ErrTypeMismatch
+ }
+ ptr := C.TfLiteTensorData(t.t)
+ if ptr == nil {
+ return ErrBadTensor
+ }
+ n := t.ByteSize() / 2
+ to := (*((*[1<<29 - 1]int16)(ptr)))[:n]
+ copy(to, v)
+ return nil
+}
+
+// Int16s returns int16s.
+func (t *Tensor) Int16s() []int16 {
+ if t.Type() != Int16 {
+ return nil
+ }
+ ptr := C.TfLiteTensorData(t.t)
+ if ptr == nil {
+ return nil
+ }
+ n := t.ByteSize() / 2
+ return (*((*[1<<29 - 1]int16)(ptr)))[:n]
+}
+
+// SetInt8s sets int8s.
+func (t *Tensor) SetInt8s(v []int8) error {
+ if t.Type() != Int8 {
+ return ErrTypeMismatch
+ }
+ ptr := C.TfLiteTensorData(t.t)
+ if ptr == nil {
+ return ErrBadTensor
+ }
+ n := t.ByteSize()
+ to := (*((*[1<<29 - 1]int8)(ptr)))[:n]
+ copy(to, v)
+ return nil
+}
+
+// Int8s returns int8s.
+func (t *Tensor) Int8s() []int8 {
+ if t.Type() != Int8 {
+ return nil
+ }
+ ptr := C.TfLiteTensorData(t.t)
+ if ptr == nil {
+ return nil
+ }
+ n := t.ByteSize()
+ return (*((*[1<<29 - 1]int8)(ptr)))[:n]
+}
+
+// String returns name of tensor.
+func (t *Tensor) String() string {
+ return t.Name()
+}
diff --git a/vendor/github.com/mattn/go-tflite/type_string.go b/vendor/github.com/mattn/go-tflite/type_string.go
new file mode 100644
index 0000000..ee24f05
--- /dev/null
+++ b/vendor/github.com/mattn/go-tflite/type_string.go
@@ -0,0 +1,27 @@
+// Code generated by "stringer -type TensorType,Status -output type_string.go ."; DO NOT EDIT.
+
+package tflite
+
+import "strconv"
+
+const _TensorType_name = "NoTypeFloat32Int32UInt8Int64StringBoolInt16Complex64Int8"
+
+var _TensorType_index = [...]uint8{0, 6, 13, 18, 23, 28, 34, 38, 43, 52, 56}
+
+func (i TensorType) String() string {
+ if i < 0 || i >= TensorType(len(_TensorType_index)-1) {
+ return "TensorType(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _TensorType_name[_TensorType_index[i]:_TensorType_index[i+1]]
+}
+
+const _Status_name = "OK"
+
+var _Status_index = [...]uint8{0, 2}
+
+func (i Status) String() string {
+ if i < 0 || i >= Status(len(_Status_index)-1) {
+ return "Status(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Status_name[_Status_index[i]:_Status_index[i+1]]
+}
diff --git a/vendor/go.uber.org/atomic/.codecov.yml b/vendor/go.uber.org/atomic/.codecov.yml
new file mode 100644
index 0000000..571116c
--- /dev/null
+++ b/vendor/go.uber.org/atomic/.codecov.yml
@@ -0,0 +1,19 @@
+coverage:
+ range: 80..100
+ round: down
+ precision: 2
+
+ status:
+ project: # measuring the overall project coverage
+ default: # context, you can create multiple ones with custom titles
+ enabled: yes # must be yes|true to enable this status
+ target: 100 # specify the target coverage for each commit status
+ # option: "auto" (must increase from parent commit or pull request base)
+ # option: "X%" a static target percentage to hit
+ if_not_found: success # if parent is not found report status as success, error, or failure
+ if_ci_failed: error # if ci fails report status as success, error, or failure
+
+# Also update COVER_IGNORE_PKGS in the Makefile.
+ignore:
+ - /internal/gen-atomicint/
+ - /internal/gen-valuewrapper/
diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore
new file mode 100644
index 0000000..c3fa253
--- /dev/null
+++ b/vendor/go.uber.org/atomic/.gitignore
@@ -0,0 +1,12 @@
+/bin
+.DS_Store
+/vendor
+cover.html
+cover.out
+lint.log
+
+# Binaries
+*.test
+
+# Profiling output
+*.prof
diff --git a/vendor/go.uber.org/atomic/.travis.yml b/vendor/go.uber.org/atomic/.travis.yml
new file mode 100644
index 0000000..13d0a4f
--- /dev/null
+++ b/vendor/go.uber.org/atomic/.travis.yml
@@ -0,0 +1,27 @@
+sudo: false
+language: go
+go_import_path: go.uber.org/atomic
+
+env:
+ global:
+ - GO111MODULE=on
+
+matrix:
+ include:
+ - go: oldstable
+ - go: stable
+ env: LINT=1
+
+cache:
+ directories:
+ - vendor
+
+before_install:
+ - go version
+
+script:
+ - test -z "$LINT" || make lint
+ - make cover
+
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md
new file mode 100644
index 0000000..24c0274
--- /dev/null
+++ b/vendor/go.uber.org/atomic/CHANGELOG.md
@@ -0,0 +1,76 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [1.7.0] - 2020-09-14
+### Added
+- Support JSON serialization and deserialization of primitive atomic types.
+- Support Text marshalling and unmarshalling for string atomics.
+
+### Changed
+- Disallow incorrect comparison of atomic values in a non-atomic way.
+
+### Removed
+- Remove dependency on `golang.org/x/{lint, tools}`.
+
+## [1.6.0] - 2020-02-24
+### Changed
+- Drop library dependency on `golang.org/x/{lint, tools}`.
+
+## [1.5.1] - 2019-11-19
+- Fix bug where `Bool.CAS` and `Bool.Toggle` did not work correctly together,
+ causing `CAS` to fail even though the old value matched.
+
+## [1.5.0] - 2019-10-29
+### Changed
+- With Go modules, only the `go.uber.org/atomic` import path is supported now.
+ If you need to use the old import path, please add a `replace` directive to
+ your `go.mod`.
+
+## [1.4.0] - 2019-05-01
+### Added
+ - Add `atomic.Error` type for atomic operations on `error` values.
+
+## [1.3.2] - 2018-05-02
+### Added
+- Add `atomic.Duration` type for atomic operations on `time.Duration` values.
+
+## [1.3.1] - 2017-11-14
+### Fixed
+- Revert optimization for `atomic.String.Store("")` which caused data races.
+
+## [1.3.0] - 2017-11-13
+### Added
+- Add `atomic.Bool.CAS` for compare-and-swap semantics on bools.
+
+### Changed
+- Optimize `atomic.String.Store("")` by avoiding an allocation.
+
+## [1.2.0] - 2017-04-12
+### Added
+- Shadow `atomic.Value` from `sync/atomic`.
+
+## [1.1.0] - 2017-03-10
+### Added
+- Add atomic `Float64` type.
+
+### Changed
+- Support new `go.uber.org/atomic` import path.
+
+## [1.0.0] - 2016-07-18
+
+- Initial release.
+
+[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0
+[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0
+[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1
+[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0
+[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0
+[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2
+[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1
+[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0
+[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0
+[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0
+[1.0.0]: https://github.com/uber-go/atomic/releases/tag/v1.0.0
diff --git a/vendor/go.uber.org/atomic/LICENSE.txt b/vendor/go.uber.org/atomic/LICENSE.txt
new file mode 100644
index 0000000..8765c9f
--- /dev/null
+++ b/vendor/go.uber.org/atomic/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (c) 2016 Uber Technologies, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile
new file mode 100644
index 0000000..1b1376d
--- /dev/null
+++ b/vendor/go.uber.org/atomic/Makefile
@@ -0,0 +1,78 @@
+# Directory to place `go install`ed binaries into.
+export GOBIN ?= $(shell pwd)/bin
+
+GOLINT = $(GOBIN)/golint
+GEN_ATOMICINT = $(GOBIN)/gen-atomicint
+GEN_ATOMICWRAPPER = $(GOBIN)/gen-atomicwrapper
+STATICCHECK = $(GOBIN)/staticcheck
+
+GO_FILES ?= $(shell find . '(' -path .git -o -path vendor ')' -prune -o -name '*.go' -print)
+
+# Also update ignore section in .codecov.yml.
+COVER_IGNORE_PKGS = \
+ go.uber.org/atomic/internal/gen-atomicint \
+ go.uber.org/atomic/internal/gen-atomicwrapper
+
+.PHONY: build
+build:
+ go build ./...
+
+.PHONY: test
+test:
+ go test -race ./...
+
+.PHONY: gofmt
+gofmt:
+ $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX))
+ gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true
+ @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" && cat $(FMT_LOG) && false)
+
+$(GOLINT):
+ cd tools && go install golang.org/x/lint/golint
+
+$(STATICCHECK):
+ cd tools && go install honnef.co/go/tools/cmd/staticcheck
+
+$(GEN_ATOMICWRAPPER): $(wildcard ./internal/gen-atomicwrapper/*)
+ go build -o $@ ./internal/gen-atomicwrapper
+
+$(GEN_ATOMICINT): $(wildcard ./internal/gen-atomicint/*)
+ go build -o $@ ./internal/gen-atomicint
+
+.PHONY: golint
+golint: $(GOLINT)
+ $(GOLINT) ./...
+
+.PHONY: staticcheck
+staticcheck: $(STATICCHECK)
+ $(STATICCHECK) ./...
+
+.PHONY: lint
+lint: gofmt golint staticcheck generatenodirty
+
+# comma separated list of packages to consider for code coverage.
+COVER_PKG = $(shell \
+ go list -find ./... | \
+ grep -v $(foreach pkg,$(COVER_IGNORE_PKGS),-e "^$(pkg)$$") | \
+ paste -sd, -)
+
+.PHONY: cover
+cover:
+ go test -coverprofile=cover.out -coverpkg $(COVER_PKG) -v ./...
+ go tool cover -html=cover.out -o cover.html
+
+.PHONY: generate
+generate: $(GEN_ATOMICINT) $(GEN_ATOMICWRAPPER)
+ go generate ./...
+
+.PHONY: generatenodirty
+generatenodirty:
+ @[ -z "$$(git status --porcelain)" ] || ( \
+ echo "Working tree is dirty. Commit your changes first."; \
+ exit 1 )
+ @make generate
+ @status=$$(git status --porcelain); \
+ [ -z "$$status" ] || ( \
+		echo "Working tree is dirty after 'make generate':"; \
+ echo "$$status"; \
+ echo "Please ensure that the generated code is up-to-date." )
diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md
new file mode 100644
index 0000000..ade0c20
--- /dev/null
+++ b/vendor/go.uber.org/atomic/README.md
@@ -0,0 +1,63 @@
+# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard]
+
+Simple wrappers for primitive types to enforce atomic access.
+
+## Installation
+
+```shell
+$ go get -u go.uber.org/atomic@v1
+```
+
+### Legacy Import Path
+
+As of v1.5.0, the import path `go.uber.org/atomic` is the only supported way
+of using this package. If you are using Go modules, this package will fail to
+compile with the legacy import path `github.com/uber-go/atomic`.
+
+We recommend migrating your code to the new import path but if you're unable
+to do so, or if your dependencies are still using the old import path, you
+will have to add a `replace` directive to your `go.mod` file downgrading the
+legacy import path to an older version.
+
+```
+replace github.com/uber-go/atomic => github.com/uber-go/atomic v1.4.0
+```
+
+You can do so automatically by running the following command.
+
+```shell
+$ go mod edit -replace github.com/uber-go/atomic=github.com/uber-go/atomic@v1.4.0
+```
+
+## Usage
+
+The standard library's `sync/atomic` is powerful, but it's easy to forget which
+variables must be accessed atomically. `go.uber.org/atomic` preserves all the
+functionality of the standard library, but wraps the primitive types to
+provide a safer, more convenient API.
+
+```go
+var atom atomic.Uint32
+atom.Store(42)
+atom.Sub(2)
+atom.CAS(40, 11)
+```
+
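+The non-integer wrappers vendored here (`Bool`, `Float64`, `Duration`, and so
+on) follow the same pattern. A minimal sketch using methods defined in the
+sources below (assumes `fmt` and `time` are imported):
+
+```go
+var ready atomic.Bool
+ready.Store(true)
+prev := ready.Toggle() // prev == true; ready is now false
+
+var delay atomic.Duration
+delay.Store(5 * time.Second)
+
+var ratio atomic.Float64
+ratio.Add(0.25)
+
+fmt.Println(prev, delay.Load(), ratio.Load())
+```
+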
+See the [documentation][doc] for a complete API specification.
+
+## Development Status
+
+Stable.
+
+---
+
+Released under the [MIT License](LICENSE.txt).
+
+[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg
+[doc]: https://godoc.org/go.uber.org/atomic
+[ci-img]: https://travis-ci.com/uber-go/atomic.svg?branch=master
+[ci]: https://travis-ci.com/uber-go/atomic
+[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg
+[cov]: https://codecov.io/gh/uber-go/atomic
+[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic
+[reportcard]: https://goreportcard.com/report/go.uber.org/atomic
diff --git a/vendor/go.uber.org/atomic/bool.go b/vendor/go.uber.org/atomic/bool.go
new file mode 100644
index 0000000..9cf1914
--- /dev/null
+++ b/vendor/go.uber.org/atomic/bool.go
@@ -0,0 +1,81 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+)
+
+// Bool is an atomic type-safe wrapper for bool values.
+type Bool struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v Uint32
+}
+
+var _zeroBool bool
+
+// NewBool creates a new Bool.
+func NewBool(v bool) *Bool {
+ x := &Bool{}
+ if v != _zeroBool {
+ x.Store(v)
+ }
+ return x
+}
+
+// Load atomically loads the wrapped bool.
+func (x *Bool) Load() bool {
+ return truthy(x.v.Load())
+}
+
+// Store atomically stores the passed bool.
+func (x *Bool) Store(v bool) {
+ x.v.Store(boolToInt(v))
+}
+
+// CAS is an atomic compare-and-swap for bool values.
+func (x *Bool) CAS(o, n bool) bool {
+ return x.v.CAS(boolToInt(o), boolToInt(n))
+}
+
+// Swap atomically stores the given bool and returns the old
+// value.
+func (x *Bool) Swap(o bool) bool {
+ return truthy(x.v.Swap(boolToInt(o)))
+}
+
+// MarshalJSON encodes the wrapped bool into JSON.
+func (x *Bool) MarshalJSON() ([]byte, error) {
+ return json.Marshal(x.Load())
+}
+
+// UnmarshalJSON decodes a bool from JSON.
+func (x *Bool) UnmarshalJSON(b []byte) error {
+ var v bool
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ x.Store(v)
+ return nil
+}
diff --git a/vendor/go.uber.org/atomic/bool_ext.go b/vendor/go.uber.org/atomic/bool_ext.go
new file mode 100644
index 0000000..c7bf7a8
--- /dev/null
+++ b/vendor/go.uber.org/atomic/bool_ext.go
@@ -0,0 +1,53 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "strconv"
+)
+
+//go:generate bin/gen-atomicwrapper -name=Bool -type=bool -wrapped=Uint32 -pack=boolToInt -unpack=truthy -cas -swap -json -file=bool.go
+
+func truthy(n uint32) bool {
+ return n == 1
+}
+
+func boolToInt(b bool) uint32 {
+ if b {
+ return 1
+ }
+ return 0
+}
+
+// Toggle atomically negates the Boolean and returns the previous value.
+func (b *Bool) Toggle() bool {
+ for {
+ old := b.Load()
+ if b.CAS(old, !old) {
+ return old
+ }
+ }
+}
+
+// String encodes the wrapped value as a string.
+func (b *Bool) String() string {
+ return strconv.FormatBool(b.Load())
+}
diff --git a/vendor/go.uber.org/atomic/doc.go b/vendor/go.uber.org/atomic/doc.go
new file mode 100644
index 0000000..ae7390e
--- /dev/null
+++ b/vendor/go.uber.org/atomic/doc.go
@@ -0,0 +1,23 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package atomic provides simple wrappers around numerics to enforce atomic
+// access.
+package atomic
diff --git a/vendor/go.uber.org/atomic/duration.go b/vendor/go.uber.org/atomic/duration.go
new file mode 100644
index 0000000..027cfcb
--- /dev/null
+++ b/vendor/go.uber.org/atomic/duration.go
@@ -0,0 +1,82 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "time"
+)
+
+// Duration is an atomic type-safe wrapper for time.Duration values.
+type Duration struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v Int64
+}
+
+var _zeroDuration time.Duration
+
+// NewDuration creates a new Duration.
+func NewDuration(v time.Duration) *Duration {
+ x := &Duration{}
+ if v != _zeroDuration {
+ x.Store(v)
+ }
+ return x
+}
+
+// Load atomically loads the wrapped time.Duration.
+func (x *Duration) Load() time.Duration {
+ return time.Duration(x.v.Load())
+}
+
+// Store atomically stores the passed time.Duration.
+func (x *Duration) Store(v time.Duration) {
+ x.v.Store(int64(v))
+}
+
+// CAS is an atomic compare-and-swap for time.Duration values.
+func (x *Duration) CAS(o, n time.Duration) bool {
+ return x.v.CAS(int64(o), int64(n))
+}
+
+// Swap atomically stores the given time.Duration and returns the old
+// value.
+func (x *Duration) Swap(o time.Duration) time.Duration {
+ return time.Duration(x.v.Swap(int64(o)))
+}
+
+// MarshalJSON encodes the wrapped time.Duration into JSON.
+func (x *Duration) MarshalJSON() ([]byte, error) {
+ return json.Marshal(x.Load())
+}
+
+// UnmarshalJSON decodes a time.Duration from JSON.
+func (x *Duration) UnmarshalJSON(b []byte) error {
+ var v time.Duration
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ x.Store(v)
+ return nil
+}
diff --git a/vendor/go.uber.org/atomic/duration_ext.go b/vendor/go.uber.org/atomic/duration_ext.go
new file mode 100644
index 0000000..6273b66
--- /dev/null
+++ b/vendor/go.uber.org/atomic/duration_ext.go
@@ -0,0 +1,40 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import "time"
+
+//go:generate bin/gen-atomicwrapper -name=Duration -type=time.Duration -wrapped=Int64 -pack=int64 -unpack=time.Duration -cas -swap -json -imports time -file=duration.go
+
+// Add atomically adds to the wrapped time.Duration and returns the new value.
+func (d *Duration) Add(n time.Duration) time.Duration {
+ return time.Duration(d.v.Add(int64(n)))
+}
+
+// Sub atomically subtracts from the wrapped time.Duration and returns the new value.
+func (d *Duration) Sub(n time.Duration) time.Duration {
+ return time.Duration(d.v.Sub(int64(n)))
+}
+
+// String encodes the wrapped value as a string.
+func (d *Duration) String() string {
+ return d.Load().String()
+}
diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go
new file mode 100644
index 0000000..a6166fb
--- /dev/null
+++ b/vendor/go.uber.org/atomic/error.go
@@ -0,0 +1,51 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+// Error is an atomic type-safe wrapper for error values.
+type Error struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v Value
+}
+
+var _zeroError error
+
+// NewError creates a new Error.
+func NewError(v error) *Error {
+ x := &Error{}
+ if v != _zeroError {
+ x.Store(v)
+ }
+ return x
+}
+
+// Load atomically loads the wrapped error.
+func (x *Error) Load() error {
+ return unpackError(x.v.Load())
+}
+
+// Store atomically stores the passed error.
+func (x *Error) Store(v error) {
+ x.v.Store(packError(v))
+}
diff --git a/vendor/go.uber.org/atomic/error_ext.go b/vendor/go.uber.org/atomic/error_ext.go
new file mode 100644
index 0000000..ffe0be2
--- /dev/null
+++ b/vendor/go.uber.org/atomic/error_ext.go
@@ -0,0 +1,39 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+// atomic.Value panics on nil inputs, or if the underlying type changes.
+// Stabilize by always storing a custom struct that we control.
+
+//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -file=error.go
+
+type packedError struct{ Value error }
+
+func packError(v error) interface{} {
+ return packedError{v}
+}
+
+func unpackError(v interface{}) error {
+ if err, ok := v.(packedError); ok {
+ return err.Value
+ }
+ return nil
+}
diff --git a/vendor/go.uber.org/atomic/float64.go b/vendor/go.uber.org/atomic/float64.go
new file mode 100644
index 0000000..0719060
--- /dev/null
+++ b/vendor/go.uber.org/atomic/float64.go
@@ -0,0 +1,76 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "math"
+)
+
+// Float64 is an atomic type-safe wrapper for float64 values.
+type Float64 struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v Uint64
+}
+
+var _zeroFloat64 float64
+
+// NewFloat64 creates a new Float64.
+func NewFloat64(v float64) *Float64 {
+ x := &Float64{}
+ if v != _zeroFloat64 {
+ x.Store(v)
+ }
+ return x
+}
+
+// Load atomically loads the wrapped float64.
+func (x *Float64) Load() float64 {
+ return math.Float64frombits(x.v.Load())
+}
+
+// Store atomically stores the passed float64.
+func (x *Float64) Store(v float64) {
+ x.v.Store(math.Float64bits(v))
+}
+
+// CAS is an atomic compare-and-swap for float64 values.
+func (x *Float64) CAS(o, n float64) bool {
+ return x.v.CAS(math.Float64bits(o), math.Float64bits(n))
+}
+
+// MarshalJSON encodes the wrapped float64 into JSON.
+func (x *Float64) MarshalJSON() ([]byte, error) {
+ return json.Marshal(x.Load())
+}
+
+// UnmarshalJSON decodes a float64 from JSON.
+func (x *Float64) UnmarshalJSON(b []byte) error {
+ var v float64
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ x.Store(v)
+ return nil
+}
diff --git a/vendor/go.uber.org/atomic/float64_ext.go b/vendor/go.uber.org/atomic/float64_ext.go
new file mode 100644
index 0000000..927b1ad
--- /dev/null
+++ b/vendor/go.uber.org/atomic/float64_ext.go
@@ -0,0 +1,47 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import "strconv"
+
+//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -cas -json -imports math -file=float64.go
+
+// Add atomically adds to the wrapped float64 and returns the new value.
+func (f *Float64) Add(s float64) float64 {
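+	// There is no native atomic add for float64, so retry a CAS loop over the
+	// IEEE-754 bit pattern held in the wrapped Uint64.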
+ for {
+ old := f.Load()
+ new := old + s
+ if f.CAS(old, new) {
+ return new
+ }
+ }
+}
+
+// Sub atomically subtracts from the wrapped float64 and returns the new value.
+func (f *Float64) Sub(s float64) float64 {
+ return f.Add(-s)
+}
+
+// String encodes the wrapped value as a string.
+func (f *Float64) String() string {
+ // 'g' is the behavior for floats with %v.
+ return strconv.FormatFloat(f.Load(), 'g', -1, 64)
+}
diff --git a/vendor/go.uber.org/atomic/gen.go b/vendor/go.uber.org/atomic/gen.go
new file mode 100644
index 0000000..50d6b24
--- /dev/null
+++ b/vendor/go.uber.org/atomic/gen.go
@@ -0,0 +1,26 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+//go:generate bin/gen-atomicint -name=Int32 -wrapped=int32 -file=int32.go
+//go:generate bin/gen-atomicint -name=Int64 -wrapped=int64 -file=int64.go
+//go:generate bin/gen-atomicint -name=Uint32 -wrapped=uint32 -unsigned -file=uint32.go
+//go:generate bin/gen-atomicint -name=Uint64 -wrapped=uint64 -unsigned -file=uint64.go
diff --git a/vendor/go.uber.org/atomic/int32.go b/vendor/go.uber.org/atomic/int32.go
new file mode 100644
index 0000000..18ae564
--- /dev/null
+++ b/vendor/go.uber.org/atomic/int32.go
@@ -0,0 +1,102 @@
+// @generated Code generated by gen-atomicint.
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "strconv"
+ "sync/atomic"
+)
+
+// Int32 is an atomic wrapper around int32.
+type Int32 struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v int32
+}
+
+// NewInt32 creates a new Int32.
+func NewInt32(i int32) *Int32 {
+ return &Int32{v: i}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Int32) Load() int32 {
+ return atomic.LoadInt32(&i.v)
+}
+
+// Add atomically adds to the wrapped int32 and returns the new value.
+func (i *Int32) Add(n int32) int32 {
+ return atomic.AddInt32(&i.v, n)
+}
+
+// Sub atomically subtracts from the wrapped int32 and returns the new value.
+func (i *Int32) Sub(n int32) int32 {
+ return atomic.AddInt32(&i.v, -n)
+}
+
+// Inc atomically increments the wrapped int32 and returns the new value.
+func (i *Int32) Inc() int32 {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped int32 and returns the new value.
+func (i *Int32) Dec() int32 {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Int32) CAS(old, new int32) bool {
+ return atomic.CompareAndSwapInt32(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Int32) Store(n int32) {
+ atomic.StoreInt32(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped int32 and returns the old value.
+func (i *Int32) Swap(n int32) int32 {
+ return atomic.SwapInt32(&i.v, n)
+}
+
+// MarshalJSON encodes the wrapped int32 into JSON.
+func (i *Int32) MarshalJSON() ([]byte, error) {
+ return json.Marshal(i.Load())
+}
+
+// UnmarshalJSON decodes JSON into the wrapped int32.
+func (i *Int32) UnmarshalJSON(b []byte) error {
+ var v int32
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ i.Store(v)
+ return nil
+}
+
+// String encodes the wrapped value as a string.
+func (i *Int32) String() string {
+ v := i.Load()
+ return strconv.FormatInt(int64(v), 10)
+}
diff --git a/vendor/go.uber.org/atomic/int64.go b/vendor/go.uber.org/atomic/int64.go
new file mode 100644
index 0000000..2bcbbfa
--- /dev/null
+++ b/vendor/go.uber.org/atomic/int64.go
@@ -0,0 +1,102 @@
+// @generated Code generated by gen-atomicint.
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "strconv"
+ "sync/atomic"
+)
+
+// Int64 is an atomic wrapper around int64.
+type Int64 struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v int64
+}
+
+// NewInt64 creates a new Int64.
+func NewInt64(i int64) *Int64 {
+ return &Int64{v: i}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Int64) Load() int64 {
+ return atomic.LoadInt64(&i.v)
+}
+
+// Add atomically adds to the wrapped int64 and returns the new value.
+func (i *Int64) Add(n int64) int64 {
+ return atomic.AddInt64(&i.v, n)
+}
+
+// Sub atomically subtracts from the wrapped int64 and returns the new value.
+func (i *Int64) Sub(n int64) int64 {
+ return atomic.AddInt64(&i.v, -n)
+}
+
+// Inc atomically increments the wrapped int64 and returns the new value.
+func (i *Int64) Inc() int64 {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped int64 and returns the new value.
+func (i *Int64) Dec() int64 {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Int64) CAS(old, new int64) bool {
+ return atomic.CompareAndSwapInt64(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Int64) Store(n int64) {
+ atomic.StoreInt64(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped int64 and returns the old value.
+func (i *Int64) Swap(n int64) int64 {
+ return atomic.SwapInt64(&i.v, n)
+}
+
+// MarshalJSON encodes the wrapped int64 into JSON.
+func (i *Int64) MarshalJSON() ([]byte, error) {
+ return json.Marshal(i.Load())
+}
+
+// UnmarshalJSON decodes JSON into the wrapped int64.
+func (i *Int64) UnmarshalJSON(b []byte) error {
+ var v int64
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ i.Store(v)
+ return nil
+}
+
+// String encodes the wrapped value as a string.
+func (i *Int64) String() string {
+ v := i.Load()
+ return strconv.FormatInt(int64(v), 10)
+}
diff --git a/vendor/go.uber.org/atomic/nocmp.go b/vendor/go.uber.org/atomic/nocmp.go
new file mode 100644
index 0000000..a8201cb
--- /dev/null
+++ b/vendor/go.uber.org/atomic/nocmp.go
@@ -0,0 +1,35 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+// nocmp is an uncomparable struct. Embed this inside another struct to make
+// it uncomparable.
+//
+// type Foo struct {
+// nocmp
+// // ...
+// }
+//
+// This DOES NOT:
+//
+// - Disallow shallow copies of structs
+// - Disallow comparison of pointers to uncomparable structs
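+//
+// It works because a zero-length array adds no memory overhead, while func
+// values are not comparable in Go; embedding nocmp therefore makes comparing
+// the outer struct with == a compile-time error.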
+type nocmp [0]func()
diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go
new file mode 100644
index 0000000..225b7a2
--- /dev/null
+++ b/vendor/go.uber.org/atomic/string.go
@@ -0,0 +1,54 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+// String is an atomic type-safe wrapper for string values.
+type String struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v Value
+}
+
+var _zeroString string
+
+// NewString creates a new String.
+func NewString(v string) *String {
+ x := &String{}
+ if v != _zeroString {
+ x.Store(v)
+ }
+ return x
+}
+
+// Load atomically loads the wrapped string.
+func (x *String) Load() string {
+ if v := x.v.Load(); v != nil {
+ return v.(string)
+ }
+ return _zeroString
+}
+
+// Store atomically stores the passed string.
+func (x *String) Store(v string) {
+ x.v.Store(v)
+}
diff --git a/vendor/go.uber.org/atomic/string_ext.go b/vendor/go.uber.org/atomic/string_ext.go
new file mode 100644
index 0000000..3a95582
--- /dev/null
+++ b/vendor/go.uber.org/atomic/string_ext.go
@@ -0,0 +1,43 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -file=string.go
+
+// String returns the wrapped value.
+func (s *String) String() string {
+ return s.Load()
+}
+
+// MarshalText encodes the wrapped string into a textual form.
+//
+// This makes it encodable as JSON, YAML, XML, and more.
+func (s *String) MarshalText() ([]byte, error) {
+ return []byte(s.Load()), nil
+}
+
+// UnmarshalText decodes text and replaces the wrapped string with it.
+//
+// This makes it decodable from JSON, YAML, XML, and more.
+func (s *String) UnmarshalText(b []byte) error {
+ s.Store(string(b))
+ return nil
+}
diff --git a/vendor/go.uber.org/atomic/uint32.go b/vendor/go.uber.org/atomic/uint32.go
new file mode 100644
index 0000000..a973aba
--- /dev/null
+++ b/vendor/go.uber.org/atomic/uint32.go
@@ -0,0 +1,102 @@
+// @generated Code generated by gen-atomicint.
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "strconv"
+ "sync/atomic"
+)
+
+// Uint32 is an atomic wrapper around uint32.
+type Uint32 struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v uint32
+}
+
+// NewUint32 creates a new Uint32.
+func NewUint32(i uint32) *Uint32 {
+ return &Uint32{v: i}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Uint32) Load() uint32 {
+ return atomic.LoadUint32(&i.v)
+}
+
+// Add atomically adds to the wrapped uint32 and returns the new value.
+func (i *Uint32) Add(n uint32) uint32 {
+ return atomic.AddUint32(&i.v, n)
+}
+
+// Sub atomically subtracts from the wrapped uint32 and returns the new value.
+func (i *Uint32) Sub(n uint32) uint32 {
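+	// ^(n - 1) is the two's complement of n, so this adds -n modulo 2^32.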
+ return atomic.AddUint32(&i.v, ^(n - 1))
+}
+
+// Inc atomically increments the wrapped uint32 and returns the new value.
+func (i *Uint32) Inc() uint32 {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped uint32 and returns the new value.
+func (i *Uint32) Dec() uint32 {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Uint32) CAS(old, new uint32) bool {
+ return atomic.CompareAndSwapUint32(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Uint32) Store(n uint32) {
+ atomic.StoreUint32(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped uint32 and returns the old value.
+func (i *Uint32) Swap(n uint32) uint32 {
+ return atomic.SwapUint32(&i.v, n)
+}
+
+// MarshalJSON encodes the wrapped uint32 into JSON.
+func (i *Uint32) MarshalJSON() ([]byte, error) {
+ return json.Marshal(i.Load())
+}
+
+// UnmarshalJSON decodes JSON into the wrapped uint32.
+func (i *Uint32) UnmarshalJSON(b []byte) error {
+ var v uint32
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ i.Store(v)
+ return nil
+}
+
+// String encodes the wrapped value as a string.
+func (i *Uint32) String() string {
+ v := i.Load()
+ return strconv.FormatUint(uint64(v), 10)
+}
diff --git a/vendor/go.uber.org/atomic/uint64.go b/vendor/go.uber.org/atomic/uint64.go
new file mode 100644
index 0000000..3b6c71f
--- /dev/null
+++ b/vendor/go.uber.org/atomic/uint64.go
@@ -0,0 +1,102 @@
+// @generated Code generated by gen-atomicint.
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "strconv"
+ "sync/atomic"
+)
+
+// Uint64 is an atomic wrapper around uint64.
+type Uint64 struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v uint64
+}
+
+// NewUint64 creates a new Uint64.
+func NewUint64(i uint64) *Uint64 {
+ return &Uint64{v: i}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Uint64) Load() uint64 {
+ return atomic.LoadUint64(&i.v)
+}
+
+// Add atomically adds to the wrapped uint64 and returns the new value.
+func (i *Uint64) Add(n uint64) uint64 {
+ return atomic.AddUint64(&i.v, n)
+}
+
+// Sub atomically subtracts from the wrapped uint64 and returns the new value.
+func (i *Uint64) Sub(n uint64) uint64 {
+ return atomic.AddUint64(&i.v, ^(n - 1))
+}
+
+// Inc atomically increments the wrapped uint64 and returns the new value.
+func (i *Uint64) Inc() uint64 {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped uint64 and returns the new value.
+func (i *Uint64) Dec() uint64 {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Uint64) CAS(old, new uint64) bool {
+ return atomic.CompareAndSwapUint64(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Uint64) Store(n uint64) {
+ atomic.StoreUint64(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped uint64 and returns the old value.
+func (i *Uint64) Swap(n uint64) uint64 {
+ return atomic.SwapUint64(&i.v, n)
+}
+
+// MarshalJSON encodes the wrapped uint64 into JSON.
+func (i *Uint64) MarshalJSON() ([]byte, error) {
+ return json.Marshal(i.Load())
+}
+
+// UnmarshalJSON decodes JSON into the wrapped uint64.
+func (i *Uint64) UnmarshalJSON(b []byte) error {
+ var v uint64
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ i.Store(v)
+ return nil
+}
+
+// String encodes the wrapped value as a string.
+func (i *Uint64) String() string {
+ v := i.Load()
+ return strconv.FormatUint(uint64(v), 10)
+}
diff --git a/vendor/go.uber.org/atomic/value.go b/vendor/go.uber.org/atomic/value.go
new file mode 100644
index 0000000..671f3a3
--- /dev/null
+++ b/vendor/go.uber.org/atomic/value.go
@@ -0,0 +1,31 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import "sync/atomic"
+
+// Value shadows the type of the same name from sync/atomic
+// https://godoc.org/sync/atomic#Value
+type Value struct {
+ atomic.Value
+
+ _ nocmp // disallow non-atomic comparison
+}
diff --git a/vendor/go.uber.org/multierr/.codecov.yml b/vendor/go.uber.org/multierr/.codecov.yml
new file mode 100644
index 0000000..6d4d1be
--- /dev/null
+++ b/vendor/go.uber.org/multierr/.codecov.yml
@@ -0,0 +1,15 @@
+coverage:
+ range: 80..100
+ round: down
+ precision: 2
+
+ status:
+ project: # measuring the overall project coverage
+ default: # context, you can create multiple ones with custom titles
+ enabled: yes # must be yes|true to enable this status
+ target: 100 # specify the target coverage for each commit status
+ # option: "auto" (must increase from parent commit or pull request base)
+ # option: "X%" a static target percentage to hit
+ if_not_found: success # if parent is not found report status as success, error, or failure
+ if_ci_failed: error # if ci fails report status as success, error, or failure
+
diff --git a/vendor/go.uber.org/multierr/.gitignore b/vendor/go.uber.org/multierr/.gitignore
new file mode 100644
index 0000000..b9a05e3
--- /dev/null
+++ b/vendor/go.uber.org/multierr/.gitignore
@@ -0,0 +1,4 @@
+/vendor
+cover.html
+cover.out
+/bin
diff --git a/vendor/go.uber.org/multierr/.travis.yml b/vendor/go.uber.org/multierr/.travis.yml
new file mode 100644
index 0000000..8636ab4
--- /dev/null
+++ b/vendor/go.uber.org/multierr/.travis.yml
@@ -0,0 +1,23 @@
+sudo: false
+language: go
+go_import_path: go.uber.org/multierr
+
+env:
+ global:
+ - GO111MODULE=on
+
+go:
+ - oldstable
+ - stable
+
+before_install:
+- go version
+
+script:
+- |
+ set -e
+ make lint
+ make cover
+
+after_success:
+- bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md
new file mode 100644
index 0000000..6f1db9e
--- /dev/null
+++ b/vendor/go.uber.org/multierr/CHANGELOG.md
@@ -0,0 +1,60 @@
+Releases
+========
+
+v1.6.0 (2020-09-14)
+===================
+
+- Actually drop library dependency on development-time tooling.
+
+
+v1.5.0 (2020-02-24)
+===================
+
+- Drop library dependency on development-time tooling.
+
+
+v1.4.0 (2019-11-04)
+===================
+
+- Add `AppendInto` function to more ergonomically build errors inside a
+ loop.
+
+
+v1.3.0 (2019-10-29)
+===================
+
+- Switch to Go modules.
+
+
+v1.2.0 (2019-09-26)
+===================
+
+- Support extracting and matching against wrapped errors with `errors.As`
+ and `errors.Is`.
+
+
+v1.1.0 (2017-06-30)
+===================
+
+- Added an `Errors(error) []error` function to extract the underlying list of
+ errors for a multierr error.
+
+
+v1.0.0 (2017-05-31)
+===================
+
+No changes since v0.2.0. This release is committing to making no breaking
+changes to the current API in the 1.X series.
+
+
+v0.2.0 (2017-04-11)
+===================
+
+- Repeatedly appending to the same error is now faster due to fewer
+ allocations.
+
+
+v0.1.0 (2017-03-31)
+===================
+
+- Initial release
diff --git a/vendor/go.uber.org/multierr/LICENSE.txt b/vendor/go.uber.org/multierr/LICENSE.txt
new file mode 100644
index 0000000..858e024
--- /dev/null
+++ b/vendor/go.uber.org/multierr/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (c) 2017 Uber Technologies, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/go.uber.org/multierr/Makefile b/vendor/go.uber.org/multierr/Makefile
new file mode 100644
index 0000000..3160044
--- /dev/null
+++ b/vendor/go.uber.org/multierr/Makefile
@@ -0,0 +1,42 @@
+# Directory to put `go install`ed binaries in.
+export GOBIN ?= $(shell pwd)/bin
+
+GO_FILES := $(shell \
+ find . '(' -path '*/.*' -o -path './vendor' ')' -prune \
+ -o -name '*.go' -print | cut -b3-)
+
+.PHONY: build
+build:
+ go build ./...
+
+.PHONY: test
+test:
+ go test -race ./...
+
+.PHONY: gofmt
+gofmt:
+ $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX))
+ @gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true
+ @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" | cat - $(FMT_LOG) && false)
+
+.PHONY: golint
+golint:
+ @cd tools && go install golang.org/x/lint/golint
+ @$(GOBIN)/golint ./...
+
+.PHONY: staticcheck
+staticcheck:
+ @cd tools && go install honnef.co/go/tools/cmd/staticcheck
+ @$(GOBIN)/staticcheck ./...
+
+.PHONY: lint
+lint: gofmt golint staticcheck
+
+.PHONY: cover
+cover:
+ go test -coverprofile=cover.out -coverpkg=./... -v ./...
+ go tool cover -html=cover.out -o cover.html
+
+update-license:
+ @cd tools && go install go.uber.org/tools/update-license
+ @$(GOBIN)/update-license $(GO_FILES)
diff --git a/vendor/go.uber.org/multierr/README.md b/vendor/go.uber.org/multierr/README.md
new file mode 100644
index 0000000..751bd65
--- /dev/null
+++ b/vendor/go.uber.org/multierr/README.md
@@ -0,0 +1,23 @@
+# multierr [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+
+`multierr` allows combining one or more Go `error`s together.
+
+## Installation
+
+ go get -u go.uber.org/multierr
+
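+## Usage
+
+A minimal sketch of the `Combine`, `Append`, and `Errors` functions documented
+in `error.go` below (`step1`, `step2`, and `cleanup` are placeholder functions,
+and `fmt` is assumed to be imported):
+
+```go
+err := multierr.Combine(
+	step1(),
+	step2(),
+)
+
+// Append is convenient when accumulating errors, e.g. from deferred cleanup.
+err = multierr.Append(err, cleanup())
+
+// Errors unpacks the combined error into its parts (a nil error yields nil).
+for _, e := range multierr.Errors(err) {
+	fmt.Println(e)
+}
+```
+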
+## Status
+
+Stable: No breaking changes will be made before 2.0.
+
+-------------------------------------------------------------------------------
+
+Released under the [MIT License].
+
+[MIT License]: LICENSE.txt
+[doc-img]: https://godoc.org/go.uber.org/multierr?status.svg
+[doc]: https://godoc.org/go.uber.org/multierr
+[ci-img]: https://travis-ci.com/uber-go/multierr.svg?branch=master
+[cov-img]: https://codecov.io/gh/uber-go/multierr/branch/master/graph/badge.svg
+[ci]: https://travis-ci.com/uber-go/multierr
+[cov]: https://codecov.io/gh/uber-go/multierr
diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go
new file mode 100644
index 0000000..5c9b67d
--- /dev/null
+++ b/vendor/go.uber.org/multierr/error.go
@@ -0,0 +1,449 @@
+// Copyright (c) 2019 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package multierr allows combining one or more errors together.
+//
+// Overview
+//
+// Errors can be combined with the use of the Combine function.
+//
+// multierr.Combine(
+// reader.Close(),
+// writer.Close(),
+// conn.Close(),
+// )
+//
+// If only two errors are being combined, the Append function may be used
+// instead.
+//
+// err = multierr.Append(reader.Close(), writer.Close())
+//
+// This makes it possible to record resource cleanup failures from deferred
+// blocks with the help of named return values.
+//
+// func sendRequest(req Request) (err error) {
+// conn, err := openConnection()
+// if err != nil {
+// return err
+// }
+// defer func() {
+// err = multierr.Append(err, conn.Close())
+// }()
+// // ...
+// }
+//
+// The underlying list of errors for a returned error object may be retrieved
+// with the Errors function.
+//
+// errors := multierr.Errors(err)
+// if len(errors) > 0 {
+// fmt.Println("The following errors occurred:", errors)
+// }
+//
+// Advanced Usage
+//
+// Errors returned by Combine and Append MAY implement the following
+// interface.
+//
+// type errorGroup interface {
+// // Returns a slice containing the underlying list of errors.
+// //
+// // This slice MUST NOT be modified by the caller.
+// Errors() []error
+// }
+//
+// Note that if you need access to the list of errors behind a multierr error, you
+// should prefer using the Errors function. That said, if you need cheap
+// read-only access to the underlying errors slice, you can attempt to cast
+// the error to this interface. You MUST handle the failure case gracefully
+// because errors returned by Combine and Append are not guaranteed to
+// implement this interface.
+//
+// var errors []error
+// group, ok := err.(errorGroup)
+// if ok {
+// errors = group.Errors()
+// } else {
+// errors = []error{err}
+// }
+package multierr // import "go.uber.org/multierr"
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+ "sync"
+
+ "go.uber.org/atomic"
+)
+
+var (
+ // Separator for single-line error messages.
+ _singlelineSeparator = []byte("; ")
+
+ // Prefix for multi-line messages
+ _multilinePrefix = []byte("the following errors occurred:")
+
+ // Prefix for the first and following lines of an item in a list of
+ // multi-line error messages.
+ //
+ // For example, if a single item is:
+ //
+ // foo
+ // bar
+ //
+ // It will become,
+ //
+ // - foo
+ // bar
+ _multilineSeparator = []byte("\n - ")
+ _multilineIndent = []byte(" ")
+)
+
+// _bufferPool is a pool of bytes.Buffers.
+var _bufferPool = sync.Pool{
+ New: func() interface{} {
+ return &bytes.Buffer{}
+ },
+}
+
+type errorGroup interface {
+ Errors() []error
+}
+
+// Errors returns a slice containing zero or more errors that the supplied
+// error is composed of. If the error is nil, a nil slice is returned.
+//
+// err := multierr.Append(r.Close(), w.Close())
+// errors := multierr.Errors(err)
+//
+// If the error is not composed of other errors, the returned slice contains
+// just the error that was passed in.
+//
+// Callers of this function are free to modify the returned slice.
+func Errors(err error) []error {
+ if err == nil {
+ return nil
+ }
+
+ // Note that we're casting to multiError, not errorGroup. Our contract is
+ // that returned errors MAY implement errorGroup. Errors, however, only
+ // has special behavior for multierr-specific error objects.
+ //
+ // This behavior can be expanded in the future but I think it's prudent to
+ // start with as little as possible in terms of contract and possibility
+ // of misuse.
+ eg, ok := err.(*multiError)
+ if !ok {
+ return []error{err}
+ }
+
+ errors := eg.Errors()
+ result := make([]error, len(errors))
+ copy(result, errors)
+ return result
+}
+
+// multiError is an error that holds one or more errors.
+//
+// An instance of this is guaranteed to be non-empty and flattened. That is,
+// none of the errors inside multiError are other multiErrors.
+//
+// multiError formats to a semi-colon delimited list of error messages with
+// %v and with a more readable multi-line format with %+v.
+type multiError struct {
+ copyNeeded atomic.Bool
+ errors []error
+}
+
+var _ errorGroup = (*multiError)(nil)
+
+// Errors returns the list of underlying errors.
+//
+// This slice MUST NOT be modified.
+func (merr *multiError) Errors() []error {
+ if merr == nil {
+ return nil
+ }
+ return merr.errors
+}
+
+func (merr *multiError) Error() string {
+ if merr == nil {
+ return ""
+ }
+
+ buff := _bufferPool.Get().(*bytes.Buffer)
+ buff.Reset()
+
+ merr.writeSingleline(buff)
+
+ result := buff.String()
+ _bufferPool.Put(buff)
+ return result
+}
+
+func (merr *multiError) Format(f fmt.State, c rune) {
+ if c == 'v' && f.Flag('+') {
+ merr.writeMultiline(f)
+ } else {
+ merr.writeSingleline(f)
+ }
+}
+
+func (merr *multiError) writeSingleline(w io.Writer) {
+ first := true
+ for _, item := range merr.errors {
+ if first {
+ first = false
+ } else {
+ w.Write(_singlelineSeparator)
+ }
+ io.WriteString(w, item.Error())
+ }
+}
+
+func (merr *multiError) writeMultiline(w io.Writer) {
+ w.Write(_multilinePrefix)
+ for _, item := range merr.errors {
+ w.Write(_multilineSeparator)
+ writePrefixLine(w, _multilineIndent, fmt.Sprintf("%+v", item))
+ }
+}
+
+// Writes s to the writer with the given prefix added before each line after
+// the first.
+func writePrefixLine(w io.Writer, prefix []byte, s string) {
+ first := true
+ for len(s) > 0 {
+ if first {
+ first = false
+ } else {
+ w.Write(prefix)
+ }
+
+ idx := strings.IndexByte(s, '\n')
+ if idx < 0 {
+ idx = len(s) - 1
+ }
+
+ io.WriteString(w, s[:idx+1])
+ s = s[idx+1:]
+ }
+}
+
+type inspectResult struct {
+ // Number of top-level non-nil errors
+ Count int
+
+ // Total number of errors including multiErrors
+ Capacity int
+
+ // Index of the first non-nil error in the list. Value is meaningless if
+ // Count is zero.
+ FirstErrorIdx int
+
+ // Whether the list contains at least one multiError
+ ContainsMultiError bool
+}
+
+// Inspects the given slice of errors so that we can efficiently allocate
+// space for it.
+func inspect(errors []error) (res inspectResult) {
+ first := true
+ for i, err := range errors {
+ if err == nil {
+ continue
+ }
+
+ res.Count++
+ if first {
+ first = false
+ res.FirstErrorIdx = i
+ }
+
+ if merr, ok := err.(*multiError); ok {
+ res.Capacity += len(merr.errors)
+ res.ContainsMultiError = true
+ } else {
+ res.Capacity++
+ }
+ }
+ return
+}
+
+// fromSlice converts the given list of errors into a single error.
+func fromSlice(errors []error) error {
+ res := inspect(errors)
+ switch res.Count {
+ case 0:
+ return nil
+ case 1:
+ // only one non-nil entry
+ return errors[res.FirstErrorIdx]
+ case len(errors):
+ if !res.ContainsMultiError {
+ // already flat
+ return &multiError{errors: errors}
+ }
+ }
+
+ nonNilErrs := make([]error, 0, res.Capacity)
+ for _, err := range errors[res.FirstErrorIdx:] {
+ if err == nil {
+ continue
+ }
+
+ if nested, ok := err.(*multiError); ok {
+ nonNilErrs = append(nonNilErrs, nested.errors...)
+ } else {
+ nonNilErrs = append(nonNilErrs, err)
+ }
+ }
+
+ return &multiError{errors: nonNilErrs}
+}
+
+// Combine combines the passed errors into a single error.
+//
+// If zero arguments were passed or if all items are nil, a nil error is
+// returned.
+//
+// Combine(nil, nil) // == nil
+//
+// If only a single error was passed, it is returned as-is.
+//
+// Combine(err) // == err
+//
+// Combine skips over nil arguments so this function may be used to combine
+// together errors from operations that fail independently of each other.
+//
+// multierr.Combine(
+// reader.Close(),
+// writer.Close(),
+// pipe.Close(),
+// )
+//
+// If any of the passed errors is a multierr error, it will be flattened along
+// with the other errors.
+//
+// multierr.Combine(multierr.Combine(err1, err2), err3)
+// // is the same as
+// multierr.Combine(err1, err2, err3)
+//
+// The returned error formats into a readable multi-line error message if
+// formatted with %+v.
+//
+// fmt.Sprintf("%+v", multierr.Combine(err1, err2))
+func Combine(errors ...error) error {
+ return fromSlice(errors)
+}
+
+// Append appends the given errors together. Either value may be nil.
+//
+// This function is a specialization of Combine for the common case where
+// there are only two errors.
+//
+// err = multierr.Append(reader.Close(), writer.Close())
+//
+// The following pattern may also be used to record failure of deferred
+// operations without losing information about the original error.
+//
+// func doSomething(..) (err error) {
+// f := acquireResource()
+// defer func() {
+// err = multierr.Append(err, f.Close())
+// }()
+// // ...
+// }
+func Append(left error, right error) error {
+ switch {
+ case left == nil:
+ return right
+ case right == nil:
+ return left
+ }
+
+ if _, ok := right.(*multiError); !ok {
+ if l, ok := left.(*multiError); ok && !l.copyNeeded.Swap(true) {
+ // Common case where the error on the left is constantly being
+ // appended to.
+ errs := append(l.errors, right)
+ return &multiError{errors: errs}
+ } else if !ok {
+ // Both errors are single errors.
+ return &multiError{errors: []error{left, right}}
+ }
+ }
+
+ // Either right or both, left and right, are multiErrors. Rely on usual
+ // expensive logic.
+ errors := [2]error{left, right}
+ return fromSlice(errors[0:])
+}
+
+// AppendInto appends an error into the destination of an error pointer and
+// returns whether the error being appended was non-nil.
+//
+// var err error
+// multierr.AppendInto(&err, r.Close())
+// multierr.AppendInto(&err, w.Close())
+//
+// The above is equivalent to,
+//
+// err := multierr.Append(r.Close(), w.Close())
+//
+// As AppendInto reports whether the provided error was non-nil, it may be
+// used to build a multierr error in a loop more ergonomically. For example:
+//
+// var err error
+// for line := range lines {
+// var item Item
+// if multierr.AppendInto(&err, parse(line, &item)) {
+// continue
+// }
+// items = append(items, item)
+// }
+//
+// Compare this with a version that relies solely on Append:
+//
+// var err error
+// for line := range lines {
+// var item Item
+// if parseErr := parse(line, &item); parseErr != nil {
+// err = multierr.Append(err, parseErr)
+// continue
+// }
+// items = append(items, item)
+// }
+func AppendInto(into *error, err error) (errored bool) {
+ if into == nil {
+ // We panic if 'into' is nil. This is not documented above
+ // because suggesting that the pointer must be non-nil may
+ // confuse users into thinking that the error that it points
+ // to must be non-nil.
+ panic("misuse of multierr.AppendInto: into pointer must not be nil")
+ }
+
+ if err == nil {
+ return false
+ }
+ *into = Append(*into, err)
+ return true
+}
diff --git a/vendor/go.uber.org/multierr/glide.yaml b/vendor/go.uber.org/multierr/glide.yaml
new file mode 100644
index 0000000..6ef084e
--- /dev/null
+++ b/vendor/go.uber.org/multierr/glide.yaml
@@ -0,0 +1,8 @@
+package: go.uber.org/multierr
+import:
+- package: go.uber.org/atomic
+ version: ^1
+testImport:
+- package: github.com/stretchr/testify
+ subpackages:
+ - assert
diff --git a/vendor/go.uber.org/multierr/go113.go b/vendor/go.uber.org/multierr/go113.go
new file mode 100644
index 0000000..264b0ea
--- /dev/null
+++ b/vendor/go.uber.org/multierr/go113.go
@@ -0,0 +1,52 @@
+// Copyright (c) 2019 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// +build go1.13
+
+package multierr
+
+import "errors"
+
+// As attempts to find the first error in the error list that matches the type
+// of the value that target points to.
+//
+// This function allows errors.As to traverse the values stored on the
+// multierr error.
+func (merr *multiError) As(target interface{}) bool {
+ for _, err := range merr.Errors() {
+ if errors.As(err, target) {
+ return true
+ }
+ }
+ return false
+}
+
+// Is attempts to match the provided error against errors in the error list.
+//
+// This function allows errors.Is to traverse the values stored on the
+// multierr error.
+func (merr *multiError) Is(target error) bool {
+ for _, err := range merr.Errors() {
+ if errors.Is(err, target) {
+ return true
+ }
+ }
+ return false
+}
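+
+// A minimal usage sketch: with the Is and As methods above, errors.Is and
+// errors.As can match any error held inside a combined error on Go 1.13 and
+// later.
+//
+//    err := multierr.Append(io.EOF, os.ErrNotExist)
+//    errors.Is(err, io.EOF)         // true
+//    errors.Is(err, os.ErrNotExist) // true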
diff --git a/vendor/go.uber.org/zap/.codecov.yml b/vendor/go.uber.org/zap/.codecov.yml
new file mode 100644
index 0000000..8e5ca7d
--- /dev/null
+++ b/vendor/go.uber.org/zap/.codecov.yml
@@ -0,0 +1,17 @@
+coverage:
+ range: 80..100
+ round: down
+ precision: 2
+
+ status:
+ project: # measuring the overall project coverage
+ default: # context, you can create multiple ones with custom titles
+ enabled: yes # must be yes|true to enable this status
+ target: 95% # specify the target coverage for each commit status
+ # option: "auto" (must increase from parent commit or pull request base)
+ # option: "X%" a static target percentage to hit
+ if_not_found: success # if parent is not found report status as success, error, or failure
+ if_ci_failed: error # if ci fails report status as success, error, or failure
+ignore:
+ - internal/readme/readme.go
+
diff --git a/vendor/go.uber.org/zap/.gitignore b/vendor/go.uber.org/zap/.gitignore
new file mode 100644
index 0000000..da9d9d0
--- /dev/null
+++ b/vendor/go.uber.org/zap/.gitignore
@@ -0,0 +1,32 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+vendor
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+*.pprof
+*.out
+*.log
+
+/bin
+cover.out
+cover.html
diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl
new file mode 100644
index 0000000..3154a1e
--- /dev/null
+++ b/vendor/go.uber.org/zap/.readme.tmpl
@@ -0,0 +1,109 @@
+# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+
+Blazing fast, structured, leveled logging in Go.
+
+## Installation
+
+`go get -u go.uber.org/zap`
+
+Note that zap only supports the two most recent minor versions of Go.
+
+## Quick Start
+
+In contexts where performance is nice, but not critical, use the
+`SugaredLogger`. It's 4-10x faster than other structured logging
+packages and includes both structured and `printf`-style APIs.
+
+```go
+logger, _ := zap.NewProduction()
+defer logger.Sync() // flushes buffer, if any
+sugar := logger.Sugar()
+sugar.Infow("failed to fetch URL",
+ // Structured context as loosely typed key-value pairs.
+ "url", url,
+ "attempt", 3,
+ "backoff", time.Second,
+)
+sugar.Infof("Failed to fetch URL: %s", url)
+```
+
+When performance and type safety are critical, use the `Logger`. It's even
+faster than the `SugaredLogger` and allocates far less, but it only supports
+structured logging.
+
+```go
+logger, _ := zap.NewProduction()
+defer logger.Sync()
+logger.Info("failed to fetch URL",
+ // Structured context as strongly typed Field values.
+ zap.String("url", url),
+ zap.Int("attempt", 3),
+ zap.Duration("backoff", time.Second),
+)
+```
+
+See the [documentation][doc] and [FAQ](FAQ.md) for more details.
+
+## Performance
+
+For applications that log in the hot path, reflection-based serialization and
+string formatting are prohibitively expensive — they're CPU-intensive
+and make many small allocations. Put differently, using `encoding/json` and
+`fmt.Fprintf` to log tons of `interface{}`s makes your application slow.
+
+Zap takes a different approach. It includes a reflection-free, zero-allocation
+JSON encoder, and the base `Logger` strives to avoid serialization overhead
+and allocations wherever possible. By building the high-level `SugaredLogger`
+on that foundation, zap lets users *choose* when they need to count every
+allocation and when they'd prefer a more familiar, loosely typed API.
+
+As measured by its own [benchmarking suite][], not only is zap more performant
+than comparable structured logging packages — it's also faster than the
+standard library. Like all benchmarks, take these with a grain of salt.<sup id="anchor-versions">[1](#footnote-versions)</sup>
+
+Log a message and 10 fields:
+
+{{.BenchmarkAddingFields}}
+
+Log a message with a logger that already has 10 fields of context:
+
+{{.BenchmarkAccumulatedContext}}
+
+Log a static string, without any context or `printf`-style templating:
+
+{{.BenchmarkWithoutFields}}
+
+## Development Status: Stable
+
+All APIs are finalized, and no breaking changes will be made in the 1.x series
+of releases. Users of semver-aware dependency management systems should pin
+zap to `^1`.
+
+## Contributing
+
+We encourage and support an active, healthy community of contributors —
+including you! Details are in the [contribution guide](CONTRIBUTING.md) and
+the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on
+issues and pull requests, but you can also report any negative conduct to
+oss-conduct@uber.com. That email list is a private, safe space; even the zap
+maintainers don't have access, so don't hesitate to hold us to a high
+standard.
+
+
+
+Released under the [MIT License](LICENSE.txt).
+
+<sup id="footnote-versions">1</sup> In particular, keep in mind that we may be
+benchmarking against slightly older versions of other packages. Versions are
+pinned in zap's [glide.lock][] file. [↩](#anchor-versions)
+
+[doc-img]: https://godoc.org/go.uber.org/zap?status.svg
+[doc]: https://godoc.org/go.uber.org/zap
+[ci-img]: https://travis-ci.com/uber-go/zap.svg?branch=master
+[ci]: https://travis-ci.com/uber-go/zap
+[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg
+[cov]: https://codecov.io/gh/uber-go/zap
+[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks
+[glide.lock]: https://github.com/uber-go/zap/blob/master/glide.lock
+
diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md
new file mode 100644
index 0000000..794ee30
--- /dev/null
+++ b/vendor/go.uber.org/zap/CHANGELOG.md
@@ -0,0 +1,516 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
+
+## 1.19.1 (8 Sep 2021)
+
+### Fixed
+* [#1001][]: JSON: Fix complex number encoding with negative imaginary part. Thanks to @hemantjadon.
+* [#1003][]: JSON: Fix inaccurate precision when encoding float32.
+
+[#1001]: https://github.com/uber-go/zap/pull/1001
+[#1003]: https://github.com/uber-go/zap/pull/1003
+
+## 1.19.0 (9 Aug 2021)
+
+Enhancements:
+* [#975][]: Avoid panicking in Sampler core if the level is out of bounds.
+* [#984][]: Reduce the size of BufferedWriteSyncer by aligning the fields
+ better.
+
+[#975]: https://github.com/uber-go/zap/pull/975
+[#984]: https://github.com/uber-go/zap/pull/984
+
+Thanks to @lancoLiu and @thockin for their contributions to this release.
+
+## 1.18.1 (28 Jun 2021)
+
+Bugfixes:
+* [#974][]: Fix nil dereference in logger constructed by `zap.NewNop`.
+
+[#974]: https://github.com/uber-go/zap/pull/974
+
+## 1.18.0 (28 Jun 2021)
+
+Enhancements:
+* [#961][]: Add `zapcore.BufferedWriteSyncer`, a new `WriteSyncer` that buffers
+ messages in-memory and flushes them periodically.
+* [#971][]: Add `zapio.Writer` to use a Zap logger as an `io.Writer`.
+* [#897][]: Add `zap.WithClock` option to control the source of time via the
+ new `zapcore.Clock` interface.
+* [#949][]: Avoid panicking in `zap.SugaredLogger` when arguments of `*w`
+ methods don't match expectations.
+* [#943][]: Add support for filtering by level or arbitrary matcher function to
+ `zaptest/observer`.
+* [#691][]: Comply with `io.StringWriter` and `io.ByteWriter` in Zap's
+ `buffer.Buffer`.
+
+Thanks to @atrn0, @ernado, @heyanfu, @hnlq715, @zchee
+for their contributions to this release.
+
+[#691]: https://github.com/uber-go/zap/pull/691
+[#897]: https://github.com/uber-go/zap/pull/897
+[#943]: https://github.com/uber-go/zap/pull/943
+[#949]: https://github.com/uber-go/zap/pull/949
+[#961]: https://github.com/uber-go/zap/pull/961
+[#971]: https://github.com/uber-go/zap/pull/971
+
+## 1.17.0 (25 May 2021)
+
+Bugfixes:
+* [#867][]: Encode `<nil>` for nil `error` instead of a panic.
+* [#931][], [#936][]: Update minimum version constraints to address
+ vulnerabilities in dependencies.
+
+Enhancements:
+* [#865][]: Improve alignment of fields of the Logger struct, reducing its
+ size from 96 to 80 bytes.
+* [#881][]: Support `grpclog.LoggerV2` in zapgrpc.
+* [#903][]: Support URL-encoded POST requests to the AtomicLevel HTTP handler
+ with the `application/x-www-form-urlencoded` content type.
+* [#912][]: Support multi-field encoding with `zap.Inline`.
+* [#913][]: Speed up SugaredLogger for calls with a single string.
+* [#928][]: Add support for filtering by field name to `zaptest/observer`.
+
+Thanks to @ash2k, @FMLS, @jimmystewpot, @Oncilla, @tsoslow, @tylitianrui, @withshubh, and @wziww for their contributions to this release.
+
+## 1.16.0 (1 Sep 2020)
+
+Bugfixes:
+* [#828][]: Fix missing newline in IncreaseLevel error messages.
+* [#835][]: Fix panic in JSON encoder when encoding times or durations
+ without specifying a time or duration encoder.
+* [#843][]: Honor CallerSkip when taking stack traces.
+* [#862][]: Fix the default file permissions to use `0666` and rely on the umask instead.
+* [#854][]: Encode `<nil>` for nil `Stringer` instead of a panic error log.
+
+Enhancements:
+* [#629][]: Added `zapcore.TimeEncoderOfLayout` to easily create time encoders
+ for custom layouts.
+* [#697][]: Added support for a configurable delimiter in the console encoder.
+* [#852][]: Optimize console encoder by pooling the underlying JSON encoder.
+* [#844][]: Add ability to include the calling function as part of logs.
+* [#843][]: Add `StackSkip` for including truncated stacks as a field.
+* [#861][]: Add options to customize Fatal behaviour for better testability.
+
+Thanks to @SteelPhase, @tmshn, @lixingwang, @wyxloading, @moul, @segevfiner, @andy-retailnext and @jcorbin for their contributions to this release.
+
+## 1.15.0 (23 Apr 2020)
+
+Bugfixes:
+* [#804][]: Fix handling of `Time` values out of `UnixNano` range.
+* [#812][]: Fix `IncreaseLevel` being reset after a call to `With`.
+
+Enhancements:
+* [#806][]: Add `WithCaller` option to supersede the `AddCaller` option. This
+ allows disabling annotation of log entries with caller information if
+ previously enabled with `AddCaller`.
+* [#813][]: Deprecate `NewSampler` constructor in favor of
+ `NewSamplerWithOptions` which supports a `SamplerHook` option. This option
+ adds support for monitoring sampling decisions through a hook.
+
+Thanks to @danielbprice for their contributions to this release.
+
+## 1.14.1 (14 Mar 2020)
+
+Bugfixes:
+* [#791][]: Fix panic on attempting to build a logger with an invalid Config.
+* [#795][]: Vendoring Zap with `go mod vendor` no longer includes Zap's
+ development-time dependencies.
+* [#799][]: Fix issue introduced in 1.14.0 that caused invalid JSON output to
+ be generated for arrays of `time.Time` objects when using string-based time
+ formats.
+
+Thanks to @YashishDua for their contributions to this release.
+
+## 1.14.0 (20 Feb 2020)
+
+Enhancements:
+* [#771][]: Optimize calls for disabled log levels.
+* [#773][]: Add millisecond duration encoder.
+* [#775][]: Add option to increase the level of a logger.
+* [#786][]: Optimize time formatters using `Time.AppendFormat` where possible.
+
+Thanks to @caibirdme for their contributions to this release.
+
+## 1.13.0 (13 Nov 2019)
+
+Enhancements:
+* [#758][]: Add `Intp`, `Stringp`, and other similar `*p` field constructors
+ to log pointers to primitives with support for `nil` values.
+
+Thanks to @jbizzle for their contributions to this release.
+
+## 1.12.0 (29 Oct 2019)
+
+Enhancements:
+* [#751][]: Migrate to Go modules.
+
+## 1.11.0 (21 Oct 2019)
+
+Enhancements:
+* [#725][]: Add `zapcore.OmitKey` to omit keys in an `EncoderConfig`.
+* [#736][]: Add `RFC3339` and `RFC3339Nano` time encoders.
+
+Thanks to @juicemia, @uhthomas for their contributions to this release.
+
+## 1.10.0 (29 Apr 2019)
+
+Bugfixes:
+* [#657][]: Fix `MapObjectEncoder.AppendByteString` not adding value as a
+ string.
+* [#706][]: Fix incorrect call depth to determine caller in Go 1.12.
+
+Enhancements:
+* [#610][]: Add `zaptest.WrapOptions` to wrap `zap.Option` for creating test
+ loggers.
+* [#675][]: Don't panic when encoding a String field.
+* [#704][]: Disable HTML escaping for JSON objects encoded using the
+ reflect-based encoder.
+
+Thanks to @iaroslav-ciupin, @lelenanam, @joa, @NWilson for their contributions
+to this release.
+
+## v1.9.1 (06 Aug 2018)
+
+Bugfixes:
+
+* [#614][]: MapObjectEncoder should not ignore empty slices.
+
+## v1.9.0 (19 Jul 2018)
+
+Enhancements:
+* [#602][]: Reduce number of allocations when logging with reflection.
+* [#572][], [#606][]: Expose a registry for third-party logging sinks.
+
+Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and
+@dimroc for their contributions to this release.
+
+## v1.8.0 (13 Apr 2018)
+
+Enhancements:
+* [#508][]: Make log level configurable when redirecting the standard
+ library's logger.
+* [#518][]: Add a logger that writes to a `*testing.TB`.
+* [#577][]: Add a top-level alias for `zapcore.Field` to clean up GoDoc.
+
+Bugfixes:
+* [#574][]: Add a missing import comment to `go.uber.org/zap/buffer`.
+
+Thanks to @DiSiqueira and @djui for their contributions to this release.
+
+## v1.7.1 (25 Sep 2017)
+
+Bugfixes:
+* [#504][]: Store strings when using AddByteString with the map encoder.
+
+## v1.7.0 (21 Sep 2017)
+
+Enhancements:
+
+* [#487][]: Add `NewStdLogAt`, which extends `NewStdLog` by allowing the user
+ to specify the level of the logged messages.
+
+## v1.6.0 (30 Aug 2017)
+
+Enhancements:
+
+* [#491][]: Omit zap stack frames from stacktraces.
+* [#490][]: Add a `ContextMap` method to observer logs for simpler
+ field validation in tests.
+
+## v1.5.0 (22 Jul 2017)
+
+Enhancements:
+
+* [#460][] and [#470][]: Support errors produced by `go.uber.org/multierr`.
+* [#465][]: Support user-supplied encoders for logger names.
+
+Bugfixes:
+
+* [#477][]: Fix a bug that incorrectly truncated deep stacktraces.
+
+Thanks to @richard-tunein and @pavius for their contributions to this release.
+
+## v1.4.1 (08 Jun 2017)
+
+This release fixes two bugs.
+
+Bugfixes:
+
+* [#435][]: Support a variety of case conventions when unmarshaling levels.
+* [#444][]: Fix a panic in the observer.
+
+## v1.4.0 (12 May 2017)
+
+This release adds a few small features and is fully backward-compatible.
+
+Enhancements:
+
+* [#424][]: Add a `LineEnding` field to `EncoderConfig`, allowing users to
+ override the Unix-style default.
+* [#425][]: Preserve time zones when logging times.
+* [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a
+ variety of operations a bit simpler.
+
+## v1.3.0 (25 Apr 2017)
+
+This release adds an enhancement to zap's testing helpers as well as the
+ability to marshal an AtomicLevel. It is fully backward-compatible.
+
+Enhancements:
+
+* [#415][]: Add a substring-filtering helper to zap's observer. This is
+ particularly useful when testing the `SugaredLogger`.
+* [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`.
+
+## v1.2.0 (13 Apr 2017)
+
+This release adds a gRPC compatibility wrapper. It is fully backward-compatible.
+
+Enhancements:
+
+* [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements
+ `grpclog.Logger`.
+
+## v1.1.0 (31 Mar 2017)
+
+This release fixes two bugs and adds some enhancements to zap's testing helpers.
+It is fully backward-compatible.
+
+Bugfixes:
+
+* [#385][]: Fix caller path trimming on Windows.
+* [#396][]: Fix a panic when attempting to use non-existent directories with
+ zap's configuration struct.
+
+Enhancements:
+
+* [#386][]: Add filtering helpers to zaptest's observing logger.
+
+Thanks to @moitias for contributing to this release.
+
+## v1.0.0 (14 Mar 2017)
+
+This is zap's first stable release. All exported APIs are now final, and no
+further breaking changes will be made in the 1.x release series. Anyone using a
+semver-aware dependency manager should now pin to `^1`.
+
+Breaking changes:
+
+* [#366][]: Add byte-oriented APIs to encoders to log UTF-8 encoded text without
+ casting from `[]byte` to `string`.
+* [#364][]: To support buffering outputs, add `Sync` methods to `zapcore.Core`,
+ `zap.Logger`, and `zap.SugaredLogger`.
+* [#371][]: Rename the `testutils` package to `zaptest`, which is less likely to
+ clash with other testing helpers.
+
+Bugfixes:
+
+* [#362][]: Make the ISO8601 time formatters fixed-width, which is friendlier
+ for tab-separated console output.
+* [#369][]: Remove the automatic locks in `zapcore.NewCore`, which allows zap to
+ work with concurrency-safe `WriteSyncer` implementations.
+* [#347][]: Stop reporting errors when trying to `fsync` standard out on Linux
+ systems.
+* [#373][]: Report the correct caller from zap's standard library
+ interoperability wrappers.
+
+Enhancements:
+
+* [#348][]: Add a registry allowing third-party encodings to work with zap's
+ built-in `Config`.
+* [#327][]: Make the representation of logger callers configurable (like times,
+ levels, and durations).
+* [#376][]: Allow third-party encoders to use their own buffer pools, which
+ removes the last performance advantage that zap's encoders have over plugins.
+* [#346][]: Add `CombineWriteSyncers`, a convenience function to tee multiple
+ `WriteSyncer`s and lock the result.
+* [#365][]: Make zap's stacktraces compatible with mid-stack inlining (coming in
+ Go 1.9).
+* [#372][]: Export zap's observing logger as `zaptest/observer`. This makes it
+ easier for particularly punctilious users to unit test their application's
+ logging.
+
+Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their
+contributions to this release.
+
+## v1.0.0-rc.3 (7 Mar 2017)
+
+This is the third release candidate for zap's stable release. There are no
+breaking changes.
+
+Bugfixes:
+
+* [#339][]: Byte slices passed to `zap.Any` are now correctly treated as binary blobs
+ rather than `[]uint8`.
+
+Enhancements:
+
+* [#307][]: Users can opt into colored output for log levels.
+* [#353][]: In addition to hijacking the output of the standard library's
+ package-global logging functions, users can now construct a zap-backed
+ `log.Logger` instance.
+* [#311][]: Frames from common runtime functions and some of zap's internal
+ machinery are now omitted from stacktraces.
+
+Thanks to @ansel1 and @suyash for their contributions to this release.
+
+## v1.0.0-rc.2 (21 Feb 2017)
+
+This is the second release candidate for zap's stable release. It includes two
+breaking changes.
+
+Breaking changes:
+
+* [#316][]: Zap's global loggers are now fully concurrency-safe
+ (previously, users had to ensure that `ReplaceGlobals` was called before the
+ loggers were in use). However, they must now be accessed via the `L()` and
+ `S()` functions. Users can update their projects with
+
+ ```
+ gofmt -r "zap.L -> zap.L()" -w .
+ gofmt -r "zap.S -> zap.S()" -w .
+ ```
+* [#309][] and [#317][]: RC1 was mistakenly shipped with invalid
+ JSON and YAML struct tags on all config structs. This release fixes the tags
+ and adds static analysis to prevent similar bugs in the future.
+
+Bugfixes:
+
+* [#321][]: Redirecting the standard library's `log` output now
+ correctly reports the logger's caller.
+
+Enhancements:
+
+* [#325][] and [#333][]: Zap now transparently supports non-standard, rich
+ errors like those produced by `github.com/pkg/errors`.
+* [#326][]: Though `New(nil)` continues to return a no-op logger, `NewNop()` is
+ now preferred. Users can update their projects with `gofmt -r 'zap.New(nil) ->
+ zap.NewNop()' -w .`.
+* [#300][]: Incorrectly importing zap as `github.com/uber-go/zap` now returns a
+ more informative error.
+
+Thanks to @skipor and @chapsuk for their contributions to this release.
+
+## v1.0.0-rc.1 (14 Feb 2017)
+
+This is the first release candidate for zap's stable release. There are multiple
+breaking changes and improvements from the pre-release version. Most notably:
+
+* **Zap's import path is now "go.uber.org/zap"** — all users will
+ need to update their code.
+* User-facing types and functions remain in the `zap` package. Code relevant
+ largely to extension authors is now in the `zapcore` package.
+* The `zapcore.Core` type makes it easy for third-party packages to use zap's
+ internals but provide a different user-facing API.
+* `Logger` is now a concrete type instead of an interface.
+* A less verbose (though slower) logging API is included by default.
+* Package-global loggers `L` and `S` are included.
+* A human-friendly console encoder is included.
+* A declarative config struct allows common logger configurations to be managed
+ as configuration instead of code.
+* Sampling is more accurate, and doesn't depend on the standard library's shared
+ timer heap.
+
+## v0.1.0-beta.1 (6 Feb 2017)
+
+This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and
+upgrade at their leisure. Since this is the first tagged release, there are no
+backward compatibility concerns and all functionality is new.
+
+Early zap adopters should pin to the 0.1.x minor version until they're ready to
+upgrade to the upcoming stable release.
+
+[#316]: https://github.com/uber-go/zap/pull/316
+[#309]: https://github.com/uber-go/zap/pull/309
+[#317]: https://github.com/uber-go/zap/pull/317
+[#321]: https://github.com/uber-go/zap/pull/321
+[#325]: https://github.com/uber-go/zap/pull/325
+[#333]: https://github.com/uber-go/zap/pull/333
+[#326]: https://github.com/uber-go/zap/pull/326
+[#300]: https://github.com/uber-go/zap/pull/300
+[#339]: https://github.com/uber-go/zap/pull/339
+[#307]: https://github.com/uber-go/zap/pull/307
+[#353]: https://github.com/uber-go/zap/pull/353
+[#311]: https://github.com/uber-go/zap/pull/311
+[#366]: https://github.com/uber-go/zap/pull/366
+[#364]: https://github.com/uber-go/zap/pull/364
+[#371]: https://github.com/uber-go/zap/pull/371
+[#362]: https://github.com/uber-go/zap/pull/362
+[#369]: https://github.com/uber-go/zap/pull/369
+[#347]: https://github.com/uber-go/zap/pull/347
+[#373]: https://github.com/uber-go/zap/pull/373
+[#348]: https://github.com/uber-go/zap/pull/348
+[#327]: https://github.com/uber-go/zap/pull/327
+[#376]: https://github.com/uber-go/zap/pull/376
+[#346]: https://github.com/uber-go/zap/pull/346
+[#365]: https://github.com/uber-go/zap/pull/365
+[#372]: https://github.com/uber-go/zap/pull/372
+[#385]: https://github.com/uber-go/zap/pull/385
+[#396]: https://github.com/uber-go/zap/pull/396
+[#386]: https://github.com/uber-go/zap/pull/386
+[#402]: https://github.com/uber-go/zap/pull/402
+[#415]: https://github.com/uber-go/zap/pull/415
+[#416]: https://github.com/uber-go/zap/pull/416
+[#424]: https://github.com/uber-go/zap/pull/424
+[#425]: https://github.com/uber-go/zap/pull/425
+[#431]: https://github.com/uber-go/zap/pull/431
+[#435]: https://github.com/uber-go/zap/pull/435
+[#444]: https://github.com/uber-go/zap/pull/444
+[#477]: https://github.com/uber-go/zap/pull/477
+[#465]: https://github.com/uber-go/zap/pull/465
+[#460]: https://github.com/uber-go/zap/pull/460
+[#470]: https://github.com/uber-go/zap/pull/470
+[#487]: https://github.com/uber-go/zap/pull/487
+[#490]: https://github.com/uber-go/zap/pull/490
+[#491]: https://github.com/uber-go/zap/pull/491
+[#504]: https://github.com/uber-go/zap/pull/504
+[#508]: https://github.com/uber-go/zap/pull/508
+[#518]: https://github.com/uber-go/zap/pull/518
+[#577]: https://github.com/uber-go/zap/pull/577
+[#574]: https://github.com/uber-go/zap/pull/574
+[#602]: https://github.com/uber-go/zap/pull/602
+[#572]: https://github.com/uber-go/zap/pull/572
+[#606]: https://github.com/uber-go/zap/pull/606
+[#614]: https://github.com/uber-go/zap/pull/614
+[#657]: https://github.com/uber-go/zap/pull/657
+[#706]: https://github.com/uber-go/zap/pull/706
+[#610]: https://github.com/uber-go/zap/pull/610
+[#675]: https://github.com/uber-go/zap/pull/675
+[#704]: https://github.com/uber-go/zap/pull/704
+[#725]: https://github.com/uber-go/zap/pull/725
+[#736]: https://github.com/uber-go/zap/pull/736
+[#751]: https://github.com/uber-go/zap/pull/751
+[#758]: https://github.com/uber-go/zap/pull/758
+[#771]: https://github.com/uber-go/zap/pull/771
+[#773]: https://github.com/uber-go/zap/pull/773
+[#775]: https://github.com/uber-go/zap/pull/775
+[#786]: https://github.com/uber-go/zap/pull/786
+[#791]: https://github.com/uber-go/zap/pull/791
+[#795]: https://github.com/uber-go/zap/pull/795
+[#799]: https://github.com/uber-go/zap/pull/799
+[#804]: https://github.com/uber-go/zap/pull/804
+[#812]: https://github.com/uber-go/zap/pull/812
+[#806]: https://github.com/uber-go/zap/pull/806
+[#813]: https://github.com/uber-go/zap/pull/813
+[#629]: https://github.com/uber-go/zap/pull/629
+[#697]: https://github.com/uber-go/zap/pull/697
+[#828]: https://github.com/uber-go/zap/pull/828
+[#835]: https://github.com/uber-go/zap/pull/835
+[#843]: https://github.com/uber-go/zap/pull/843
+[#844]: https://github.com/uber-go/zap/pull/844
+[#852]: https://github.com/uber-go/zap/pull/852
+[#854]: https://github.com/uber-go/zap/pull/854
+[#861]: https://github.com/uber-go/zap/pull/861
+[#862]: https://github.com/uber-go/zap/pull/862
+[#865]: https://github.com/uber-go/zap/pull/865
+[#867]: https://github.com/uber-go/zap/pull/867
+[#881]: https://github.com/uber-go/zap/pull/881
+[#903]: https://github.com/uber-go/zap/pull/903
+[#912]: https://github.com/uber-go/zap/pull/912
+[#913]: https://github.com/uber-go/zap/pull/913
+[#928]: https://github.com/uber-go/zap/pull/928
+[#931]: https://github.com/uber-go/zap/pull/931
+[#936]: https://github.com/uber-go/zap/pull/936
diff --git a/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..e327d9a
--- /dev/null
+++ b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md
@@ -0,0 +1,75 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age,
+body size, disability, ethnicity, gender identity and expression, level of
+experience, nationality, personal appearance, race, religion, or sexual
+identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+ advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an
+appointed representative at an online or offline event. Representation of a
+project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at oss-conduct@uber.com. The project
+team will review and investigate all complaints, and will respond in a way
+that it deems appropriate to the circumstances. The project team is obligated
+to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 1.4, available at
+[http://contributor-covenant.org/version/1/4][version].
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/go.uber.org/zap/CONTRIBUTING.md b/vendor/go.uber.org/zap/CONTRIBUTING.md
new file mode 100644
index 0000000..5cd9656
--- /dev/null
+++ b/vendor/go.uber.org/zap/CONTRIBUTING.md
@@ -0,0 +1,75 @@
+# Contributing
+
+We'd love your help making zap the very best structured logging library in Go!
+
+If you'd like to add new exported APIs, please [open an issue][open-issue]
+describing your proposal — discussing API changes ahead of time makes
+pull request review much smoother. In your issue, pull request, and any other
+communications, please remember to treat your fellow contributors with
+respect! We take our [code of conduct](CODE_OF_CONDUCT.md) seriously.
+
+Note that you'll need to sign [Uber's Contributor License Agreement][cla]
+before we can accept any of your contributions. If necessary, a bot will remind
+you to accept the CLA when you open your pull request.
+
+## Setup
+
+[Fork][fork], then clone the repository:
+
+```
+mkdir -p $GOPATH/src/go.uber.org
+cd $GOPATH/src/go.uber.org
+git clone git@github.com:your_github_username/zap.git
+cd zap
+git remote add upstream https://github.com/uber-go/zap.git
+git fetch upstream
+```
+
+Make sure that the tests and the linters pass:
+
+```
+make test
+make lint
+```
+
+If you're not using the minor version of Go specified in the Makefile's
+`LINTABLE_MINOR_VERSIONS` variable, `make lint` doesn't do anything. This is
+fine, but it means that you'll only discover lint failures after you open your
+pull request.
+
+## Making Changes
+
+Start by creating a new branch for your changes:
+
+```
+cd $GOPATH/src/go.uber.org/zap
+git checkout master
+git fetch upstream
+git rebase upstream/master
+git checkout -b cool_new_feature
+```
+
+Make your changes, then ensure that `make lint` and `make test` still pass. If
+you're satisfied with your changes, push them to your fork.
+
+```
+git push origin cool_new_feature
+```
+
+Then use the GitHub UI to open a pull request.
+
+At this point, you're waiting on us to review your changes. We *try* to respond
+to issues and pull requests within a few business days, and we may suggest some
+improvements or alternatives. Once your changes are approved, one of the
+project maintainers will merge them.
+
+We're much more likely to approve your changes if you:
+
+* Add tests for new functionality.
+* Write a [good commit message][commit-message].
+* Maintain backward compatibility.
+
+[fork]: https://github.com/uber-go/zap/fork
+[open-issue]: https://github.com/uber-go/zap/issues/new
+[cla]: https://cla-assistant.io/uber-go/zap
+[commit-message]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
diff --git a/vendor/go.uber.org/zap/FAQ.md b/vendor/go.uber.org/zap/FAQ.md
new file mode 100644
index 0000000..b183b20
--- /dev/null
+++ b/vendor/go.uber.org/zap/FAQ.md
@@ -0,0 +1,164 @@
+# Frequently Asked Questions
+
+## Design
+
+### Why spend so much effort on logger performance?
+
+Of course, most applications won't notice the impact of a slow logger: they
+already take tens or hundreds of milliseconds for each operation, so an extra
+millisecond doesn't matter.
+
+On the other hand, why *not* make structured logging fast? The `SugaredLogger`
+isn't any harder to use than other logging packages, and the `Logger` makes
+structured logging possible in performance-sensitive contexts. Across a fleet
+of Go microservices, making each application even slightly more efficient adds
+up quickly.
+
+### Why aren't `Logger` and `SugaredLogger` interfaces?
+
+Unlike the familiar `io.Writer` and `http.Handler`, `Logger` and
+`SugaredLogger` interfaces would include *many* methods. As [Rob Pike points
+out][go-proverbs], "The bigger the interface, the weaker the abstraction."
+Interfaces are also rigid — *any* change requires releasing a new major
+version, since it breaks all third-party implementations.
+
+Making the `Logger` and `SugaredLogger` concrete types doesn't sacrifice much
+abstraction, and it lets us add methods without introducing breaking changes.
+Your applications should define and depend upon an interface that includes
+just the methods you use.
+
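+For example, an application can depend on just the narrow slice of the API it
+actually uses; a minimal sketch (the `errorLogger` name is illustrative):
+
+```go
+type errorLogger interface {
+    Error(msg string, fields ...zap.Field)
+}
+
+// *zap.Logger satisfies the application-defined interface.
+var _ errorLogger = (*zap.Logger)(nil)
+```
+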
+### Why are some of my logs missing?
+
+Logs are dropped intentionally by zap when sampling is enabled. The production
+configuration (as returned by `NewProductionConfig()`) enables sampling, which
+causes repeated logs within a second to be sampled. See
+[Why sample application logs?](https://github.com/uber-go/zap/blob/master/FAQ.md#why-sample-application-logs)
+for more details on why sampling is enabled.
+
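+If you need every entry, one option is to clear the sampling policy on the
+production config before building the logger; a minimal sketch:
+
+```go
+cfg := zap.NewProductionConfig()
+cfg.Sampling = nil // a nil SamplingConfig disables sampling
+logger, err := cfg.Build()
+if err != nil {
+    panic(err)
+}
+defer logger.Sync()
+```
+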
+### Why sample application logs?
+
+Applications often experience runs of errors, either because of a bug or
+because of a misbehaving user. Logging errors is usually a good idea, but it
+can easily make this bad situation worse: not only is your application coping
+with a flood of errors, it's also spending extra CPU cycles and I/O logging
+those errors. Since writes are typically serialized, logging limits throughput
+when you need it most.
+
+Sampling fixes this problem by dropping repetitive log entries. Under normal
+conditions, your application writes out every entry. When similar entries are
+logged hundreds or thousands of times each second, though, zap begins dropping
+duplicates to preserve throughput.
+
+### Why do the structured logging APIs take a message in addition to fields?
+
+Subjectively, we find it helpful to accompany structured context with a brief
+description. This isn't critical during development, but it makes debugging
+and operating unfamiliar systems much easier.
+
+More concretely, zap's sampling algorithm uses the message to identify
+duplicate entries. In our experience, this is a practical middle ground
+between random sampling (which often drops the exact entry that you need while
+debugging) and hashing the complete entry (which is prohibitively expensive).
+
+### Why include package-global loggers?
+
+Since so many other logging packages include a global logger, many
+applications aren't designed to accept loggers as explicit parameters.
+Changing function signatures is often a breaking change, so zap includes
+global loggers to simplify migration.
+
+Avoid them where possible.
+
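+When migration does call for them, the globals are installed with
+`ReplaceGlobals`; a minimal sketch:
+
+```go
+logger, _ := zap.NewProduction()
+undo := zap.ReplaceGlobals(logger) // installs the global Logger and SugaredLogger
+defer undo()
+
+zap.L().Info("via the global Logger")
+zap.S().Infow("via the global SugaredLogger", "key", "value")
+```
+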
+### Why include dedicated Panic and Fatal log levels?
+
+In general, application code should handle errors gracefully instead of using
+`panic` or `os.Exit`. However, every rule has exceptions, and it's common to
+crash when an error is truly unrecoverable. To avoid losing any information
+— especially the reason for the crash — the logger must flush any
+buffered entries before the process exits.
+
+Zap makes this easy by offering `Panic` and `Fatal` logging methods that
+automatically flush before exiting. Of course, this doesn't guarantee that
+logs will never be lost, but it eliminates a common error.
+
+See the discussion in uber-go/zap#207 for more details.
+
+### What's `DPanic`?
+
+`DPanic` stands for "panic in development." In development, it logs at
+`PanicLevel`; otherwise, it logs at `ErrorLevel`. `DPanic` makes it easier to
+catch errors that are theoretically possible, but shouldn't actually happen,
+*without* crashing in production.
+
+If you've ever written code like this, you need `DPanic`:
+
+```go
+if err != nil {
+ panic(fmt.Sprintf("shouldn't ever get here: %v", err))
+}
+```
+
+## Installation
+
+### What does the error `expects import "go.uber.org/zap"` mean?
+
+Either zap was installed incorrectly or you're referencing the wrong package
+name in your code.
+
+Zap's source code happens to be hosted on GitHub, but the [import
+path][import-path] is `go.uber.org/zap`. This gives us, the project
+maintainers, the freedom to move the source code if necessary. However, it
+means that you need to take a little care when installing and using the
+package.
+
+If you follow two simple rules, everything should work: install zap with `go
+get -u go.uber.org/zap`, and always import it in your code with `import
+"go.uber.org/zap"`. Your code shouldn't contain *any* references to
+`github.com/uber-go/zap`.
+
+## Usage
+
+### Does zap support log rotation?
+
+Zap doesn't natively support rotating log files, since we prefer to leave this
+to an external program like `logrotate`.
+
+However, it's easy to integrate a log rotation package like
+[`gopkg.in/natefinch/lumberjack.v2`][lumberjack] as a `zapcore.WriteSyncer`.
+
+```go
+// lumberjack.Logger is already safe for concurrent use, so we don't need to
+// lock it.
+w := zapcore.AddSync(&lumberjack.Logger{
+ Filename: "/var/log/myapp/foo.log",
+ MaxSize: 500, // megabytes
+ MaxBackups: 3,
+ MaxAge: 28, // days
+})
+core := zapcore.NewCore(
+ zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
+ w,
+ zap.InfoLevel,
+)
+logger := zap.New(core)
+```
+
+## Extensions
+
+We'd love to support every logging need within zap itself, but we're only
+familiar with a handful of log ingestion systems, flag-parsing packages, and
+the like. Rather than merging code that we can't effectively debug and
+support, we'd rather grow an ecosystem of zap extensions.
+
+We're aware of the following extensions, but haven't used them ourselves:
+
+| Package | Integration |
+| --- | --- |
+| `github.com/tchap/zapext` | Sentry, syslog |
+| `github.com/fgrosse/zaptest` | Ginkgo |
+| `github.com/blendle/zapdriver` | Stackdriver |
+| `github.com/moul/zapgorm` | Gorm |
+| `github.com/moul/zapfilter` | Advanced filtering rules |
+
+[go-proverbs]: https://go-proverbs.github.io/
+[import-path]: https://golang.org/cmd/go/#hdr-Remote_import_paths
+[lumberjack]: https://godoc.org/gopkg.in/natefinch/lumberjack.v2
diff --git a/vendor/go.uber.org/zap/LICENSE.txt b/vendor/go.uber.org/zap/LICENSE.txt
new file mode 100644
index 0000000..6652bed
--- /dev/null
+++ b/vendor/go.uber.org/zap/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (c) 2016-2017 Uber Technologies, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile
new file mode 100644
index 0000000..9b1bc3b
--- /dev/null
+++ b/vendor/go.uber.org/zap/Makefile
@@ -0,0 +1,73 @@
+export GOBIN ?= $(shell pwd)/bin
+
+GOLINT = $(GOBIN)/golint
+STATICCHECK = $(GOBIN)/staticcheck
+BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem
+
+# Directories containing independent Go modules.
+#
+# We track coverage only for the main module.
+MODULE_DIRS = . ./benchmarks ./zapgrpc/internal/test
+
+# Many Go tools take file globs or directories as arguments instead of packages.
+GO_FILES := $(shell \
+ find . '(' -path '*/.*' -o -path './vendor' ')' -prune \
+ -o -name '*.go' -print | cut -b3-)
+
+.PHONY: all
+all: lint test
+
+.PHONY: lint
+lint: $(GOLINT) $(STATICCHECK)
+ @rm -rf lint.log
+ @echo "Checking formatting..."
+ @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log
+ @echo "Checking vet..."
+ @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go vet ./... 2>&1) &&) true | tee -a lint.log
+ @echo "Checking lint..."
+ @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(GOLINT) ./... 2>&1) &&) true | tee -a lint.log
+ @echo "Checking staticcheck..."
+ @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(STATICCHECK) ./... 2>&1) &&) true | tee -a lint.log
+ @echo "Checking for unresolved FIXMEs..."
+ @git grep -i fixme | grep -v -e Makefile | tee -a lint.log
+ @echo "Checking for license headers..."
+ @./checklicense.sh | tee -a lint.log
+ @[ ! -s lint.log ]
+ @echo "Checking 'go mod tidy'..."
+ @make tidy
+ @if ! git diff --quiet; then \
+ echo "'go mod tidy' resulted in changes or working tree is dirty:"; \
+ git --no-pager diff; \
+ fi
+
+$(GOLINT):
+ cd tools && go install golang.org/x/lint/golint
+
+$(STATICCHECK):
+ cd tools && go install honnef.co/go/tools/cmd/staticcheck
+
+.PHONY: test
+test:
+ @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go test -race ./...) &&) true
+
+.PHONY: cover
+cover:
+ go test -race -coverprofile=cover.out -coverpkg=./... ./...
+ go tool cover -html=cover.out -o cover.html
+
+.PHONY: bench
+BENCH ?= .
+bench:
+ @$(foreach dir,$(MODULE_DIRS), ( \
+ cd $(dir) && \
+ go list ./... | xargs -n1 go test -bench=$(BENCH) -run="^$$" $(BENCH_FLAGS) \
+ ) &&) true
+
+.PHONY: updatereadme
+updatereadme:
+ rm -f README.md
+ cat .readme.tmpl | go run internal/readme/readme.go > README.md
+
+.PHONY: tidy
+tidy:
+ @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go mod tidy) &&) true
diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md
new file mode 100644
index 0000000..1e64d6c
--- /dev/null
+++ b/vendor/go.uber.org/zap/README.md
@@ -0,0 +1,134 @@
+# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+
+Blazing fast, structured, leveled logging in Go.
+
+## Installation
+
+`go get -u go.uber.org/zap`
+
+Note that zap only supports the two most recent minor versions of Go.
+
+## Quick Start
+
+In contexts where performance is nice, but not critical, use the
+`SugaredLogger`. It's 4-10x faster than other structured logging
+packages and includes both structured and `printf`-style APIs.
+
+```go
+logger, _ := zap.NewProduction()
+defer logger.Sync() // flushes buffer, if any
+sugar := logger.Sugar()
+sugar.Infow("failed to fetch URL",
+ // Structured context as loosely typed key-value pairs.
+ "url", url,
+ "attempt", 3,
+ "backoff", time.Second,
+)
+sugar.Infof("Failed to fetch URL: %s", url)
+```
+
+When performance and type safety are critical, use the `Logger`. It's even
+faster than the `SugaredLogger` and allocates far less, but it only supports
+structured logging.
+
+```go
+logger, _ := zap.NewProduction()
+defer logger.Sync()
+logger.Info("failed to fetch URL",
+ // Structured context as strongly typed Field values.
+ zap.String("url", url),
+ zap.Int("attempt", 3),
+ zap.Duration("backoff", time.Second),
+)
+```
+
+See the [documentation][doc] and [FAQ](FAQ.md) for more details.
+
+## Performance
+
+For applications that log in the hot path, reflection-based serialization and
+string formatting are prohibitively expensive — they're CPU-intensive
+and make many small allocations. Put differently, using `encoding/json` and
+`fmt.Fprintf` to log tons of `interface{}`s makes your application slow.
+
+Zap takes a different approach. It includes a reflection-free, zero-allocation
+JSON encoder, and the base `Logger` strives to avoid serialization overhead
+and allocations wherever possible. By building the high-level `SugaredLogger`
+on that foundation, zap lets users *choose* when they need to count every
+allocation and when they'd prefer a more familiar, loosely typed API.
+
+As measured by its own [benchmarking suite][], not only is zap more performant
+than comparable structured logging packages — it's also faster than the
+standard library. Like all benchmarks, take these with a grain of salt.<sup id="anchor-versions">[1](#footnote-versions)</sup>
+
+Log a message and 10 fields:
+
+| Package | Time | Time % to zap | Objects Allocated |
+| :------ | :--: | :-----------: | :---------------: |
+| :zap: zap | 862 ns/op | +0% | 5 allocs/op
+| :zap: zap (sugared) | 1250 ns/op | +45% | 11 allocs/op
+| zerolog | 4021 ns/op | +366% | 76 allocs/op
+| go-kit | 4542 ns/op | +427% | 105 allocs/op
+| apex/log | 26785 ns/op | +3007% | 115 allocs/op
+| logrus | 29501 ns/op | +3322% | 125 allocs/op
+| log15 | 29906 ns/op | +3369% | 122 allocs/op
+
+Log a message with a logger that already has 10 fields of context:
+
+| Package | Time | Time % to zap | Objects Allocated |
+| :------ | :--: | :-----------: | :---------------: |
+| :zap: zap | 126 ns/op | +0% | 0 allocs/op
+| :zap: zap (sugared) | 187 ns/op | +48% | 2 allocs/op
+| zerolog | 88 ns/op | -30% | 0 allocs/op
+| go-kit | 5087 ns/op | +3937% | 103 allocs/op
+| log15 | 18548 ns/op | +14621% | 73 allocs/op
+| apex/log | 26012 ns/op | +20544% | 104 allocs/op
+| logrus | 27236 ns/op | +21516% | 113 allocs/op
+
+Log a static string, without any context or `printf`-style templating:
+
+| Package | Time | Time % to zap | Objects Allocated |
+| :------ | :--: | :-----------: | :---------------: |
+| :zap: zap | 118 ns/op | +0% | 0 allocs/op
+| :zap: zap (sugared) | 191 ns/op | +62% | 2 allocs/op
+| zerolog | 93 ns/op | -21% | 0 allocs/op
+| go-kit | 280 ns/op | +137% | 11 allocs/op
+| standard library | 499 ns/op | +323% | 2 allocs/op
+| apex/log | 1990 ns/op | +1586% | 10 allocs/op
+| logrus | 3129 ns/op | +2552% | 24 allocs/op
+| log15 | 3887 ns/op | +3194% | 23 allocs/op
+
+## Development Status: Stable
+
+All APIs are finalized, and no breaking changes will be made in the 1.x series
+of releases. Users of semver-aware dependency management systems should pin
+zap to `^1`.
+
+## Contributing
+
+We encourage and support an active, healthy community of contributors —
+including you! Details are in the [contribution guide](CONTRIBUTING.md) and
+the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on
+issues and pull requests, but you can also report any negative conduct to
+oss-conduct@uber.com. That email list is a private, safe space; even the zap
+maintainers don't have access, so don't hesitate to hold us to a high
+standard.
+
+
+
+Released under the [MIT License](LICENSE.txt).
+
+<sup id="footnote-versions">1</sup> In particular, keep in mind that we may be
+benchmarking against slightly older versions of other packages. Versions are
+pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions)
+
+[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap
+[doc]: https://pkg.go.dev/go.uber.org/zap
+[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg
+[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml
+[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg
+[cov]: https://codecov.io/gh/uber-go/zap
+[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks
+[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod
+
diff --git a/vendor/go.uber.org/zap/array.go b/vendor/go.uber.org/zap/array.go
new file mode 100644
index 0000000..5be3704
--- /dev/null
+++ b/vendor/go.uber.org/zap/array.go
@@ -0,0 +1,320 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "time"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// Array constructs a field with the given key and ArrayMarshaler. It provides
+// a flexible, but still type-safe and efficient, way to add array-like types
+// to the logging context. The struct's MarshalLogArray method is called lazily.
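+//
+// For example (an illustrative sketch, not part of the upstream file), a
+// domain type can implement zapcore.ArrayMarshaler directly; the typed
+// helpers below (Bools, Strings, Durations, ...) are built the same way:
+//
+//    type latencies []time.Duration
+//
+//    func (ls latencies) MarshalLogArray(enc zapcore.ArrayEncoder) error {
+//        for _, l := range ls {
+//            enc.AppendDuration(l)
+//        }
+//        return nil
+//    }
+//
+//    logger.Info("batch done", zap.Array("latencies", latencies{250 * time.Millisecond}))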
+func Array(key string, val zapcore.ArrayMarshaler) Field {
+ return Field{Key: key, Type: zapcore.ArrayMarshalerType, Interface: val}
+}
+
+// Bools constructs a field that carries a slice of bools.
+func Bools(key string, bs []bool) Field {
+ return Array(key, bools(bs))
+}
+
+// ByteStrings constructs a field that carries a slice of []byte, each of which
+// must be UTF-8 encoded text.
+func ByteStrings(key string, bss [][]byte) Field {
+ return Array(key, byteStringsArray(bss))
+}
+
+// Complex128s constructs a field that carries a slice of complex numbers.
+func Complex128s(key string, nums []complex128) Field {
+ return Array(key, complex128s(nums))
+}
+
+// Complex64s constructs a field that carries a slice of complex numbers.
+func Complex64s(key string, nums []complex64) Field {
+ return Array(key, complex64s(nums))
+}
+
+// Durations constructs a field that carries a slice of time.Durations.
+func Durations(key string, ds []time.Duration) Field {
+ return Array(key, durations(ds))
+}
+
+// Float64s constructs a field that carries a slice of floats.
+func Float64s(key string, nums []float64) Field {
+ return Array(key, float64s(nums))
+}
+
+// Float32s constructs a field that carries a slice of floats.
+func Float32s(key string, nums []float32) Field {
+ return Array(key, float32s(nums))
+}
+
+// Ints constructs a field that carries a slice of integers.
+func Ints(key string, nums []int) Field {
+ return Array(key, ints(nums))
+}
+
+// Int64s constructs a field that carries a slice of integers.
+func Int64s(key string, nums []int64) Field {
+ return Array(key, int64s(nums))
+}
+
+// Int32s constructs a field that carries a slice of integers.
+func Int32s(key string, nums []int32) Field {
+ return Array(key, int32s(nums))
+}
+
+// Int16s constructs a field that carries a slice of integers.
+func Int16s(key string, nums []int16) Field {
+ return Array(key, int16s(nums))
+}
+
+// Int8s constructs a field that carries a slice of integers.
+func Int8s(key string, nums []int8) Field {
+ return Array(key, int8s(nums))
+}
+
+// Strings constructs a field that carries a slice of strings.
+func Strings(key string, ss []string) Field {
+ return Array(key, stringArray(ss))
+}
+
+// Times constructs a field that carries a slice of time.Times.
+func Times(key string, ts []time.Time) Field {
+ return Array(key, times(ts))
+}
+
+// Uints constructs a field that carries a slice of unsigned integers.
+func Uints(key string, nums []uint) Field {
+ return Array(key, uints(nums))
+}
+
+// Uint64s constructs a field that carries a slice of unsigned integers.
+func Uint64s(key string, nums []uint64) Field {
+ return Array(key, uint64s(nums))
+}
+
+// Uint32s constructs a field that carries a slice of unsigned integers.
+func Uint32s(key string, nums []uint32) Field {
+ return Array(key, uint32s(nums))
+}
+
+// Uint16s constructs a field that carries a slice of unsigned integers.
+func Uint16s(key string, nums []uint16) Field {
+ return Array(key, uint16s(nums))
+}
+
+// Uint8s constructs a field that carries a slice of unsigned integers.
+func Uint8s(key string, nums []uint8) Field {
+ return Array(key, uint8s(nums))
+}
+
+// Uintptrs constructs a field that carries a slice of pointer addresses.
+func Uintptrs(key string, us []uintptr) Field {
+ return Array(key, uintptrs(us))
+}
+
+// Errors constructs a field that carries a slice of errors.
+func Errors(key string, errs []error) Field {
+ return Array(key, errArray(errs))
+}
+
+type bools []bool
+
+func (bs bools) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range bs {
+ arr.AppendBool(bs[i])
+ }
+ return nil
+}
+
+type byteStringsArray [][]byte
+
+func (bss byteStringsArray) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range bss {
+ arr.AppendByteString(bss[i])
+ }
+ return nil
+}
+
+type complex128s []complex128
+
+func (nums complex128s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendComplex128(nums[i])
+ }
+ return nil
+}
+
+type complex64s []complex64
+
+func (nums complex64s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendComplex64(nums[i])
+ }
+ return nil
+}
+
+type durations []time.Duration
+
+func (ds durations) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range ds {
+ arr.AppendDuration(ds[i])
+ }
+ return nil
+}
+
+type float64s []float64
+
+func (nums float64s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendFloat64(nums[i])
+ }
+ return nil
+}
+
+type float32s []float32
+
+func (nums float32s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendFloat32(nums[i])
+ }
+ return nil
+}
+
+type ints []int
+
+func (nums ints) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendInt(nums[i])
+ }
+ return nil
+}
+
+type int64s []int64
+
+func (nums int64s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendInt64(nums[i])
+ }
+ return nil
+}
+
+type int32s []int32
+
+func (nums int32s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendInt32(nums[i])
+ }
+ return nil
+}
+
+type int16s []int16
+
+func (nums int16s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendInt16(nums[i])
+ }
+ return nil
+}
+
+type int8s []int8
+
+func (nums int8s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendInt8(nums[i])
+ }
+ return nil
+}
+
+type stringArray []string
+
+func (ss stringArray) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range ss {
+ arr.AppendString(ss[i])
+ }
+ return nil
+}
+
+type times []time.Time
+
+func (ts times) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range ts {
+ arr.AppendTime(ts[i])
+ }
+ return nil
+}
+
+type uints []uint
+
+func (nums uints) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendUint(nums[i])
+ }
+ return nil
+}
+
+type uint64s []uint64
+
+func (nums uint64s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendUint64(nums[i])
+ }
+ return nil
+}
+
+type uint32s []uint32
+
+func (nums uint32s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendUint32(nums[i])
+ }
+ return nil
+}
+
+type uint16s []uint16
+
+func (nums uint16s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendUint16(nums[i])
+ }
+ return nil
+}
+
+type uint8s []uint8
+
+func (nums uint8s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendUint8(nums[i])
+ }
+ return nil
+}
+
+type uintptrs []uintptr
+
+func (nums uintptrs) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendUintptr(nums[i])
+ }
+ return nil
+}
diff --git a/vendor/go.uber.org/zap/buffer/buffer.go b/vendor/go.uber.org/zap/buffer/buffer.go
new file mode 100644
index 0000000..9e929cd
--- /dev/null
+++ b/vendor/go.uber.org/zap/buffer/buffer.go
@@ -0,0 +1,141 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package buffer provides a thin wrapper around a byte slice. Unlike the
+// standard library's bytes.Buffer, it supports a portion of the strconv
+// package's zero-allocation formatters.
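+//
+// A typical round trip through a pooled Buffer looks roughly like this
+// (illustrative sketch):
+//
+//    pool := buffer.NewPool()
+//    buf := pool.Get()
+//    buf.AppendString("latency=")
+//    buf.AppendInt(42)
+//    fmt.Println(buf.String()) // prints "latency=42"
+//    buf.Free()                // return buf to the pool; don't reuse it afterwards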
+package buffer // import "go.uber.org/zap/buffer"
+
+import (
+ "strconv"
+ "time"
+)
+
+const _size = 1024 // by default, create 1 KiB buffers
+
+// Buffer is a thin wrapper around a byte slice. It's intended to be pooled, so
+// the only way to construct one is via a Pool.
+type Buffer struct {
+ bs []byte
+ pool Pool
+}
+
+// AppendByte writes a single byte to the Buffer.
+func (b *Buffer) AppendByte(v byte) {
+ b.bs = append(b.bs, v)
+}
+
+// AppendString writes a string to the Buffer.
+func (b *Buffer) AppendString(s string) {
+ b.bs = append(b.bs, s...)
+}
+
+// AppendInt appends an integer to the underlying buffer (assuming base 10).
+func (b *Buffer) AppendInt(i int64) {
+ b.bs = strconv.AppendInt(b.bs, i, 10)
+}
+
+// AppendTime appends the time formatted using the specified layout.
+func (b *Buffer) AppendTime(t time.Time, layout string) {
+ b.bs = t.AppendFormat(b.bs, layout)
+}
+
+// AppendUint appends an unsigned integer to the underlying buffer (assuming
+// base 10).
+func (b *Buffer) AppendUint(i uint64) {
+ b.bs = strconv.AppendUint(b.bs, i, 10)
+}
+
+// AppendBool appends a bool to the underlying buffer.
+func (b *Buffer) AppendBool(v bool) {
+ b.bs = strconv.AppendBool(b.bs, v)
+}
+
+// AppendFloat appends a float to the underlying buffer. It doesn't quote NaN
+// or +/- Inf.
+func (b *Buffer) AppendFloat(f float64, bitSize int) {
+ b.bs = strconv.AppendFloat(b.bs, f, 'f', -1, bitSize)
+}
+
+// Len returns the length of the underlying byte slice.
+func (b *Buffer) Len() int {
+ return len(b.bs)
+}
+
+// Cap returns the capacity of the underlying byte slice.
+func (b *Buffer) Cap() int {
+ return cap(b.bs)
+}
+
+// Bytes returns a mutable reference to the underlying byte slice.
+func (b *Buffer) Bytes() []byte {
+ return b.bs
+}
+
+// String returns a string copy of the underlying byte slice.
+func (b *Buffer) String() string {
+ return string(b.bs)
+}
+
+// Reset resets the underlying byte slice. Subsequent writes re-use the slice's
+// backing array.
+func (b *Buffer) Reset() {
+ b.bs = b.bs[:0]
+}
+
+// Write implements io.Writer.
+func (b *Buffer) Write(bs []byte) (int, error) {
+ b.bs = append(b.bs, bs...)
+ return len(bs), nil
+}
+
+// WriteByte writes a single byte to the Buffer.
+//
+// The returned error is always nil; the signature matches bytes.Buffer and
+// bufio.Writer.
+func (b *Buffer) WriteByte(v byte) error {
+ b.AppendByte(v)
+ return nil
+}
+
+// WriteString writes a string to the Buffer.
+//
+// The returned error is always nil; the signature matches bytes.Buffer and
+// bufio.Writer.
+func (b *Buffer) WriteString(s string) (int, error) {
+ b.AppendString(s)
+ return len(s), nil
+}
+
+// TrimNewline trims any final "\n" byte from the end of the buffer.
+func (b *Buffer) TrimNewline() {
+ if i := len(b.bs) - 1; i >= 0 {
+ if b.bs[i] == '\n' {
+ b.bs = b.bs[:i]
+ }
+ }
+}
+
+// Free returns the Buffer to its Pool.
+//
+// Callers must not retain references to the Buffer after calling Free.
+func (b *Buffer) Free() {
+ b.pool.put(b)
+}
diff --git a/vendor/go.uber.org/zap/buffer/pool.go b/vendor/go.uber.org/zap/buffer/pool.go
new file mode 100644
index 0000000..8fb3e20
--- /dev/null
+++ b/vendor/go.uber.org/zap/buffer/pool.go
@@ -0,0 +1,49 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package buffer
+
+import "sync"
+
+// A Pool is a type-safe wrapper around a sync.Pool.
+type Pool struct {
+ p *sync.Pool
+}
+
+// NewPool constructs a new Pool.
+func NewPool() Pool {
+ return Pool{p: &sync.Pool{
+ New: func() interface{} {
+ return &Buffer{bs: make([]byte, 0, _size)}
+ },
+ }}
+}
+
+// Get retrieves a Buffer from the pool, creating one if necessary.
+func (p Pool) Get() *Buffer {
+ buf := p.p.Get().(*Buffer)
+ buf.Reset()
+ buf.pool = p
+ return buf
+}
+
+func (p Pool) put(buf *Buffer) {
+ p.p.Put(buf)
+}
diff --git a/vendor/go.uber.org/zap/checklicense.sh b/vendor/go.uber.org/zap/checklicense.sh
new file mode 100644
index 0000000..345ac8b
--- /dev/null
+++ b/vendor/go.uber.org/zap/checklicense.sh
@@ -0,0 +1,17 @@
+#!/bin/bash -e
+
+ERROR_COUNT=0
+while read -r file
+do
+ case "$(head -1 "${file}")" in
+ *"Copyright (c) "*" Uber Technologies, Inc.")
+ # everything's cool
+ ;;
+ *)
+ echo "$file is missing license header."
+ (( ERROR_COUNT++ ))
+ ;;
+ esac
+done < <(git ls-files "*\.go")
+
+exit $ERROR_COUNT
diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go
new file mode 100644
index 0000000..55637fb
--- /dev/null
+++ b/vendor/go.uber.org/zap/config.go
@@ -0,0 +1,264 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "fmt"
+ "sort"
+ "time"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// SamplingConfig sets a sampling strategy for the logger. Sampling caps the
+// global CPU and I/O load that logging puts on your process while attempting
+// to preserve a representative subset of your logs.
+//
+// If specified, the Sampler will invoke the Hook after each decision.
+//
+// Values configured here are per-second. See zapcore.NewSamplerWithOptions for
+// details.
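+//
+// For example, with Initial: 100 and Thereafter: 100 (the NewProductionConfig
+// defaults), the first 100 entries with a given message are logged each
+// second, and then every 100th entry with that message after that.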
+type SamplingConfig struct {
+ Initial int `json:"initial" yaml:"initial"`
+ Thereafter int `json:"thereafter" yaml:"thereafter"`
+ Hook func(zapcore.Entry, zapcore.SamplingDecision) `json:"-" yaml:"-"`
+}
+
+// Config offers a declarative way to construct a logger. It doesn't do
+// anything that can't be done with New, Options, and the various
+// zapcore.WriteSyncer and zapcore.Core wrappers, but it's a simpler way to
+// toggle common options.
+//
+// Note that Config intentionally supports only the most common options. More
+// unusual logging setups (logging to network connections or message queues,
+// splitting output between multiple files, etc.) are possible, but require
+// direct use of the zapcore package. For sample code, see the package-level
+// BasicConfiguration and AdvancedConfiguration examples.
+//
+// For an example showing runtime log level changes, see the documentation for
+// AtomicLevel.
+type Config struct {
+ // Level is the minimum enabled logging level. Note that this is a dynamic
+ // level, so calling Config.Level.SetLevel will atomically change the log
+ // level of all loggers descended from this config.
+ Level AtomicLevel `json:"level" yaml:"level"`
+ // Development puts the logger in development mode, which changes the
+ // behavior of DPanicLevel and takes stacktraces more liberally.
+ Development bool `json:"development" yaml:"development"`
+ // DisableCaller stops annotating logs with the calling function's file
+ // name and line number. By default, all logs are annotated.
+ DisableCaller bool `json:"disableCaller" yaml:"disableCaller"`
+ // DisableStacktrace completely disables automatic stacktrace capturing. By
+ // default, stacktraces are captured for WarnLevel and above logs in
+ // development and ErrorLevel and above in production.
+ DisableStacktrace bool `json:"disableStacktrace" yaml:"disableStacktrace"`
+ // Sampling sets a sampling policy. A nil SamplingConfig disables sampling.
+ Sampling *SamplingConfig `json:"sampling" yaml:"sampling"`
+ // Encoding sets the logger's encoding. Valid values are "json" and
+ // "console", as well as any third-party encodings registered via
+ // RegisterEncoder.
+ Encoding string `json:"encoding" yaml:"encoding"`
+ // EncoderConfig sets options for the chosen encoder. See
+ // zapcore.EncoderConfig for details.
+ EncoderConfig zapcore.EncoderConfig `json:"encoderConfig" yaml:"encoderConfig"`
+ // OutputPaths is a list of URLs or file paths to write logging output to.
+ // See Open for details.
+ OutputPaths []string `json:"outputPaths" yaml:"outputPaths"`
+ // ErrorOutputPaths is a list of URLs to write internal logger errors to.
+ // The default is standard error.
+ //
+ // Note that this setting only affects internal errors; for sample code that
+ // sends error-level logs to a different location from info- and debug-level
+ // logs, see the package-level AdvancedConfiguration example.
+ ErrorOutputPaths []string `json:"errorOutputPaths" yaml:"errorOutputPaths"`
+ // InitialFields is a collection of fields to add to the root logger.
+ InitialFields map[string]interface{} `json:"initialFields" yaml:"initialFields"`
+}
+
+// NewProductionEncoderConfig returns an opinionated EncoderConfig for
+// production environments.
+func NewProductionEncoderConfig() zapcore.EncoderConfig {
+ return zapcore.EncoderConfig{
+ TimeKey: "ts",
+ LevelKey: "level",
+ NameKey: "logger",
+ CallerKey: "caller",
+ FunctionKey: zapcore.OmitKey,
+ MessageKey: "msg",
+ StacktraceKey: "stacktrace",
+ LineEnding: zapcore.DefaultLineEnding,
+ EncodeLevel: zapcore.LowercaseLevelEncoder,
+ EncodeTime: zapcore.EpochTimeEncoder,
+ EncodeDuration: zapcore.SecondsDurationEncoder,
+ EncodeCaller: zapcore.ShortCallerEncoder,
+ }
+}
+
+// NewProductionConfig is a reasonable production logging configuration.
+// Logging is enabled at InfoLevel and above.
+//
+// It uses a JSON encoder, writes to standard error, and enables sampling.
+// Stacktraces are automatically included on logs of ErrorLevel and above.
+func NewProductionConfig() Config {
+ return Config{
+ Level: NewAtomicLevelAt(InfoLevel),
+ Development: false,
+ Sampling: &SamplingConfig{
+ Initial: 100,
+ Thereafter: 100,
+ },
+ Encoding: "json",
+ EncoderConfig: NewProductionEncoderConfig(),
+ OutputPaths: []string{"stderr"},
+ ErrorOutputPaths: []string{"stderr"},
+ }
+}
+
+// NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for
+// development environments.
+func NewDevelopmentEncoderConfig() zapcore.EncoderConfig {
+ return zapcore.EncoderConfig{
+ // Keys can be anything except the empty string.
+ TimeKey: "T",
+ LevelKey: "L",
+ NameKey: "N",
+ CallerKey: "C",
+ FunctionKey: zapcore.OmitKey,
+ MessageKey: "M",
+ StacktraceKey: "S",
+ LineEnding: zapcore.DefaultLineEnding,
+ EncodeLevel: zapcore.CapitalLevelEncoder,
+ EncodeTime: zapcore.ISO8601TimeEncoder,
+ EncodeDuration: zapcore.StringDurationEncoder,
+ EncodeCaller: zapcore.ShortCallerEncoder,
+ }
+}
+
+// NewDevelopmentConfig is a reasonable development logging configuration.
+// Logging is enabled at DebugLevel and above.
+//
+// It enables development mode (which makes DPanicLevel logs panic), uses a
+// console encoder, writes to standard error, and disables sampling.
+// Stacktraces are automatically included on logs of WarnLevel and above.
+func NewDevelopmentConfig() Config {
+ return Config{
+ Level: NewAtomicLevelAt(DebugLevel),
+ Development: true,
+ Encoding: "console",
+ EncoderConfig: NewDevelopmentEncoderConfig(),
+ OutputPaths: []string{"stderr"},
+ ErrorOutputPaths: []string{"stderr"},
+ }
+}
+
+// Build constructs a logger from the Config and Options.
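+//
+// For example (a minimal sketch; the output path and field are made up):
+//
+//    cfg := zap.NewProductionConfig()
+//    cfg.OutputPaths = append(cfg.OutputPaths, "/var/log/service.json")
+//    cfg.InitialFields = map[string]interface{}{"service": "example"}
+//    logger, err := cfg.Build()
+//    if err != nil {
+//        panic(err)
+//    }
+//    defer logger.Sync()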
+func (cfg Config) Build(opts ...Option) (*Logger, error) {
+ enc, err := cfg.buildEncoder()
+ if err != nil {
+ return nil, err
+ }
+
+ sink, errSink, err := cfg.openSinks()
+ if err != nil {
+ return nil, err
+ }
+
+ if cfg.Level == (AtomicLevel{}) {
+ return nil, fmt.Errorf("missing Level")
+ }
+
+ log := New(
+ zapcore.NewCore(enc, sink, cfg.Level),
+ cfg.buildOptions(errSink)...,
+ )
+ if len(opts) > 0 {
+ log = log.WithOptions(opts...)
+ }
+ return log, nil
+}
+
+func (cfg Config) buildOptions(errSink zapcore.WriteSyncer) []Option {
+ opts := []Option{ErrorOutput(errSink)}
+
+ if cfg.Development {
+ opts = append(opts, Development())
+ }
+
+ if !cfg.DisableCaller {
+ opts = append(opts, AddCaller())
+ }
+
+ stackLevel := ErrorLevel
+ if cfg.Development {
+ stackLevel = WarnLevel
+ }
+ if !cfg.DisableStacktrace {
+ opts = append(opts, AddStacktrace(stackLevel))
+ }
+
+ if scfg := cfg.Sampling; scfg != nil {
+ opts = append(opts, WrapCore(func(core zapcore.Core) zapcore.Core {
+ var samplerOpts []zapcore.SamplerOption
+ if scfg.Hook != nil {
+ samplerOpts = append(samplerOpts, zapcore.SamplerHook(scfg.Hook))
+ }
+ return zapcore.NewSamplerWithOptions(
+ core,
+ time.Second,
+ cfg.Sampling.Initial,
+ cfg.Sampling.Thereafter,
+ samplerOpts...,
+ )
+ }))
+ }
+
+ if len(cfg.InitialFields) > 0 {
+ fs := make([]Field, 0, len(cfg.InitialFields))
+ keys := make([]string, 0, len(cfg.InitialFields))
+ for k := range cfg.InitialFields {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ fs = append(fs, Any(k, cfg.InitialFields[k]))
+ }
+ opts = append(opts, Fields(fs...))
+ }
+
+ return opts
+}
+
+func (cfg Config) openSinks() (zapcore.WriteSyncer, zapcore.WriteSyncer, error) {
+ sink, closeOut, err := Open(cfg.OutputPaths...)
+ if err != nil {
+ return nil, nil, err
+ }
+ errSink, _, err := Open(cfg.ErrorOutputPaths...)
+ if err != nil {
+ closeOut()
+ return nil, nil, err
+ }
+ return sink, errSink, nil
+}
+
+func (cfg Config) buildEncoder() (zapcore.Encoder, error) {
+ return newEncoder(cfg.Encoding, cfg.EncoderConfig)
+}
diff --git a/vendor/go.uber.org/zap/doc.go b/vendor/go.uber.org/zap/doc.go
new file mode 100644
index 0000000..8638dd1
--- /dev/null
+++ b/vendor/go.uber.org/zap/doc.go
@@ -0,0 +1,113 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package zap provides fast, structured, leveled logging.
+//
+// For applications that log in the hot path, reflection-based serialization
+// and string formatting are prohibitively expensive - they're CPU-intensive
+// and make many small allocations. Put differently, using json.Marshal and
+// fmt.Fprintf to log tons of interface{} makes your application slow.
+//
+// Zap takes a different approach. It includes a reflection-free,
+// zero-allocation JSON encoder, and the base Logger strives to avoid
+// serialization overhead and allocations wherever possible. By building the
+// high-level SugaredLogger on that foundation, zap lets users choose when
+// they need to count every allocation and when they'd prefer a more familiar,
+// loosely typed API.
+//
+// Choosing a Logger
+//
+// In contexts where performance is nice, but not critical, use the
+// SugaredLogger. It's 4-10x faster than other structured logging packages and
+// supports both structured and printf-style logging. Like log15 and go-kit,
+// the SugaredLogger's structured logging APIs are loosely typed and accept a
+// variadic number of key-value pairs. (For more advanced use cases, they also
+// accept strongly typed fields - see the SugaredLogger.With documentation for
+// details.)
+// sugar := zap.NewExample().Sugar()
+// defer sugar.Sync()
+// sugar.Infow("failed to fetch URL",
+// "url", "http://example.com",
+// "attempt", 3,
+// "backoff", time.Second,
+// )
+// sugar.Infof("failed to fetch URL: %s", "http://example.com")
+//
+// By default, loggers are unbuffered. However, since zap's low-level APIs
+// allow buffering, calling Sync before letting your process exit is a good
+// habit.
+//
+// In the rare contexts where every microsecond and every allocation matter,
+// use the Logger. It's even faster than the SugaredLogger and allocates far
+// less, but it only supports strongly-typed, structured logging.
+// logger := zap.NewExample()
+// defer logger.Sync()
+// logger.Info("failed to fetch URL",
+// zap.String("url", "http://example.com"),
+// zap.Int("attempt", 3),
+// zap.Duration("backoff", time.Second),
+// )
+//
+// Choosing between the Logger and SugaredLogger doesn't need to be an
+// application-wide decision: converting between the two is simple and
+// inexpensive.
+// logger := zap.NewExample()
+// defer logger.Sync()
+// sugar := logger.Sugar()
+// plain := sugar.Desugar()
+//
+// Configuring Zap
+//
+// The simplest way to build a Logger is to use zap's opinionated presets:
+// NewExample, NewProduction, and NewDevelopment. These presets build a logger
+// with a single function call:
+// logger, err := zap.NewProduction()
+// if err != nil {
+// log.Fatalf("can't initialize zap logger: %v", err)
+// }
+// defer logger.Sync()
+//
+// Presets are fine for small projects, but larger projects and organizations
+// naturally require a bit more customization. For most users, zap's Config
+// struct strikes the right balance between flexibility and convenience. See
+// the package-level BasicConfiguration example for sample code.
+//
+// More unusual configurations (splitting output between files, sending logs
+// to a message queue, etc.) are possible, but require direct use of
+// go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration
+// example for sample code.
+//
+// Extending Zap
+//
+// The zap package itself is a relatively thin wrapper around the interfaces
+// in go.uber.org/zap/zapcore. Extending zap to support a new encoding (e.g.,
+// BSON), a new log sink (e.g., Kafka), or something more exotic (perhaps an
+// exception aggregation service, like Sentry or Rollbar) typically requires
+// implementing the zapcore.Encoder, zapcore.WriteSyncer, or zapcore.Core
+// interfaces. See the zapcore documentation for details.
+//
+// Similarly, package authors can use the high-performance Encoder and Core
+// implementations in the zapcore package to build their own loggers.
+//
+// Frequently Asked Questions
+//
+// An FAQ covering everything from installation errors to design decisions is
+// available at https://github.com/uber-go/zap/blob/master/FAQ.md.
+package zap // import "go.uber.org/zap"
diff --git a/vendor/go.uber.org/zap/encoder.go b/vendor/go.uber.org/zap/encoder.go
new file mode 100644
index 0000000..08ed833
--- /dev/null
+++ b/vendor/go.uber.org/zap/encoder.go
@@ -0,0 +1,79 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+
+ "go.uber.org/zap/zapcore"
+)
+
+var (
+ errNoEncoderNameSpecified = errors.New("no encoder name specified")
+
+ _encoderNameToConstructor = map[string]func(zapcore.EncoderConfig) (zapcore.Encoder, error){
+ "console": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
+ return zapcore.NewConsoleEncoder(encoderConfig), nil
+ },
+ "json": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
+ return zapcore.NewJSONEncoder(encoderConfig), nil
+ },
+ }
+ _encoderMutex sync.RWMutex
+)
+
+// RegisterEncoder registers an encoder constructor, which the Config struct
+// can then reference. By default, the "json" and "console" encoders are
+// registered.
+//
+// Attempting to register an encoder whose name is already taken returns an
+// error.
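+//
+// For example (an illustrative sketch; "plain-console" is a made-up name):
+//
+//    err := zap.RegisterEncoder("plain-console", func(cfg zapcore.EncoderConfig) (zapcore.Encoder, error) {
+//        return zapcore.NewConsoleEncoder(cfg), nil
+//    })
+//
+// After registration, a Config with Encoding: "plain-console" uses the custom
+// constructor.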
+func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapcore.Encoder, error)) error {
+ _encoderMutex.Lock()
+ defer _encoderMutex.Unlock()
+ if name == "" {
+ return errNoEncoderNameSpecified
+ }
+ if _, ok := _encoderNameToConstructor[name]; ok {
+ return fmt.Errorf("encoder already registered for name %q", name)
+ }
+ _encoderNameToConstructor[name] = constructor
+ return nil
+}
+
+func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
+ if encoderConfig.TimeKey != "" && encoderConfig.EncodeTime == nil {
+ return nil, fmt.Errorf("missing EncodeTime in EncoderConfig")
+ }
+
+ _encoderMutex.RLock()
+ defer _encoderMutex.RUnlock()
+ if name == "" {
+ return nil, errNoEncoderNameSpecified
+ }
+ constructor, ok := _encoderNameToConstructor[name]
+ if !ok {
+ return nil, fmt.Errorf("no encoder registered for name %q", name)
+ }
+ return constructor(encoderConfig)
+}
diff --git a/vendor/go.uber.org/zap/error.go b/vendor/go.uber.org/zap/error.go
new file mode 100644
index 0000000..65982a5
--- /dev/null
+++ b/vendor/go.uber.org/zap/error.go
@@ -0,0 +1,80 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "sync"
+
+ "go.uber.org/zap/zapcore"
+)
+
+var _errArrayElemPool = sync.Pool{New: func() interface{} {
+ return &errArrayElem{}
+}}
+
+// Error is shorthand for the common idiom NamedError("error", err).
+func Error(err error) Field {
+ return NamedError("error", err)
+}
+
+// NamedError constructs a field that lazily stores err.Error() under the
+// provided key. Errors which also implement fmt.Formatter (like those produced
+// by github.com/pkg/errors) will also have their verbose representation stored
+// under key+"Verbose". If passed a nil error, the field is a no-op.
+//
+// For the common case in which the key is simply "error", the Error function
+// is shorter and less repetitive.
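+//
+// For example (illustrative; store and user are made-up names):
+//
+//    if err := store.Save(user); err != nil {
+//        logger.Error("save failed", zap.NamedError("cause", err))
+//    }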
+func NamedError(key string, err error) Field {
+ if err == nil {
+ return Skip()
+ }
+ return Field{Key: key, Type: zapcore.ErrorType, Interface: err}
+}
+
+type errArray []error
+
+func (errs errArray) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range errs {
+ if errs[i] == nil {
+ continue
+ }
+ // To represent each error as an object with an "error" attribute and
+ // potentially an "errorVerbose" attribute, we need to wrap it in a
+ // type that implements LogObjectMarshaler. To prevent this from
+ // allocating, pool the wrapper type.
+ elem := _errArrayElemPool.Get().(*errArrayElem)
+ elem.error = errs[i]
+ arr.AppendObject(elem)
+ elem.error = nil
+ _errArrayElemPool.Put(elem)
+ }
+ return nil
+}
+
+type errArrayElem struct {
+ error
+}
+
+func (e *errArrayElem) MarshalLogObject(enc zapcore.ObjectEncoder) error {
+ // Re-use the error field's logic, which supports non-standard error types.
+ Error(e.error).AddTo(enc)
+ return nil
+}
diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go
new file mode 100644
index 0000000..bbb745d
--- /dev/null
+++ b/vendor/go.uber.org/zap/field.go
@@ -0,0 +1,549 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "fmt"
+ "math"
+ "time"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// Field is an alias for zapcore.Field. Aliasing this type dramatically
+// improves the navigability of this package's API documentation.
+type Field = zapcore.Field
+
+var (
+ _minTimeInt64 = time.Unix(0, math.MinInt64)
+ _maxTimeInt64 = time.Unix(0, math.MaxInt64)
+)
+
+// Skip constructs a no-op field, which is often useful when handling invalid
+// inputs in other Field constructors.
+func Skip() Field {
+ return Field{Type: zapcore.SkipType}
+}
+
+// nilField returns a field which will marshal explicitly as nil. See motivation
+// in https://github.com/uber-go/zap/issues/753 . If we ever make breaking
+// changes and add zapcore.NilType and zapcore.ObjectEncoder.AddNil, the
+// implementation here should be changed to reflect that.
+func nilField(key string) Field { return Reflect(key, nil) }
+
+// Binary constructs a field that carries an opaque binary blob.
+//
+// Binary data is serialized in an encoding-appropriate format. For example,
+// zap's JSON encoder base64-encodes binary blobs. To log UTF-8 encoded text,
+// use ByteString.
+func Binary(key string, val []byte) Field {
+ return Field{Key: key, Type: zapcore.BinaryType, Interface: val}
+}
+
+// Bool constructs a field that carries a bool.
+func Bool(key string, val bool) Field {
+ var ival int64
+ if val {
+ ival = 1
+ }
+ return Field{Key: key, Type: zapcore.BoolType, Integer: ival}
+}
+
+// Boolp constructs a field that carries a *bool. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Boolp(key string, val *bool) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Bool(key, *val)
+}
+
+// ByteString constructs a field that carries UTF-8 encoded text as a []byte.
+// To log opaque binary blobs (which aren't necessarily valid UTF-8), use
+// Binary.
+func ByteString(key string, val []byte) Field {
+ return Field{Key: key, Type: zapcore.ByteStringType, Interface: val}
+}
+
+// Complex128 constructs a field that carries a complex number. Unlike most
+// numeric fields, this costs an allocation (to convert the complex128 to
+// interface{}).
+func Complex128(key string, val complex128) Field {
+ return Field{Key: key, Type: zapcore.Complex128Type, Interface: val}
+}
+
+// Complex128p constructs a field that carries a *complex128. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Complex128p(key string, val *complex128) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Complex128(key, *val)
+}
+
+// Complex64 constructs a field that carries a complex number. Unlike most
+// numeric fields, this costs an allocation (to convert the complex64 to
+// interface{}).
+func Complex64(key string, val complex64) Field {
+ return Field{Key: key, Type: zapcore.Complex64Type, Interface: val}
+}
+
+// Complex64p constructs a field that carries a *complex64. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Complex64p(key string, val *complex64) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Complex64(key, *val)
+}
+
+// Float64 constructs a field that carries a float64. The way the
+// floating-point value is represented is encoder-dependent, so marshaling is
+// necessarily lazy.
+func Float64(key string, val float64) Field {
+ return Field{Key: key, Type: zapcore.Float64Type, Integer: int64(math.Float64bits(val))}
+}
+
+// Float64p constructs a field that carries a *float64. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Float64p(key string, val *float64) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Float64(key, *val)
+}
+
+// Float32 constructs a field that carries a float32. The way the
+// floating-point value is represented is encoder-dependent, so marshaling is
+// necessarily lazy.
+func Float32(key string, val float32) Field {
+ return Field{Key: key, Type: zapcore.Float32Type, Integer: int64(math.Float32bits(val))}
+}
+
+// Float32p constructs a field that carries a *float32. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Float32p(key string, val *float32) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Float32(key, *val)
+}
+
+// Int constructs a field with the given key and value.
+func Int(key string, val int) Field {
+ return Int64(key, int64(val))
+}
+
+// Intp constructs a field that carries a *int. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Intp(key string, val *int) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Int(key, *val)
+}
+
+// Int64 constructs a field with the given key and value.
+func Int64(key string, val int64) Field {
+ return Field{Key: key, Type: zapcore.Int64Type, Integer: val}
+}
+
+// Int64p constructs a field that carries a *int64. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Int64p(key string, val *int64) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Int64(key, *val)
+}
+
+// Int32 constructs a field with the given key and value.
+func Int32(key string, val int32) Field {
+ return Field{Key: key, Type: zapcore.Int32Type, Integer: int64(val)}
+}
+
+// Int32p constructs a field that carries a *int32. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Int32p(key string, val *int32) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Int32(key, *val)
+}
+
+// Int16 constructs a field with the given key and value.
+func Int16(key string, val int16) Field {
+ return Field{Key: key, Type: zapcore.Int16Type, Integer: int64(val)}
+}
+
+// Int16p constructs a field that carries a *int16. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Int16p(key string, val *int16) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Int16(key, *val)
+}
+
+// Int8 constructs a field with the given key and value.
+func Int8(key string, val int8) Field {
+ return Field{Key: key, Type: zapcore.Int8Type, Integer: int64(val)}
+}
+
+// Int8p constructs a field that carries a *int8. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Int8p(key string, val *int8) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Int8(key, *val)
+}
+
+// String constructs a field with the given key and value.
+func String(key string, val string) Field {
+ return Field{Key: key, Type: zapcore.StringType, String: val}
+}
+
+// Stringp constructs a field that carries a *string. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Stringp(key string, val *string) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return String(key, *val)
+}
+
+// Uint constructs a field with the given key and value.
+func Uint(key string, val uint) Field {
+ return Uint64(key, uint64(val))
+}
+
+// Uintp constructs a field that carries a *uint. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Uintp(key string, val *uint) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Uint(key, *val)
+}
+
+// Uint64 constructs a field with the given key and value.
+func Uint64(key string, val uint64) Field {
+ return Field{Key: key, Type: zapcore.Uint64Type, Integer: int64(val)}
+}
+
+// Uint64p constructs a field that carries a *uint64. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Uint64p(key string, val *uint64) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Uint64(key, *val)
+}
+
+// Uint32 constructs a field with the given key and value.
+func Uint32(key string, val uint32) Field {
+ return Field{Key: key, Type: zapcore.Uint32Type, Integer: int64(val)}
+}
+
+// Uint32p constructs a field that carries a *uint32. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Uint32p(key string, val *uint32) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Uint32(key, *val)
+}
+
+// Uint16 constructs a field with the given key and value.
+func Uint16(key string, val uint16) Field {
+ return Field{Key: key, Type: zapcore.Uint16Type, Integer: int64(val)}
+}
+
+// Uint16p constructs a field that carries a *uint16. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Uint16p(key string, val *uint16) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Uint16(key, *val)
+}
+
+// Uint8 constructs a field with the given key and value.
+func Uint8(key string, val uint8) Field {
+ return Field{Key: key, Type: zapcore.Uint8Type, Integer: int64(val)}
+}
+
+// Uint8p constructs a field that carries a *uint8. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Uint8p(key string, val *uint8) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Uint8(key, *val)
+}
+
+// Uintptr constructs a field with the given key and value.
+func Uintptr(key string, val uintptr) Field {
+ return Field{Key: key, Type: zapcore.UintptrType, Integer: int64(val)}
+}
+
+// Uintptrp constructs a field that carries a *uintptr. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Uintptrp(key string, val *uintptr) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Uintptr(key, *val)
+}
+
+// Reflect constructs a field with the given key and an arbitrary object. It uses
+// an encoding-appropriate, reflection-based function to lazily serialize nearly
+// any object into the logging context, but it's relatively slow and
+// allocation-heavy. Outside tests, Any is always a better choice.
+//
+// If encoding fails (e.g., trying to serialize a map[int]string to JSON), Reflect
+// includes the error message in the final log output.
+func Reflect(key string, val interface{}) Field {
+ return Field{Key: key, Type: zapcore.ReflectType, Interface: val}
+}
+
+// Namespace creates a named, isolated scope within the logger's context. All
+// subsequent fields will be added to the new namespace.
+//
+// This helps prevent key collisions when injecting loggers into sub-components
+// or third-party libraries.
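+//
+// For example (illustrative field names), with the JSON encoder
+//
+//    logger.With(zap.Namespace("db"), zap.String("host", "localhost")).Info("connected")
+//
+// nests the host field as {"db": {"host": "localhost"}}.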
+func Namespace(key string) Field {
+ return Field{Key: key, Type: zapcore.NamespaceType}
+}
+
+// Stringer constructs a field with the given key and the output of the value's
+// String method. The Stringer's String method is called lazily.
+func Stringer(key string, val fmt.Stringer) Field {
+ return Field{Key: key, Type: zapcore.StringerType, Interface: val}
+}
+
+// Time constructs a Field with the given key and value. The encoder
+// controls how the time is serialized.
+func Time(key string, val time.Time) Field {
+ if val.Before(_minTimeInt64) || val.After(_maxTimeInt64) {
+ return Field{Key: key, Type: zapcore.TimeFullType, Interface: val}
+ }
+ return Field{Key: key, Type: zapcore.TimeType, Integer: val.UnixNano(), Interface: val.Location()}
+}
+
+// Timep constructs a field that carries a *time.Time. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Timep(key string, val *time.Time) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Time(key, *val)
+}
+
+// Stack constructs a field that stores a stacktrace of the current goroutine
+// under provided key. Keep in mind that taking a stacktrace is eager and
+// expensive (relatively speaking); this function both makes an allocation and
+// takes about two microseconds.
+func Stack(key string) Field {
+ return StackSkip(key, 1) // skip Stack
+}
+
+// StackSkip constructs a field similarly to Stack, but also skips the given
+// number of frames from the top of the stacktrace.
+func StackSkip(key string, skip int) Field {
+ // Returning the stacktrace as a string costs an allocation, but saves us
+ // from expanding the zapcore.Field union struct to include a byte slice. Since
+ // taking a stacktrace is already so expensive (~10us), the extra allocation
+ // is okay.
+ return String(key, takeStacktrace(skip+1)) // skip StackSkip
+}
+
+// Duration constructs a field with the given key and value. The encoder
+// controls how the duration is serialized.
+func Duration(key string, val time.Duration) Field {
+ return Field{Key: key, Type: zapcore.DurationType, Integer: int64(val)}
+}
+
+// Durationp constructs a field that carries a *time.Duration. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Durationp(key string, val *time.Duration) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Duration(key, *val)
+}
+
+// Object constructs a field with the given key and ObjectMarshaler. It
+// provides a flexible, but still type-safe and efficient, way to add map- or
+// struct-like user-defined types to the logging context. The struct's
+// MarshalLogObject method is called lazily.
+func Object(key string, val zapcore.ObjectMarshaler) Field {
+ return Field{Key: key, Type: zapcore.ObjectMarshalerType, Interface: val}
+}
+
+// Inline constructs a Field that is similar to Object, but it
+// will add the elements of the provided ObjectMarshaler to the
+// current namespace.
+func Inline(val zapcore.ObjectMarshaler) Field {
+ return zapcore.Field{
+ Type: zapcore.InlineMarshalerType,
+ Interface: val,
+ }
+}
+
+// Any takes a key and an arbitrary value and chooses the best way to represent
+// them as a field, falling back to a reflection-based approach only if
+// necessary.
+//
+// Since byte/uint8 and rune/int32 are aliases, Any can't differentiate between
+// them. To minimize surprises, []byte values are treated as binary blobs, byte
+// values are treated as uint8, and runes are always treated as integers.
+func Any(key string, value interface{}) Field {
+ switch val := value.(type) {
+ case zapcore.ObjectMarshaler:
+ return Object(key, val)
+ case zapcore.ArrayMarshaler:
+ return Array(key, val)
+ case bool:
+ return Bool(key, val)
+ case *bool:
+ return Boolp(key, val)
+ case []bool:
+ return Bools(key, val)
+ case complex128:
+ return Complex128(key, val)
+ case *complex128:
+ return Complex128p(key, val)
+ case []complex128:
+ return Complex128s(key, val)
+ case complex64:
+ return Complex64(key, val)
+ case *complex64:
+ return Complex64p(key, val)
+ case []complex64:
+ return Complex64s(key, val)
+ case float64:
+ return Float64(key, val)
+ case *float64:
+ return Float64p(key, val)
+ case []float64:
+ return Float64s(key, val)
+ case float32:
+ return Float32(key, val)
+ case *float32:
+ return Float32p(key, val)
+ case []float32:
+ return Float32s(key, val)
+ case int:
+ return Int(key, val)
+ case *int:
+ return Intp(key, val)
+ case []int:
+ return Ints(key, val)
+ case int64:
+ return Int64(key, val)
+ case *int64:
+ return Int64p(key, val)
+ case []int64:
+ return Int64s(key, val)
+ case int32:
+ return Int32(key, val)
+ case *int32:
+ return Int32p(key, val)
+ case []int32:
+ return Int32s(key, val)
+ case int16:
+ return Int16(key, val)
+ case *int16:
+ return Int16p(key, val)
+ case []int16:
+ return Int16s(key, val)
+ case int8:
+ return Int8(key, val)
+ case *int8:
+ return Int8p(key, val)
+ case []int8:
+ return Int8s(key, val)
+ case string:
+ return String(key, val)
+ case *string:
+ return Stringp(key, val)
+ case []string:
+ return Strings(key, val)
+ case uint:
+ return Uint(key, val)
+ case *uint:
+ return Uintp(key, val)
+ case []uint:
+ return Uints(key, val)
+ case uint64:
+ return Uint64(key, val)
+ case *uint64:
+ return Uint64p(key, val)
+ case []uint64:
+ return Uint64s(key, val)
+ case uint32:
+ return Uint32(key, val)
+ case *uint32:
+ return Uint32p(key, val)
+ case []uint32:
+ return Uint32s(key, val)
+ case uint16:
+ return Uint16(key, val)
+ case *uint16:
+ return Uint16p(key, val)
+ case []uint16:
+ return Uint16s(key, val)
+ case uint8:
+ return Uint8(key, val)
+ case *uint8:
+ return Uint8p(key, val)
+ case []byte:
+ return Binary(key, val)
+ case uintptr:
+ return Uintptr(key, val)
+ case *uintptr:
+ return Uintptrp(key, val)
+ case []uintptr:
+ return Uintptrs(key, val)
+ case time.Time:
+ return Time(key, val)
+ case *time.Time:
+ return Timep(key, val)
+ case []time.Time:
+ return Times(key, val)
+ case time.Duration:
+ return Duration(key, val)
+ case *time.Duration:
+ return Durationp(key, val)
+ case []time.Duration:
+ return Durations(key, val)
+ case error:
+ return NamedError(key, val)
+ case []error:
+ return Errors(key, val)
+ case fmt.Stringer:
+ return Stringer(key, val)
+ default:
+ return Reflect(key, val)
+ }
+}
diff --git a/vendor/go.uber.org/zap/flag.go b/vendor/go.uber.org/zap/flag.go
new file mode 100644
index 0000000..1312875
--- /dev/null
+++ b/vendor/go.uber.org/zap/flag.go
@@ -0,0 +1,39 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "flag"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// LevelFlag uses the standard library's flag.Var to declare a global flag
+// with the specified name, default, and usage guidance. The returned value is
+// a pointer to the value of the flag.
+//
+// If you don't want to use the flag package's global state, you can use any
+// non-nil *Level as a flag.Value with your own *flag.FlagSet.
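+//
+// For example (a sketch; the flag name is made up):
+//
+//    level := zap.LevelFlag("log-level", zapcore.InfoLevel, "minimum enabled logging level")
+//    flag.Parse()
+//    cfg := zap.NewProductionConfig()
+//    cfg.Level = zap.NewAtomicLevelAt(*level)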
+func LevelFlag(name string, defaultLevel zapcore.Level, usage string) *zapcore.Level {
+ lvl := defaultLevel
+ flag.Var(&lvl, name, usage)
+ return &lvl
+}
diff --git a/vendor/go.uber.org/zap/glide.yaml b/vendor/go.uber.org/zap/glide.yaml
new file mode 100644
index 0000000..8e1d05e
--- /dev/null
+++ b/vendor/go.uber.org/zap/glide.yaml
@@ -0,0 +1,34 @@
+package: go.uber.org/zap
+license: MIT
+import:
+- package: go.uber.org/atomic
+ version: ^1
+- package: go.uber.org/multierr
+ version: ^1
+testImport:
+- package: github.com/satori/go.uuid
+- package: github.com/sirupsen/logrus
+- package: github.com/apex/log
+ subpackages:
+ - handlers/json
+- package: github.com/go-kit/kit
+ subpackages:
+ - log
+- package: github.com/stretchr/testify
+ subpackages:
+ - assert
+ - require
+- package: gopkg.in/inconshreveable/log15.v2
+- package: github.com/mattn/goveralls
+- package: github.com/pborman/uuid
+- package: github.com/pkg/errors
+- package: github.com/rs/zerolog
+- package: golang.org/x/tools
+ subpackages:
+ - cover
+- package: golang.org/x/lint
+ subpackages:
+ - golint
+- package: github.com/axw/gocov
+ subpackages:
+ - gocov
diff --git a/vendor/go.uber.org/zap/global.go b/vendor/go.uber.org/zap/global.go
new file mode 100644
index 0000000..c1ac050
--- /dev/null
+++ b/vendor/go.uber.org/zap/global.go
@@ -0,0 +1,168 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "os"
+ "sync"
+
+ "go.uber.org/zap/zapcore"
+)
+
+const (
+ _loggerWriterDepth = 2
+ _programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " +
+ "https://github.com/uber-go/zap/issues/new and reference this error: %v"
+)
+
+var (
+ _globalMu sync.RWMutex
+ _globalL = NewNop()
+ _globalS = _globalL.Sugar()
+)
+
+// L returns the global Logger, which can be reconfigured with ReplaceGlobals.
+// It's safe for concurrent use.
+func L() *Logger {
+ _globalMu.RLock()
+ l := _globalL
+ _globalMu.RUnlock()
+ return l
+}
+
+// S returns the global SugaredLogger, which can be reconfigured with
+// ReplaceGlobals. It's safe for concurrent use.
+func S() *SugaredLogger {
+ _globalMu.RLock()
+ s := _globalS
+ _globalMu.RUnlock()
+ return s
+}
+
+// ReplaceGlobals replaces the global Logger and SugaredLogger, and returns a
+// function to restore the original values. It's safe for concurrent use.
+func ReplaceGlobals(logger *Logger) func() {
+ _globalMu.Lock()
+ prev := _globalL
+ _globalL = logger
+ _globalS = logger.Sugar()
+ _globalMu.Unlock()
+ return func() { ReplaceGlobals(prev) }
+}
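+
+// For example, a minimal sketch (the production preset is just one way to
+// build the logger being installed):
+//
+//     logger, _ := NewProduction()
+//     undo := ReplaceGlobals(logger)
+//     defer undo()
+//     S().Infow("global loggers replaced")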
+
+// NewStdLog returns a *log.Logger which writes to the supplied zap Logger at
+// InfoLevel. To redirect the standard library's package-global logging
+// functions, use RedirectStdLog instead.
+func NewStdLog(l *Logger) *log.Logger {
+ logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth))
+ f := logger.Info
+ return log.New(&loggerWriter{f}, "" /* prefix */, 0 /* flags */)
+}
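+
+// For example, a sketch of handing zap to code that expects a *log.Logger
+// (http.Server and its ErrorLog field are from the standard library):
+//
+//     zapLogger, _ := NewProduction()
+//     srv := &http.Server{
+//         Addr:     ":8080",
+//         ErrorLog: NewStdLog(zapLogger),
+//     }
+//     _ = srv.ListenAndServe()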
+
+// NewStdLogAt returns a *log.Logger which writes to the supplied zap Logger
+// at the required level.
+func NewStdLogAt(l *Logger, level zapcore.Level) (*log.Logger, error) {
+ logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth))
+ logFunc, err := levelToFunc(logger, level)
+ if err != nil {
+ return nil, err
+ }
+ return log.New(&loggerWriter{logFunc}, "" /* prefix */, 0 /* flags */), nil
+}
+
+// RedirectStdLog redirects output from the standard library's package-global
+// logger to the supplied logger at InfoLevel. Since zap already handles caller
+// annotations, timestamps, etc., it automatically disables the standard
+// library's annotations and prefixing.
+//
+// It returns a function to restore the original prefix and flags and reset the
+// standard library's output to os.Stderr.
+func RedirectStdLog(l *Logger) func() {
+ f, err := redirectStdLogAt(l, InfoLevel)
+ if err != nil {
+ // Can't get here, since passing InfoLevel to redirectStdLogAt always
+ // works.
+ panic(fmt.Sprintf(_programmerErrorTemplate, err))
+ }
+ return f
+}
+
+// RedirectStdLogAt redirects output from the standard library's package-global
+// logger to the supplied logger at the specified level. Since zap already
+// handles caller annotations, timestamps, etc., it automatically disables the
+// standard library's annotations and prefixing.
+//
+// It returns a function to restore the original prefix and flags and reset the
+// standard library's output to os.Stderr.
+func RedirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) {
+ return redirectStdLogAt(l, level)
+}
+
+func redirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) {
+ flags := log.Flags()
+ prefix := log.Prefix()
+ log.SetFlags(0)
+ log.SetPrefix("")
+ logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth))
+ logFunc, err := levelToFunc(logger, level)
+ if err != nil {
+ return nil, err
+ }
+ log.SetOutput(&loggerWriter{logFunc})
+ return func() {
+ log.SetFlags(flags)
+ log.SetPrefix(prefix)
+ log.SetOutput(os.Stderr)
+ }, nil
+}
+
+func levelToFunc(logger *Logger, lvl zapcore.Level) (func(string, ...Field), error) {
+ switch lvl {
+ case DebugLevel:
+ return logger.Debug, nil
+ case InfoLevel:
+ return logger.Info, nil
+ case WarnLevel:
+ return logger.Warn, nil
+ case ErrorLevel:
+ return logger.Error, nil
+ case DPanicLevel:
+ return logger.DPanic, nil
+ case PanicLevel:
+ return logger.Panic, nil
+ case FatalLevel:
+ return logger.Fatal, nil
+ }
+ return nil, fmt.Errorf("unrecognized level: %q", lvl)
+}
+
+type loggerWriter struct {
+ logFunc func(msg string, fields ...Field)
+}
+
+func (l *loggerWriter) Write(p []byte) (int, error) {
+ p = bytes.TrimSpace(p)
+ l.logFunc(string(p))
+ return len(p), nil
+}
diff --git a/vendor/go.uber.org/zap/global_go112.go b/vendor/go.uber.org/zap/global_go112.go
new file mode 100644
index 0000000..6b5dbda
--- /dev/null
+++ b/vendor/go.uber.org/zap/global_go112.go
@@ -0,0 +1,26 @@
+// Copyright (c) 2019 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// See #682 for more information.
+// +build go1.12
+
+package zap
+
+const _stdLogDefaultDepth = 1
diff --git a/vendor/go.uber.org/zap/global_prego112.go b/vendor/go.uber.org/zap/global_prego112.go
new file mode 100644
index 0000000..d3ab9af
--- /dev/null
+++ b/vendor/go.uber.org/zap/global_prego112.go
@@ -0,0 +1,26 @@
+// Copyright (c) 2019 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// See #682 for more information.
+// +build !go1.12
+
+package zap
+
+const _stdLogDefaultDepth = 2
diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go
new file mode 100644
index 0000000..1297c33
--- /dev/null
+++ b/vendor/go.uber.org/zap/http_handler.go
@@ -0,0 +1,132 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// ServeHTTP is a simple JSON endpoint that can report on or change the current
+// logging level.
+//
+// GET
+//
+// The GET request returns a JSON description of the current logging level like:
+// {"level":"info"}
+//
+// PUT
+//
+// The PUT request changes the logging level. It is perfectly safe to change the
+// logging level while a program is running. Two content types are supported:
+//
+// Content-Type: application/x-www-form-urlencoded
+//
+// With this content type, the level can be provided through the request body or
+// a query parameter. The log level is URL encoded like:
+//
+// level=debug
+//
+// The request body takes precedence over the query parameter, if both are
+// specified.
+//
+// This content type is the default for a curl PUT request. Following are two
+// example curl requests that both set the logging level to debug.
+//
+// curl -X PUT localhost:8080/log/level?level=debug
+// curl -X PUT localhost:8080/log/level -d level=debug
+//
+// For any other content type, the payload is expected to be JSON encoded and
+// look like:
+//
+// {"level":"info"}
+//
+// An example curl request could look like this:
+//
+// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}'
+//
+func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ type errorResponse struct {
+ Error string `json:"error"`
+ }
+ type payload struct {
+ Level zapcore.Level `json:"level"`
+ }
+
+ enc := json.NewEncoder(w)
+
+ switch r.Method {
+ case http.MethodGet:
+ enc.Encode(payload{Level: lvl.Level()})
+ case http.MethodPut:
+ requestedLvl, err := decodePutRequest(r.Header.Get("Content-Type"), r)
+ if err != nil {
+ w.WriteHeader(http.StatusBadRequest)
+ enc.Encode(errorResponse{Error: err.Error()})
+ return
+ }
+ lvl.SetLevel(requestedLvl)
+ enc.Encode(payload{Level: lvl.Level()})
+ default:
+ w.WriteHeader(http.StatusMethodNotAllowed)
+ enc.Encode(errorResponse{
+ Error: "Only GET and PUT are supported.",
+ })
+ }
+}
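+
+// For example, a minimal wiring sketch (the route path and port are
+// illustrative):
+//
+//     lvl := NewAtomicLevel()
+//     http.Handle("/log/level", lvl)
+//     go func() { _ = http.ListenAndServe(":8080", nil) }()
+//     // Then: curl -X PUT localhost:8080/log/level -d level=debug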
+
+// Decodes incoming PUT requests and returns the requested logging level.
+func decodePutRequest(contentType string, r *http.Request) (zapcore.Level, error) {
+ if contentType == "application/x-www-form-urlencoded" {
+ return decodePutURL(r)
+ }
+ return decodePutJSON(r.Body)
+}
+
+func decodePutURL(r *http.Request) (zapcore.Level, error) {
+ lvl := r.FormValue("level")
+ if lvl == "" {
+ return 0, fmt.Errorf("must specify logging level")
+ }
+ var l zapcore.Level
+ if err := l.UnmarshalText([]byte(lvl)); err != nil {
+ return 0, err
+ }
+ return l, nil
+}
+
+func decodePutJSON(body io.Reader) (zapcore.Level, error) {
+ var pld struct {
+ Level *zapcore.Level `json:"level"`
+ }
+ if err := json.NewDecoder(body).Decode(&pld); err != nil {
+ return 0, fmt.Errorf("malformed request body: %v", err)
+ }
+ if pld.Level == nil {
+ return 0, fmt.Errorf("must specify logging level")
+ }
+ return *pld.Level, nil
+
+}
diff --git a/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go
new file mode 100644
index 0000000..dad583a
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go
@@ -0,0 +1,31 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package bufferpool houses zap's shared internal buffer pool. Third-party
+// packages can recreate the same functionality with buffer.NewPool.
+package bufferpool
+
+import "go.uber.org/zap/buffer"
+
+var (
+ _pool = buffer.NewPool()
+ // Get retrieves a buffer from the pool, creating one if necessary.
+ Get = _pool.Get
+)
diff --git a/vendor/go.uber.org/zap/internal/color/color.go b/vendor/go.uber.org/zap/internal/color/color.go
new file mode 100644
index 0000000..c4d5d02
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/color/color.go
@@ -0,0 +1,44 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package color adds coloring functionality for TTY output.
+package color
+
+import "fmt"
+
+// Foreground colors.
+const (
+ Black Color = iota + 30
+ Red
+ Green
+ Yellow
+ Blue
+ Magenta
+ Cyan
+ White
+)
+
+// Color represents a text color.
+type Color uint8
+
+// Add adds the coloring to the given string.
+func (c Color) Add(s string) string {
+ return fmt.Sprintf("\x1b[%dm%s\x1b[0m", uint8(c), s)
+}
diff --git a/vendor/go.uber.org/zap/internal/exit/exit.go b/vendor/go.uber.org/zap/internal/exit/exit.go
new file mode 100644
index 0000000..dfc5b05
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/exit/exit.go
@@ -0,0 +1,64 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package exit provides stubs so that unit tests can exercise code that calls
+// os.Exit(1).
+package exit
+
+import "os"
+
+var real = func() { os.Exit(1) }
+
+// Exit normally terminates the process by calling os.Exit(1). If the package
+// is stubbed, it instead records a call in the testing spy.
+func Exit() {
+ real()
+}
+
+// A StubbedExit is a testing fake for os.Exit.
+type StubbedExit struct {
+ Exited bool
+ prev func()
+}
+
+// Stub substitutes a fake for the call to os.Exit(1).
+func Stub() *StubbedExit {
+ s := &StubbedExit{prev: real}
+ real = s.exit
+ return s
+}
+
+// WithStub runs the supplied function with Exit stubbed. It returns the stub
+// used, so that users can test whether the process would have crashed.
+func WithStub(f func()) *StubbedExit {
+ s := Stub()
+ defer s.Unstub()
+ f()
+ return s
+}
+
+// Unstub restores the previous exit function.
+func (se *StubbedExit) Unstub() {
+ real = se.prev
+}
+
+func (se *StubbedExit) exit() {
+ se.Exited = true
+}
diff --git a/vendor/go.uber.org/zap/level.go b/vendor/go.uber.org/zap/level.go
new file mode 100644
index 0000000..3567a9a
--- /dev/null
+++ b/vendor/go.uber.org/zap/level.go
@@ -0,0 +1,132 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "go.uber.org/atomic"
+ "go.uber.org/zap/zapcore"
+)
+
+const (
+ // DebugLevel logs are typically voluminous, and are usually disabled in
+ // production.
+ DebugLevel = zapcore.DebugLevel
+ // InfoLevel is the default logging priority.
+ InfoLevel = zapcore.InfoLevel
+ // WarnLevel logs are more important than Info, but don't need individual
+ // human review.
+ WarnLevel = zapcore.WarnLevel
+ // ErrorLevel logs are high-priority. If an application is running smoothly,
+ // it shouldn't generate any error-level logs.
+ ErrorLevel = zapcore.ErrorLevel
+ // DPanicLevel logs are particularly important errors. In development the
+ // logger panics after writing the message.
+ DPanicLevel = zapcore.DPanicLevel
+ // PanicLevel logs a message, then panics.
+ PanicLevel = zapcore.PanicLevel
+ // FatalLevel logs a message, then calls os.Exit(1).
+ FatalLevel = zapcore.FatalLevel
+)
+
+// LevelEnablerFunc is a convenient way to implement zapcore.LevelEnabler with
+// an anonymous function.
+//
+// It's particularly useful when splitting log output between different
+// outputs (e.g., standard error and standard out). For sample code, see the
+// package-level AdvancedConfiguration example.
+type LevelEnablerFunc func(zapcore.Level) bool
+
+// Enabled calls the wrapped function.
+func (f LevelEnablerFunc) Enabled(lvl zapcore.Level) bool { return f(lvl) }
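+
+// For example, a sketch of splitting output across two cores (the encoder and
+// the stdout/stderr WriteSyncers are assumed to be constructed elsewhere):
+//
+//     highPriority := LevelEnablerFunc(func(l zapcore.Level) bool {
+//         return l >= zapcore.ErrorLevel
+//     })
+//     lowPriority := LevelEnablerFunc(func(l zapcore.Level) bool {
+//         return l < zapcore.ErrorLevel
+//     })
+//     core := zapcore.NewTee(
+//         zapcore.NewCore(enc, stderrSyncer, highPriority),
+//         zapcore.NewCore(enc, stdoutSyncer, lowPriority),
+//     )
+//     logger := New(core)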
+
+// An AtomicLevel is an atomically changeable, dynamic logging level. It lets
+// you safely change the log level of a tree of loggers (the root logger and
+// any children created by adding context) at runtime.
+//
+// The AtomicLevel itself is an http.Handler that serves a JSON endpoint to
+// alter its level.
+//
+// AtomicLevels must be created with the NewAtomicLevel constructor to allocate
+// their internal atomic pointer.
+type AtomicLevel struct {
+ l *atomic.Int32
+}
+
+// NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging
+// enabled.
+func NewAtomicLevel() AtomicLevel {
+ return AtomicLevel{
+ l: atomic.NewInt32(int32(InfoLevel)),
+ }
+}
+
+// NewAtomicLevelAt is a convenience function that creates an AtomicLevel
+// and then calls SetLevel with the given level.
+func NewAtomicLevelAt(l zapcore.Level) AtomicLevel {
+ a := NewAtomicLevel()
+ a.SetLevel(l)
+ return a
+}
+
+// Enabled implements the zapcore.LevelEnabler interface, which allows the
+// AtomicLevel to be used in place of traditional static levels.
+func (lvl AtomicLevel) Enabled(l zapcore.Level) bool {
+ return lvl.Level().Enabled(l)
+}
+
+// Level returns the minimum enabled log level.
+func (lvl AtomicLevel) Level() zapcore.Level {
+ return zapcore.Level(int8(lvl.l.Load()))
+}
+
+// SetLevel alters the logging level.
+func (lvl AtomicLevel) SetLevel(l zapcore.Level) {
+ lvl.l.Store(int32(l))
+}
+
+// String returns the string representation of the underlying Level.
+func (lvl AtomicLevel) String() string {
+ return lvl.Level().String()
+}
+
+// UnmarshalText unmarshals the text to an AtomicLevel. It uses the same text
+// representations as the static zapcore.Levels ("debug", "info", "warn",
+// "error", "dpanic", "panic", and "fatal").
+func (lvl *AtomicLevel) UnmarshalText(text []byte) error {
+ if lvl.l == nil {
+ lvl.l = &atomic.Int32{}
+ }
+
+ var l zapcore.Level
+ if err := l.UnmarshalText(text); err != nil {
+ return err
+ }
+
+ lvl.SetLevel(l)
+ return nil
+}
+
+// MarshalText marshals the AtomicLevel to a byte slice. It uses the same
+// text representation as the static zapcore.Levels ("debug", "info", "warn",
+// "error", "dpanic", "panic", and "fatal").
+func (lvl AtomicLevel) MarshalText() (text []byte, err error) {
+ return lvl.Level().MarshalText()
+}
diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go
new file mode 100644
index 0000000..f116bd9
--- /dev/null
+++ b/vendor/go.uber.org/zap/logger.go
@@ -0,0 +1,348 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "runtime"
+ "strings"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// A Logger provides fast, leveled, structured logging. All methods are safe
+// for concurrent use.
+//
+// The Logger is designed for contexts in which every microsecond and every
+// allocation matters, so its API intentionally favors performance and type
+// safety over brevity. For most applications, the SugaredLogger strikes a
+// better balance between performance and ergonomics.
+type Logger struct {
+ core zapcore.Core
+
+ development bool
+ addCaller bool
+ onFatal zapcore.CheckWriteAction // default is WriteThenFatal
+
+ name string
+ errorOutput zapcore.WriteSyncer
+
+ addStack zapcore.LevelEnabler
+
+ callerSkip int
+
+ clock zapcore.Clock
+}
+
+// New constructs a new Logger from the provided zapcore.Core and Options. If
+// the passed zapcore.Core is nil, it falls back to using a no-op
+// implementation.
+//
+// This is the most flexible way to construct a Logger, but also the most
+// verbose. For typical use cases, the highly-opinionated presets
+// (NewProduction, NewDevelopment, and NewExample) or the Config struct are
+// more convenient.
+//
+// For sample code, see the package-level AdvancedConfiguration example.
+func New(core zapcore.Core, options ...Option) *Logger {
+ if core == nil {
+ return NewNop()
+ }
+ log := &Logger{
+ core: core,
+ errorOutput: zapcore.Lock(os.Stderr),
+ addStack: zapcore.FatalLevel + 1,
+ clock: zapcore.DefaultClock,
+ }
+ return log.WithOptions(options...)
+}
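+
+// For example, a minimal construction sketch:
+//
+//     enc := zapcore.NewJSONEncoder(NewProductionEncoderConfig())
+//     core := zapcore.NewCore(enc, zapcore.Lock(os.Stdout), InfoLevel)
+//     logger := New(core, AddCaller())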
+
+// NewNop returns a no-op Logger. It never writes out logs or internal errors,
+// and it never runs user-defined hooks.
+//
+// Using WithOptions to replace the Core or error output of a no-op Logger can
+// re-enable logging.
+func NewNop() *Logger {
+ return &Logger{
+ core: zapcore.NewNopCore(),
+ errorOutput: zapcore.AddSync(ioutil.Discard),
+ addStack: zapcore.FatalLevel + 1,
+ clock: zapcore.DefaultClock,
+ }
+}
+
+// NewProduction builds a sensible production Logger that writes InfoLevel and
+// above logs to standard error as JSON.
+//
+// It's a shortcut for NewProductionConfig().Build(...Option).
+func NewProduction(options ...Option) (*Logger, error) {
+ return NewProductionConfig().Build(options...)
+}
+
+// NewDevelopment builds a development Logger that writes DebugLevel and above
+// logs to standard error in a human-friendly format.
+//
+// It's a shortcut for NewDevelopmentConfig().Build(...Option).
+func NewDevelopment(options ...Option) (*Logger, error) {
+ return NewDevelopmentConfig().Build(options...)
+}
+
+// NewExample builds a Logger that's designed for use in zap's testable
+// examples. It writes DebugLevel and above logs to standard out as JSON, but
+// omits the timestamp and calling function to keep example output
+// short and deterministic.
+func NewExample(options ...Option) *Logger {
+ encoderCfg := zapcore.EncoderConfig{
+ MessageKey: "msg",
+ LevelKey: "level",
+ NameKey: "logger",
+ EncodeLevel: zapcore.LowercaseLevelEncoder,
+ EncodeTime: zapcore.ISO8601TimeEncoder,
+ EncodeDuration: zapcore.StringDurationEncoder,
+ }
+ core := zapcore.NewCore(zapcore.NewJSONEncoder(encoderCfg), os.Stdout, DebugLevel)
+ return New(core).WithOptions(options...)
+}
+
+// Sugar wraps the Logger to provide a more ergonomic, but slightly slower,
+// API. Sugaring a Logger is quite inexpensive, so it's reasonable for a
+// single application to use both Loggers and SugaredLoggers, converting
+// between them on the boundaries of performance-sensitive code.
+func (log *Logger) Sugar() *SugaredLogger {
+ core := log.clone()
+ core.callerSkip += 2
+ return &SugaredLogger{core}
+}
+
+// Named adds a new path segment to the logger's name. Segments are joined by
+// periods. By default, Loggers are unnamed.
+func (log *Logger) Named(s string) *Logger {
+ if s == "" {
+ return log
+ }
+ l := log.clone()
+ if log.name == "" {
+ l.name = s
+ } else {
+ l.name = strings.Join([]string{l.name, s}, ".")
+ }
+ return l
+}
+
+// WithOptions clones the current Logger, applies the supplied Options, and
+// returns the resulting Logger. It's safe to use concurrently.
+func (log *Logger) WithOptions(opts ...Option) *Logger {
+ c := log.clone()
+ for _, opt := range opts {
+ opt.apply(c)
+ }
+ return c
+}
+
+// With creates a child logger and adds structured context to it. Fields added
+// to the child don't affect the parent, and vice versa.
+func (log *Logger) With(fields ...Field) *Logger {
+ if len(fields) == 0 {
+ return log
+ }
+ l := log.clone()
+ l.core = l.core.With(fields)
+ return l
+}
+
+// Check returns a CheckedEntry if logging a message at the specified level
+// is enabled. It's a completely optional optimization; in high-performance
+// applications, Check can help avoid allocating a slice to hold fields.
+func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
+ return log.check(lvl, msg)
+}
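+
+// For example (expensiveDetail is a hypothetical, costly-to-compute value):
+//
+//     if ce := logger.Check(DebugLevel, "debug diagnostics"); ce != nil {
+//         ce.Write(String("detail", expensiveDetail()))
+//     }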
+
+// Debug logs a message at DebugLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+func (log *Logger) Debug(msg string, fields ...Field) {
+ if ce := log.check(DebugLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// Info logs a message at InfoLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+func (log *Logger) Info(msg string, fields ...Field) {
+ if ce := log.check(InfoLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// Warn logs a message at WarnLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+func (log *Logger) Warn(msg string, fields ...Field) {
+ if ce := log.check(WarnLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// Error logs a message at ErrorLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+func (log *Logger) Error(msg string, fields ...Field) {
+ if ce := log.check(ErrorLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// DPanic logs a message at DPanicLevel. The message includes any fields
+// passed at the log site, as well as any fields accumulated on the logger.
+//
+// If the logger is in development mode, it then panics (DPanic means
+// "development panic"). This is useful for catching errors that are
+// recoverable, but shouldn't ever happen.
+func (log *Logger) DPanic(msg string, fields ...Field) {
+ if ce := log.check(DPanicLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// Panic logs a message at PanicLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+//
+// The logger then panics, even if logging at PanicLevel is disabled.
+func (log *Logger) Panic(msg string, fields ...Field) {
+ if ce := log.check(PanicLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// Fatal logs a message at FatalLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+//
+// The logger then calls os.Exit(1), even if logging at FatalLevel is
+// disabled.
+func (log *Logger) Fatal(msg string, fields ...Field) {
+ if ce := log.check(FatalLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// Sync calls the underlying Core's Sync method, flushing any buffered log
+// entries. Applications should take care to call Sync before exiting.
+func (log *Logger) Sync() error {
+ return log.core.Sync()
+}
+
+// Core returns the Logger's underlying zapcore.Core.
+func (log *Logger) Core() zapcore.Core {
+ return log.core
+}
+
+func (log *Logger) clone() *Logger {
+ copy := *log
+ return &copy
+}
+
+func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
+ // check must always be called directly by a method in the Logger interface
+ // (e.g., Check, Info, Fatal).
+ const callerSkipOffset = 2
+
+ // Check the level first to reduce the cost of disabled log calls.
+ // Since Panic and higher may exit, we skip the optimization for those levels.
+ if lvl < zapcore.DPanicLevel && !log.core.Enabled(lvl) {
+ return nil
+ }
+
+ // Create a basic checked entry through the core; this will be non-nil if
+ // the log message will actually be written somewhere.
+ ent := zapcore.Entry{
+ LoggerName: log.name,
+ Time: log.clock.Now(),
+ Level: lvl,
+ Message: msg,
+ }
+ ce := log.core.Check(ent, nil)
+ willWrite := ce != nil
+
+ // Set up any required terminal behavior.
+ switch ent.Level {
+ case zapcore.PanicLevel:
+ ce = ce.Should(ent, zapcore.WriteThenPanic)
+ case zapcore.FatalLevel:
+ onFatal := log.onFatal
+ // Noop is the default value for CheckWriteAction, and it leads to
+ // continued execution after a Fatal, which is unexpected.
+ if onFatal == zapcore.WriteThenNoop {
+ onFatal = zapcore.WriteThenFatal
+ }
+ ce = ce.Should(ent, onFatal)
+ case zapcore.DPanicLevel:
+ if log.development {
+ ce = ce.Should(ent, zapcore.WriteThenPanic)
+ }
+ }
+
+ // Only do further annotation if we're going to write this message; checked
+ // entries that exist only for terminal behavior don't benefit from
+ // annotation.
+ if !willWrite {
+ return ce
+ }
+
+ // Thread the error output through to the CheckedEntry.
+ ce.ErrorOutput = log.errorOutput
+ if log.addCaller {
+ frame, defined := getCallerFrame(log.callerSkip + callerSkipOffset)
+ if !defined {
+ fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC())
+ log.errorOutput.Sync()
+ }
+
+ ce.Entry.Caller = zapcore.EntryCaller{
+ Defined: defined,
+ PC: frame.PC,
+ File: frame.File,
+ Line: frame.Line,
+ Function: frame.Function,
+ }
+ }
+ if log.addStack.Enabled(ce.Entry.Level) {
+ ce.Entry.Stack = StackSkip("", log.callerSkip+callerSkipOffset).String
+ }
+
+ return ce
+}
+
+// getCallerFrame gets the caller's frame. The argument skip is the number of stack
+// frames to ascend, with 0 identifying the caller of getCallerFrame. The
+// boolean ok is false if it was not possible to recover the information.
+//
+// Note: This implementation is similar to runtime.Caller, but it returns the whole frame.
+func getCallerFrame(skip int) (frame runtime.Frame, ok bool) {
+ const skipOffset = 2 // skip getCallerFrame and Callers
+
+ pc := make([]uintptr, 1)
+ numFrames := runtime.Callers(skip+skipOffset, pc)
+ if numFrames < 1 {
+ return
+ }
+
+ frame, _ = runtime.CallersFrames(pc).Next()
+ return frame, frame.PC != 0
+}
diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go
new file mode 100644
index 0000000..e9e6616
--- /dev/null
+++ b/vendor/go.uber.org/zap/options.go
@@ -0,0 +1,148 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "fmt"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// An Option configures a Logger.
+type Option interface {
+ apply(*Logger)
+}
+
+// optionFunc wraps a func so it satisfies the Option interface.
+type optionFunc func(*Logger)
+
+func (f optionFunc) apply(log *Logger) {
+ f(log)
+}
+
+// WrapCore wraps or replaces the Logger's underlying zapcore.Core.
+func WrapCore(f func(zapcore.Core) zapcore.Core) Option {
+ return optionFunc(func(log *Logger) {
+ log.core = f(log.core)
+ })
+}
+
+// Hooks registers functions which will be called each time the Logger writes
+// out an Entry. Repeated use of Hooks is additive.
+//
+// Hooks are useful for simple side effects, like capturing metrics for the
+// number of emitted logs. More complex side effects, including anything that
+// requires access to the Entry's structured fields, should be implemented as
+// a zapcore.Core instead. See zapcore.RegisterHooks for details.
+func Hooks(hooks ...func(zapcore.Entry) error) Option {
+ return optionFunc(func(log *Logger) {
+ log.core = zapcore.RegisterHooks(log.core, hooks...)
+ })
+}
+
+// Fields adds fields to the Logger.
+func Fields(fs ...Field) Option {
+ return optionFunc(func(log *Logger) {
+ log.core = log.core.With(fs)
+ })
+}
+
+// ErrorOutput sets the destination for errors generated by the Logger. Note
+// that this option only affects internal errors; for sample code that sends
+// error-level logs to a different location from info- and debug-level logs,
+// see the package-level AdvancedConfiguration example.
+//
+// The supplied WriteSyncer must be safe for concurrent use. The Open and
+// zapcore.Lock functions are the simplest ways to protect files with a mutex.
+func ErrorOutput(w zapcore.WriteSyncer) Option {
+ return optionFunc(func(log *Logger) {
+ log.errorOutput = w
+ })
+}
+
+// Development puts the logger in development mode, which makes DPanic-level
+// logs panic instead of simply logging an error.
+func Development() Option {
+ return optionFunc(func(log *Logger) {
+ log.development = true
+ })
+}
+
+// AddCaller configures the Logger to annotate each message with the filename,
+// line number, and function name of zap's caller. See also WithCaller.
+func AddCaller() Option {
+ return WithCaller(true)
+}
+
+// WithCaller configures the Logger to annotate each message with the filename,
+// line number, and function name of zap's caller, or not, depending on the
+// value of enabled. This is a generalized form of AddCaller.
+func WithCaller(enabled bool) Option {
+ return optionFunc(func(log *Logger) {
+ log.addCaller = enabled
+ })
+}
+
+// AddCallerSkip increases the number of callers skipped by caller annotation
+// (as enabled by the AddCaller option). When building wrappers around the
+// Logger and SugaredLogger, supplying this Option prevents zap from always
+// reporting the wrapper code as the caller.
+func AddCallerSkip(skip int) Option {
+ return optionFunc(func(log *Logger) {
+ log.callerSkip += skip
+ })
+}
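+
+// For example, a sketch of a thin wrapper that skips its own frame so that
+// callers of the wrapper are reported (the appLogger type is hypothetical):
+//
+//     type appLogger struct{ l *Logger }
+//
+//     func newAppLogger(l *Logger) *appLogger {
+//         return &appLogger{l: l.WithOptions(AddCallerSkip(1))}
+//     }
+//
+//     func (a *appLogger) Info(msg string) { a.l.Info(msg) }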
+
+// AddStacktrace configures the Logger to record a stack trace for all messages at
+// or above a given level.
+func AddStacktrace(lvl zapcore.LevelEnabler) Option {
+ return optionFunc(func(log *Logger) {
+ log.addStack = lvl
+ })
+}
+
+// IncreaseLevel increases the level of the logger. It has no effect if
+// the passed in level tries to decrease the level of the logger.
+func IncreaseLevel(lvl zapcore.LevelEnabler) Option {
+ return optionFunc(func(log *Logger) {
+ core, err := zapcore.NewIncreaseLevelCore(log.core, lvl)
+ if err != nil {
+ fmt.Fprintf(log.errorOutput, "failed to IncreaseLevel: %v\n", err)
+ } else {
+ log.core = core
+ }
+ })
+}
+
+// OnFatal sets the action to take on fatal logs.
+func OnFatal(action zapcore.CheckWriteAction) Option {
+ return optionFunc(func(log *Logger) {
+ log.onFatal = action
+ })
+}
+
+// WithClock specifies the clock used by the logger to determine the current
+// time for logged entries. Defaults to the system clock with time.Now.
+func WithClock(clock zapcore.Clock) Option {
+ return optionFunc(func(log *Logger) {
+ log.clock = clock
+ })
+}
diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go
new file mode 100644
index 0000000..df46fa8
--- /dev/null
+++ b/vendor/go.uber.org/zap/sink.go
@@ -0,0 +1,161 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/url"
+ "os"
+ "strings"
+ "sync"
+
+ "go.uber.org/zap/zapcore"
+)
+
+const schemeFile = "file"
+
+var (
+ _sinkMutex sync.RWMutex
+ _sinkFactories map[string]func(*url.URL) (Sink, error) // keyed by scheme
+)
+
+func init() {
+ resetSinkRegistry()
+}
+
+func resetSinkRegistry() {
+ _sinkMutex.Lock()
+ defer _sinkMutex.Unlock()
+
+ _sinkFactories = map[string]func(*url.URL) (Sink, error){
+ schemeFile: newFileSink,
+ }
+}
+
+// Sink defines the interface to write to and close logger destinations.
+type Sink interface {
+ zapcore.WriteSyncer
+ io.Closer
+}
+
+type nopCloserSink struct{ zapcore.WriteSyncer }
+
+func (nopCloserSink) Close() error { return nil }
+
+type errSinkNotFound struct {
+ scheme string
+}
+
+func (e *errSinkNotFound) Error() string {
+ return fmt.Sprintf("no sink found for scheme %q", e.scheme)
+}
+
+// RegisterSink registers a user-supplied factory for all sinks with a
+// particular scheme.
+//
+// All schemes must be ASCII, valid under section 3.1 of RFC 3986
+// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already
+// have a factory registered. Zap automatically registers a factory for the
+// "file" scheme.
+func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
+ _sinkMutex.Lock()
+ defer _sinkMutex.Unlock()
+
+ if scheme == "" {
+ return errors.New("can't register a sink factory for empty string")
+ }
+ normalized, err := normalizeScheme(scheme)
+ if err != nil {
+ return fmt.Errorf("%q is not a valid scheme: %v", scheme, err)
+ }
+ if _, ok := _sinkFactories[normalized]; ok {
+ return fmt.Errorf("sink factory already registered for scheme %q", normalized)
+ }
+ _sinkFactories[normalized] = factory
+ return nil
+}
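+
+// For example, a sketch of registering an in-memory sink (the "memory" scheme
+// and the memSink type are hypothetical):
+//
+//     type memSink struct{ *bytes.Buffer }
+//
+//     func (memSink) Close() error { return nil }
+//     func (memSink) Sync() error  { return nil }
+//
+//     err := RegisterSink("memory", func(*url.URL) (Sink, error) {
+//         return memSink{new(bytes.Buffer)}, nil
+//     })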
+
+func newSink(rawURL string) (Sink, error) {
+ u, err := url.Parse(rawURL)
+ if err != nil {
+ return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err)
+ }
+ if u.Scheme == "" {
+ u.Scheme = schemeFile
+ }
+
+ _sinkMutex.RLock()
+ factory, ok := _sinkFactories[u.Scheme]
+ _sinkMutex.RUnlock()
+ if !ok {
+ return nil, &errSinkNotFound{u.Scheme}
+ }
+ return factory(u)
+}
+
+func newFileSink(u *url.URL) (Sink, error) {
+ if u.User != nil {
+ return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u)
+ }
+ if u.Fragment != "" {
+ return nil, fmt.Errorf("fragments not allowed with file URLs: got %v", u)
+ }
+ if u.RawQuery != "" {
+ return nil, fmt.Errorf("query parameters not allowed with file URLs: got %v", u)
+ }
+ // Error messages are better if we check hostname and port separately.
+ if u.Port() != "" {
+ return nil, fmt.Errorf("ports not allowed with file URLs: got %v", u)
+ }
+ if hn := u.Hostname(); hn != "" && hn != "localhost" {
+ return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u)
+ }
+ switch u.Path {
+ case "stdout":
+ return nopCloserSink{os.Stdout}, nil
+ case "stderr":
+ return nopCloserSink{os.Stderr}, nil
+ }
+ return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
+}
+
+func normalizeScheme(s string) (string, error) {
+ // https://tools.ietf.org/html/rfc3986#section-3.1
+ s = strings.ToLower(s)
+ if first := s[0]; 'a' > first || 'z' < first {
+ return "", errors.New("must start with a letter")
+ }
+ for i := 1; i < len(s); i++ { // iterate over bytes, not runes
+ c := s[i]
+ switch {
+ case 'a' <= c && c <= 'z':
+ continue
+ case '0' <= c && c <= '9':
+ continue
+ case c == '.' || c == '+' || c == '-':
+ continue
+ }
+ return "", fmt.Errorf("may not contain %q", c)
+ }
+ return s, nil
+}
diff --git a/vendor/go.uber.org/zap/stacktrace.go b/vendor/go.uber.org/zap/stacktrace.go
new file mode 100644
index 0000000..0cf8c1d
--- /dev/null
+++ b/vendor/go.uber.org/zap/stacktrace.go
@@ -0,0 +1,85 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "runtime"
+ "sync"
+
+ "go.uber.org/zap/internal/bufferpool"
+)
+
+var (
+ _stacktracePool = sync.Pool{
+ New: func() interface{} {
+ return newProgramCounters(64)
+ },
+ }
+)
+
+func takeStacktrace(skip int) string {
+ buffer := bufferpool.Get()
+ defer buffer.Free()
+ programCounters := _stacktracePool.Get().(*programCounters)
+ defer _stacktracePool.Put(programCounters)
+
+ var numFrames int
+ for {
+ // Skip the call to runtime.Callers and takeStacktrace so that the
+ // program counters start at the caller of takeStacktrace.
+ numFrames = runtime.Callers(skip+2, programCounters.pcs)
+ if numFrames < len(programCounters.pcs) {
+ break
+ }
+ // Don't put the too-short counter slice back into the pool; this lets
+ // the pool adjust if we consistently take deep stacktraces.
+ programCounters = newProgramCounters(len(programCounters.pcs) * 2)
+ }
+
+ i := 0
+ frames := runtime.CallersFrames(programCounters.pcs[:numFrames])
+
+ // Note: On the last iteration, frames.Next() returns false, with a valid
+ // frame, but we ignore this frame. The last frame is a runtime frame which
+ // adds noise, since it's only either runtime.main or runtime.goexit.
+ for frame, more := frames.Next(); more; frame, more = frames.Next() {
+ if i != 0 {
+ buffer.AppendByte('\n')
+ }
+ i++
+ buffer.AppendString(frame.Function)
+ buffer.AppendByte('\n')
+ buffer.AppendByte('\t')
+ buffer.AppendString(frame.File)
+ buffer.AppendByte(':')
+ buffer.AppendInt(int64(frame.Line))
+ }
+
+ return buffer.String()
+}
+
+type programCounters struct {
+ pcs []uintptr
+}
+
+func newProgramCounters(size int) *programCounters {
+ return &programCounters{make([]uintptr, size)}
+}
diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go
new file mode 100644
index 0000000..0b96519
--- /dev/null
+++ b/vendor/go.uber.org/zap/sugar.go
@@ -0,0 +1,315 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "fmt"
+
+ "go.uber.org/zap/zapcore"
+
+ "go.uber.org/multierr"
+)
+
+const (
+ _oddNumberErrMsg = "Ignored key without a value."
+ _nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys."
+)
+
+// A SugaredLogger wraps the base Logger functionality in a slower, but less
+// verbose, API. Any Logger can be converted to a SugaredLogger with its Sugar
+// method.
+//
+// Unlike the Logger, the SugaredLogger doesn't insist on structured logging.
+// For each log level, it exposes three methods: one for loosely-typed
+// structured logging, one for println-style formatting, and one for
+// printf-style formatting. For example, SugaredLoggers can produce InfoLevel
+// output with Infow ("info with" structured context), Info, or Infof.
+type SugaredLogger struct {
+ base *Logger
+}
+
+// Desugar unwraps a SugaredLogger, exposing the original Logger. Desugaring
+// is quite inexpensive, so it's reasonable for a single application to use
+// both Loggers and SugaredLoggers, converting between them on the boundaries
+// of performance-sensitive code.
+func (s *SugaredLogger) Desugar() *Logger {
+ base := s.base.clone()
+ base.callerSkip -= 2
+ return base
+}
+
+// Named adds a sub-scope to the logger's name. See Logger.Named for details.
+func (s *SugaredLogger) Named(name string) *SugaredLogger {
+ return &SugaredLogger{base: s.base.Named(name)}
+}
+
+// With adds a variadic number of fields to the logging context. It accepts a
+// mix of strongly-typed Field objects and loosely-typed key-value pairs. When
+// processing pairs, the first element of the pair is used as the field key
+// and the second as the field value.
+//
+// For example,
+// sugaredLogger.With(
+// "hello", "world",
+// "failure", errors.New("oh no"),
+// Stack(),
+// "count", 42,
+// "user", User{Name: "alice"},
+// )
+// is the equivalent of
+// unsugared.With(
+// String("hello", "world"),
+// String("failure", "oh no"),
+// Stack(),
+// Int("count", 42),
+// Object("user", User{Name: "alice"}),
+// )
+//
+// Note that the keys in key-value pairs should be strings. In development,
+// passing a non-string key panics. In production, the logger is more
+// forgiving: a separate error is logged, but the key-value pair is skipped
+// and execution continues. Passing an orphaned key triggers similar behavior:
+// panics in development and errors in production.
+func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger {
+ return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)}
+}
+
+// Debug uses fmt.Sprint to construct and log a message.
+func (s *SugaredLogger) Debug(args ...interface{}) {
+ s.log(DebugLevel, "", args, nil)
+}
+
+// Info uses fmt.Sprint to construct and log a message.
+func (s *SugaredLogger) Info(args ...interface{}) {
+ s.log(InfoLevel, "", args, nil)
+}
+
+// Warn uses fmt.Sprint to construct and log a message.
+func (s *SugaredLogger) Warn(args ...interface{}) {
+ s.log(WarnLevel, "", args, nil)
+}
+
+// Error uses fmt.Sprint to construct and log a message.
+func (s *SugaredLogger) Error(args ...interface{}) {
+ s.log(ErrorLevel, "", args, nil)
+}
+
+// DPanic uses fmt.Sprint to construct and log a message. In development, the
+// logger then panics. (See DPanicLevel for details.)
+func (s *SugaredLogger) DPanic(args ...interface{}) {
+ s.log(DPanicLevel, "", args, nil)
+}
+
+// Panic uses fmt.Sprint to construct and log a message, then panics.
+func (s *SugaredLogger) Panic(args ...interface{}) {
+ s.log(PanicLevel, "", args, nil)
+}
+
+// Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit.
+func (s *SugaredLogger) Fatal(args ...interface{}) {
+ s.log(FatalLevel, "", args, nil)
+}
+
+// Debugf uses fmt.Sprintf to log a templated message.
+func (s *SugaredLogger) Debugf(template string, args ...interface{}) {
+ s.log(DebugLevel, template, args, nil)
+}
+
+// Infof uses fmt.Sprintf to log a templated message.
+func (s *SugaredLogger) Infof(template string, args ...interface{}) {
+ s.log(InfoLevel, template, args, nil)
+}
+
+// Warnf uses fmt.Sprintf to log a templated message.
+func (s *SugaredLogger) Warnf(template string, args ...interface{}) {
+ s.log(WarnLevel, template, args, nil)
+}
+
+// Errorf uses fmt.Sprintf to log a templated message.
+func (s *SugaredLogger) Errorf(template string, args ...interface{}) {
+ s.log(ErrorLevel, template, args, nil)
+}
+
+// DPanicf uses fmt.Sprintf to log a templated message. In development, the
+// logger then panics. (See DPanicLevel for details.)
+func (s *SugaredLogger) DPanicf(template string, args ...interface{}) {
+ s.log(DPanicLevel, template, args, nil)
+}
+
+// Panicf uses fmt.Sprintf to log a templated message, then panics.
+func (s *SugaredLogger) Panicf(template string, args ...interface{}) {
+ s.log(PanicLevel, template, args, nil)
+}
+
+// Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit.
+func (s *SugaredLogger) Fatalf(template string, args ...interface{}) {
+ s.log(FatalLevel, template, args, nil)
+}
+
+// Debugw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+//
+// When debug-level logging is disabled, this is much faster than
+// s.With(keysAndValues).Debug(msg)
+func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) {
+ s.log(DebugLevel, msg, nil, keysAndValues)
+}
+
+// Infow logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) Infow(msg string, keysAndValues ...interface{}) {
+ s.log(InfoLevel, msg, nil, keysAndValues)
+}
+
+// Warnw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) Warnw(msg string, keysAndValues ...interface{}) {
+ s.log(WarnLevel, msg, nil, keysAndValues)
+}
+
+// Errorw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) Errorw(msg string, keysAndValues ...interface{}) {
+ s.log(ErrorLevel, msg, nil, keysAndValues)
+}
+
+// DPanicw logs a message with some additional context. In development, the
+// logger then panics. (See DPanicLevel for details.) The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) DPanicw(msg string, keysAndValues ...interface{}) {
+ s.log(DPanicLevel, msg, nil, keysAndValues)
+}
+
+// Panicw logs a message with some additional context, then panics. The
+// variadic key-value pairs are treated as they are in With.
+func (s *SugaredLogger) Panicw(msg string, keysAndValues ...interface{}) {
+ s.log(PanicLevel, msg, nil, keysAndValues)
+}
+
+// Fatalw logs a message with some additional context, then calls os.Exit. The
+// variadic key-value pairs are treated as they are in With.
+func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) {
+ s.log(FatalLevel, msg, nil, keysAndValues)
+}
+
+// Sync flushes any buffered log entries.
+func (s *SugaredLogger) Sync() error {
+ return s.base.Sync()
+}
+
+func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) {
+ // If logging at this level is completely disabled, skip the overhead of
+ // string formatting.
+ if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) {
+ return
+ }
+
+ msg := getMessage(template, fmtArgs)
+ if ce := s.base.Check(lvl, msg); ce != nil {
+ ce.Write(s.sweetenFields(context)...)
+ }
+}
+
+// getMessage formats a message with Sprint, Sprintf, or neither.
+func getMessage(template string, fmtArgs []interface{}) string {
+ if len(fmtArgs) == 0 {
+ return template
+ }
+
+ if template != "" {
+ return fmt.Sprintf(template, fmtArgs...)
+ }
+
+ if len(fmtArgs) == 1 {
+ if str, ok := fmtArgs[0].(string); ok {
+ return str
+ }
+ }
+ return fmt.Sprint(fmtArgs...)
+}
+
+func (s *SugaredLogger) sweetenFields(args []interface{}) []Field {
+ if len(args) == 0 {
+ return nil
+ }
+
+ // Allocate enough space for the worst case; if users pass only structured
+ // fields, we shouldn't penalize them with extra allocations.
+ fields := make([]Field, 0, len(args))
+ var invalid invalidPairs
+
+ for i := 0; i < len(args); {
+ // This is a strongly-typed field. Consume it and move on.
+ if f, ok := args[i].(Field); ok {
+ fields = append(fields, f)
+ i++
+ continue
+ }
+
+ // Make sure this element isn't a dangling key.
+ if i == len(args)-1 {
+ s.base.Error(_oddNumberErrMsg, Any("ignored", args[i]))
+ break
+ }
+
+ // Consume this value and the next, treating them as a key-value pair. If the
+ // key isn't a string, add this pair to the slice of invalid pairs.
+ key, val := args[i], args[i+1]
+ if keyStr, ok := key.(string); !ok {
+ // Subsequent errors are likely, so allocate once up front.
+ if cap(invalid) == 0 {
+ invalid = make(invalidPairs, 0, len(args)/2)
+ }
+ invalid = append(invalid, invalidPair{i, key, val})
+ } else {
+ fields = append(fields, Any(keyStr, val))
+ }
+ i += 2
+ }
+
+ // If we encountered any invalid key-value pairs, log an error.
+ if len(invalid) > 0 {
+ s.base.Error(_nonStringKeyErrMsg, Array("invalid", invalid))
+ }
+ return fields
+}
+
+type invalidPair struct {
+ position int
+ key, value interface{}
+}
+
+func (p invalidPair) MarshalLogObject(enc zapcore.ObjectEncoder) error {
+ enc.AddInt64("position", int64(p.position))
+ Any("key", p.key).AddTo(enc)
+ Any("value", p.value).AddTo(enc)
+ return nil
+}
+
+type invalidPairs []invalidPair
+
+func (ps invalidPairs) MarshalLogArray(enc zapcore.ArrayEncoder) error {
+ var err error
+ for i := range ps {
+ err = multierr.Append(err, enc.AppendObject(ps[i]))
+ }
+ return err
+}
diff --git a/vendor/go.uber.org/zap/time.go b/vendor/go.uber.org/zap/time.go
new file mode 100644
index 0000000..c5a1f16
--- /dev/null
+++ b/vendor/go.uber.org/zap/time.go
@@ -0,0 +1,27 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import "time"
+
+func timeToMillis(t time.Time) int64 {
+ return t.UnixNano() / int64(time.Millisecond)
+}
diff --git a/vendor/go.uber.org/zap/writer.go b/vendor/go.uber.org/zap/writer.go
new file mode 100644
index 0000000..86a709a
--- /dev/null
+++ b/vendor/go.uber.org/zap/writer.go
@@ -0,0 +1,99 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+
+ "go.uber.org/zap/zapcore"
+
+ "go.uber.org/multierr"
+)
+
+// Open is a high-level wrapper that takes a variadic number of URLs, opens or
+// creates each of the specified resources, and combines them into a locked
+// WriteSyncer. It also returns any error encountered and a function to close
+// any opened files.
+//
+// Passing no URLs returns a no-op WriteSyncer. Zap handles URLs without a
+// scheme and URLs with the "file" scheme. Third-party code may register
+// factories for other schemes using RegisterSink.
+//
+// URLs with the "file" scheme must use absolute paths on the local
+// filesystem. No user, password, port, fragments, or query parameters are
+// allowed, and the hostname must be empty or "localhost".
+//
+// Since it's common to write logs to the local filesystem, URLs without a
+// scheme (e.g., "/var/log/foo.log") are treated as local file paths. Without
+// a scheme, the special paths "stdout" and "stderr" are interpreted as
+// os.Stdout and os.Stderr. When specified without a scheme, relative file
+// paths also work.
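+//
+// A minimal usage sketch (the log path is illustrative):
+//
+//    ws, closeFn, err := Open("stdout", "/var/log/app.log")
+//    if err != nil {
+//        return err
+//    }
+//    defer closeFn()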
+func Open(paths ...string) (zapcore.WriteSyncer, func(), error) {
+ writers, close, err := open(paths)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ writer := CombineWriteSyncers(writers...)
+ return writer, close, nil
+}
+
+func open(paths []string) ([]zapcore.WriteSyncer, func(), error) {
+ writers := make([]zapcore.WriteSyncer, 0, len(paths))
+ closers := make([]io.Closer, 0, len(paths))
+ close := func() {
+ for _, c := range closers {
+ c.Close()
+ }
+ }
+
+ var openErr error
+ for _, path := range paths {
+ sink, err := newSink(path)
+ if err != nil {
+ openErr = multierr.Append(openErr, fmt.Errorf("couldn't open sink %q: %v", path, err))
+ continue
+ }
+ writers = append(writers, sink)
+ closers = append(closers, sink)
+ }
+ if openErr != nil {
+ close()
+ return writers, nil, openErr
+ }
+
+ return writers, close, nil
+}
+
+// CombineWriteSyncers is a utility that combines multiple WriteSyncers into a
+// single, locked WriteSyncer. If no inputs are supplied, it returns a no-op
+// WriteSyncer.
+//
+// It's provided purely as a convenience; the result is no different from
+// using zapcore.NewMultiWriteSyncer and zapcore.Lock individually.
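+//
+// For example (a sketch; stdoutSyncer and fileSyncer are assumed to be
+// existing zapcore.WriteSyncers):
+//
+//    ws := CombineWriteSyncers(stdoutSyncer, fileSyncer)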
+func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer {
+ if len(writers) == 0 {
+ return zapcore.AddSync(ioutil.Discard)
+ }
+ return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...))
+}
diff --git a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go
new file mode 100644
index 0000000..ef2f7d9
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go
@@ -0,0 +1,188 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "bufio"
+ "sync"
+ "time"
+
+ "go.uber.org/multierr"
+)
+
+const (
+ // _defaultBufferSize specifies the default size used by Buffer.
+ _defaultBufferSize = 256 * 1024 // 256 kB
+
+ // _defaultFlushInterval specifies the default flush interval for
+ // Buffer.
+ _defaultFlushInterval = 30 * time.Second
+)
+
+// A BufferedWriteSyncer is a WriteSyncer that buffers writes in-memory before
+// flushing them to a wrapped WriteSyncer after reaching some limit, or at some
+// fixed interval--whichever comes first.
+//
+// BufferedWriteSyncer is safe for concurrent use. You don't need to use
+// zapcore.Lock for WriteSyncers with BufferedWriteSyncer.
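+//
+// A minimal usage sketch (the buffer size and flush interval are illustrative):
+//
+//    ws := &BufferedWriteSyncer{
+//        WS:            AddSync(os.Stdout),
+//        Size:          512 * 1024, // 512 kB
+//        FlushInterval: 10 * time.Second,
+//    }
+//    defer ws.Stop()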
+type BufferedWriteSyncer struct {
+ // WS is the WriteSyncer around which BufferedWriteSyncer will buffer
+ // writes.
+ //
+ // This field is required.
+ WS WriteSyncer
+
+ // Size specifies the maximum amount of data the writer will buffer
+ // before flushing.
+ //
+ // Defaults to 256 kB if unspecified.
+ Size int
+
+ // FlushInterval specifies how often the writer should flush data if
+ // there have been no writes.
+ //
+ // Defaults to 30 seconds if unspecified.
+ FlushInterval time.Duration
+
+ // Clock, if specified, provides control of the source of time for the
+ // writer.
+ //
+ // Defaults to the system clock.
+ Clock Clock
+
+ // unexported fields for state
+ mu sync.Mutex
+ initialized bool // whether initialize() has run
+ stopped bool // whether Stop() has run
+ writer *bufio.Writer
+ ticker *time.Ticker
+ stop chan struct{} // closed when flushLoop should stop
+ done chan struct{} // closed when flushLoop has stopped
+}
+
+func (s *BufferedWriteSyncer) initialize() {
+ size := s.Size
+ if size == 0 {
+ size = _defaultBufferSize
+ }
+
+ flushInterval := s.FlushInterval
+ if flushInterval == 0 {
+ flushInterval = _defaultFlushInterval
+ }
+
+ if s.Clock == nil {
+ s.Clock = DefaultClock
+ }
+
+ s.ticker = s.Clock.NewTicker(flushInterval)
+ s.writer = bufio.NewWriterSize(s.WS, size)
+ s.stop = make(chan struct{})
+ s.done = make(chan struct{})
+ s.initialized = true
+ go s.flushLoop()
+}
+
+// Write writes log data into the buffer. Multiple Write calls are batched,
+// and the buffered data is flushed to the underlying WriteSyncer when the
+// buffer fills up or at the configured flush interval, whichever comes first.
+func (s *BufferedWriteSyncer) Write(bs []byte) (int, error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if !s.initialized {
+ s.initialize()
+ }
+
+ // To avoid partial writes from being flushed, we manually flush the existing buffer if:
+ // * The current write doesn't fit into the buffer fully, and
+ // * The buffer is not empty (since bufio will not split large writes when the buffer is empty)
+ if len(bs) > s.writer.Available() && s.writer.Buffered() > 0 {
+ if err := s.writer.Flush(); err != nil {
+ return 0, err
+ }
+ }
+
+ return s.writer.Write(bs)
+}
+
+// Sync flushes any buffered data and syncs the underlying WriteSyncer.
+func (s *BufferedWriteSyncer) Sync() error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ var err error
+ if s.initialized {
+ err = s.writer.Flush()
+ }
+
+ return multierr.Append(err, s.WS.Sync())
+}
+
+// flushLoop flushes the buffer at the configured interval until Stop is
+// called.
+func (s *BufferedWriteSyncer) flushLoop() {
+ defer close(s.done)
+
+ for {
+ select {
+ case <-s.ticker.C:
+ // Ignore the error here: the underlying bufio.Writer retains it, and
+ // it is surfaced by the Sync call made during Stop.
+ _ = s.Sync()
+ case <-s.stop:
+ return
+ }
+ }
+}
+
+// Stop closes the buffer, cleans up background goroutines, and flushes
+// remaining unwritten data.
+func (s *BufferedWriteSyncer) Stop() (err error) {
+ var stopped bool
+
+ // Critical section.
+ func() {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if !s.initialized {
+ return
+ }
+
+ stopped = s.stopped
+ if stopped {
+ return
+ }
+ s.stopped = true
+
+ s.ticker.Stop()
+ close(s.stop) // tell flushLoop to stop
+ <-s.done // and wait until it has
+ }()
+
+ // Don't call Sync on consecutive Stops.
+ if !stopped {
+ err = s.Sync()
+ }
+
+ return err
+}
diff --git a/vendor/go.uber.org/zap/zapcore/clock.go b/vendor/go.uber.org/zap/zapcore/clock.go
new file mode 100644
index 0000000..d2ea95b
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/clock.go
@@ -0,0 +1,50 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "time"
+)
+
+// DefaultClock is the default clock used by Zap in operations that require
+// time. This clock uses the system clock for all operations.
+var DefaultClock = systemClock{}
+
+// Clock is a source of time for logged entries.
+type Clock interface {
+ // Now returns the current local time.
+ Now() time.Time
+
+ // NewTicker returns a *time.Ticker that holds a channel
+ // that delivers "ticks" of a clock.
+ NewTicker(time.Duration) *time.Ticker
+}
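+
+// An illustrative custom Clock for tests (constClock is not part of this
+// package): Now returns a fixed time, and NewTicker delegates to the
+// standard library.
+//
+//    type constClock struct{ t time.Time }
+//
+//    func (c constClock) Now() time.Time { return c.t }
+//
+//    func (c constClock) NewTicker(d time.Duration) *time.Ticker {
+//        return time.NewTicker(d)
+//    }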
+
+// systemClock implements default Clock that uses system time.
+type systemClock struct{}
+
+func (systemClock) Now() time.Time {
+ return time.Now()
+}
+
+func (systemClock) NewTicker(duration time.Duration) *time.Ticker {
+ return time.NewTicker(duration)
+}
diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go
new file mode 100644
index 0000000..2307af4
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go
@@ -0,0 +1,161 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "fmt"
+ "sync"
+
+ "go.uber.org/zap/buffer"
+ "go.uber.org/zap/internal/bufferpool"
+)
+
+var _sliceEncoderPool = sync.Pool{
+ New: func() interface{} {
+ return &sliceArrayEncoder{elems: make([]interface{}, 0, 2)}
+ },
+}
+
+func getSliceEncoder() *sliceArrayEncoder {
+ return _sliceEncoderPool.Get().(*sliceArrayEncoder)
+}
+
+func putSliceEncoder(e *sliceArrayEncoder) {
+ e.elems = e.elems[:0]
+ _sliceEncoderPool.Put(e)
+}
+
+type consoleEncoder struct {
+ *jsonEncoder
+}
+
+// NewConsoleEncoder creates an encoder whose output is designed for human,
+// rather than machine, consumption. It serializes the core log entry data
+// (message, level, timestamp, etc.) in a plain-text format and leaves the
+// structured context as JSON.
+//
+// Note that although the console encoder doesn't use the keys specified in the
+// encoder configuration, it will omit any element whose key is set to the empty
+// string.
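+//
+// A minimal sketch (the configuration values are illustrative):
+//
+//    enc := NewConsoleEncoder(EncoderConfig{
+//        MessageKey:       "msg",
+//        LevelKey:         "level",
+//        TimeKey:          "ts",
+//        EncodeLevel:      CapitalLevelEncoder,
+//        EncodeTime:       ISO8601TimeEncoder,
+//        ConsoleSeparator: " | ",
+//    })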
+func NewConsoleEncoder(cfg EncoderConfig) Encoder {
+ if cfg.ConsoleSeparator == "" {
+ // Use a default delimiter of '\t' for backwards compatibility
+ cfg.ConsoleSeparator = "\t"
+ }
+ return consoleEncoder{newJSONEncoder(cfg, true)}
+}
+
+func (c consoleEncoder) Clone() Encoder {
+ return consoleEncoder{c.jsonEncoder.Clone().(*jsonEncoder)}
+}
+
+func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) {
+ line := bufferpool.Get()
+
+ // We don't want the entry's metadata to be quoted and escaped (if it's
+ // encoded as strings), which means that we can't use the JSON encoder. The
+ // simplest option is to use the memory encoder and fmt.Fprint.
+ //
+ // If this ever becomes a performance bottleneck, we can implement
+ // ArrayEncoder for our plain-text format.
+ arr := getSliceEncoder()
+ if c.TimeKey != "" && c.EncodeTime != nil {
+ c.EncodeTime(ent.Time, arr)
+ }
+ if c.LevelKey != "" && c.EncodeLevel != nil {
+ c.EncodeLevel(ent.Level, arr)
+ }
+ if ent.LoggerName != "" && c.NameKey != "" {
+ nameEncoder := c.EncodeName
+
+ if nameEncoder == nil {
+ // Fall back to FullNameEncoder for backward compatibility.
+ nameEncoder = FullNameEncoder
+ }
+
+ nameEncoder(ent.LoggerName, arr)
+ }
+ if ent.Caller.Defined {
+ if c.CallerKey != "" && c.EncodeCaller != nil {
+ c.EncodeCaller(ent.Caller, arr)
+ }
+ if c.FunctionKey != "" {
+ arr.AppendString(ent.Caller.Function)
+ }
+ }
+ for i := range arr.elems {
+ if i > 0 {
+ line.AppendString(c.ConsoleSeparator)
+ }
+ fmt.Fprint(line, arr.elems[i])
+ }
+ putSliceEncoder(arr)
+
+ // Add the message itself.
+ if c.MessageKey != "" {
+ c.addSeparatorIfNecessary(line)
+ line.AppendString(ent.Message)
+ }
+
+ // Add any structured context.
+ c.writeContext(line, fields)
+
+ // If there's no stacktrace key, honor that; this allows users to force
+ // single-line output.
+ if ent.Stack != "" && c.StacktraceKey != "" {
+ line.AppendByte('\n')
+ line.AppendString(ent.Stack)
+ }
+
+ if c.LineEnding != "" {
+ line.AppendString(c.LineEnding)
+ } else {
+ line.AppendString(DefaultLineEnding)
+ }
+ return line, nil
+}
+
+func (c consoleEncoder) writeContext(line *buffer.Buffer, extra []Field) {
+ context := c.jsonEncoder.Clone().(*jsonEncoder)
+ defer func() {
+ // putJSONEncoder assumes the buffer is still used, but we write out the buffer so
+ // we can free it.
+ context.buf.Free()
+ putJSONEncoder(context)
+ }()
+
+ addFields(context, extra)
+ context.closeOpenNamespaces()
+ if context.buf.Len() == 0 {
+ return
+ }
+
+ c.addSeparatorIfNecessary(line)
+ line.AppendByte('{')
+ line.Write(context.buf.Bytes())
+ line.AppendByte('}')
+}
+
+func (c consoleEncoder) addSeparatorIfNecessary(line *buffer.Buffer) {
+ if line.Len() > 0 {
+ line.AppendString(c.ConsoleSeparator)
+ }
+}
diff --git a/vendor/go.uber.org/zap/zapcore/core.go b/vendor/go.uber.org/zap/zapcore/core.go
new file mode 100644
index 0000000..a1ef8b0
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/core.go
@@ -0,0 +1,113 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+// Core is a minimal, fast logger interface. It's designed for library authors
+// to wrap in a more user-friendly API.
+type Core interface {
+ LevelEnabler
+
+ // With adds structured context to the Core.
+ With([]Field) Core
+ // Check determines whether the supplied Entry should be logged (using the
+ // embedded LevelEnabler and possibly some extra logic). If the entry
+ // should be logged, the Core adds itself to the CheckedEntry and returns
+ // the result.
+ //
+ // Callers must use Check before calling Write.
+ Check(Entry, *CheckedEntry) *CheckedEntry
+ // Write serializes the Entry and any Fields supplied at the log site and
+ // writes them to their destination.
+ //
+ // If called, Write should always log the Entry and Fields; it should not
+ // replicate the logic of Check.
+ Write(Entry, []Field) error
+ // Sync flushes buffered logs (if any).
+ Sync() error
+}
+
+type nopCore struct{}
+
+// NewNopCore returns a no-op Core.
+func NewNopCore() Core { return nopCore{} }
+func (nopCore) Enabled(Level) bool { return false }
+func (n nopCore) With([]Field) Core { return n }
+func (nopCore) Check(_ Entry, ce *CheckedEntry) *CheckedEntry { return ce }
+func (nopCore) Write(Entry, []Field) error { return nil }
+func (nopCore) Sync() error { return nil }
+
+// NewCore creates a Core that writes logs to a WriteSyncer.
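+//
+// For example (a sketch; cfg is an EncoderConfig assumed to be defined
+// elsewhere):
+//
+//    core := NewCore(NewJSONEncoder(cfg), AddSync(os.Stdout), InfoLevel)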
+func NewCore(enc Encoder, ws WriteSyncer, enab LevelEnabler) Core {
+ return &ioCore{
+ LevelEnabler: enab,
+ enc: enc,
+ out: ws,
+ }
+}
+
+type ioCore struct {
+ LevelEnabler
+ enc Encoder
+ out WriteSyncer
+}
+
+func (c *ioCore) With(fields []Field) Core {
+ clone := c.clone()
+ addFields(clone.enc, fields)
+ return clone
+}
+
+func (c *ioCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
+ if c.Enabled(ent.Level) {
+ return ce.AddCore(ent, c)
+ }
+ return ce
+}
+
+func (c *ioCore) Write(ent Entry, fields []Field) error {
+ buf, err := c.enc.EncodeEntry(ent, fields)
+ if err != nil {
+ return err
+ }
+ _, err = c.out.Write(buf.Bytes())
+ buf.Free()
+ if err != nil {
+ return err
+ }
+ if ent.Level > ErrorLevel {
+ // Since we may be crashing the program, sync the output. Ignore Sync
+ // errors, pending a clean solution to issue #370.
+ c.Sync()
+ }
+ return nil
+}
+
+func (c *ioCore) Sync() error {
+ return c.out.Sync()
+}
+
+func (c *ioCore) clone() *ioCore {
+ return &ioCore{
+ LevelEnabler: c.LevelEnabler,
+ enc: c.enc.Clone(),
+ out: c.out,
+ }
+}
diff --git a/vendor/go.uber.org/zap/zapcore/doc.go b/vendor/go.uber.org/zap/zapcore/doc.go
new file mode 100644
index 0000000..31000e9
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/doc.go
@@ -0,0 +1,24 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package zapcore defines and implements the low-level interfaces upon which
+// zap is built. By providing alternate implementations of these interfaces,
+// external packages can extend zap's capabilities.
+package zapcore // import "go.uber.org/zap/zapcore"
diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go
new file mode 100644
index 0000000..6601ca1
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/encoder.go
@@ -0,0 +1,443 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "encoding/json"
+ "time"
+
+ "go.uber.org/zap/buffer"
+)
+
+// DefaultLineEnding defines the default line ending when writing logs.
+// Alternate line endings specified in EncoderConfig can override this
+// behavior.
+const DefaultLineEnding = "\n"
+
+// OmitKey defines the key to use when callers want to remove a key from log output.
+const OmitKey = ""
+
+// A LevelEncoder serializes a Level to a primitive type.
+type LevelEncoder func(Level, PrimitiveArrayEncoder)
+
+// LowercaseLevelEncoder serializes a Level to a lowercase string. For example,
+// InfoLevel is serialized to "info".
+func LowercaseLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+ enc.AppendString(l.String())
+}
+
+// LowercaseColorLevelEncoder serializes a Level to a lowercase string and adds coloring.
+// For example, InfoLevel is serialized to "info" and colored blue.
+func LowercaseColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+ s, ok := _levelToLowercaseColorString[l]
+ if !ok {
+ s = _unknownLevelColor.Add(l.String())
+ }
+ enc.AppendString(s)
+}
+
+// CapitalLevelEncoder serializes a Level to an all-caps string. For example,
+// InfoLevel is serialized to "INFO".
+func CapitalLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+ enc.AppendString(l.CapitalString())
+}
+
+// CapitalColorLevelEncoder serializes a Level to an all-caps string and adds color.
+// For example, InfoLevel is serialized to "INFO" and colored blue.
+func CapitalColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+ s, ok := _levelToCapitalColorString[l]
+ if !ok {
+ s = _unknownLevelColor.Add(l.CapitalString())
+ }
+ enc.AppendString(s)
+}
+
+// UnmarshalText unmarshals text to a LevelEncoder. "capital" is unmarshaled to
+// CapitalLevelEncoder, "capitalColor" is unmarshaled to CapitalColorLevelEncoder,
+// "color" is unmarshaled to LowercaseColorLevelEncoder, and anything else
+// is unmarshaled to LowercaseLevelEncoder.
+func (e *LevelEncoder) UnmarshalText(text []byte) error {
+ switch string(text) {
+ case "capital":
+ *e = CapitalLevelEncoder
+ case "capitalColor":
+ *e = CapitalColorLevelEncoder
+ case "color":
+ *e = LowercaseColorLevelEncoder
+ default:
+ *e = LowercaseLevelEncoder
+ }
+ return nil
+}
+
+// A TimeEncoder serializes a time.Time to a primitive type.
+type TimeEncoder func(time.Time, PrimitiveArrayEncoder)
+
+// EpochTimeEncoder serializes a time.Time to a floating-point number of seconds
+// since the Unix epoch.
+func EpochTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+ nanos := t.UnixNano()
+ sec := float64(nanos) / float64(time.Second)
+ enc.AppendFloat64(sec)
+}
+
+// EpochMillisTimeEncoder serializes a time.Time to a floating-point number of
+// milliseconds since the Unix epoch.
+func EpochMillisTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+ nanos := t.UnixNano()
+ millis := float64(nanos) / float64(time.Millisecond)
+ enc.AppendFloat64(millis)
+}
+
+// EpochNanosTimeEncoder serializes a time.Time to an integer number of
+// nanoseconds since the Unix epoch.
+func EpochNanosTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+ enc.AppendInt64(t.UnixNano())
+}
+
+func encodeTimeLayout(t time.Time, layout string, enc PrimitiveArrayEncoder) {
+ type appendTimeEncoder interface {
+ AppendTimeLayout(time.Time, string)
+ }
+
+ if enc, ok := enc.(appendTimeEncoder); ok {
+ enc.AppendTimeLayout(t, layout)
+ return
+ }
+
+ enc.AppendString(t.Format(layout))
+}
+
+// ISO8601TimeEncoder serializes a time.Time to an ISO8601-formatted string
+// with millisecond precision.
+//
+// If enc supports AppendTimeLayout(t time.Time, layout string), it's used
+// instead of appending a pre-formatted string value.
+func ISO8601TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+ encodeTimeLayout(t, "2006-01-02T15:04:05.000Z0700", enc)
+}
+
+// RFC3339TimeEncoder serializes a time.Time to an RFC3339-formatted string.
+//
+// If enc supports AppendTimeLayout(t time.Time, layout string), it's used
+// instead of appending a pre-formatted string value.
+func RFC3339TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+ encodeTimeLayout(t, time.RFC3339, enc)
+}
+
+// RFC3339NanoTimeEncoder serializes a time.Time to an RFC3339-formatted string
+// with nanosecond precision.
+//
+// If enc supports AppendTimeLayout(t time.Time, layout string), it's used
+// instead of appending a pre-formatted string value.
+func RFC3339NanoTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+ encodeTimeLayout(t, time.RFC3339Nano, enc)
+}
+
+// TimeEncoderOfLayout returns a TimeEncoder that serializes a time.Time using
+// the given layout.
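+//
+// For example (the layout is illustrative):
+//
+//    enc := TimeEncoderOfLayout("2006-01-02 15:04:05.000")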
+func TimeEncoderOfLayout(layout string) TimeEncoder {
+ return func(t time.Time, enc PrimitiveArrayEncoder) {
+ encodeTimeLayout(t, layout, enc)
+ }
+}
+
+// UnmarshalText unmarshals text to a TimeEncoder.
+// "rfc3339nano" and "RFC3339Nano" are unmarshaled to RFC3339NanoTimeEncoder.
+// "rfc3339" and "RFC3339" are unmarshaled to RFC3339TimeEncoder.
+// "iso8601" and "ISO8601" are unmarshaled to ISO8601TimeEncoder.
+// "millis" is unmarshaled to EpochMillisTimeEncoder.
+// "nanos" is unmarshaled to EpochNanosEncoder.
+// Anything else is unmarshaled to EpochTimeEncoder.
+func (e *TimeEncoder) UnmarshalText(text []byte) error {
+ switch string(text) {
+ case "rfc3339nano", "RFC3339Nano":
+ *e = RFC3339NanoTimeEncoder
+ case "rfc3339", "RFC3339":
+ *e = RFC3339TimeEncoder
+ case "iso8601", "ISO8601":
+ *e = ISO8601TimeEncoder
+ case "millis":
+ *e = EpochMillisTimeEncoder
+ case "nanos":
+ *e = EpochNanosTimeEncoder
+ default:
+ *e = EpochTimeEncoder
+ }
+ return nil
+}
+
+// UnmarshalYAML unmarshals YAML to a TimeEncoder.
+// If the value is an object with a "layout" field, it is unmarshaled to a
+// TimeEncoder using that layout:
+//
+//    timeEncoder:
+//        layout: 06/01/02 03:04pm
+//
+// If the value is a string, it is handled by UnmarshalText:
+//
+//    timeEncoder: iso8601
+func (e *TimeEncoder) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var o struct {
+ Layout string `json:"layout" yaml:"layout"`
+ }
+ if err := unmarshal(&o); err == nil {
+ *e = TimeEncoderOfLayout(o.Layout)
+ return nil
+ }
+
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ return e.UnmarshalText([]byte(s))
+}
+
+// UnmarshalJSON unmarshals JSON to a TimeEncoder in the same way UnmarshalYAML does.
+func (e *TimeEncoder) UnmarshalJSON(data []byte) error {
+ return e.UnmarshalYAML(func(v interface{}) error {
+ return json.Unmarshal(data, v)
+ })
+}
+
+// A DurationEncoder serializes a time.Duration to a primitive type.
+type DurationEncoder func(time.Duration, PrimitiveArrayEncoder)
+
+// SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed.
+func SecondsDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+ enc.AppendFloat64(float64(d) / float64(time.Second))
+}
+
+// NanosDurationEncoder serializes a time.Duration to an integer number of
+// nanoseconds elapsed.
+func NanosDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+ enc.AppendInt64(int64(d))
+}
+
+// MillisDurationEncoder serializes a time.Duration to an integer number of
+// milliseconds elapsed.
+func MillisDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+ enc.AppendInt64(d.Nanoseconds() / 1e6)
+}
+
+// StringDurationEncoder serializes a time.Duration using its built-in String
+// method.
+func StringDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+ enc.AppendString(d.String())
+}
+
+// UnmarshalText unmarshals text to a DurationEncoder. "string" is unmarshaled
+// to StringDurationEncoder, and anything else is unmarshaled to
+// NanosDurationEncoder.
+func (e *DurationEncoder) UnmarshalText(text []byte) error {
+ switch string(text) {
+ case "string":
+ *e = StringDurationEncoder
+ case "nanos":
+ *e = NanosDurationEncoder
+ case "ms":
+ *e = MillisDurationEncoder
+ default:
+ *e = SecondsDurationEncoder
+ }
+ return nil
+}
+
+// A CallerEncoder serializes an EntryCaller to a primitive type.
+type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder)
+
+// FullCallerEncoder serializes a caller in /full/path/to/package/file:line
+// format.
+func FullCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) {
+ // TODO: consider using a byte-oriented API to save an allocation.
+ enc.AppendString(caller.String())
+}
+
+// ShortCallerEncoder serializes a caller in package/file:line format, trimming
+// all but the final directory from the full path.
+func ShortCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) {
+ // TODO: consider using a byte-oriented API to save an allocation.
+ enc.AppendString(caller.TrimmedPath())
+}
+
+// UnmarshalText unmarshals text to a CallerEncoder. "full" is unmarshaled to
+// FullCallerEncoder and anything else is unmarshaled to ShortCallerEncoder.
+func (e *CallerEncoder) UnmarshalText(text []byte) error {
+ switch string(text) {
+ case "full":
+ *e = FullCallerEncoder
+ default:
+ *e = ShortCallerEncoder
+ }
+ return nil
+}
+
+// A NameEncoder serializes a period-separated logger name to a primitive
+// type.
+type NameEncoder func(string, PrimitiveArrayEncoder)
+
+// FullNameEncoder serializes the logger name as-is.
+func FullNameEncoder(loggerName string, enc PrimitiveArrayEncoder) {
+ enc.AppendString(loggerName)
+}
+
+// UnmarshalText unmarshals text to a NameEncoder. Currently, everything is
+// unmarshaled to FullNameEncoder.
+func (e *NameEncoder) UnmarshalText(text []byte) error {
+ switch string(text) {
+ case "full":
+ *e = FullNameEncoder
+ default:
+ *e = FullNameEncoder
+ }
+ return nil
+}
+
+// An EncoderConfig allows users to configure the concrete encoders supplied by
+// zapcore.
+type EncoderConfig struct {
+ // Set the keys used for each log entry. If any key is empty, that portion
+ // of the entry is omitted.
+ MessageKey string `json:"messageKey" yaml:"messageKey"`
+ LevelKey string `json:"levelKey" yaml:"levelKey"`
+ TimeKey string `json:"timeKey" yaml:"timeKey"`
+ NameKey string `json:"nameKey" yaml:"nameKey"`
+ CallerKey string `json:"callerKey" yaml:"callerKey"`
+ FunctionKey string `json:"functionKey" yaml:"functionKey"`
+ StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"`
+ LineEnding string `json:"lineEnding" yaml:"lineEnding"`
+ // Configure the primitive representations of common complex types. For
+ // example, some users may want all time.Times serialized as floating-point
+ // seconds since epoch, while others may prefer ISO8601 strings.
+ EncodeLevel LevelEncoder `json:"levelEncoder" yaml:"levelEncoder"`
+ EncodeTime TimeEncoder `json:"timeEncoder" yaml:"timeEncoder"`
+ EncodeDuration DurationEncoder `json:"durationEncoder" yaml:"durationEncoder"`
+ EncodeCaller CallerEncoder `json:"callerEncoder" yaml:"callerEncoder"`
+ // Unlike the other primitive type encoders, EncodeName is optional. The
+ // zero value falls back to FullNameEncoder.
+ EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"`
+ // Configures the field separator used by the console encoder. Defaults
+ // to tab.
+ ConsoleSeparator string `json:"consoleSeparator" yaml:"consoleSeparator"`
+}
+
+// ObjectEncoder is a strongly-typed, encoding-agnostic interface for adding a
+// map- or struct-like object to the logging context. Like maps, ObjectEncoders
+// aren't safe for concurrent use (though typical use shouldn't require locks).
+type ObjectEncoder interface {
+ // Logging-specific marshalers.
+ AddArray(key string, marshaler ArrayMarshaler) error
+ AddObject(key string, marshaler ObjectMarshaler) error
+
+ // Built-in types.
+ AddBinary(key string, value []byte) // for arbitrary bytes
+ AddByteString(key string, value []byte) // for UTF-8 encoded bytes
+ AddBool(key string, value bool)
+ AddComplex128(key string, value complex128)
+ AddComplex64(key string, value complex64)
+ AddDuration(key string, value time.Duration)
+ AddFloat64(key string, value float64)
+ AddFloat32(key string, value float32)
+ AddInt(key string, value int)
+ AddInt64(key string, value int64)
+ AddInt32(key string, value int32)
+ AddInt16(key string, value int16)
+ AddInt8(key string, value int8)
+ AddString(key, value string)
+ AddTime(key string, value time.Time)
+ AddUint(key string, value uint)
+ AddUint64(key string, value uint64)
+ AddUint32(key string, value uint32)
+ AddUint16(key string, value uint16)
+ AddUint8(key string, value uint8)
+ AddUintptr(key string, value uintptr)
+
+ // AddReflected uses reflection to serialize arbitrary objects, so it can be
+ // slow and allocation-heavy.
+ AddReflected(key string, value interface{}) error
+ // OpenNamespace opens an isolated namespace where all subsequent fields will
+ // be added. Applications can use namespaces to prevent key collisions when
+ // injecting loggers into sub-components or third-party libraries.
+ OpenNamespace(key string)
+}
+
+// ArrayEncoder is a strongly-typed, encoding-agnostic interface for adding
+// array-like objects to the logging context. Of note, it supports mixed-type
+// arrays even though they aren't typical in Go. Like slices, ArrayEncoders
+// aren't safe for concurrent use (though typical use shouldn't require locks).
+type ArrayEncoder interface {
+ // Built-in types.
+ PrimitiveArrayEncoder
+
+ // Time-related types.
+ AppendDuration(time.Duration)
+ AppendTime(time.Time)
+
+ // Logging-specific marshalers.
+ AppendArray(ArrayMarshaler) error
+ AppendObject(ObjectMarshaler) error
+
+ // AppendReflected uses reflection to serialize arbitrary objects, so it's
+ // slow and allocation-heavy.
+ AppendReflected(value interface{}) error
+}
+
+// PrimitiveArrayEncoder is the subset of the ArrayEncoder interface that deals
+// only in Go's built-in types. It's included only so that Duration- and
+// TimeEncoders cannot trigger infinite recursion.
+type PrimitiveArrayEncoder interface {
+ // Built-in types.
+ AppendBool(bool)
+ AppendByteString([]byte) // for UTF-8 encoded bytes
+ AppendComplex128(complex128)
+ AppendComplex64(complex64)
+ AppendFloat64(float64)
+ AppendFloat32(float32)
+ AppendInt(int)
+ AppendInt64(int64)
+ AppendInt32(int32)
+ AppendInt16(int16)
+ AppendInt8(int8)
+ AppendString(string)
+ AppendUint(uint)
+ AppendUint64(uint64)
+ AppendUint32(uint32)
+ AppendUint16(uint16)
+ AppendUint8(uint8)
+ AppendUintptr(uintptr)
+}
+
+// Encoder is a format-agnostic interface for all log entry marshalers. Since
+// log encoders don't need to support the same wide range of use cases as
+// general-purpose marshalers, it's possible to make them faster and
+// lower-allocation.
+//
+// Implementations of the ObjectEncoder interface's methods can, of course,
+// freely modify the receiver. However, the Clone and EncodeEntry methods will
+// be called concurrently and shouldn't modify the receiver.
+type Encoder interface {
+ ObjectEncoder
+
+ // Clone copies the encoder, ensuring that adding fields to the copy doesn't
+ // affect the original.
+ Clone() Encoder
+
+ // EncodeEntry encodes an entry and fields, along with any accumulated
+ // context, into a byte buffer and returns it. Any fields that are empty,
+ // including fields on the `Entry` type, should be omitted.
+ EncodeEntry(Entry, []Field) (*buffer.Buffer, error)
+}
diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go
new file mode 100644
index 0000000..0885505
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/entry.go
@@ -0,0 +1,262 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "fmt"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+
+ "go.uber.org/zap/internal/bufferpool"
+ "go.uber.org/zap/internal/exit"
+
+ "go.uber.org/multierr"
+)
+
+var (
+ _cePool = sync.Pool{New: func() interface{} {
+ // Pre-allocate some space for cores.
+ return &CheckedEntry{
+ cores: make([]Core, 4),
+ }
+ }}
+)
+
+func getCheckedEntry() *CheckedEntry {
+ ce := _cePool.Get().(*CheckedEntry)
+ ce.reset()
+ return ce
+}
+
+func putCheckedEntry(ce *CheckedEntry) {
+ if ce == nil {
+ return
+ }
+ _cePool.Put(ce)
+}
+
+// NewEntryCaller makes an EntryCaller from the return signature of
+// runtime.Caller.
+func NewEntryCaller(pc uintptr, file string, line int, ok bool) EntryCaller {
+ if !ok {
+ return EntryCaller{}
+ }
+ return EntryCaller{
+ PC: pc,
+ File: file,
+ Line: line,
+ Defined: true,
+ }
+}
+
+// EntryCaller represents the caller of a logging function.
+type EntryCaller struct {
+ Defined bool
+ PC uintptr
+ File string
+ Line int
+ Function string
+}
+
+// String returns the full path and line number of the caller.
+func (ec EntryCaller) String() string {
+ return ec.FullPath()
+}
+
+// FullPath returns a /full/path/to/package/file:line description of the
+// caller.
+func (ec EntryCaller) FullPath() string {
+ if !ec.Defined {
+ return "undefined"
+ }
+ buf := bufferpool.Get()
+ buf.AppendString(ec.File)
+ buf.AppendByte(':')
+ buf.AppendInt(int64(ec.Line))
+ caller := buf.String()
+ buf.Free()
+ return caller
+}
+
+// TrimmedPath returns a package/file:line description of the caller,
+// preserving only the leaf directory name and file name.
+func (ec EntryCaller) TrimmedPath() string {
+ if !ec.Defined {
+ return "undefined"
+ }
+ // nb. To make sure we trim the path correctly on Windows too, we
+ // counter-intuitively need to use '/' and *not* os.PathSeparator here,
+ // because the path given originates from Go stdlib, specifically
+ // runtime.Caller() which (as of Mar/17) returns forward slashes even on
+ // Windows.
+ //
+ // See https://github.com/golang/go/issues/3335
+ // and https://github.com/golang/go/issues/18151
+ //
+ // for discussion on the issue on Go side.
+ //
+ // Find the last separator.
+ //
+ idx := strings.LastIndexByte(ec.File, '/')
+ if idx == -1 {
+ return ec.FullPath()
+ }
+ // Find the penultimate separator.
+ idx = strings.LastIndexByte(ec.File[:idx], '/')
+ if idx == -1 {
+ return ec.FullPath()
+ }
+ buf := bufferpool.Get()
+ // Keep everything after the penultimate separator.
+ buf.AppendString(ec.File[idx+1:])
+ buf.AppendByte(':')
+ buf.AppendInt(int64(ec.Line))
+ caller := buf.String()
+ buf.Free()
+ return caller
+}
+
+// An Entry represents a complete log message. The entry's structured context
+// is already serialized, but the log level, time, message, and call site
+// information are available for inspection and modification. Any fields left
+// empty will be omitted when encoding.
+//
+// Entries are pooled, so any functions that accept them MUST be careful not to
+// retain references to them.
+type Entry struct {
+ Level Level
+ Time time.Time
+ LoggerName string
+ Message string
+ Caller EntryCaller
+ Stack string
+}
+
+// CheckWriteAction indicates what action to take after a log entry is
+// processed. Actions are ordered in increasing severity.
+type CheckWriteAction uint8
+
+const (
+ // WriteThenNoop indicates that nothing special needs to be done. It's the
+ // default behavior.
+ WriteThenNoop CheckWriteAction = iota
+ // WriteThenGoexit runs runtime.Goexit after Write.
+ WriteThenGoexit
+ // WriteThenPanic causes a panic after Write.
+ WriteThenPanic
+ // WriteThenFatal causes a fatal os.Exit after Write.
+ WriteThenFatal
+)
+
+// CheckedEntry is an Entry together with a collection of Cores that have
+// already agreed to log it.
+//
+// CheckedEntry references should be created by calling AddCore or Should on a
+// nil *CheckedEntry. References are returned to a pool after Write, and MUST
+// NOT be retained after calling their Write method.
+type CheckedEntry struct {
+ Entry
+ ErrorOutput WriteSyncer
+ dirty bool // best-effort detection of pool misuse
+ should CheckWriteAction
+ cores []Core
+}
+
+func (ce *CheckedEntry) reset() {
+ ce.Entry = Entry{}
+ ce.ErrorOutput = nil
+ ce.dirty = false
+ ce.should = WriteThenNoop
+ for i := range ce.cores {
+ // don't keep references to cores
+ ce.cores[i] = nil
+ }
+ ce.cores = ce.cores[:0]
+}
+
+// Write writes the entry to the stored Cores, returns any errors, and returns
+// the CheckedEntry reference to a pool for immediate re-use. Finally, it
+// executes any required CheckWriteAction.
+func (ce *CheckedEntry) Write(fields ...Field) {
+ if ce == nil {
+ return
+ }
+
+ if ce.dirty {
+ if ce.ErrorOutput != nil {
+ // Make a best effort to detect unsafe re-use of this CheckedEntry.
+ // If the entry is dirty, log an internal error; because the
+ // CheckedEntry is being used after it was returned to the pool,
+ // the message may be an amalgamation from multiple call sites.
+ fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry)
+ ce.ErrorOutput.Sync()
+ }
+ return
+ }
+ ce.dirty = true
+
+ var err error
+ for i := range ce.cores {
+ err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields))
+ }
+ if err != nil && ce.ErrorOutput != nil {
+ fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err)
+ ce.ErrorOutput.Sync()
+ }
+
+ should, msg := ce.should, ce.Message
+ putCheckedEntry(ce)
+
+ switch should {
+ case WriteThenPanic:
+ panic(msg)
+ case WriteThenFatal:
+ exit.Exit()
+ case WriteThenGoexit:
+ runtime.Goexit()
+ }
+}
+
+// AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be
+// used by Core.Check implementations, and is safe to call on nil CheckedEntry
+// references.
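+//
+// A typical Check implementation looks like this sketch (myCore is
+// illustrative):
+//
+//    func (c *myCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
+//        if c.Enabled(ent.Level) {
+//            return ce.AddCore(ent, c)
+//        }
+//        return ce
+//    }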
+func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry {
+ if ce == nil {
+ ce = getCheckedEntry()
+ ce.Entry = ent
+ }
+ ce.cores = append(ce.cores, core)
+ return ce
+}
+
+// Should sets this CheckedEntry's CheckWriteAction, which controls whether a
+// Core will panic or fatal after writing this log entry. Like AddCore, it's
+// safe to call on nil CheckedEntry references.
+func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry {
+ if ce == nil {
+ ce = getCheckedEntry()
+ ce.Entry = ent
+ }
+ ce.should = should
+ return ce
+}
diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go
new file mode 100644
index 0000000..74919b0
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/error.go
@@ -0,0 +1,132 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "fmt"
+ "reflect"
+ "sync"
+)
+
+// Encodes the given error into fields of an object. A field with the given
+// name is added for the error message.
+//
+// If the error implements fmt.Formatter, a field with the name ${key}Verbose
+// is also added with the full verbose error message.
+//
+// Finally, if the error implements errorGroup (from go.uber.org/multierr) or
+// causer (from github.com/pkg/errors), a ${key}Causes field is added with an
+// array of objects containing the errors this error was comprised of.
+//
+// {
+// "error": err.Error(),
+// "errorVerbose": fmt.Sprintf("%+v", err),
+// "errorCauses": [
+// ...
+// ],
+// }
+func encodeError(key string, err error, enc ObjectEncoder) (retErr error) {
+ // Try to capture panics (from nil references or otherwise) when calling
+ // the Error() method
+ defer func() {
+ if rerr := recover(); rerr != nil {
+ // If it's a nil pointer, just say "<nil>". The likeliest causes are an
+ // error type that fails to guard against nil or a nil pointer for a
+ // value receiver, and in either case, "<nil>" is a nice result.
+ if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() {
+ enc.AddString(key, "<nil>")
+ return
+ }
+
+ retErr = fmt.Errorf("PANIC=%v", rerr)
+ }
+ }()
+
+ basic := err.Error()
+ enc.AddString(key, basic)
+
+ switch e := err.(type) {
+ case errorGroup:
+ return enc.AddArray(key+"Causes", errArray(e.Errors()))
+ case fmt.Formatter:
+ verbose := fmt.Sprintf("%+v", e)
+ if verbose != basic {
+ // This is a rich error type, like those produced by
+ // github.com/pkg/errors.
+ enc.AddString(key+"Verbose", verbose)
+ }
+ }
+ return nil
+}
+
+type errorGroup interface {
+ // Provides read-only access to the underlying list of errors, preferably
+ // without causing any allocs.
+ Errors() []error
+}
+
+// Note that errArray and errArrayElem are very similar to the version
+// implemented in the top-level error.go file. We can't re-use this because
+// that would require exporting errArray as part of the zapcore API.
+
+// Encodes a list of errors using the standard error encoding logic.
+type errArray []error
+
+func (errs errArray) MarshalLogArray(arr ArrayEncoder) error {
+ for i := range errs {
+ if errs[i] == nil {
+ continue
+ }
+
+ el := newErrArrayElem(errs[i])
+ arr.AppendObject(el)
+ el.Free()
+ }
+ return nil
+}
+
+var _errArrayElemPool = sync.Pool{New: func() interface{} {
+ return &errArrayElem{}
+}}
+
+// Encodes any error into a {"error": ...} re-using the same errors logic.
+//
+// May be passed in place of an array to build a single-element array.
+type errArrayElem struct{ err error }
+
+func newErrArrayElem(err error) *errArrayElem {
+ e := _errArrayElemPool.Get().(*errArrayElem)
+ e.err = err
+ return e
+}
+
+func (e *errArrayElem) MarshalLogArray(arr ArrayEncoder) error {
+ return arr.AppendObject(e)
+}
+
+func (e *errArrayElem) MarshalLogObject(enc ObjectEncoder) error {
+ return encodeError("error", e.err, enc)
+}
+
+func (e *errArrayElem) Free() {
+ e.err = nil
+ _errArrayElemPool.Put(e)
+}
diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go
new file mode 100644
index 0000000..95bdb0a
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/field.go
@@ -0,0 +1,233 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "reflect"
+ "time"
+)
+
+// A FieldType indicates which member of the Field union struct should be used
+// and how it should be serialized.
+type FieldType uint8
+
+const (
+ // UnknownType is the default field type. Attempting to add it to an encoder will panic.
+ UnknownType FieldType = iota
+ // ArrayMarshalerType indicates that the field carries an ArrayMarshaler.
+ ArrayMarshalerType
+ // ObjectMarshalerType indicates that the field carries an ObjectMarshaler.
+ ObjectMarshalerType
+ // BinaryType indicates that the field carries an opaque binary blob.
+ BinaryType
+ // BoolType indicates that the field carries a bool.
+ BoolType
+ // ByteStringType indicates that the field carries UTF-8 encoded bytes.
+ ByteStringType
+ // Complex128Type indicates that the field carries a complex128.
+ Complex128Type
+ // Complex64Type indicates that the field carries a complex64.
+ Complex64Type
+ // DurationType indicates that the field carries a time.Duration.
+ DurationType
+ // Float64Type indicates that the field carries a float64.
+ Float64Type
+ // Float32Type indicates that the field carries a float32.
+ Float32Type
+ // Int64Type indicates that the field carries an int64.
+ Int64Type
+ // Int32Type indicates that the field carries an int32.
+ Int32Type
+ // Int16Type indicates that the field carries an int16.
+ Int16Type
+ // Int8Type indicates that the field carries an int8.
+ Int8Type
+ // StringType indicates that the field carries a string.
+ StringType
+ // TimeType indicates that the field carries a time.Time that is
+ // representable by a UnixNano() stored as an int64.
+ TimeType
+ // TimeFullType indicates that the field carries a time.Time stored as-is.
+ TimeFullType
+ // Uint64Type indicates that the field carries a uint64.
+ Uint64Type
+ // Uint32Type indicates that the field carries a uint32.
+ Uint32Type
+ // Uint16Type indicates that the field carries a uint16.
+ Uint16Type
+ // Uint8Type indicates that the field carries a uint8.
+ Uint8Type
+ // UintptrType indicates that the field carries a uintptr.
+ UintptrType
+ // ReflectType indicates that the field carries an interface{}, which should
+ // be serialized using reflection.
+ ReflectType
+ // NamespaceType signals the beginning of an isolated namespace. All
+ // subsequent fields should be added to the new namespace.
+ NamespaceType
+ // StringerType indicates that the field carries a fmt.Stringer.
+ StringerType
+ // ErrorType indicates that the field carries an error.
+ ErrorType
+ // SkipType indicates that the field is a no-op.
+ SkipType
+
+ // InlineMarshalerType indicates that the field carries an ObjectMarshaler
+ // that should be inlined.
+ InlineMarshalerType
+)
+
+// A Field is a marshaling operation used to add a key-value pair to a logger's
+// context. Most fields are lazily marshaled, so it's inexpensive to add fields
+// to disabled debug-level log statements.
+type Field struct {
+ Key string
+ Type FieldType
+ Integer int64
+ String string
+ Interface interface{}
+}
+
+// AddTo exports a field through the ObjectEncoder interface. It's primarily
+// useful to library authors, and shouldn't be necessary in most applications.
+func (f Field) AddTo(enc ObjectEncoder) {
+ var err error
+
+ switch f.Type {
+ case ArrayMarshalerType:
+ err = enc.AddArray(f.Key, f.Interface.(ArrayMarshaler))
+ case ObjectMarshalerType:
+ err = enc.AddObject(f.Key, f.Interface.(ObjectMarshaler))
+ case InlineMarshalerType:
+ err = f.Interface.(ObjectMarshaler).MarshalLogObject(enc)
+ case BinaryType:
+ enc.AddBinary(f.Key, f.Interface.([]byte))
+ case BoolType:
+ enc.AddBool(f.Key, f.Integer == 1)
+ case ByteStringType:
+ enc.AddByteString(f.Key, f.Interface.([]byte))
+ case Complex128Type:
+ enc.AddComplex128(f.Key, f.Interface.(complex128))
+ case Complex64Type:
+ enc.AddComplex64(f.Key, f.Interface.(complex64))
+ case DurationType:
+ enc.AddDuration(f.Key, time.Duration(f.Integer))
+ case Float64Type:
+ enc.AddFloat64(f.Key, math.Float64frombits(uint64(f.Integer)))
+ case Float32Type:
+ enc.AddFloat32(f.Key, math.Float32frombits(uint32(f.Integer)))
+ case Int64Type:
+ enc.AddInt64(f.Key, f.Integer)
+ case Int32Type:
+ enc.AddInt32(f.Key, int32(f.Integer))
+ case Int16Type:
+ enc.AddInt16(f.Key, int16(f.Integer))
+ case Int8Type:
+ enc.AddInt8(f.Key, int8(f.Integer))
+ case StringType:
+ enc.AddString(f.Key, f.String)
+ case TimeType:
+ if f.Interface != nil {
+ enc.AddTime(f.Key, time.Unix(0, f.Integer).In(f.Interface.(*time.Location)))
+ } else {
+ // Fall back to UTC if location is nil.
+ enc.AddTime(f.Key, time.Unix(0, f.Integer))
+ }
+ case TimeFullType:
+ enc.AddTime(f.Key, f.Interface.(time.Time))
+ case Uint64Type:
+ enc.AddUint64(f.Key, uint64(f.Integer))
+ case Uint32Type:
+ enc.AddUint32(f.Key, uint32(f.Integer))
+ case Uint16Type:
+ enc.AddUint16(f.Key, uint16(f.Integer))
+ case Uint8Type:
+ enc.AddUint8(f.Key, uint8(f.Integer))
+ case UintptrType:
+ enc.AddUintptr(f.Key, uintptr(f.Integer))
+ case ReflectType:
+ err = enc.AddReflected(f.Key, f.Interface)
+ case NamespaceType:
+ enc.OpenNamespace(f.Key)
+ case StringerType:
+ err = encodeStringer(f.Key, f.Interface, enc)
+ case ErrorType:
+ err = encodeError(f.Key, f.Interface.(error), enc)
+ case SkipType:
+ break
+ default:
+ panic(fmt.Sprintf("unknown field type: %v", f))
+ }
+
+ if err != nil {
+ enc.AddString(fmt.Sprintf("%sError", f.Key), err.Error())
+ }
+}
+
+// Equals returns whether two fields are equal. For non-primitive types such as
+// errors, marshalers, or reflect types, it uses reflect.DeepEqual.
+func (f Field) Equals(other Field) bool {
+ if f.Type != other.Type {
+ return false
+ }
+ if f.Key != other.Key {
+ return false
+ }
+
+ switch f.Type {
+ case BinaryType, ByteStringType:
+ return bytes.Equal(f.Interface.([]byte), other.Interface.([]byte))
+ case ArrayMarshalerType, ObjectMarshalerType, ErrorType, ReflectType:
+ return reflect.DeepEqual(f.Interface, other.Interface)
+ default:
+ return f == other
+ }
+}
+
+func addFields(enc ObjectEncoder, fields []Field) {
+ for i := range fields {
+ fields[i].AddTo(enc)
+ }
+}
+
+func encodeStringer(key string, stringer interface{}, enc ObjectEncoder) (retErr error) {
+ // Try to capture panics (from nil references or otherwise) when calling
+ // the String() method, similar to https://golang.org/src/fmt/print.go#L540
+ defer func() {
+ if err := recover(); err != nil {
+ // If it's a nil pointer, just say "". The likeliest causes are a
+ // Stringer that fails to guard against nil or a nil pointer for a
+ // value receiver, and in either case, "" is a nice result.
+ if v := reflect.ValueOf(stringer); v.Kind() == reflect.Ptr && v.IsNil() {
+ enc.AddString(key, "")
+ return
+ }
+
+ retErr = fmt.Errorf("PANIC=%v", err)
+ }
+ }()
+
+ enc.AddString(key, stringer.(fmt.Stringer).String())
+ return nil
+}
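As an illustration, a small sketch of a Field being driven through an ObjectEncoder by AddTo, using the map-backed encoder defined later in this package. The key and value are arbitrary.

package main

import (
	"fmt"

	"go.uber.org/zap/zapcore"
)

func main() {
	// Fields are plain structs; AddTo switches on Type to pick the encoder call.
	f := zapcore.Field{Key: "attempts", Type: zapcore.Int64Type, Integer: 3}

	enc := zapcore.NewMapObjectEncoder()
	f.AddTo(enc)

	fmt.Println(enc.Fields) // map[attempts:3]
}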
diff --git a/vendor/go.uber.org/zap/zapcore/hook.go b/vendor/go.uber.org/zap/zapcore/hook.go
new file mode 100644
index 0000000..5db4afb
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/hook.go
@@ -0,0 +1,68 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "go.uber.org/multierr"
+
+type hooked struct {
+ Core
+ funcs []func(Entry) error
+}
+
+// RegisterHooks wraps a Core and runs a collection of user-defined callback
+// hooks each time a message is logged. Execution of the callbacks is blocking.
+//
+// This offers users an easy way to register simple callbacks (e.g., metrics
+// collection) without implementing the full Core interface.
+func RegisterHooks(core Core, hooks ...func(Entry) error) Core {
+ funcs := append([]func(Entry) error{}, hooks...)
+ return &hooked{
+ Core: core,
+ funcs: funcs,
+ }
+}
+
+func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
+ // Let the wrapped Core decide whether to log this message or not. This
+ // also gives the downstream a chance to register itself directly with the
+ // CheckedEntry.
+ if downstream := h.Core.Check(ent, ce); downstream != nil {
+ return downstream.AddCore(ent, h)
+ }
+ return ce
+}
+
+func (h *hooked) With(fields []Field) Core {
+ return &hooked{
+ Core: h.Core.With(fields),
+ funcs: h.funcs,
+ }
+}
+
+func (h *hooked) Write(ent Entry, _ []Field) error {
+ // Since our downstream had a chance to register itself directly with the
+ // CheckedEntry, we don't need to call it here.
+ var err error
+ for i := range h.funcs {
+ err = multierr.Append(err, h.funcs[i](ent))
+ }
+ return err
+}
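A hedged usage sketch for RegisterHooks, assuming the parent zap package's NewProduction and New constructors; the work inside the callback is only a placeholder.

package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	base, _ := zap.NewProduction()

	// Wrap the existing core so every logged entry also runs the callback.
	core := zapcore.RegisterHooks(base.Core(), func(ent zapcore.Entry) error {
		if ent.Level >= zapcore.ErrorLevel {
			// e.g. increment an error-rate metric here
		}
		return nil
	})

	logger := zap.New(core)
	logger.Error("payment declined")
	_ = logger.Sync()
}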
diff --git a/vendor/go.uber.org/zap/zapcore/increase_level.go b/vendor/go.uber.org/zap/zapcore/increase_level.go
new file mode 100644
index 0000000..5a17492
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/increase_level.go
@@ -0,0 +1,66 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "fmt"
+
+type levelFilterCore struct {
+ core Core
+ level LevelEnabler
+}
+
+// NewIncreaseLevelCore creates a core that can be used to increase the level of
+// an existing Core. It cannot be used to decrease the logging level, as it acts
+// as a filter before calling the underlying core. If the given level would
+// decrease the logging level, an error is returned.
+func NewIncreaseLevelCore(core Core, level LevelEnabler) (Core, error) {
+ for l := _maxLevel; l >= _minLevel; l-- {
+ if !core.Enabled(l) && level.Enabled(l) {
+ return nil, fmt.Errorf("invalid increase level, as level %q is allowed by increased level, but not by existing core", l)
+ }
+ }
+
+ return &levelFilterCore{core, level}, nil
+}
+
+func (c *levelFilterCore) Enabled(lvl Level) bool {
+ return c.level.Enabled(lvl)
+}
+
+func (c *levelFilterCore) With(fields []Field) Core {
+ return &levelFilterCore{c.core.With(fields), c.level}
+}
+
+func (c *levelFilterCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
+ if !c.Enabled(ent.Level) {
+ return ce
+ }
+
+ return c.core.Check(ent, ce)
+}
+
+func (c *levelFilterCore) Write(ent Entry, fields []Field) error {
+ return c.core.Write(ent, fields)
+}
+
+func (c *levelFilterCore) Sync() error {
+ return c.core.Sync()
+}
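A small sketch of wrapping a debug-level core so only warn-and-above entries pass through. It assumes zap.NewDevelopment builds a debug-level logger, as it does in the parent package.

package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	verbose, _ := zap.NewDevelopment() // debug-level logger

	core, err := zapcore.NewIncreaseLevelCore(verbose.Core(), zapcore.WarnLevel)
	if err != nil {
		panic(err)
	}

	quiet := zap.New(core)
	quiet.Info("dropped by the level filter")
	quiet.Warn("still written")
	_ = quiet.Sync()
}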
diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go
new file mode 100644
index 0000000..af220d9
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go
@@ -0,0 +1,542 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "math"
+ "sync"
+ "time"
+ "unicode/utf8"
+
+ "go.uber.org/zap/buffer"
+ "go.uber.org/zap/internal/bufferpool"
+)
+
+// For JSON-escaping; see jsonEncoder.safeAddString below.
+const _hex = "0123456789abcdef"
+
+var _jsonPool = sync.Pool{New: func() interface{} {
+ return &jsonEncoder{}
+}}
+
+func getJSONEncoder() *jsonEncoder {
+ return _jsonPool.Get().(*jsonEncoder)
+}
+
+func putJSONEncoder(enc *jsonEncoder) {
+ if enc.reflectBuf != nil {
+ enc.reflectBuf.Free()
+ }
+ enc.EncoderConfig = nil
+ enc.buf = nil
+ enc.spaced = false
+ enc.openNamespaces = 0
+ enc.reflectBuf = nil
+ enc.reflectEnc = nil
+ _jsonPool.Put(enc)
+}
+
+type jsonEncoder struct {
+ *EncoderConfig
+ buf *buffer.Buffer
+ spaced bool // include spaces after colons and commas
+ openNamespaces int
+
+ // for encoding generic values by reflection
+ reflectBuf *buffer.Buffer
+ reflectEnc *json.Encoder
+}
+
+// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder
+// appropriately escapes all field keys and values.
+//
+// Note that the encoder doesn't deduplicate keys, so it's possible to produce
+// a message like
+// {"foo":"bar","foo":"baz"}
+// This is permitted by the JSON specification, but not encouraged. Many
+// libraries will ignore duplicate key-value pairs (typically keeping the last
+// pair) when unmarshaling, but users should attempt to avoid adding duplicate
+// keys.
+func NewJSONEncoder(cfg EncoderConfig) Encoder {
+ return newJSONEncoder(cfg, false)
+}
+
+func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder {
+ return &jsonEncoder{
+ EncoderConfig: &cfg,
+ buf: bufferpool.Get(),
+ spaced: spaced,
+ }
+}
+
+func (enc *jsonEncoder) AddArray(key string, arr ArrayMarshaler) error {
+ enc.addKey(key)
+ return enc.AppendArray(arr)
+}
+
+func (enc *jsonEncoder) AddObject(key string, obj ObjectMarshaler) error {
+ enc.addKey(key)
+ return enc.AppendObject(obj)
+}
+
+func (enc *jsonEncoder) AddBinary(key string, val []byte) {
+ enc.AddString(key, base64.StdEncoding.EncodeToString(val))
+}
+
+func (enc *jsonEncoder) AddByteString(key string, val []byte) {
+ enc.addKey(key)
+ enc.AppendByteString(val)
+}
+
+func (enc *jsonEncoder) AddBool(key string, val bool) {
+ enc.addKey(key)
+ enc.AppendBool(val)
+}
+
+func (enc *jsonEncoder) AddComplex128(key string, val complex128) {
+ enc.addKey(key)
+ enc.AppendComplex128(val)
+}
+
+func (enc *jsonEncoder) AddDuration(key string, val time.Duration) {
+ enc.addKey(key)
+ enc.AppendDuration(val)
+}
+
+func (enc *jsonEncoder) AddFloat64(key string, val float64) {
+ enc.addKey(key)
+ enc.AppendFloat64(val)
+}
+
+func (enc *jsonEncoder) AddFloat32(key string, val float32) {
+ enc.addKey(key)
+ enc.AppendFloat32(val)
+}
+
+func (enc *jsonEncoder) AddInt64(key string, val int64) {
+ enc.addKey(key)
+ enc.AppendInt64(val)
+}
+
+func (enc *jsonEncoder) resetReflectBuf() {
+ if enc.reflectBuf == nil {
+ enc.reflectBuf = bufferpool.Get()
+ enc.reflectEnc = json.NewEncoder(enc.reflectBuf)
+
+ // For consistency with our custom JSON encoder.
+ enc.reflectEnc.SetEscapeHTML(false)
+ } else {
+ enc.reflectBuf.Reset()
+ }
+}
+
+var nullLiteralBytes = []byte("null")
+
+// Only invoke the standard JSON encoder if there is actually something to
+// encode; otherwise write JSON null literal directly.
+func (enc *jsonEncoder) encodeReflected(obj interface{}) ([]byte, error) {
+ if obj == nil {
+ return nullLiteralBytes, nil
+ }
+ enc.resetReflectBuf()
+ if err := enc.reflectEnc.Encode(obj); err != nil {
+ return nil, err
+ }
+ enc.reflectBuf.TrimNewline()
+ return enc.reflectBuf.Bytes(), nil
+}
+
+func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error {
+ valueBytes, err := enc.encodeReflected(obj)
+ if err != nil {
+ return err
+ }
+ enc.addKey(key)
+ _, err = enc.buf.Write(valueBytes)
+ return err
+}
+
+func (enc *jsonEncoder) OpenNamespace(key string) {
+ enc.addKey(key)
+ enc.buf.AppendByte('{')
+ enc.openNamespaces++
+}
+
+func (enc *jsonEncoder) AddString(key, val string) {
+ enc.addKey(key)
+ enc.AppendString(val)
+}
+
+func (enc *jsonEncoder) AddTime(key string, val time.Time) {
+ enc.addKey(key)
+ enc.AppendTime(val)
+}
+
+func (enc *jsonEncoder) AddUint64(key string, val uint64) {
+ enc.addKey(key)
+ enc.AppendUint64(val)
+}
+
+func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error {
+ enc.addElementSeparator()
+ enc.buf.AppendByte('[')
+ err := arr.MarshalLogArray(enc)
+ enc.buf.AppendByte(']')
+ return err
+}
+
+func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error {
+ enc.addElementSeparator()
+ enc.buf.AppendByte('{')
+ err := obj.MarshalLogObject(enc)
+ enc.buf.AppendByte('}')
+ return err
+}
+
+func (enc *jsonEncoder) AppendBool(val bool) {
+ enc.addElementSeparator()
+ enc.buf.AppendBool(val)
+}
+
+func (enc *jsonEncoder) AppendByteString(val []byte) {
+ enc.addElementSeparator()
+ enc.buf.AppendByte('"')
+ enc.safeAddByteString(val)
+ enc.buf.AppendByte('"')
+}
+
+func (enc *jsonEncoder) AppendComplex128(val complex128) {
+ enc.addElementSeparator()
+ // Cast to a platform-independent, fixed-size type.
+ r, i := float64(real(val)), float64(imag(val))
+ enc.buf.AppendByte('"')
+ // Because we're always in a quoted string, we can use strconv without
+ // special-casing NaN and +/-Inf.
+ enc.buf.AppendFloat(r, 64)
+ // If the imaginary part is negative, AppendFloat adds the minus (-) sign
+ // itself; only prepend '+' when it is non-negative.
+ if i >= 0 {
+ enc.buf.AppendByte('+')
+ }
+ enc.buf.AppendFloat(i, 64)
+ enc.buf.AppendByte('i')
+ enc.buf.AppendByte('"')
+}
+
+func (enc *jsonEncoder) AppendDuration(val time.Duration) {
+ cur := enc.buf.Len()
+ if e := enc.EncodeDuration; e != nil {
+ e(val, enc)
+ }
+ if cur == enc.buf.Len() {
+ // User-supplied EncodeDuration is a no-op. Fall back to nanoseconds to keep
+ // JSON valid.
+ enc.AppendInt64(int64(val))
+ }
+}
+
+func (enc *jsonEncoder) AppendInt64(val int64) {
+ enc.addElementSeparator()
+ enc.buf.AppendInt(val)
+}
+
+func (enc *jsonEncoder) AppendReflected(val interface{}) error {
+ valueBytes, err := enc.encodeReflected(val)
+ if err != nil {
+ return err
+ }
+ enc.addElementSeparator()
+ _, err = enc.buf.Write(valueBytes)
+ return err
+}
+
+func (enc *jsonEncoder) AppendString(val string) {
+ enc.addElementSeparator()
+ enc.buf.AppendByte('"')
+ enc.safeAddString(val)
+ enc.buf.AppendByte('"')
+}
+
+func (enc *jsonEncoder) AppendTimeLayout(time time.Time, layout string) {
+ enc.addElementSeparator()
+ enc.buf.AppendByte('"')
+ enc.buf.AppendTime(time, layout)
+ enc.buf.AppendByte('"')
+}
+
+func (enc *jsonEncoder) AppendTime(val time.Time) {
+ cur := enc.buf.Len()
+ if e := enc.EncodeTime; e != nil {
+ e(val, enc)
+ }
+ if cur == enc.buf.Len() {
+ // User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep
+ // output JSON valid.
+ enc.AppendInt64(val.UnixNano())
+ }
+}
+
+func (enc *jsonEncoder) AppendUint64(val uint64) {
+ enc.addElementSeparator()
+ enc.buf.AppendUint(val)
+}
+
+func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) }
+func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.AppendComplex128(complex128(v)) }
+func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) }
+func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) }
+func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) }
+
+func (enc *jsonEncoder) Clone() Encoder {
+ clone := enc.clone()
+ clone.buf.Write(enc.buf.Bytes())
+ return clone
+}
+
+func (enc *jsonEncoder) clone() *jsonEncoder {
+ clone := getJSONEncoder()
+ clone.EncoderConfig = enc.EncoderConfig
+ clone.spaced = enc.spaced
+ clone.openNamespaces = enc.openNamespaces
+ clone.buf = bufferpool.Get()
+ return clone
+}
+
+func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) {
+ final := enc.clone()
+ final.buf.AppendByte('{')
+
+ if final.LevelKey != "" {
+ final.addKey(final.LevelKey)
+ cur := final.buf.Len()
+ final.EncodeLevel(ent.Level, final)
+ if cur == final.buf.Len() {
+ // User-supplied EncodeLevel was a no-op. Fall back to strings to keep
+ // output JSON valid.
+ final.AppendString(ent.Level.String())
+ }
+ }
+ if final.TimeKey != "" {
+ final.AddTime(final.TimeKey, ent.Time)
+ }
+ if ent.LoggerName != "" && final.NameKey != "" {
+ final.addKey(final.NameKey)
+ cur := final.buf.Len()
+ nameEncoder := final.EncodeName
+
+ // If no name encoder is provided, fall back to FullNameEncoder for
+ // backwards compatibility.
+ if nameEncoder == nil {
+ nameEncoder = FullNameEncoder
+ }
+
+ nameEncoder(ent.LoggerName, final)
+ if cur == final.buf.Len() {
+ // User-supplied EncodeName was a no-op. Fall back to strings to
+ // keep output JSON valid.
+ final.AppendString(ent.LoggerName)
+ }
+ }
+ if ent.Caller.Defined {
+ if final.CallerKey != "" {
+ final.addKey(final.CallerKey)
+ cur := final.buf.Len()
+ final.EncodeCaller(ent.Caller, final)
+ if cur == final.buf.Len() {
+ // User-supplied EncodeCaller was a no-op. Fall back to strings to
+ // keep output JSON valid.
+ final.AppendString(ent.Caller.String())
+ }
+ }
+ if final.FunctionKey != "" {
+ final.addKey(final.FunctionKey)
+ final.AppendString(ent.Caller.Function)
+ }
+ }
+ if final.MessageKey != "" {
+ final.addKey(enc.MessageKey)
+ final.AppendString(ent.Message)
+ }
+ if enc.buf.Len() > 0 {
+ final.addElementSeparator()
+ final.buf.Write(enc.buf.Bytes())
+ }
+ addFields(final, fields)
+ final.closeOpenNamespaces()
+ if ent.Stack != "" && final.StacktraceKey != "" {
+ final.AddString(final.StacktraceKey, ent.Stack)
+ }
+ final.buf.AppendByte('}')
+ if final.LineEnding != "" {
+ final.buf.AppendString(final.LineEnding)
+ } else {
+ final.buf.AppendString(DefaultLineEnding)
+ }
+
+ ret := final.buf
+ putJSONEncoder(final)
+ return ret, nil
+}
+
+func (enc *jsonEncoder) truncate() {
+ enc.buf.Reset()
+}
+
+func (enc *jsonEncoder) closeOpenNamespaces() {
+ for i := 0; i < enc.openNamespaces; i++ {
+ enc.buf.AppendByte('}')
+ }
+}
+
+func (enc *jsonEncoder) addKey(key string) {
+ enc.addElementSeparator()
+ enc.buf.AppendByte('"')
+ enc.safeAddString(key)
+ enc.buf.AppendByte('"')
+ enc.buf.AppendByte(':')
+ if enc.spaced {
+ enc.buf.AppendByte(' ')
+ }
+}
+
+func (enc *jsonEncoder) addElementSeparator() {
+ last := enc.buf.Len() - 1
+ if last < 0 {
+ return
+ }
+ switch enc.buf.Bytes()[last] {
+ case '{', '[', ':', ',', ' ':
+ return
+ default:
+ enc.buf.AppendByte(',')
+ if enc.spaced {
+ enc.buf.AppendByte(' ')
+ }
+ }
+}
+
+func (enc *jsonEncoder) appendFloat(val float64, bitSize int) {
+ enc.addElementSeparator()
+ switch {
+ case math.IsNaN(val):
+ enc.buf.AppendString(`"NaN"`)
+ case math.IsInf(val, 1):
+ enc.buf.AppendString(`"+Inf"`)
+ case math.IsInf(val, -1):
+ enc.buf.AppendString(`"-Inf"`)
+ default:
+ enc.buf.AppendFloat(val, bitSize)
+ }
+}
+
+// safeAddString JSON-escapes a string and appends it to the internal buffer.
+// Unlike the standard library's encoder, it doesn't attempt to protect the
+// user from browser vulnerabilities or JSONP-related problems.
+func (enc *jsonEncoder) safeAddString(s string) {
+ for i := 0; i < len(s); {
+ if enc.tryAddRuneSelf(s[i]) {
+ i++
+ continue
+ }
+ r, size := utf8.DecodeRuneInString(s[i:])
+ if enc.tryAddRuneError(r, size) {
+ i++
+ continue
+ }
+ enc.buf.AppendString(s[i : i+size])
+ i += size
+ }
+}
+
+// safeAddByteString is the no-alloc equivalent of safeAddString(string(s)) for s []byte.
+func (enc *jsonEncoder) safeAddByteString(s []byte) {
+ for i := 0; i < len(s); {
+ if enc.tryAddRuneSelf(s[i]) {
+ i++
+ continue
+ }
+ r, size := utf8.DecodeRune(s[i:])
+ if enc.tryAddRuneError(r, size) {
+ i++
+ continue
+ }
+ enc.buf.Write(s[i : i+size])
+ i += size
+ }
+}
+
+// tryAddRuneSelf appends b if it is a valid UTF-8 character represented in a single byte.
+func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool {
+ if b >= utf8.RuneSelf {
+ return false
+ }
+ if 0x20 <= b && b != '\\' && b != '"' {
+ enc.buf.AppendByte(b)
+ return true
+ }
+ switch b {
+ case '\\', '"':
+ enc.buf.AppendByte('\\')
+ enc.buf.AppendByte(b)
+ case '\n':
+ enc.buf.AppendByte('\\')
+ enc.buf.AppendByte('n')
+ case '\r':
+ enc.buf.AppendByte('\\')
+ enc.buf.AppendByte('r')
+ case '\t':
+ enc.buf.AppendByte('\\')
+ enc.buf.AppendByte('t')
+ default:
+ // Encode bytes < 0x20, except for the escape sequences above.
+ enc.buf.AppendString(`\u00`)
+ enc.buf.AppendByte(_hex[b>>4])
+ enc.buf.AppendByte(_hex[b&0xF])
+ }
+ return true
+}
+
+func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool {
+ if r == utf8.RuneError && size == 1 {
+ enc.buf.AppendString(`\ufffd`)
+ return true
+ }
+ return false
+}
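A minimal sketch of building a Core around NewJSONEncoder. The EncoderConfig keys and encoder functions shown are assumptions picked for brevity, and the sample output line is approximate.

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	cfg := zapcore.EncoderConfig{
		MessageKey:  "msg",
		LevelKey:    "level",
		TimeKey:     "ts",
		EncodeLevel: zapcore.LowercaseLevelEncoder,
		EncodeTime:  zapcore.EpochTimeEncoder,
		LineEnding:  zapcore.DefaultLineEnding,
	}
	core := zapcore.NewCore(zapcore.NewJSONEncoder(cfg), zapcore.Lock(os.Stdout), zapcore.InfoLevel)

	zap.New(core).Info("user logged in", zap.String("user", "alice"))
	// Emits one line roughly like:
	// {"level":"info","ts":1.6e+09,"msg":"user logged in","user":"alice"}
}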
diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go
new file mode 100644
index 0000000..e575c9f
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/level.go
@@ -0,0 +1,175 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+)
+
+var errUnmarshalNilLevel = errors.New("can't unmarshal a nil *Level")
+
+// A Level is a logging priority. Higher levels are more important.
+type Level int8
+
+const (
+ // DebugLevel logs are typically voluminous, and are usually disabled in
+ // production.
+ DebugLevel Level = iota - 1
+ // InfoLevel is the default logging priority.
+ InfoLevel
+ // WarnLevel logs are more important than Info, but don't need individual
+ // human review.
+ WarnLevel
+ // ErrorLevel logs are high-priority. If an application is running smoothly,
+ // it shouldn't generate any error-level logs.
+ ErrorLevel
+ // DPanicLevel logs are particularly important errors. In development the
+ // logger panics after writing the message.
+ DPanicLevel
+ // PanicLevel logs a message, then panics.
+ PanicLevel
+ // FatalLevel logs a message, then calls os.Exit(1).
+ FatalLevel
+
+ _minLevel = DebugLevel
+ _maxLevel = FatalLevel
+)
+
+// String returns a lower-case ASCII representation of the log level.
+func (l Level) String() string {
+ switch l {
+ case DebugLevel:
+ return "debug"
+ case InfoLevel:
+ return "info"
+ case WarnLevel:
+ return "warn"
+ case ErrorLevel:
+ return "error"
+ case DPanicLevel:
+ return "dpanic"
+ case PanicLevel:
+ return "panic"
+ case FatalLevel:
+ return "fatal"
+ default:
+ return fmt.Sprintf("Level(%d)", l)
+ }
+}
+
+// CapitalString returns an all-caps ASCII representation of the log level.
+func (l Level) CapitalString() string {
+ // Printing levels in all-caps is common enough that we should export this
+ // functionality.
+ switch l {
+ case DebugLevel:
+ return "DEBUG"
+ case InfoLevel:
+ return "INFO"
+ case WarnLevel:
+ return "WARN"
+ case ErrorLevel:
+ return "ERROR"
+ case DPanicLevel:
+ return "DPANIC"
+ case PanicLevel:
+ return "PANIC"
+ case FatalLevel:
+ return "FATAL"
+ default:
+ return fmt.Sprintf("LEVEL(%d)", l)
+ }
+}
+
+// MarshalText marshals the Level to text. Note that the text representation
+// drops the -Level suffix (see example).
+func (l Level) MarshalText() ([]byte, error) {
+ return []byte(l.String()), nil
+}
+
+// UnmarshalText unmarshals text to a level. Like MarshalText, UnmarshalText
+// expects the text representation of a Level to drop the -Level suffix (see
+// example).
+//
+// In particular, this makes it easy to configure logging levels using YAML,
+// TOML, or JSON files.
+func (l *Level) UnmarshalText(text []byte) error {
+ if l == nil {
+ return errUnmarshalNilLevel
+ }
+ if !l.unmarshalText(text) && !l.unmarshalText(bytes.ToLower(text)) {
+ return fmt.Errorf("unrecognized level: %q", text)
+ }
+ return nil
+}
+
+func (l *Level) unmarshalText(text []byte) bool {
+ switch string(text) {
+ case "debug", "DEBUG":
+ *l = DebugLevel
+ case "info", "INFO", "": // make the zero value useful
+ *l = InfoLevel
+ case "warn", "WARN":
+ *l = WarnLevel
+ case "error", "ERROR":
+ *l = ErrorLevel
+ case "dpanic", "DPANIC":
+ *l = DPanicLevel
+ case "panic", "PANIC":
+ *l = PanicLevel
+ case "fatal", "FATAL":
+ *l = FatalLevel
+ default:
+ return false
+ }
+ return true
+}
+
+// Set sets the level for the flag.Value interface.
+func (l *Level) Set(s string) error {
+ return l.UnmarshalText([]byte(s))
+}
+
+// Get gets the level for the flag.Getter interface.
+func (l *Level) Get() interface{} {
+ return *l
+}
+
+// Enabled returns true if the given level is at or above this level.
+func (l Level) Enabled(lvl Level) bool {
+ return lvl >= l
+}
+
+// LevelEnabler decides whether a given logging level is enabled when logging a
+// message.
+//
+// Enablers are intended to be used to implement deterministic filters;
+// concerns like sampling are better implemented as a Core.
+//
+// Each concrete Level value implements a static LevelEnabler which returns
+// true for itself and all higher logging levels. For example, WarnLevel.Enabled()
+// will return true for WarnLevel, ErrorLevel, DPanicLevel, PanicLevel, and
+// FatalLevel, but return false for InfoLevel and DebugLevel.
+type LevelEnabler interface {
+ Enabled(Level) bool
+}
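A short sketch of the Level plumbing above: UnmarshalText parses a config string, and the resulting Level acts as its own LevelEnabler.

package main

import (
	"fmt"

	"go.uber.org/zap/zapcore"
)

func main() {
	var lvl zapcore.Level
	if err := lvl.UnmarshalText([]byte("warn")); err != nil {
		panic(err)
	}

	fmt.Println(lvl)                             // warn
	fmt.Println(lvl.Enabled(zapcore.InfoLevel))  // false
	fmt.Println(lvl.Enabled(zapcore.ErrorLevel)) // true
}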
diff --git a/vendor/go.uber.org/zap/zapcore/level_strings.go b/vendor/go.uber.org/zap/zapcore/level_strings.go
new file mode 100644
index 0000000..7af8dad
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/level_strings.go
@@ -0,0 +1,46 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "go.uber.org/zap/internal/color"
+
+var (
+ _levelToColor = map[Level]color.Color{
+ DebugLevel: color.Magenta,
+ InfoLevel: color.Blue,
+ WarnLevel: color.Yellow,
+ ErrorLevel: color.Red,
+ DPanicLevel: color.Red,
+ PanicLevel: color.Red,
+ FatalLevel: color.Red,
+ }
+ _unknownLevelColor = color.Red
+
+ _levelToLowercaseColorString = make(map[Level]string, len(_levelToColor))
+ _levelToCapitalColorString = make(map[Level]string, len(_levelToColor))
+)
+
+func init() {
+ for level, color := range _levelToColor {
+ _levelToLowercaseColorString[level] = color.Add(level.String())
+ _levelToCapitalColorString[level] = color.Add(level.CapitalString())
+ }
+}
diff --git a/vendor/go.uber.org/zap/zapcore/marshaler.go b/vendor/go.uber.org/zap/zapcore/marshaler.go
new file mode 100644
index 0000000..c3c55ba
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/marshaler.go
@@ -0,0 +1,61 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+// ObjectMarshaler allows user-defined types to efficiently add themselves to the
+// logging context, and to selectively omit information which shouldn't be
+// included in logs (e.g., passwords).
+//
+// Note: ObjectMarshaler is only used when zap.Object is used or when
+// passed directly to zap.Any. It is not used when reflection-based
+// encoding is used.
+type ObjectMarshaler interface {
+ MarshalLogObject(ObjectEncoder) error
+}
+
+// ObjectMarshalerFunc is a type adapter that turns a function into an
+// ObjectMarshaler.
+type ObjectMarshalerFunc func(ObjectEncoder) error
+
+// MarshalLogObject calls the underlying function.
+func (f ObjectMarshalerFunc) MarshalLogObject(enc ObjectEncoder) error {
+ return f(enc)
+}
+
+// ArrayMarshaler allows user-defined types to efficiently add themselves to the
+// logging context, and to selectively omit information which shouldn't be
+// included in logs (e.g., passwords).
+//
+// Note: ArrayMarshaler is only used when zap.Array is used or when
+// passed directly to zap.Any. It is not used when reflection-based
+// encoding is used.
+type ArrayMarshaler interface {
+ MarshalLogArray(ArrayEncoder) error
+}
+
+// ArrayMarshalerFunc is a type adapter that turns a function into an
+// ArrayMarshaler.
+type ArrayMarshalerFunc func(ArrayEncoder) error
+
+// MarshalLogArray calls the underlying function.
+func (f ArrayMarshalerFunc) MarshalLogArray(enc ArrayEncoder) error {
+ return f(enc)
+}
diff --git a/vendor/go.uber.org/zap/zapcore/memory_encoder.go b/vendor/go.uber.org/zap/zapcore/memory_encoder.go
new file mode 100644
index 0000000..dfead08
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/memory_encoder.go
@@ -0,0 +1,179 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "time"
+
+// MapObjectEncoder is an ObjectEncoder backed by a simple
+// map[string]interface{}. It's not fast enough for production use, but it's
+// helpful in tests.
+type MapObjectEncoder struct {
+ // Fields contains the entire encoded log context.
+ Fields map[string]interface{}
+ // cur is a pointer to the namespace we're currently writing to.
+ cur map[string]interface{}
+}
+
+// NewMapObjectEncoder creates a new map-backed ObjectEncoder.
+func NewMapObjectEncoder() *MapObjectEncoder {
+ m := make(map[string]interface{})
+ return &MapObjectEncoder{
+ Fields: m,
+ cur: m,
+ }
+}
+
+// AddArray implements ObjectEncoder.
+func (m *MapObjectEncoder) AddArray(key string, v ArrayMarshaler) error {
+ arr := &sliceArrayEncoder{elems: make([]interface{}, 0)}
+ err := v.MarshalLogArray(arr)
+ m.cur[key] = arr.elems
+ return err
+}
+
+// AddObject implements ObjectEncoder.
+func (m *MapObjectEncoder) AddObject(k string, v ObjectMarshaler) error {
+ newMap := NewMapObjectEncoder()
+ m.cur[k] = newMap.Fields
+ return v.MarshalLogObject(newMap)
+}
+
+// AddBinary implements ObjectEncoder.
+func (m *MapObjectEncoder) AddBinary(k string, v []byte) { m.cur[k] = v }
+
+// AddByteString implements ObjectEncoder.
+func (m *MapObjectEncoder) AddByteString(k string, v []byte) { m.cur[k] = string(v) }
+
+// AddBool implements ObjectEncoder.
+func (m *MapObjectEncoder) AddBool(k string, v bool) { m.cur[k] = v }
+
+// AddDuration implements ObjectEncoder.
+func (m MapObjectEncoder) AddDuration(k string, v time.Duration) { m.cur[k] = v }
+
+// AddComplex128 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddComplex128(k string, v complex128) { m.cur[k] = v }
+
+// AddComplex64 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddComplex64(k string, v complex64) { m.cur[k] = v }
+
+// AddFloat64 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddFloat64(k string, v float64) { m.cur[k] = v }
+
+// AddFloat32 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddFloat32(k string, v float32) { m.cur[k] = v }
+
+// AddInt implements ObjectEncoder.
+func (m *MapObjectEncoder) AddInt(k string, v int) { m.cur[k] = v }
+
+// AddInt64 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddInt64(k string, v int64) { m.cur[k] = v }
+
+// AddInt32 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddInt32(k string, v int32) { m.cur[k] = v }
+
+// AddInt16 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddInt16(k string, v int16) { m.cur[k] = v }
+
+// AddInt8 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddInt8(k string, v int8) { m.cur[k] = v }
+
+// AddString implements ObjectEncoder.
+func (m *MapObjectEncoder) AddString(k string, v string) { m.cur[k] = v }
+
+// AddTime implements ObjectEncoder.
+func (m MapObjectEncoder) AddTime(k string, v time.Time) { m.cur[k] = v }
+
+// AddUint implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUint(k string, v uint) { m.cur[k] = v }
+
+// AddUint64 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUint64(k string, v uint64) { m.cur[k] = v }
+
+// AddUint32 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUint32(k string, v uint32) { m.cur[k] = v }
+
+// AddUint16 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUint16(k string, v uint16) { m.cur[k] = v }
+
+// AddUint8 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUint8(k string, v uint8) { m.cur[k] = v }
+
+// AddUintptr implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUintptr(k string, v uintptr) { m.cur[k] = v }
+
+// AddReflected implements ObjectEncoder.
+func (m *MapObjectEncoder) AddReflected(k string, v interface{}) error {
+ m.cur[k] = v
+ return nil
+}
+
+// OpenNamespace implements ObjectEncoder.
+func (m *MapObjectEncoder) OpenNamespace(k string) {
+ ns := make(map[string]interface{})
+ m.cur[k] = ns
+ m.cur = ns
+}
+
+// sliceArrayEncoder is an ArrayEncoder backed by a simple []interface{}. Like
+// the MapObjectEncoder, it's not designed for production use.
+type sliceArrayEncoder struct {
+ elems []interface{}
+}
+
+func (s *sliceArrayEncoder) AppendArray(v ArrayMarshaler) error {
+ enc := &sliceArrayEncoder{}
+ err := v.MarshalLogArray(enc)
+ s.elems = append(s.elems, enc.elems)
+ return err
+}
+
+func (s *sliceArrayEncoder) AppendObject(v ObjectMarshaler) error {
+ m := NewMapObjectEncoder()
+ err := v.MarshalLogObject(m)
+ s.elems = append(s.elems, m.Fields)
+ return err
+}
+
+func (s *sliceArrayEncoder) AppendReflected(v interface{}) error {
+ s.elems = append(s.elems, v)
+ return nil
+}
+
+func (s *sliceArrayEncoder) AppendBool(v bool) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendByteString(v []byte) { s.elems = append(s.elems, string(v)) }
+func (s *sliceArrayEncoder) AppendComplex128(v complex128) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendComplex64(v complex64) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendDuration(v time.Duration) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendFloat64(v float64) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendFloat32(v float32) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendInt(v int) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendInt64(v int64) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendInt32(v int32) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendInt16(v int16) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendInt8(v int8) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendString(v string) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendTime(v time.Time) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendUint(v uint) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendUint64(v uint64) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendUint32(v uint32) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendUint16(v uint16) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendUint8(v uint8) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendUintptr(v uintptr) { s.elems = append(s.elems, v) }
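As the doc comment suggests, MapObjectEncoder is aimed at tests. A hedged sketch of that use; the user type and its fields are hypothetical.

package demo

import (
	"testing"

	"go.uber.org/zap/zapcore"
)

// user is a hypothetical type implementing ObjectMarshaler.
type user struct{ Name string }

func (u user) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("name", u.Name)
	return nil
}

func TestUserMarshaling(t *testing.T) {
	enc := zapcore.NewMapObjectEncoder()
	if err := (user{Name: "alice"}).MarshalLogObject(enc); err != nil {
		t.Fatal(err)
	}
	if enc.Fields["name"] != "alice" {
		t.Fatalf("unexpected fields: %v", enc.Fields)
	}
}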
diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go
new file mode 100644
index 0000000..31ed96e
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/sampler.go
@@ -0,0 +1,210 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "time"
+
+ "go.uber.org/atomic"
+)
+
+const (
+ _numLevels = _maxLevel - _minLevel + 1
+ _countersPerLevel = 4096
+)
+
+type counter struct {
+ resetAt atomic.Int64
+ counter atomic.Uint64
+}
+
+type counters [_numLevels][_countersPerLevel]counter
+
+func newCounters() *counters {
+ return &counters{}
+}
+
+func (cs *counters) get(lvl Level, key string) *counter {
+ i := lvl - _minLevel
+ j := fnv32a(key) % _countersPerLevel
+ return &cs[i][j]
+}
+
+// fnv32a, adapted from "hash/fnv", but without a []byte(string) alloc
+func fnv32a(s string) uint32 {
+ const (
+ offset32 = 2166136261
+ prime32 = 16777619
+ )
+ hash := uint32(offset32)
+ for i := 0; i < len(s); i++ {
+ hash ^= uint32(s[i])
+ hash *= prime32
+ }
+ return hash
+}
+
+func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 {
+ tn := t.UnixNano()
+ resetAfter := c.resetAt.Load()
+ if resetAfter > tn {
+ return c.counter.Inc()
+ }
+
+ c.counter.Store(1)
+
+ newResetAfter := tn + tick.Nanoseconds()
+ if !c.resetAt.CAS(resetAfter, newResetAfter) {
+ // We raced with another goroutine trying to reset, and it also reset
+ // the counter to 1, so we need to reincrement the counter.
+ return c.counter.Inc()
+ }
+
+ return 1
+}
+
+// SamplingDecision is a decision made by the sampler, represented as a bit field.
+// More decisions may be added in the future.
+type SamplingDecision uint32
+
+const (
+ // LogDropped indicates that the Sampler dropped a log entry.
+ LogDropped SamplingDecision = 1 << iota
+ // LogSampled indicates that the Sampler sampled a log entry.
+ LogSampled
+)
+
+// optionFunc wraps a func so it satisfies the SamplerOption interface.
+type optionFunc func(*sampler)
+
+func (f optionFunc) apply(s *sampler) {
+ f(s)
+}
+
+// SamplerOption configures a Sampler.
+type SamplerOption interface {
+ apply(*sampler)
+}
+
+// nopSamplingHook is the default hook used by sampler.
+func nopSamplingHook(Entry, SamplingDecision) {}
+
+// SamplerHook registers a function which will be called when Sampler makes a
+// decision.
+//
+// This hook may be used to get visibility into the performance of the sampler.
+// For example, use it to track metrics of dropped versus sampled logs.
+//
+// var dropped atomic.Int64
+// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) {
+// if dec&zapcore.LogDropped > 0 {
+// dropped.Inc()
+// }
+// })
+func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption {
+ return optionFunc(func(s *sampler) {
+ s.hook = hook
+ })
+}
+
+// NewSamplerWithOptions creates a Core that samples incoming entries, which
+// caps the CPU and I/O load of logging while attempting to preserve a
+// representative subset of your logs.
+//
+// Zap samples by logging the first N entries with a given level and message
+// each tick. If more Entries with the same level and message are seen during
+// the same interval, every Mth message is logged and the rest are dropped.
+//
+// Sampler can be configured to report sampling decisions with the SamplerHook
+// option.
+//
+// Keep in mind that zap's sampling implementation is optimized for speed over
+// absolute precision; under load, each tick may be slightly over- or
+// under-sampled.
+func NewSamplerWithOptions(core Core, tick time.Duration, first, thereafter int, opts ...SamplerOption) Core {
+ s := &sampler{
+ Core: core,
+ tick: tick,
+ counts: newCounters(),
+ first: uint64(first),
+ thereafter: uint64(thereafter),
+ hook: nopSamplingHook,
+ }
+ for _, opt := range opts {
+ opt.apply(s)
+ }
+
+ return s
+}
+
+type sampler struct {
+ Core
+
+ counts *counters
+ tick time.Duration
+ first, thereafter uint64
+ hook func(Entry, SamplingDecision)
+}
+
+// NewSampler creates a Core that samples incoming entries, which
+// caps the CPU and I/O load of logging while attempting to preserve a
+// representative subset of your logs.
+//
+// Zap samples by logging the first N entries with a given level and message
+// each tick. If more Entries with the same level and message are seen during
+// the same interval, every Mth message is logged and the rest are dropped.
+//
+// Keep in mind that zap's sampling implementation is optimized for speed over
+// absolute precision; under load, each tick may be slightly over- or
+// under-sampled.
+//
+// Deprecated: use NewSamplerWithOptions.
+func NewSampler(core Core, tick time.Duration, first, thereafter int) Core {
+ return NewSamplerWithOptions(core, tick, first, thereafter)
+}
+
+func (s *sampler) With(fields []Field) Core {
+ return &sampler{
+ Core: s.Core.With(fields),
+ tick: s.tick,
+ counts: s.counts,
+ first: s.first,
+ thereafter: s.thereafter,
+ hook: s.hook,
+ }
+}
+
+func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
+ if !s.Enabled(ent.Level) {
+ return ce
+ }
+
+ if ent.Level >= _minLevel && ent.Level <= _maxLevel {
+ counter := s.counts.get(ent.Level, ent.Message)
+ n := counter.IncCheckReset(ent.Time, s.tick)
+ if n > s.first && (n-s.first)%s.thereafter != 0 {
+ s.hook(ent, LogDropped)
+ return ce
+ }
+ s.hook(ent, LogSampled)
+ }
+ return s.Core.Check(ent, ce)
+}
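A usage sketch for NewSamplerWithOptions and SamplerHook, assuming zap.NewProductionEncoderConfig from the parent package and the vendored go.uber.org/atomic counter type; the tick/first/thereafter values are arbitrary.

package main

import (
	"fmt"
	"os"
	"time"

	"go.uber.org/atomic"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	var dropped atomic.Int64

	base := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stdout),
		zapcore.InfoLevel,
	)

	// Log the first 2 identical messages per second, then every 10th.
	sampled := zapcore.NewSamplerWithOptions(base, time.Second, 2, 10,
		zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) {
			if dec&zapcore.LogDropped > 0 {
				dropped.Inc()
			}
		}))

	logger := zap.New(sampled)
	for i := 0; i < 100; i++ {
		logger.Info("hot loop")
	}
	_ = logger.Sync()
	fmt.Printf("dropped %d entries\n", dropped.Load())
}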
diff --git a/vendor/go.uber.org/zap/zapcore/tee.go b/vendor/go.uber.org/zap/zapcore/tee.go
new file mode 100644
index 0000000..07a32ee
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/tee.go
@@ -0,0 +1,81 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "go.uber.org/multierr"
+
+type multiCore []Core
+
+// NewTee creates a Core that duplicates log entries into two or more
+// underlying Cores.
+//
+// Calling it with a single Core returns the input unchanged, and calling
+// it with no input returns a no-op Core.
+func NewTee(cores ...Core) Core {
+ switch len(cores) {
+ case 0:
+ return NewNopCore()
+ case 1:
+ return cores[0]
+ default:
+ return multiCore(cores)
+ }
+}
+
+func (mc multiCore) With(fields []Field) Core {
+ clone := make(multiCore, len(mc))
+ for i := range mc {
+ clone[i] = mc[i].With(fields)
+ }
+ return clone
+}
+
+func (mc multiCore) Enabled(lvl Level) bool {
+ for i := range mc {
+ if mc[i].Enabled(lvl) {
+ return true
+ }
+ }
+ return false
+}
+
+func (mc multiCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
+ for i := range mc {
+ ce = mc[i].Check(ent, ce)
+ }
+ return ce
+}
+
+func (mc multiCore) Write(ent Entry, fields []Field) error {
+ var err error
+ for i := range mc {
+ err = multierr.Append(err, mc[i].Write(ent, fields))
+ }
+ return err
+}
+
+func (mc multiCore) Sync() error {
+ var err error
+ for i := range mc {
+ err = multierr.Append(err, mc[i].Sync())
+ }
+ return err
+}
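A brief sketch of NewTee splitting one logger across two cores; it assumes NewConsoleEncoder and the parent package's encoder-config helpers.

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	consoleEnc := zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig())
	jsonEnc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())

	// Human-readable debug output on stderr, structured info+ output on stdout.
	tee := zapcore.NewTee(
		zapcore.NewCore(consoleEnc, zapcore.Lock(os.Stderr), zapcore.DebugLevel),
		zapcore.NewCore(jsonEnc, zapcore.Lock(os.Stdout), zapcore.InfoLevel),
	)

	zap.New(tee).Info("written to both cores")
}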
diff --git a/vendor/go.uber.org/zap/zapcore/write_syncer.go b/vendor/go.uber.org/zap/zapcore/write_syncer.go
new file mode 100644
index 0000000..d4a1af3
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/write_syncer.go
@@ -0,0 +1,122 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "io"
+ "sync"
+
+ "go.uber.org/multierr"
+)
+
+// A WriteSyncer is an io.Writer that can also flush any buffered data. Note
+// that *os.File (and thus, os.Stderr and os.Stdout) implement WriteSyncer.
+type WriteSyncer interface {
+ io.Writer
+ Sync() error
+}
+
+// AddSync converts an io.Writer to a WriteSyncer. It attempts to be
+// intelligent: if the concrete type of the io.Writer implements WriteSyncer,
+// we'll use the existing Sync method. If it doesn't, we'll add a no-op Sync.
+func AddSync(w io.Writer) WriteSyncer {
+ switch w := w.(type) {
+ case WriteSyncer:
+ return w
+ default:
+ return writerWrapper{w}
+ }
+}
+
+type lockedWriteSyncer struct {
+ sync.Mutex
+ ws WriteSyncer
+}
+
+// Lock wraps a WriteSyncer in a mutex to make it safe for concurrent use. In
+// particular, *os.Files must be locked before use.
+func Lock(ws WriteSyncer) WriteSyncer {
+ if _, ok := ws.(*lockedWriteSyncer); ok {
+ // no need to layer on another lock
+ return ws
+ }
+ return &lockedWriteSyncer{ws: ws}
+}
+
+func (s *lockedWriteSyncer) Write(bs []byte) (int, error) {
+ s.Lock()
+ n, err := s.ws.Write(bs)
+ s.Unlock()
+ return n, err
+}
+
+func (s *lockedWriteSyncer) Sync() error {
+ s.Lock()
+ err := s.ws.Sync()
+ s.Unlock()
+ return err
+}
+
+type writerWrapper struct {
+ io.Writer
+}
+
+func (w writerWrapper) Sync() error {
+ return nil
+}
+
+type multiWriteSyncer []WriteSyncer
+
+// NewMultiWriteSyncer creates a WriteSyncer that duplicates its writes
+// and sync calls, much like io.MultiWriter.
+func NewMultiWriteSyncer(ws ...WriteSyncer) WriteSyncer {
+ if len(ws) == 1 {
+ return ws[0]
+ }
+ return multiWriteSyncer(ws)
+}
+
+// See https://golang.org/src/io/multi.go
+// When not all underlying syncers write the same number of bytes,
+// the smallest number is returned even though Write() is called on
+// all of them.
+func (ws multiWriteSyncer) Write(p []byte) (int, error) {
+ var writeErr error
+ nWritten := 0
+ for _, w := range ws {
+ n, err := w.Write(p)
+ writeErr = multierr.Append(writeErr, err)
+ if nWritten == 0 && n != 0 {
+ nWritten = n
+ } else if n < nWritten {
+ nWritten = n
+ }
+ }
+ return nWritten, writeErr
+}
+
+func (ws multiWriteSyncer) Sync() error {
+ var err error
+ for _, w := range ws {
+ err = multierr.Append(err, w.Sync())
+ }
+ return err
+}
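
Editorial aside, not part of the vendored file: a small sketch exercising the three helpers defined above — AddSync for plain io.Writers, Lock for concurrent use, and NewMultiWriteSyncer for fan-out. The buffer and message are illustrative.

package main

import (
	"bytes"
	"os"

	"go.uber.org/zap/zapcore"
)

func main() {
	// A bytes.Buffer is only an io.Writer, so AddSync wraps it with a no-op Sync.
	var buf bytes.Buffer
	bufferSyncer := zapcore.AddSync(&buf)

	// os.Stderr already implements WriteSyncer; Lock guards it with a mutex.
	stderrSyncer := zapcore.Lock(os.Stderr)

	// NewMultiWriteSyncer duplicates writes and syncs, much like io.MultiWriter.
	ws := zapcore.NewMultiWriteSyncer(bufferSyncer, stderrSyncer)

	ws.Write([]byte("hello write syncers\n"))
	ws.Sync()
}
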
diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS
new file mode 100644
index 0000000..15167cd
--- /dev/null
+++ b/vendor/golang.org/x/net/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS
new file mode 100644
index 0000000..1c4577e
--- /dev/null
+++ b/vendor/golang.org/x/net/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/vendor/golang.org/x/net/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS
new file mode 100644
index 0000000..7330990
--- /dev/null
+++ b/vendor/golang.org/x/net/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/net/internal/socks/client.go b/vendor/golang.org/x/net/internal/socks/client.go
new file mode 100644
index 0000000..3d6f516
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/socks/client.go
@@ -0,0 +1,168 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package socks
+
+import (
+ "context"
+ "errors"
+ "io"
+ "net"
+ "strconv"
+ "time"
+)
+
+var (
+ noDeadline = time.Time{}
+ aLongTimeAgo = time.Unix(1, 0)
+)
+
+func (d *Dialer) connect(ctx context.Context, c net.Conn, address string) (_ net.Addr, ctxErr error) {
+ host, port, err := splitHostPort(address)
+ if err != nil {
+ return nil, err
+ }
+ if deadline, ok := ctx.Deadline(); ok && !deadline.IsZero() {
+ c.SetDeadline(deadline)
+ defer c.SetDeadline(noDeadline)
+ }
+ if ctx != context.Background() {
+ errCh := make(chan error, 1)
+ done := make(chan struct{})
+ defer func() {
+ close(done)
+ if ctxErr == nil {
+ ctxErr = <-errCh
+ }
+ }()
+ go func() {
+ select {
+ case <-ctx.Done():
+ c.SetDeadline(aLongTimeAgo)
+ errCh <- ctx.Err()
+ case <-done:
+ errCh <- nil
+ }
+ }()
+ }
+
+ b := make([]byte, 0, 6+len(host)) // the size here is just an estimate
+ b = append(b, Version5)
+ if len(d.AuthMethods) == 0 || d.Authenticate == nil {
+ b = append(b, 1, byte(AuthMethodNotRequired))
+ } else {
+ ams := d.AuthMethods
+ if len(ams) > 255 {
+ return nil, errors.New("too many authentication methods")
+ }
+ b = append(b, byte(len(ams)))
+ for _, am := range ams {
+ b = append(b, byte(am))
+ }
+ }
+ if _, ctxErr = c.Write(b); ctxErr != nil {
+ return
+ }
+
+ if _, ctxErr = io.ReadFull(c, b[:2]); ctxErr != nil {
+ return
+ }
+ if b[0] != Version5 {
+ return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0])))
+ }
+ am := AuthMethod(b[1])
+ if am == AuthMethodNoAcceptableMethods {
+ return nil, errors.New("no acceptable authentication methods")
+ }
+ if d.Authenticate != nil {
+ if ctxErr = d.Authenticate(ctx, c, am); ctxErr != nil {
+ return
+ }
+ }
+
+ b = b[:0]
+ b = append(b, Version5, byte(d.cmd), 0)
+ if ip := net.ParseIP(host); ip != nil {
+ if ip4 := ip.To4(); ip4 != nil {
+ b = append(b, AddrTypeIPv4)
+ b = append(b, ip4...)
+ } else if ip6 := ip.To16(); ip6 != nil {
+ b = append(b, AddrTypeIPv6)
+ b = append(b, ip6...)
+ } else {
+ return nil, errors.New("unknown address type")
+ }
+ } else {
+ if len(host) > 255 {
+ return nil, errors.New("FQDN too long")
+ }
+ b = append(b, AddrTypeFQDN)
+ b = append(b, byte(len(host)))
+ b = append(b, host...)
+ }
+ b = append(b, byte(port>>8), byte(port))
+ if _, ctxErr = c.Write(b); ctxErr != nil {
+ return
+ }
+
+ if _, ctxErr = io.ReadFull(c, b[:4]); ctxErr != nil {
+ return
+ }
+ if b[0] != Version5 {
+ return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0])))
+ }
+ if cmdErr := Reply(b[1]); cmdErr != StatusSucceeded {
+ return nil, errors.New("unknown error " + cmdErr.String())
+ }
+ if b[2] != 0 {
+ return nil, errors.New("non-zero reserved field")
+ }
+ l := 2
+ var a Addr
+ switch b[3] {
+ case AddrTypeIPv4:
+ l += net.IPv4len
+ a.IP = make(net.IP, net.IPv4len)
+ case AddrTypeIPv6:
+ l += net.IPv6len
+ a.IP = make(net.IP, net.IPv6len)
+ case AddrTypeFQDN:
+ if _, err := io.ReadFull(c, b[:1]); err != nil {
+ return nil, err
+ }
+ l += int(b[0])
+ default:
+ return nil, errors.New("unknown address type " + strconv.Itoa(int(b[3])))
+ }
+ if cap(b) < l {
+ b = make([]byte, l)
+ } else {
+ b = b[:l]
+ }
+ if _, ctxErr = io.ReadFull(c, b); ctxErr != nil {
+ return
+ }
+ if a.IP != nil {
+ copy(a.IP, b)
+ } else {
+ a.Name = string(b[:len(b)-2])
+ }
+ a.Port = int(b[len(b)-2])<<8 | int(b[len(b)-1])
+ return &a, nil
+}
+
+func splitHostPort(address string) (string, int, error) {
+ host, port, err := net.SplitHostPort(address)
+ if err != nil {
+ return "", 0, err
+ }
+ portnum, err := strconv.Atoi(port)
+ if err != nil {
+ return "", 0, err
+ }
+ if 1 > portnum || portnum > 0xffff {
+ return "", 0, errors.New("port number out of range " + port)
+ }
+ return host, portnum, nil
+}
diff --git a/vendor/golang.org/x/net/internal/socks/socks.go b/vendor/golang.org/x/net/internal/socks/socks.go
new file mode 100644
index 0000000..97db234
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/socks/socks.go
@@ -0,0 +1,317 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package socks provides a SOCKS version 5 client implementation.
+//
+// SOCKS protocol version 5 is defined in RFC 1928.
+// Username/Password authentication for SOCKS version 5 is defined in
+// RFC 1929.
+package socks
+
+import (
+ "context"
+ "errors"
+ "io"
+ "net"
+ "strconv"
+)
+
+// A Command represents a SOCKS command.
+type Command int
+
+func (cmd Command) String() string {
+ switch cmd {
+ case CmdConnect:
+ return "socks connect"
+ case cmdBind:
+ return "socks bind"
+ default:
+ return "socks " + strconv.Itoa(int(cmd))
+ }
+}
+
+// An AuthMethod represents a SOCKS authentication method.
+type AuthMethod int
+
+// A Reply represents a SOCKS command reply code.
+type Reply int
+
+func (code Reply) String() string {
+ switch code {
+ case StatusSucceeded:
+ return "succeeded"
+ case 0x01:
+ return "general SOCKS server failure"
+ case 0x02:
+ return "connection not allowed by ruleset"
+ case 0x03:
+ return "network unreachable"
+ case 0x04:
+ return "host unreachable"
+ case 0x05:
+ return "connection refused"
+ case 0x06:
+ return "TTL expired"
+ case 0x07:
+ return "command not supported"
+ case 0x08:
+ return "address type not supported"
+ default:
+ return "unknown code: " + strconv.Itoa(int(code))
+ }
+}
+
+// Wire protocol constants.
+const (
+ Version5 = 0x05
+
+ AddrTypeIPv4 = 0x01
+ AddrTypeFQDN = 0x03
+ AddrTypeIPv6 = 0x04
+
+ CmdConnect Command = 0x01 // establishes an active-open forward proxy connection
+ cmdBind Command = 0x02 // establishes a passive-open forward proxy connection
+
+ AuthMethodNotRequired AuthMethod = 0x00 // no authentication required
+ AuthMethodUsernamePassword AuthMethod = 0x02 // use username/password
+ AuthMethodNoAcceptableMethods AuthMethod = 0xff // no acceptable authentication methods
+
+ StatusSucceeded Reply = 0x00
+)
+
+// An Addr represents a SOCKS-specific address.
+// Either Name or IP is used exclusively.
+type Addr struct {
+ Name string // fully-qualified domain name
+ IP net.IP
+ Port int
+}
+
+func (a *Addr) Network() string { return "socks" }
+
+func (a *Addr) String() string {
+ if a == nil {
+ return ""
+ }
+ port := strconv.Itoa(a.Port)
+ if a.IP == nil {
+ return net.JoinHostPort(a.Name, port)
+ }
+ return net.JoinHostPort(a.IP.String(), port)
+}
+
+// A Conn represents a forward proxy connection.
+type Conn struct {
+ net.Conn
+
+ boundAddr net.Addr
+}
+
+// BoundAddr returns the address assigned by the proxy server for
+// connecting to the command target address from the proxy server.
+func (c *Conn) BoundAddr() net.Addr {
+ if c == nil {
+ return nil
+ }
+ return c.boundAddr
+}
+
+// A Dialer holds SOCKS-specific options.
+type Dialer struct {
+ cmd Command // either CmdConnect or cmdBind
+ proxyNetwork string // network between a proxy server and a client
+ proxyAddress string // proxy server address
+
+ // ProxyDial specifies the optional dial function for
+ // establishing the transport connection.
+ ProxyDial func(context.Context, string, string) (net.Conn, error)
+
+ // AuthMethods specifies the list of request authentication
+ // methods.
+ // If empty, the SOCKS client requests only AuthMethodNotRequired.
+ AuthMethods []AuthMethod
+
+ // Authenticate specifies the optional authentication
+ // function. It must be non-nil when AuthMethods is not empty.
+ // It must return an error when authentication fails.
+ Authenticate func(context.Context, io.ReadWriter, AuthMethod) error
+}
+
+// DialContext connects to the provided address on the provided
+// network.
+//
+// The returned error value may be a net.OpError. When the Op field of
+// net.OpError contains "socks", the Source field contains a proxy
+// server address and the Addr field contains a command target
+// address.
+//
+// See func Dial in the standard library's net package for a description
+// of the network and address parameters.
+func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
+ if err := d.validateTarget(network, address); err != nil {
+ proxy, dst, _ := d.pathAddrs(address)
+ return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
+ }
+ if ctx == nil {
+ proxy, dst, _ := d.pathAddrs(address)
+ return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")}
+ }
+ var err error
+ var c net.Conn
+ if d.ProxyDial != nil {
+ c, err = d.ProxyDial(ctx, d.proxyNetwork, d.proxyAddress)
+ } else {
+ var dd net.Dialer
+ c, err = dd.DialContext(ctx, d.proxyNetwork, d.proxyAddress)
+ }
+ if err != nil {
+ proxy, dst, _ := d.pathAddrs(address)
+ return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
+ }
+ a, err := d.connect(ctx, c, address)
+ if err != nil {
+ c.Close()
+ proxy, dst, _ := d.pathAddrs(address)
+ return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
+ }
+ return &Conn{Conn: c, boundAddr: a}, nil
+}
+
+// DialWithConn initiates a connection from SOCKS server to the target
+// network and address using the connection c that is already
+// connected to the SOCKS server.
+//
+// It returns the connection's local address assigned by the SOCKS
+// server.
+func (d *Dialer) DialWithConn(ctx context.Context, c net.Conn, network, address string) (net.Addr, error) {
+ if err := d.validateTarget(network, address); err != nil {
+ proxy, dst, _ := d.pathAddrs(address)
+ return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
+ }
+ if ctx == nil {
+ proxy, dst, _ := d.pathAddrs(address)
+ return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")}
+ }
+ a, err := d.connect(ctx, c, address)
+ if err != nil {
+ proxy, dst, _ := d.pathAddrs(address)
+ return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
+ }
+ return a, nil
+}
+
+// Dial connects to the provided address on the provided network.
+//
+// Unlike DialContext, it returns a raw transport connection instead
+// of a forward proxy connection.
+//
+// Deprecated: Use DialContext or DialWithConn instead.
+func (d *Dialer) Dial(network, address string) (net.Conn, error) {
+ if err := d.validateTarget(network, address); err != nil {
+ proxy, dst, _ := d.pathAddrs(address)
+ return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
+ }
+ var err error
+ var c net.Conn
+ if d.ProxyDial != nil {
+ c, err = d.ProxyDial(context.Background(), d.proxyNetwork, d.proxyAddress)
+ } else {
+ c, err = net.Dial(d.proxyNetwork, d.proxyAddress)
+ }
+ if err != nil {
+ proxy, dst, _ := d.pathAddrs(address)
+ return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
+ }
+ if _, err := d.DialWithConn(context.Background(), c, network, address); err != nil {
+ c.Close()
+ return nil, err
+ }
+ return c, nil
+}
+
+func (d *Dialer) validateTarget(network, address string) error {
+ switch network {
+ case "tcp", "tcp6", "tcp4":
+ default:
+ return errors.New("network not implemented")
+ }
+ switch d.cmd {
+ case CmdConnect, cmdBind:
+ default:
+ return errors.New("command not implemented")
+ }
+ return nil
+}
+
+func (d *Dialer) pathAddrs(address string) (proxy, dst net.Addr, err error) {
+ for i, s := range []string{d.proxyAddress, address} {
+ host, port, err := splitHostPort(s)
+ if err != nil {
+ return nil, nil, err
+ }
+ a := &Addr{Port: port}
+ a.IP = net.ParseIP(host)
+ if a.IP == nil {
+ a.Name = host
+ }
+ if i == 0 {
+ proxy = a
+ } else {
+ dst = a
+ }
+ }
+ return
+}
+
+// NewDialer returns a new Dialer that dials through the provided
+// proxy server's network and address.
+func NewDialer(network, address string) *Dialer {
+ return &Dialer{proxyNetwork: network, proxyAddress: address, cmd: CmdConnect}
+}
+
+const (
+ authUsernamePasswordVersion = 0x01
+ authStatusSucceeded = 0x00
+)
+
+// UsernamePassword are the credentials for the username/password
+// authentication method.
+type UsernamePassword struct {
+ Username string
+ Password string
+}
+
+// Authenticate authenticates a pair of username and password with the
+// proxy server.
+func (up *UsernamePassword) Authenticate(ctx context.Context, rw io.ReadWriter, auth AuthMethod) error {
+ switch auth {
+ case AuthMethodNotRequired:
+ return nil
+ case AuthMethodUsernamePassword:
+ if len(up.Username) == 0 || len(up.Username) > 255 || len(up.Password) == 0 || len(up.Password) > 255 {
+ return errors.New("invalid username/password")
+ }
+ b := []byte{authUsernamePasswordVersion}
+ b = append(b, byte(len(up.Username)))
+ b = append(b, up.Username...)
+ b = append(b, byte(len(up.Password)))
+ b = append(b, up.Password...)
+ // TODO(mikio): handle IO deadlines and cancelation if
+ // necessary
+ if _, err := rw.Write(b); err != nil {
+ return err
+ }
+ if _, err := io.ReadFull(rw, b[:2]); err != nil {
+ return err
+ }
+ if b[0] != authUsernamePasswordVersion {
+ return errors.New("invalid username/password version")
+ }
+ if b[1] != authStatusSucceeded {
+ return errors.New("username/password authentication failed")
+ }
+ return nil
+ }
+ return errors.New("unsupported authentication method " + strconv.Itoa(int(auth)))
+}
diff --git a/vendor/golang.org/x/net/proxy/dial.go b/vendor/golang.org/x/net/proxy/dial.go
new file mode 100644
index 0000000..811c2e4
--- /dev/null
+++ b/vendor/golang.org/x/net/proxy/dial.go
@@ -0,0 +1,54 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proxy
+
+import (
+ "context"
+ "net"
+)
+
+// A ContextDialer dials using a context.
+type ContextDialer interface {
+ DialContext(ctx context.Context, network, address string) (net.Conn, error)
+}
+
+// Dial works like DialContext on net.Dialer but using a dialer returned by FromEnvironment.
+//
+// The passed ctx is only used for returning the Conn, not the lifetime of the Conn.
+//
+// Custom dialers (registered via RegisterDialerType) that do not implement ContextDialer
+// can leak a goroutine for as long as it takes the underlying Dialer implementation to time out.
+//
+// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed.
+func Dial(ctx context.Context, network, address string) (net.Conn, error) {
+ d := FromEnvironment()
+ if xd, ok := d.(ContextDialer); ok {
+ return xd.DialContext(ctx, network, address)
+ }
+ return dialContext(ctx, d, network, address)
+}
+
+// WARNING: this can leak a goroutine for as long as the underlying Dialer implementation takes to time out.
+// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed.
+func dialContext(ctx context.Context, d Dialer, network, address string) (net.Conn, error) {
+ var (
+ conn net.Conn
+ done = make(chan struct{}, 1)
+ err error
+ )
+ go func() {
+ conn, err = d.Dial(network, address)
+ close(done)
+ if conn != nil && ctx.Err() != nil {
+ conn.Close()
+ }
+ }()
+ select {
+ case <-ctx.Done():
+ err = ctx.Err()
+ case <-done:
+ }
+ return conn, err
+}
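
Editorial aside, not part of the vendored file: a minimal sketch of the context-aware Dial entry point above. The target address is illustrative, and the proxy actually used depends on the ALL_PROXY / NO_PROXY environment.

package main

import (
	"context"
	"log"
	"time"

	"golang.org/x/net/proxy"
)

func main() {
	// Dial consults ALL_PROXY / NO_PROXY via FromEnvironment and honors the
	// context when the selected dialer implements ContextDialer.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	conn, err := proxy.Dial(ctx, "tcp", "example.com:80")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	log.Println("connected via", conn.RemoteAddr())
}
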
diff --git a/vendor/golang.org/x/net/proxy/direct.go b/vendor/golang.org/x/net/proxy/direct.go
new file mode 100644
index 0000000..3d66bde
--- /dev/null
+++ b/vendor/golang.org/x/net/proxy/direct.go
@@ -0,0 +1,31 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proxy
+
+import (
+ "context"
+ "net"
+)
+
+type direct struct{}
+
+// Direct implements Dialer by making network connections directly using net.Dial or net.DialContext.
+var Direct = direct{}
+
+var (
+ _ Dialer = Direct
+ _ ContextDialer = Direct
+)
+
+// Dial directly invokes net.Dial with the supplied parameters.
+func (direct) Dial(network, addr string) (net.Conn, error) {
+ return net.Dial(network, addr)
+}
+
+// DialContext instantiates a net.Dialer and invokes its DialContext receiver with the supplied parameters.
+func (direct) DialContext(ctx context.Context, network, addr string) (net.Conn, error) {
+ var d net.Dialer
+ return d.DialContext(ctx, network, addr)
+}
diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go
new file mode 100644
index 0000000..573fe79
--- /dev/null
+++ b/vendor/golang.org/x/net/proxy/per_host.go
@@ -0,0 +1,155 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proxy
+
+import (
+ "context"
+ "net"
+ "strings"
+)
+
+// A PerHost directs connections to a default Dialer unless the host name
+// requested matches one of a number of exceptions.
+type PerHost struct {
+ def, bypass Dialer
+
+ bypassNetworks []*net.IPNet
+ bypassIPs []net.IP
+ bypassZones []string
+ bypassHosts []string
+}
+
+// NewPerHost returns a PerHost Dialer that directs connections to either
+// defaultDialer or bypass, depending on whether the connection matches one of
+// the configured rules.
+func NewPerHost(defaultDialer, bypass Dialer) *PerHost {
+ return &PerHost{
+ def: defaultDialer,
+ bypass: bypass,
+ }
+}
+
+// Dial connects to the address addr on the given network through either
+// defaultDialer or bypass.
+func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) {
+ host, _, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, err
+ }
+
+ return p.dialerForRequest(host).Dial(network, addr)
+}
+
+// DialContext connects to the address addr on the given network through either
+// defaultDialer or bypass.
+func (p *PerHost) DialContext(ctx context.Context, network, addr string) (c net.Conn, err error) {
+ host, _, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, err
+ }
+ d := p.dialerForRequest(host)
+ if x, ok := d.(ContextDialer); ok {
+ return x.DialContext(ctx, network, addr)
+ }
+ return dialContext(ctx, d, network, addr)
+}
+
+func (p *PerHost) dialerForRequest(host string) Dialer {
+ if ip := net.ParseIP(host); ip != nil {
+ for _, net := range p.bypassNetworks {
+ if net.Contains(ip) {
+ return p.bypass
+ }
+ }
+ for _, bypassIP := range p.bypassIPs {
+ if bypassIP.Equal(ip) {
+ return p.bypass
+ }
+ }
+ return p.def
+ }
+
+ for _, zone := range p.bypassZones {
+ if strings.HasSuffix(host, zone) {
+ return p.bypass
+ }
+ if host == zone[1:] {
+ // For a zone ".example.com", we match "example.com"
+ // too.
+ return p.bypass
+ }
+ }
+ for _, bypassHost := range p.bypassHosts {
+ if bypassHost == host {
+ return p.bypass
+ }
+ }
+ return p.def
+}
+
+// AddFromString parses a string that contains comma-separated values
+// specifying hosts that should use the bypass proxy. Each value is either an
+// IP address, a CIDR range, a zone (*.example.com) or a host name
+// (localhost). A best effort is made to parse the string and errors are
+// ignored.
+func (p *PerHost) AddFromString(s string) {
+ hosts := strings.Split(s, ",")
+ for _, host := range hosts {
+ host = strings.TrimSpace(host)
+ if len(host) == 0 {
+ continue
+ }
+ if strings.Contains(host, "/") {
+ // We assume that it's a CIDR address like 127.0.0.0/8
+ if _, net, err := net.ParseCIDR(host); err == nil {
+ p.AddNetwork(net)
+ }
+ continue
+ }
+ if ip := net.ParseIP(host); ip != nil {
+ p.AddIP(ip)
+ continue
+ }
+ if strings.HasPrefix(host, "*.") {
+ p.AddZone(host[1:])
+ continue
+ }
+ p.AddHost(host)
+ }
+}
+
+// AddIP specifies an IP address that will use the bypass proxy. Note that
+// this will only take effect if a literal IP address is dialed. A connection
+// to a named host will never match an IP.
+func (p *PerHost) AddIP(ip net.IP) {
+ p.bypassIPs = append(p.bypassIPs, ip)
+}
+
+// AddNetwork specifies an IP range that will use the bypass proxy. Note that
+// this will only take effect if a literal IP address is dialed. A connection
+// to a named host will never match.
+func (p *PerHost) AddNetwork(net *net.IPNet) {
+ p.bypassNetworks = append(p.bypassNetworks, net)
+}
+
+// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
+// "example.com" matches "example.com" and all of its subdomains.
+func (p *PerHost) AddZone(zone string) {
+ if strings.HasSuffix(zone, ".") {
+ zone = zone[:len(zone)-1]
+ }
+ if !strings.HasPrefix(zone, ".") {
+ zone = "." + zone
+ }
+ p.bypassZones = append(p.bypassZones, zone)
+}
+
+// AddHost specifies a host name that will use the bypass proxy.
+func (p *PerHost) AddHost(host string) {
+ if strings.HasSuffix(host, ".") {
+ host = host[:len(host)-1]
+ }
+ p.bypassHosts = append(p.bypassHosts, host)
+}
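
Editorial aside, not part of the vendored file: a sketch of PerHost routing as described above. The proxy address, bypass rules, and target host are illustrative.

package main

import (
	"log"

	"golang.org/x/net/proxy"
)

func main() {
	// Route everything through a SOCKS5 proxy, except hosts matching the
	// bypass rules, which dial directly.
	socksDialer, err := proxy.SOCKS5("tcp", "127.0.0.1:1080", nil, proxy.Direct)
	if err != nil {
		log.Fatal(err)
	}

	perHost := proxy.NewPerHost(socksDialer, proxy.Direct)
	perHost.AddFromString("localhost,10.0.0.0/8,*.internal.example.com")

	// Matches the *.internal.example.com zone, so this dial bypasses the proxy.
	conn, err := perHost.Dial("tcp", "db.internal.example.com:5432")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
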
diff --git a/vendor/golang.org/x/net/proxy/proxy.go b/vendor/golang.org/x/net/proxy/proxy.go
new file mode 100644
index 0000000..9ff4b9a
--- /dev/null
+++ b/vendor/golang.org/x/net/proxy/proxy.go
@@ -0,0 +1,149 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package proxy provides support for a variety of protocols to proxy network
+// data.
+package proxy // import "golang.org/x/net/proxy"
+
+import (
+ "errors"
+ "net"
+ "net/url"
+ "os"
+ "sync"
+)
+
+// A Dialer is a means to establish a connection.
+// Custom dialers should also implement ContextDialer.
+type Dialer interface {
+ // Dial connects to the given address via the proxy.
+ Dial(network, addr string) (c net.Conn, err error)
+}
+
+// Auth contains authentication parameters that specific Dialers may require.
+type Auth struct {
+ User, Password string
+}
+
+// FromEnvironment returns the dialer specified by the proxy-related
+// variables in the environment and makes underlying connections
+// directly.
+func FromEnvironment() Dialer {
+ return FromEnvironmentUsing(Direct)
+}
+
+// FromEnvironmentUsing returns the dialer specified by the proxy-related
+// variables in the environment and makes underlying connections
+// using the provided forwarding Dialer (for instance, a *net.Dialer
+// with desired configuration).
+func FromEnvironmentUsing(forward Dialer) Dialer {
+ allProxy := allProxyEnv.Get()
+ if len(allProxy) == 0 {
+ return forward
+ }
+
+ proxyURL, err := url.Parse(allProxy)
+ if err != nil {
+ return forward
+ }
+ proxy, err := FromURL(proxyURL, forward)
+ if err != nil {
+ return forward
+ }
+
+ noProxy := noProxyEnv.Get()
+ if len(noProxy) == 0 {
+ return proxy
+ }
+
+ perHost := NewPerHost(proxy, forward)
+ perHost.AddFromString(noProxy)
+ return perHost
+}
+
+// proxySchemes is a map from URL schemes to a function that creates a Dialer
+// from a URL with such a scheme.
+var proxySchemes map[string]func(*url.URL, Dialer) (Dialer, error)
+
+// RegisterDialerType takes a URL scheme and a function to generate Dialers from
+// a URL with that scheme and a forwarding Dialer. Registered schemes are used
+// by FromURL.
+func RegisterDialerType(scheme string, f func(*url.URL, Dialer) (Dialer, error)) {
+ if proxySchemes == nil {
+ proxySchemes = make(map[string]func(*url.URL, Dialer) (Dialer, error))
+ }
+ proxySchemes[scheme] = f
+}
+
+// FromURL returns a Dialer given a URL specification and an underlying
+// Dialer for it to make network requests.
+func FromURL(u *url.URL, forward Dialer) (Dialer, error) {
+ var auth *Auth
+ if u.User != nil {
+ auth = new(Auth)
+ auth.User = u.User.Username()
+ if p, ok := u.User.Password(); ok {
+ auth.Password = p
+ }
+ }
+
+ switch u.Scheme {
+ case "socks5", "socks5h":
+ addr := u.Hostname()
+ port := u.Port()
+ if port == "" {
+ port = "1080"
+ }
+ return SOCKS5("tcp", net.JoinHostPort(addr, port), auth, forward)
+ }
+
+ // If the scheme doesn't match any of the built-in schemes, see if it
+ // was registered by another package.
+ if proxySchemes != nil {
+ if f, ok := proxySchemes[u.Scheme]; ok {
+ return f(u, forward)
+ }
+ }
+
+ return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
+}
+
+var (
+ allProxyEnv = &envOnce{
+ names: []string{"ALL_PROXY", "all_proxy"},
+ }
+ noProxyEnv = &envOnce{
+ names: []string{"NO_PROXY", "no_proxy"},
+ }
+)
+
+// envOnce looks up an environment variable (optionally by multiple
+// names) once. It mitigates expensive lookups on some platforms
+// (e.g. Windows).
+// (Borrowed from net/http/transport.go)
+type envOnce struct {
+ names []string
+ once sync.Once
+ val string
+}
+
+func (e *envOnce) Get() string {
+ e.once.Do(e.init)
+ return e.val
+}
+
+func (e *envOnce) init() {
+ for _, n := range e.names {
+ e.val = os.Getenv(n)
+ if e.val != "" {
+ return
+ }
+ }
+}
+
+// reset is used by tests
+func (e *envOnce) reset() {
+ e.once = sync.Once{}
+ e.val = ""
+}
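
Editorial aside, not part of the vendored file: a sketch of FromEnvironment as documented above. The environment values and target address are illustrative, and the dial only succeeds at runtime if such a proxy is actually listening.

package main

import (
	"log"
	"os"

	"golang.org/x/net/proxy"
)

func main() {
	// FromEnvironment inspects ALL_PROXY for the proxy URL and NO_PROXY for
	// bypass rules (both lookups are cached after the first call).
	os.Setenv("ALL_PROXY", "socks5://127.0.0.1:1080")
	os.Setenv("NO_PROXY", "localhost,*.example.org")

	dialer := proxy.FromEnvironment()
	conn, err := dialer.Dial("tcp", "golang.org:443")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
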
diff --git a/vendor/golang.org/x/net/proxy/socks5.go b/vendor/golang.org/x/net/proxy/socks5.go
new file mode 100644
index 0000000..c91651f
--- /dev/null
+++ b/vendor/golang.org/x/net/proxy/socks5.go
@@ -0,0 +1,42 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proxy
+
+import (
+ "context"
+ "net"
+
+ "golang.org/x/net/internal/socks"
+)
+
+// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given
+// address with an optional username and password.
+// See RFC 1928 and RFC 1929.
+func SOCKS5(network, address string, auth *Auth, forward Dialer) (Dialer, error) {
+ d := socks.NewDialer(network, address)
+ if forward != nil {
+ if f, ok := forward.(ContextDialer); ok {
+ d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) {
+ return f.DialContext(ctx, network, address)
+ }
+ } else {
+ d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) {
+ return dialContext(ctx, forward, network, address)
+ }
+ }
+ }
+ if auth != nil {
+ up := socks.UsernamePassword{
+ Username: auth.User,
+ Password: auth.Password,
+ }
+ d.AuthMethods = []socks.AuthMethod{
+ socks.AuthMethodNotRequired,
+ socks.AuthMethodUsernamePassword,
+ }
+ d.Authenticate = up.Authenticate
+ }
+ return d, nil
+}
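
Editorial aside, not part of the vendored file: a sketch of building an authenticated SOCKS5 dialer via the constructor above. The proxy address, credentials, and target are illustrative.

package main

import (
	"context"
	"log"
	"time"

	"golang.org/x/net/proxy"
)

func main() {
	// SOCKS5 dialer with username/password authentication.
	auth := &proxy.Auth{User: "demo", Password: "secret"}
	dialer, err := proxy.SOCKS5("tcp", "proxy.example.com:1080", auth, proxy.Direct)
	if err != nil {
		log.Fatal(err)
	}

	// The returned value is backed by the internal socks.Dialer, so it also
	// satisfies ContextDialer for context-aware dialing.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if cd, ok := dialer.(proxy.ContextDialer); ok {
		conn, err := cd.DialContext(ctx, "tcp", "api.example.com:443")
		if err != nil {
			log.Fatal(err)
		}
		defer conn.Close()
	}
}
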
diff --git a/vendor/google.golang.org/protobuf/AUTHORS b/vendor/google.golang.org/protobuf/AUTHORS
new file mode 100644
index 0000000..2b00ddb
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at https://tip.golang.org/AUTHORS.
diff --git a/vendor/google.golang.org/protobuf/CONTRIBUTORS b/vendor/google.golang.org/protobuf/CONTRIBUTORS
new file mode 100644
index 0000000..1fbd3e9
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at https://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/google.golang.org/protobuf/LICENSE b/vendor/google.golang.org/protobuf/LICENSE
new file mode 100644
index 0000000..49ea0f9
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2018 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/google.golang.org/protobuf/PATENTS b/vendor/google.golang.org/protobuf/PATENTS
new file mode 100644
index 0000000..7330990
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
new file mode 100644
index 0000000..8fb1d9e
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
@@ -0,0 +1,773 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package prototext
+
+import (
+ "fmt"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/internal/encoding/text"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/flags"
+ "google.golang.org/protobuf/internal/genid"
+ "google.golang.org/protobuf/internal/pragma"
+ "google.golang.org/protobuf/internal/set"
+ "google.golang.org/protobuf/internal/strs"
+ "google.golang.org/protobuf/proto"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+// Unmarshal reads the given []byte into the given proto.Message.
+// The provided message must be mutable (e.g., a non-nil pointer to a message).
+func Unmarshal(b []byte, m proto.Message) error {
+ return UnmarshalOptions{}.Unmarshal(b, m)
+}
+
+// UnmarshalOptions is a configurable textproto format unmarshaler.
+type UnmarshalOptions struct {
+ pragma.NoUnkeyedLiterals
+
+ // AllowPartial accepts input for messages that will result in missing
+ // required fields. If AllowPartial is false (the default), Unmarshal will
+ // return an error if there are any missing required fields.
+ AllowPartial bool
+
+ // DiscardUnknown specifies whether to ignore unknown fields when parsing.
+ // An unknown field is any field whose field name or field number does not
+ // resolve to any known or extension field in the message.
+ // By default, unmarshal rejects unknown fields as an error.
+ DiscardUnknown bool
+
+ // Resolver is used for looking up types when unmarshaling
+ // google.protobuf.Any messages or extension fields.
+ // If nil, this defaults to using protoregistry.GlobalTypes.
+ Resolver interface {
+ protoregistry.MessageTypeResolver
+ protoregistry.ExtensionTypeResolver
+ }
+}
+
+// Unmarshal reads the given []byte and populates the given proto.Message
+// using options in the UnmarshalOptions object.
+// The provided message must be mutable (e.g., a non-nil pointer to a message).
+func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error {
+ return o.unmarshal(b, m)
+}
+
+// unmarshal is a centralized function that all unmarshal operations go through.
+// For profiling purposes, avoid changing the name of this function or
+// introducing other code paths for unmarshal that do not go through this.
+func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error {
+ proto.Reset(m)
+
+ if o.Resolver == nil {
+ o.Resolver = protoregistry.GlobalTypes
+ }
+
+ dec := decoder{text.NewDecoder(b), o}
+ if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil {
+ return err
+ }
+ if o.AllowPartial {
+ return nil
+ }
+ return proto.CheckInitialized(m)
+}
+
+type decoder struct {
+ *text.Decoder
+ opts UnmarshalOptions
+}
+
+// newError returns an error object with position info.
+func (d decoder) newError(pos int, f string, x ...interface{}) error {
+ line, column := d.Position(pos)
+ head := fmt.Sprintf("(line %d:%d): ", line, column)
+ return errors.New(head+f, x...)
+}
+
+// unexpectedTokenError returns a syntax error for the given unexpected token.
+func (d decoder) unexpectedTokenError(tok text.Token) error {
+ return d.syntaxError(tok.Pos(), "unexpected token: %s", tok.RawString())
+}
+
+// syntaxError returns a syntax error for given position.
+func (d decoder) syntaxError(pos int, f string, x ...interface{}) error {
+ line, column := d.Position(pos)
+ head := fmt.Sprintf("syntax error (line %d:%d): ", line, column)
+ return errors.New(head+f, x...)
+}
+
+// unmarshalMessage unmarshals into the given protoreflect.Message.
+func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error {
+ messageDesc := m.Descriptor()
+ if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) {
+ return errors.New("no support for proto1 MessageSets")
+ }
+
+ if messageDesc.FullName() == genid.Any_message_fullname {
+ return d.unmarshalAny(m, checkDelims)
+ }
+
+ if checkDelims {
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+
+ if tok.Kind() != text.MessageOpen {
+ return d.unexpectedTokenError(tok)
+ }
+ }
+
+ var seenNums set.Ints
+ var seenOneofs set.Ints
+ fieldDescs := messageDesc.Fields()
+
+ for {
+ // Read field name.
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ switch typ := tok.Kind(); typ {
+ case text.Name:
+ // Continue below.
+ case text.EOF:
+ if checkDelims {
+ return text.ErrUnexpectedEOF
+ }
+ return nil
+ default:
+ if checkDelims && typ == text.MessageClose {
+ return nil
+ }
+ return d.unexpectedTokenError(tok)
+ }
+
+ // Resolve the field descriptor.
+ var name pref.Name
+ var fd pref.FieldDescriptor
+ var xt pref.ExtensionType
+ var xtErr error
+ var isFieldNumberName bool
+
+ switch tok.NameKind() {
+ case text.IdentName:
+ name = pref.Name(tok.IdentName())
+ fd = fieldDescs.ByTextName(string(name))
+
+ case text.TypeName:
+ // Handle extensions only. This code path is not for Any.
+ xt, xtErr = d.opts.Resolver.FindExtensionByName(pref.FullName(tok.TypeName()))
+
+ case text.FieldNumber:
+ isFieldNumberName = true
+ num := pref.FieldNumber(tok.FieldNumber())
+ if !num.IsValid() {
+ return d.newError(tok.Pos(), "invalid field number: %d", num)
+ }
+ fd = fieldDescs.ByNumber(num)
+ if fd == nil {
+ xt, xtErr = d.opts.Resolver.FindExtensionByNumber(messageDesc.FullName(), num)
+ }
+ }
+
+ if xt != nil {
+ fd = xt.TypeDescriptor()
+ if !messageDesc.ExtensionRanges().Has(fd.Number()) || fd.ContainingMessage().FullName() != messageDesc.FullName() {
+ return d.newError(tok.Pos(), "message %v cannot be extended by %v", messageDesc.FullName(), fd.FullName())
+ }
+ } else if xtErr != nil && xtErr != protoregistry.NotFound {
+ return d.newError(tok.Pos(), "unable to resolve [%s]: %v", tok.RawString(), xtErr)
+ }
+ if flags.ProtoLegacy {
+ if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() {
+ fd = nil // reset since the weak reference is not linked in
+ }
+ }
+
+ // Handle unknown fields.
+ if fd == nil {
+ if d.opts.DiscardUnknown || messageDesc.ReservedNames().Has(name) {
+ d.skipValue()
+ continue
+ }
+ return d.newError(tok.Pos(), "unknown field: %v", tok.RawString())
+ }
+
+ // Handle fields identified by field number.
+ if isFieldNumberName {
+ // TODO: Add an option to permit parsing field numbers.
+ //
+ // This requires careful thought as the MarshalOptions.EmitUnknown
+ // option allows formatting unknown fields as the field number and the
+ // best-effort textual representation of the field value. In that case,
+ // it may not be possible to unmarshal the value from a parser that does
+ // have information about the unknown field.
+ return d.newError(tok.Pos(), "cannot specify field by number: %v", tok.RawString())
+ }
+
+ switch {
+ case fd.IsList():
+ kind := fd.Kind()
+ if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() {
+ return d.syntaxError(tok.Pos(), "missing field separator :")
+ }
+
+ list := m.Mutable(fd).List()
+ if err := d.unmarshalList(fd, list); err != nil {
+ return err
+ }
+
+ case fd.IsMap():
+ mmap := m.Mutable(fd).Map()
+ if err := d.unmarshalMap(fd, mmap); err != nil {
+ return err
+ }
+
+ default:
+ kind := fd.Kind()
+ if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() {
+ return d.syntaxError(tok.Pos(), "missing field separator :")
+ }
+
+ // If field is a oneof, check if it has already been set.
+ if od := fd.ContainingOneof(); od != nil {
+ idx := uint64(od.Index())
+ if seenOneofs.Has(idx) {
+ return d.newError(tok.Pos(), "error parsing %q, oneof %v is already set", tok.RawString(), od.FullName())
+ }
+ seenOneofs.Set(idx)
+ }
+
+ num := uint64(fd.Number())
+ if seenNums.Has(num) {
+ return d.newError(tok.Pos(), "non-repeated field %q is repeated", tok.RawString())
+ }
+
+ if err := d.unmarshalSingular(fd, m); err != nil {
+ return err
+ }
+ seenNums.Set(num)
+ }
+ }
+
+ return nil
+}
+
+// unmarshalSingular unmarshals a non-repeated field value specified by the
+// given FieldDescriptor.
+func (d decoder) unmarshalSingular(fd pref.FieldDescriptor, m pref.Message) error {
+ var val pref.Value
+ var err error
+ switch fd.Kind() {
+ case pref.MessageKind, pref.GroupKind:
+ val = m.NewField(fd)
+ err = d.unmarshalMessage(val.Message(), true)
+ default:
+ val, err = d.unmarshalScalar(fd)
+ }
+ if err == nil {
+ m.Set(fd, val)
+ }
+ return err
+}
+
+// unmarshalScalar unmarshals a scalar/enum protoreflect.Value specified by the
+// given FieldDescriptor.
+func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) {
+ tok, err := d.Read()
+ if err != nil {
+ return pref.Value{}, err
+ }
+
+ if tok.Kind() != text.Scalar {
+ return pref.Value{}, d.unexpectedTokenError(tok)
+ }
+
+ kind := fd.Kind()
+ switch kind {
+ case pref.BoolKind:
+ if b, ok := tok.Bool(); ok {
+ return pref.ValueOfBool(b), nil
+ }
+
+ case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind:
+ if n, ok := tok.Int32(); ok {
+ return pref.ValueOfInt32(n), nil
+ }
+
+ case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
+ if n, ok := tok.Int64(); ok {
+ return pref.ValueOfInt64(n), nil
+ }
+
+ case pref.Uint32Kind, pref.Fixed32Kind:
+ if n, ok := tok.Uint32(); ok {
+ return pref.ValueOfUint32(n), nil
+ }
+
+ case pref.Uint64Kind, pref.Fixed64Kind:
+ if n, ok := tok.Uint64(); ok {
+ return pref.ValueOfUint64(n), nil
+ }
+
+ case pref.FloatKind:
+ if n, ok := tok.Float32(); ok {
+ return pref.ValueOfFloat32(n), nil
+ }
+
+ case pref.DoubleKind:
+ if n, ok := tok.Float64(); ok {
+ return pref.ValueOfFloat64(n), nil
+ }
+
+ case pref.StringKind:
+ if s, ok := tok.String(); ok {
+ if strs.EnforceUTF8(fd) && !utf8.ValidString(s) {
+ return pref.Value{}, d.newError(tok.Pos(), "contains invalid UTF-8")
+ }
+ return pref.ValueOfString(s), nil
+ }
+
+ case pref.BytesKind:
+ if b, ok := tok.String(); ok {
+ return pref.ValueOfBytes([]byte(b)), nil
+ }
+
+ case pref.EnumKind:
+ if lit, ok := tok.Enum(); ok {
+ // Lookup EnumNumber based on name.
+ if enumVal := fd.Enum().Values().ByName(pref.Name(lit)); enumVal != nil {
+ return pref.ValueOfEnum(enumVal.Number()), nil
+ }
+ }
+ if num, ok := tok.Int32(); ok {
+ return pref.ValueOfEnum(pref.EnumNumber(num)), nil
+ }
+
+ default:
+ panic(fmt.Sprintf("invalid scalar kind %v", kind))
+ }
+
+ return pref.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString())
+}
+
+// unmarshalList unmarshals into given protoreflect.List. A list value can
+// either be in [] syntax or simply just a single scalar/message value.
+func (d decoder) unmarshalList(fd pref.FieldDescriptor, list pref.List) error {
+ tok, err := d.Peek()
+ if err != nil {
+ return err
+ }
+
+ switch fd.Kind() {
+ case pref.MessageKind, pref.GroupKind:
+ switch tok.Kind() {
+ case text.ListOpen:
+ d.Read()
+ for {
+ tok, err := d.Peek()
+ if err != nil {
+ return err
+ }
+
+ switch tok.Kind() {
+ case text.ListClose:
+ d.Read()
+ return nil
+ case text.MessageOpen:
+ pval := list.NewElement()
+ if err := d.unmarshalMessage(pval.Message(), true); err != nil {
+ return err
+ }
+ list.Append(pval)
+ default:
+ return d.unexpectedTokenError(tok)
+ }
+ }
+
+ case text.MessageOpen:
+ pval := list.NewElement()
+ if err := d.unmarshalMessage(pval.Message(), true); err != nil {
+ return err
+ }
+ list.Append(pval)
+ return nil
+ }
+
+ default:
+ switch tok.Kind() {
+ case text.ListOpen:
+ d.Read()
+ for {
+ tok, err := d.Peek()
+ if err != nil {
+ return err
+ }
+
+ switch tok.Kind() {
+ case text.ListClose:
+ d.Read()
+ return nil
+ case text.Scalar:
+ pval, err := d.unmarshalScalar(fd)
+ if err != nil {
+ return err
+ }
+ list.Append(pval)
+ default:
+ return d.unexpectedTokenError(tok)
+ }
+ }
+
+ case text.Scalar:
+ pval, err := d.unmarshalScalar(fd)
+ if err != nil {
+ return err
+ }
+ list.Append(pval)
+ return nil
+ }
+ }
+
+ return d.unexpectedTokenError(tok)
+}
+
+// unmarshalMap unmarshals into given protoreflect.Map. A map value is a
+// textproto message containing {key: <key>, value: <value>}.
+func (d decoder) unmarshalMap(fd pref.FieldDescriptor, mmap pref.Map) error {
+ // Determine ahead whether map entry is a scalar type or a message type in
+ // order to call the appropriate unmarshalMapValue func inside
+ // unmarshalMapEntry.
+ var unmarshalMapValue func() (pref.Value, error)
+ switch fd.MapValue().Kind() {
+ case pref.MessageKind, pref.GroupKind:
+ unmarshalMapValue = func() (pref.Value, error) {
+ pval := mmap.NewValue()
+ if err := d.unmarshalMessage(pval.Message(), true); err != nil {
+ return pref.Value{}, err
+ }
+ return pval, nil
+ }
+ default:
+ unmarshalMapValue = func() (pref.Value, error) {
+ return d.unmarshalScalar(fd.MapValue())
+ }
+ }
+
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ switch tok.Kind() {
+ case text.MessageOpen:
+ return d.unmarshalMapEntry(fd, mmap, unmarshalMapValue)
+
+ case text.ListOpen:
+ for {
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ switch tok.Kind() {
+ case text.ListClose:
+ return nil
+ case text.MessageOpen:
+ if err := d.unmarshalMapEntry(fd, mmap, unmarshalMapValue); err != nil {
+ return err
+ }
+ default:
+ return d.unexpectedTokenError(tok)
+ }
+ }
+
+ default:
+ return d.unexpectedTokenError(tok)
+ }
+}
+
+// unmarshalMapEntry unmarshals a single map entry into the given protoreflect.Map.
+// A map entry is a textproto message containing {key: <key>, value: <value>}.
+func (d decoder) unmarshalMapEntry(fd pref.FieldDescriptor, mmap pref.Map, unmarshalMapValue func() (pref.Value, error)) error {
+ var key pref.MapKey
+ var pval pref.Value
+Loop:
+ for {
+ // Read field name.
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ switch tok.Kind() {
+ case text.Name:
+ if tok.NameKind() != text.IdentName {
+ if !d.opts.DiscardUnknown {
+ return d.newError(tok.Pos(), "unknown map entry field %q", tok.RawString())
+ }
+ d.skipValue()
+ continue Loop
+ }
+ // Continue below.
+ case text.MessageClose:
+ break Loop
+ default:
+ return d.unexpectedTokenError(tok)
+ }
+
+ switch name := pref.Name(tok.IdentName()); name {
+ case genid.MapEntry_Key_field_name:
+ if !tok.HasSeparator() {
+ return d.syntaxError(tok.Pos(), "missing field separator :")
+ }
+ if key.IsValid() {
+ return d.newError(tok.Pos(), "map entry %q cannot be repeated", name)
+ }
+ val, err := d.unmarshalScalar(fd.MapKey())
+ if err != nil {
+ return err
+ }
+ key = val.MapKey()
+
+ case genid.MapEntry_Value_field_name:
+ if kind := fd.MapValue().Kind(); (kind != pref.MessageKind) && (kind != pref.GroupKind) {
+ if !tok.HasSeparator() {
+ return d.syntaxError(tok.Pos(), "missing field separator :")
+ }
+ }
+ if pval.IsValid() {
+ return d.newError(tok.Pos(), "map entry %q cannot be repeated", name)
+ }
+ pval, err = unmarshalMapValue()
+ if err != nil {
+ return err
+ }
+
+ default:
+ if !d.opts.DiscardUnknown {
+ return d.newError(tok.Pos(), "unknown map entry field %q", name)
+ }
+ d.skipValue()
+ }
+ }
+
+ if !key.IsValid() {
+ key = fd.MapKey().Default().MapKey()
+ }
+ if !pval.IsValid() {
+ switch fd.MapValue().Kind() {
+ case pref.MessageKind, pref.GroupKind:
+ // If value field is not set for message/group types, construct an
+ // empty one as default.
+ pval = mmap.NewValue()
+ default:
+ pval = fd.MapValue().Default()
+ }
+ }
+ mmap.Set(key, pval)
+ return nil
+}
+
+// unmarshalAny unmarshals an Any textproto. It can either be in expanded form
+// or non-expanded form.
+func (d decoder) unmarshalAny(m pref.Message, checkDelims bool) error {
+ var typeURL string
+ var bValue []byte
+ var seenTypeUrl bool
+ var seenValue bool
+ var isExpanded bool
+
+ if checkDelims {
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+
+ if tok.Kind() != text.MessageOpen {
+ return d.unexpectedTokenError(tok)
+ }
+ }
+
+Loop:
+ for {
+ // Read field name. Can only have 3 possible field names, i.e. type_url,
+ // value and type URL name inside [].
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ if typ := tok.Kind(); typ != text.Name {
+ if checkDelims {
+ if typ == text.MessageClose {
+ break Loop
+ }
+ } else if typ == text.EOF {
+ break Loop
+ }
+ return d.unexpectedTokenError(tok)
+ }
+
+ switch tok.NameKind() {
+ case text.IdentName:
+ // Both type_url and value fields require field separator :.
+ if !tok.HasSeparator() {
+ return d.syntaxError(tok.Pos(), "missing field separator :")
+ }
+
+ switch name := pref.Name(tok.IdentName()); name {
+ case genid.Any_TypeUrl_field_name:
+ if seenTypeUrl {
+ return d.newError(tok.Pos(), "duplicate %v field", genid.Any_TypeUrl_field_fullname)
+ }
+ if isExpanded {
+ return d.newError(tok.Pos(), "conflict with [%s] field", typeURL)
+ }
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ var ok bool
+ typeURL, ok = tok.String()
+ if !ok {
+ return d.newError(tok.Pos(), "invalid %v field value: %v", genid.Any_TypeUrl_field_fullname, tok.RawString())
+ }
+ seenTypeUrl = true
+
+ case genid.Any_Value_field_name:
+ if seenValue {
+ return d.newError(tok.Pos(), "duplicate %v field", genid.Any_Value_field_fullname)
+ }
+ if isExpanded {
+ return d.newError(tok.Pos(), "conflict with [%s] field", typeURL)
+ }
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ s, ok := tok.String()
+ if !ok {
+ return d.newError(tok.Pos(), "invalid %v field value: %v", genid.Any_Value_field_fullname, tok.RawString())
+ }
+ bValue = []byte(s)
+ seenValue = true
+
+ default:
+ if !d.opts.DiscardUnknown {
+ return d.newError(tok.Pos(), "invalid field name %q in %v message", tok.RawString(), genid.Any_message_fullname)
+ }
+ }
+
+ case text.TypeName:
+ if isExpanded {
+ return d.newError(tok.Pos(), "cannot have more than one type")
+ }
+ if seenTypeUrl {
+ return d.newError(tok.Pos(), "conflict with type_url field")
+ }
+ typeURL = tok.TypeName()
+ var err error
+ bValue, err = d.unmarshalExpandedAny(typeURL, tok.Pos())
+ if err != nil {
+ return err
+ }
+ isExpanded = true
+
+ default:
+ if !d.opts.DiscardUnknown {
+ return d.newError(tok.Pos(), "invalid field name %q in %v message", tok.RawString(), genid.Any_message_fullname)
+ }
+ }
+ }
+
+ fds := m.Descriptor().Fields()
+ if len(typeURL) > 0 {
+ m.Set(fds.ByNumber(genid.Any_TypeUrl_field_number), pref.ValueOfString(typeURL))
+ }
+ if len(bValue) > 0 {
+ m.Set(fds.ByNumber(genid.Any_Value_field_number), pref.ValueOfBytes(bValue))
+ }
+ return nil
+}
+
+func (d decoder) unmarshalExpandedAny(typeURL string, pos int) ([]byte, error) {
+ mt, err := d.opts.Resolver.FindMessageByURL(typeURL)
+ if err != nil {
+ return nil, d.newError(pos, "unable to resolve message [%v]: %v", typeURL, err)
+ }
+ // Create new message for the embedded message type and unmarshal the value
+ // field into it.
+ m := mt.New()
+ if err := d.unmarshalMessage(m, true); err != nil {
+ return nil, err
+ }
+ // Serialize the embedded message and return the resulting bytes.
+ b, err := proto.MarshalOptions{
+ AllowPartial: true, // Never check required fields inside an Any.
+ Deterministic: true,
+ }.Marshal(m.Interface())
+ if err != nil {
+ return nil, d.newError(pos, "error in marshaling message into Any.value: %v", err)
+ }
+ return b, nil
+}
+
+// skipValue makes the decoder parse a field value in order to advance the read
+// to the next field. It relies on Read returning an error if the types are not
+// in valid sequence.
+func (d decoder) skipValue() error {
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ // Only need to continue reading for messages and lists.
+ switch tok.Kind() {
+ case text.MessageOpen:
+ return d.skipMessageValue()
+
+ case text.ListOpen:
+ for {
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ switch tok.Kind() {
+ case text.ListClose:
+ return nil
+ case text.MessageOpen:
+ return d.skipMessageValue()
+ default:
+ // Skip items. This will not validate whether skipped values are
+ // of the same type or not, same behavior as C++
+ // TextFormat::Parser::AllowUnknownField(true) version 3.8.0.
+ if err := d.skipValue(); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// skipMessageValue makes the decoder parse and skip over all fields in a
+// message. It assumes that the previous read type is MessageOpen.
+func (d decoder) skipMessageValue() error {
+ for {
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ switch tok.Kind() {
+ case text.MessageClose:
+ return nil
+ case text.Name:
+ if err := d.skipValue(); err != nil {
+ return err
+ }
+ }
+ }
+}
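For context, here is a minimal sketch of how the Any-expansion path above is reached through the public prototext API. The type URL and input are illustrative, and the example assumes the durationpb package is linked in so the default resolver (protoregistry.GlobalTypes) can resolve the URL.

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/anypb"
	_ "google.golang.org/protobuf/types/known/durationpb" // registers google.protobuf.Duration
)

func main() {
	// Expanded Any form: the bracketed type URL stands in for explicit
	// type_url/value fields; the decoder resolves the type, unmarshals the
	// body, and re-serializes it into Any.value.
	in := []byte(`[type.googleapis.com/google.protobuf.Duration] { seconds: 5 }`)

	var a anypb.Any
	if err := prototext.Unmarshal(in, &a); err != nil {
		panic(err)
	}
	fmt.Println(a.GetTypeUrl()) // type.googleapis.com/google.protobuf.Duration
}
```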
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/doc.go b/vendor/google.golang.org/protobuf/encoding/prototext/doc.go
new file mode 100644
index 0000000..162b4f9
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/encoding/prototext/doc.go
@@ -0,0 +1,7 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package prototext marshals and unmarshals protocol buffer messages as the
+// textproto format.
+package prototext
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
new file mode 100644
index 0000000..8d5304d
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
@@ -0,0 +1,371 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package prototext
+
+import (
+ "fmt"
+ "strconv"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/internal/encoding/text"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/flags"
+ "google.golang.org/protobuf/internal/genid"
+ "google.golang.org/protobuf/internal/order"
+ "google.golang.org/protobuf/internal/pragma"
+ "google.golang.org/protobuf/internal/strs"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+const defaultIndent = " "
+
+// Format formats the message as a multiline string.
+// This function is only intended for human consumption and ignores errors.
+// Do not depend on the output being stable. It may change over time across
+// different versions of the program.
+func Format(m proto.Message) string {
+ return MarshalOptions{Multiline: true}.Format(m)
+}
+
+// Marshal writes the given proto.Message in textproto format using default
+// options. Do not depend on the output being stable. It may change over time
+// across different versions of the program.
+func Marshal(m proto.Message) ([]byte, error) {
+ return MarshalOptions{}.Marshal(m)
+}
+
+// MarshalOptions is a configurable text format marshaler.
+type MarshalOptions struct {
+ pragma.NoUnkeyedLiterals
+
+ // Multiline specifies whether the marshaler should format the output in
+ // indented-form with every textual element on a new line.
+ // If Indent is an empty string, then an arbitrary indent is chosen.
+ Multiline bool
+
+ // Indent specifies the set of indentation characters to use in a multiline
+ // formatted output such that every entry is preceded by Indent and
+ // terminated by a newline. If non-empty, then Multiline is treated as true.
+ // Indent can only be composed of space or tab characters.
+ Indent string
+
+ // EmitASCII specifies whether to format strings and bytes as ASCII only
+ // as opposed to using UTF-8 encoding when possible.
+ EmitASCII bool
+
+ // allowInvalidUTF8 specifies whether to permit the encoding of strings
+ // with invalid UTF-8. This is unexported as it is intended to only
+ // be specified by the Format method.
+ allowInvalidUTF8 bool
+
+ // AllowPartial allows messages that have missing required fields to marshal
+ // without returning an error. If AllowPartial is false (the default),
+	// Marshal will return an error if there are any missing required fields.

+ AllowPartial bool
+
+ // EmitUnknown specifies whether to emit unknown fields in the output.
+ // If specified, the unmarshaler may be unable to parse the output.
+ // The default is to exclude unknown fields.
+ EmitUnknown bool
+
+ // Resolver is used for looking up types when expanding google.protobuf.Any
+ // messages. If nil, this defaults to using protoregistry.GlobalTypes.
+ Resolver interface {
+ protoregistry.ExtensionTypeResolver
+ protoregistry.MessageTypeResolver
+ }
+}
+
+// Format formats the message as a string.
+// This method is only intended for human consumption and ignores errors.
+// Do not depend on the output being stable. It may change over time across
+// different versions of the program.
+func (o MarshalOptions) Format(m proto.Message) string {
+ if m == nil || !m.ProtoReflect().IsValid() {
+ return "" // invalid syntax, but okay since this is for debugging
+ }
+ o.allowInvalidUTF8 = true
+ o.AllowPartial = true
+ o.EmitUnknown = true
+ b, _ := o.Marshal(m)
+ return string(b)
+}
+
+// Marshal writes the given proto.Message in textproto format using options in
+// MarshalOptions object. Do not depend on the output being stable. It may
+// change over time across different versions of the program.
+func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
+ return o.marshal(m)
+}
+
+// marshal is a centralized function that all marshal operations go through.
+// For profiling purposes, avoid changing the name of this function or
+// introducing other code paths for marshal that do not go through this.
+func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) {
+ var delims = [2]byte{'{', '}'}
+
+ if o.Multiline && o.Indent == "" {
+ o.Indent = defaultIndent
+ }
+ if o.Resolver == nil {
+ o.Resolver = protoregistry.GlobalTypes
+ }
+
+ internalEnc, err := text.NewEncoder(o.Indent, delims, o.EmitASCII)
+ if err != nil {
+ return nil, err
+ }
+
+ // Treat nil message interface as an empty message,
+ // in which case there is nothing to output.
+ if m == nil {
+ return []byte{}, nil
+ }
+
+ enc := encoder{internalEnc, o}
+ err = enc.marshalMessage(m.ProtoReflect(), false)
+ if err != nil {
+ return nil, err
+ }
+ out := enc.Bytes()
+ if len(o.Indent) > 0 && len(out) > 0 {
+ out = append(out, '\n')
+ }
+ if o.AllowPartial {
+ return out, nil
+ }
+ return out, proto.CheckInitialized(m)
+}
+
+type encoder struct {
+ *text.Encoder
+ opts MarshalOptions
+}
+
+// marshalMessage marshals the given protoreflect.Message.
+func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error {
+ messageDesc := m.Descriptor()
+ if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) {
+ return errors.New("no support for proto1 MessageSets")
+ }
+
+ if inclDelims {
+ e.StartMessage()
+ defer e.EndMessage()
+ }
+
+ // Handle Any expansion.
+ if messageDesc.FullName() == genid.Any_message_fullname {
+ if e.marshalAny(m) {
+ return nil
+ }
+ // If unable to expand, continue on to marshal Any as a regular message.
+ }
+
+ // Marshal fields.
+ var err error
+ order.RangeFields(m, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ if err = e.marshalField(fd.TextName(), v, fd); err != nil {
+ return false
+ }
+ return true
+ })
+ if err != nil {
+ return err
+ }
+
+ // Marshal unknown fields.
+ if e.opts.EmitUnknown {
+ e.marshalUnknown(m.GetUnknown())
+ }
+
+ return nil
+}
+
+// marshalField marshals the given field with protoreflect.Value.
+func (e encoder) marshalField(name string, val pref.Value, fd pref.FieldDescriptor) error {
+ switch {
+ case fd.IsList():
+ return e.marshalList(name, val.List(), fd)
+ case fd.IsMap():
+ return e.marshalMap(name, val.Map(), fd)
+ default:
+ e.WriteName(name)
+ return e.marshalSingular(val, fd)
+ }
+}
+
+// marshalSingular marshals the given non-repeated field value. This includes
+// all scalar types, enums, messages, and groups.
+func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error {
+ kind := fd.Kind()
+ switch kind {
+ case pref.BoolKind:
+ e.WriteBool(val.Bool())
+
+ case pref.StringKind:
+ s := val.String()
+ if !e.opts.allowInvalidUTF8 && strs.EnforceUTF8(fd) && !utf8.ValidString(s) {
+ return errors.InvalidUTF8(string(fd.FullName()))
+ }
+ e.WriteString(s)
+
+ case pref.Int32Kind, pref.Int64Kind,
+ pref.Sint32Kind, pref.Sint64Kind,
+ pref.Sfixed32Kind, pref.Sfixed64Kind:
+ e.WriteInt(val.Int())
+
+ case pref.Uint32Kind, pref.Uint64Kind,
+ pref.Fixed32Kind, pref.Fixed64Kind:
+ e.WriteUint(val.Uint())
+
+ case pref.FloatKind:
+		// Encoder.WriteFloat handles the special numbers NaN and infinities.
+ e.WriteFloat(val.Float(), 32)
+
+ case pref.DoubleKind:
+		// Encoder.WriteFloat handles the special numbers NaN and infinities.
+ e.WriteFloat(val.Float(), 64)
+
+ case pref.BytesKind:
+ e.WriteString(string(val.Bytes()))
+
+ case pref.EnumKind:
+ num := val.Enum()
+ if desc := fd.Enum().Values().ByNumber(num); desc != nil {
+ e.WriteLiteral(string(desc.Name()))
+ } else {
+ // Use numeric value if there is no enum description.
+ e.WriteInt(int64(num))
+ }
+
+ case pref.MessageKind, pref.GroupKind:
+ return e.marshalMessage(val.Message(), true)
+
+ default:
+ panic(fmt.Sprintf("%v has unknown kind: %v", fd.FullName(), kind))
+ }
+ return nil
+}
+
+// marshalList marshals the given protoreflect.List as multiple name-value fields.
+func (e encoder) marshalList(name string, list pref.List, fd pref.FieldDescriptor) error {
+ size := list.Len()
+ for i := 0; i < size; i++ {
+ e.WriteName(name)
+ if err := e.marshalSingular(list.Get(i), fd); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// marshalMap marshals the given protoreflect.Map as multiple name-value fields.
+func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor) error {
+ var err error
+ order.RangeEntries(mmap, order.GenericKeyOrder, func(key pref.MapKey, val pref.Value) bool {
+ e.WriteName(name)
+ e.StartMessage()
+ defer e.EndMessage()
+
+ e.WriteName(string(genid.MapEntry_Key_field_name))
+ err = e.marshalSingular(key.Value(), fd.MapKey())
+ if err != nil {
+ return false
+ }
+
+ e.WriteName(string(genid.MapEntry_Value_field_name))
+ err = e.marshalSingular(val, fd.MapValue())
+ if err != nil {
+ return false
+ }
+ return true
+ })
+ return err
+}
+
+// marshalUnknown parses the given []byte and marshals fields out.
+// This function assumes proper encoding in the given []byte.
+func (e encoder) marshalUnknown(b []byte) {
+ const dec = 10
+ const hex = 16
+ for len(b) > 0 {
+ num, wtype, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ e.WriteName(strconv.FormatInt(int64(num), dec))
+
+ switch wtype {
+ case protowire.VarintType:
+ var v uint64
+ v, n = protowire.ConsumeVarint(b)
+ e.WriteUint(v)
+ case protowire.Fixed32Type:
+ var v uint32
+ v, n = protowire.ConsumeFixed32(b)
+ e.WriteLiteral("0x" + strconv.FormatUint(uint64(v), hex))
+ case protowire.Fixed64Type:
+ var v uint64
+ v, n = protowire.ConsumeFixed64(b)
+ e.WriteLiteral("0x" + strconv.FormatUint(v, hex))
+ case protowire.BytesType:
+ var v []byte
+ v, n = protowire.ConsumeBytes(b)
+ e.WriteString(string(v))
+ case protowire.StartGroupType:
+ e.StartMessage()
+ var v []byte
+ v, n = protowire.ConsumeGroup(num, b)
+ e.marshalUnknown(v)
+ e.EndMessage()
+ default:
+ panic(fmt.Sprintf("prototext: error parsing unknown field wire type: %v", wtype))
+ }
+
+ b = b[n:]
+ }
+}
+
+// marshalAny marshals the given google.protobuf.Any message in expanded form.
+// It returns true if it was able to marshal, else false.
+func (e encoder) marshalAny(any pref.Message) bool {
+ // Construct the embedded message.
+ fds := any.Descriptor().Fields()
+ fdType := fds.ByNumber(genid.Any_TypeUrl_field_number)
+ typeURL := any.Get(fdType).String()
+ mt, err := e.opts.Resolver.FindMessageByURL(typeURL)
+ if err != nil {
+ return false
+ }
+ m := mt.New().Interface()
+
+ // Unmarshal bytes into embedded message.
+ fdValue := fds.ByNumber(genid.Any_Value_field_number)
+ value := any.Get(fdValue)
+ err = proto.UnmarshalOptions{
+ AllowPartial: true,
+ Resolver: e.opts.Resolver,
+ }.Unmarshal(value.Bytes(), m)
+ if err != nil {
+ return false
+ }
+
+ // Get current encoder position. If marshaling fails, reset encoder output
+ // back to this position.
+ pos := e.Snapshot()
+
+ // Field name is the proto field name enclosed in [].
+ e.WriteName("[" + typeURL + "]")
+ err = e.marshalMessage(m.ProtoReflect(), true)
+ if err != nil {
+ e.Reset(pos)
+ return false
+ }
+ return true
+}
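A short usage sketch of the encoder defined above through the public Format and MarshalOptions entry points; durationpb is only an example message type.

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	d := durationpb.New(90 * time.Second)

	// Format: multiline, human-oriented output that ignores errors.
	fmt.Print(prototext.Format(d))

	// Marshal with explicit options for control over indentation and errors.
	b, err := prototext.MarshalOptions{Multiline: true, Indent: "  "}.Marshal(d)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", b)
}
```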
diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
new file mode 100644
index 0000000..a427f8b
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
@@ -0,0 +1,538 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protowire parses and formats the raw wire encoding.
+// See https://developers.google.com/protocol-buffers/docs/encoding.
+//
+// For marshaling and unmarshaling entire protobuf messages,
+// use the "google.golang.org/protobuf/proto" package instead.
+package protowire
+
+import (
+ "io"
+ "math"
+ "math/bits"
+
+ "google.golang.org/protobuf/internal/errors"
+)
+
+// Number represents the field number.
+type Number int32
+
+const (
+ MinValidNumber Number = 1
+ FirstReservedNumber Number = 19000
+ LastReservedNumber Number = 19999
+ MaxValidNumber Number = 1<<29 - 1
+)
+
+// IsValid reports whether the field number is semantically valid.
+//
+// Note that while numbers within the reserved range are semantically invalid,
+// they are syntactically valid in the wire format.
+// Implementations may treat records with reserved field numbers as unknown.
+func (n Number) IsValid() bool {
+ return MinValidNumber <= n && n < FirstReservedNumber || LastReservedNumber < n && n <= MaxValidNumber
+}
+
+// Type represents the wire type.
+type Type int8
+
+const (
+ VarintType Type = 0
+ Fixed32Type Type = 5
+ Fixed64Type Type = 1
+ BytesType Type = 2
+ StartGroupType Type = 3
+ EndGroupType Type = 4
+)
+
+const (
+ _ = -iota
+ errCodeTruncated
+ errCodeFieldNumber
+ errCodeOverflow
+ errCodeReserved
+ errCodeEndGroup
+)
+
+var (
+ errFieldNumber = errors.New("invalid field number")
+ errOverflow = errors.New("variable length integer overflow")
+ errReserved = errors.New("cannot parse reserved wire type")
+ errEndGroup = errors.New("mismatching end group marker")
+ errParse = errors.New("parse error")
+)
+
+// ParseError converts an error code into an error value.
+// This returns nil if n is a non-negative number.
+func ParseError(n int) error {
+ if n >= 0 {
+ return nil
+ }
+ switch n {
+ case errCodeTruncated:
+ return io.ErrUnexpectedEOF
+ case errCodeFieldNumber:
+ return errFieldNumber
+ case errCodeOverflow:
+ return errOverflow
+ case errCodeReserved:
+ return errReserved
+ case errCodeEndGroup:
+ return errEndGroup
+ default:
+ return errParse
+ }
+}
+
+// ConsumeField parses an entire field record (both tag and value) and returns
+// the field number, the wire type, and the total length.
+// This returns a negative length upon an error (see ParseError).
+//
+// The total length includes the tag header and the end group marker (if the
+// field is a group).
+func ConsumeField(b []byte) (Number, Type, int) {
+ num, typ, n := ConsumeTag(b)
+ if n < 0 {
+ return 0, 0, n // forward error code
+ }
+ m := ConsumeFieldValue(num, typ, b[n:])
+ if m < 0 {
+ return 0, 0, m // forward error code
+ }
+ return num, typ, n + m
+}
+
+// ConsumeFieldValue parses a field value and returns its length.
+// This assumes that the field Number and wire Type have already been parsed.
+// This returns a negative length upon an error (see ParseError).
+//
+// When parsing a group, the length includes the end group marker and
+// the end group is verified to match the starting field number.
+func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) {
+ switch typ {
+ case VarintType:
+ _, n = ConsumeVarint(b)
+ return n
+ case Fixed32Type:
+ _, n = ConsumeFixed32(b)
+ return n
+ case Fixed64Type:
+ _, n = ConsumeFixed64(b)
+ return n
+ case BytesType:
+ _, n = ConsumeBytes(b)
+ return n
+ case StartGroupType:
+ n0 := len(b)
+ for {
+ num2, typ2, n := ConsumeTag(b)
+ if n < 0 {
+ return n // forward error code
+ }
+ b = b[n:]
+ if typ2 == EndGroupType {
+ if num != num2 {
+ return errCodeEndGroup
+ }
+ return n0 - len(b)
+ }
+
+ n = ConsumeFieldValue(num2, typ2, b)
+ if n < 0 {
+ return n // forward error code
+ }
+ b = b[n:]
+ }
+ case EndGroupType:
+ return errCodeEndGroup
+ default:
+ return errCodeReserved
+ }
+}
+
+// AppendTag encodes num and typ as a varint-encoded tag and appends it to b.
+func AppendTag(b []byte, num Number, typ Type) []byte {
+ return AppendVarint(b, EncodeTag(num, typ))
+}
+
+// ConsumeTag parses b as a varint-encoded tag, reporting its length.
+// This returns a negative length upon an error (see ParseError).
+func ConsumeTag(b []byte) (Number, Type, int) {
+ v, n := ConsumeVarint(b)
+ if n < 0 {
+ return 0, 0, n // forward error code
+ }
+ num, typ := DecodeTag(v)
+ if num < MinValidNumber {
+ return 0, 0, errCodeFieldNumber
+ }
+ return num, typ, n
+}
+
+func SizeTag(num Number) int {
+ return SizeVarint(EncodeTag(num, 0)) // wire type has no effect on size
+}
+
+// AppendVarint appends v to b as a varint-encoded uint64.
+func AppendVarint(b []byte, v uint64) []byte {
+ switch {
+ case v < 1<<7:
+ b = append(b, byte(v))
+ case v < 1<<14:
+ b = append(b,
+ byte((v>>0)&0x7f|0x80),
+ byte(v>>7))
+ case v < 1<<21:
+ b = append(b,
+ byte((v>>0)&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte(v>>14))
+ case v < 1<<28:
+ b = append(b,
+ byte((v>>0)&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte(v>>21))
+ case v < 1<<35:
+ b = append(b,
+ byte((v>>0)&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte(v>>28))
+ case v < 1<<42:
+ b = append(b,
+ byte((v>>0)&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte(v>>35))
+ case v < 1<<49:
+ b = append(b,
+ byte((v>>0)&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte(v>>42))
+ case v < 1<<56:
+ b = append(b,
+ byte((v>>0)&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte(v>>49))
+ case v < 1<<63:
+ b = append(b,
+ byte((v>>0)&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte((v>>49)&0x7f|0x80),
+ byte(v>>56))
+ default:
+ b = append(b,
+ byte((v>>0)&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte((v>>49)&0x7f|0x80),
+ byte((v>>56)&0x7f|0x80),
+ 1)
+ }
+ return b
+}
+
+// ConsumeVarint parses b as a varint-encoded uint64, reporting its length.
+// This returns a negative length upon an error (see ParseError).
+func ConsumeVarint(b []byte) (v uint64, n int) {
+ var y uint64
+ if len(b) <= 0 {
+ return 0, errCodeTruncated
+ }
+ v = uint64(b[0])
+ if v < 0x80 {
+ return v, 1
+ }
+ v -= 0x80
+
+ if len(b) <= 1 {
+ return 0, errCodeTruncated
+ }
+ y = uint64(b[1])
+ v += y << 7
+ if y < 0x80 {
+ return v, 2
+ }
+ v -= 0x80 << 7
+
+ if len(b) <= 2 {
+ return 0, errCodeTruncated
+ }
+ y = uint64(b[2])
+ v += y << 14
+ if y < 0x80 {
+ return v, 3
+ }
+ v -= 0x80 << 14
+
+ if len(b) <= 3 {
+ return 0, errCodeTruncated
+ }
+ y = uint64(b[3])
+ v += y << 21
+ if y < 0x80 {
+ return v, 4
+ }
+ v -= 0x80 << 21
+
+ if len(b) <= 4 {
+ return 0, errCodeTruncated
+ }
+ y = uint64(b[4])
+ v += y << 28
+ if y < 0x80 {
+ return v, 5
+ }
+ v -= 0x80 << 28
+
+ if len(b) <= 5 {
+ return 0, errCodeTruncated
+ }
+ y = uint64(b[5])
+ v += y << 35
+ if y < 0x80 {
+ return v, 6
+ }
+ v -= 0x80 << 35
+
+ if len(b) <= 6 {
+ return 0, errCodeTruncated
+ }
+ y = uint64(b[6])
+ v += y << 42
+ if y < 0x80 {
+ return v, 7
+ }
+ v -= 0x80 << 42
+
+ if len(b) <= 7 {
+ return 0, errCodeTruncated
+ }
+ y = uint64(b[7])
+ v += y << 49
+ if y < 0x80 {
+ return v, 8
+ }
+ v -= 0x80 << 49
+
+ if len(b) <= 8 {
+ return 0, errCodeTruncated
+ }
+ y = uint64(b[8])
+ v += y << 56
+ if y < 0x80 {
+ return v, 9
+ }
+ v -= 0x80 << 56
+
+ if len(b) <= 9 {
+ return 0, errCodeTruncated
+ }
+ y = uint64(b[9])
+ v += y << 63
+ if y < 2 {
+ return v, 10
+ }
+ return 0, errCodeOverflow
+}
+
+// SizeVarint returns the encoded size of a varint.
+// The size is guaranteed to be within 1 and 10, inclusive.
+func SizeVarint(v uint64) int {
+ // This computes 1 + (bits.Len64(v)-1)/7.
+ // 9/64 is a good enough approximation of 1/7
+ return int(9*uint32(bits.Len64(v))+64) / 64
+}
+
+// AppendFixed32 appends v to b as a little-endian uint32.
+func AppendFixed32(b []byte, v uint32) []byte {
+ return append(b,
+ byte(v>>0),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24))
+}
+
+// ConsumeFixed32 parses b as a little-endian uint32, reporting its length.
+// This returns a negative length upon an error (see ParseError).
+func ConsumeFixed32(b []byte) (v uint32, n int) {
+ if len(b) < 4 {
+ return 0, errCodeTruncated
+ }
+ v = uint32(b[0])<<0 | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ return v, 4
+}
+
+// SizeFixed32 returns the encoded size of a fixed32, which is always 4.
+func SizeFixed32() int {
+ return 4
+}
+
+// AppendFixed64 appends v to b as a little-endian uint64.
+func AppendFixed64(b []byte, v uint64) []byte {
+ return append(b,
+ byte(v>>0),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24),
+ byte(v>>32),
+ byte(v>>40),
+ byte(v>>48),
+ byte(v>>56))
+}
+
+// ConsumeFixed64 parses b as a little-endian uint64, reporting its length.
+// This returns a negative length upon an error (see ParseError).
+func ConsumeFixed64(b []byte) (v uint64, n int) {
+ if len(b) < 8 {
+ return 0, errCodeTruncated
+ }
+ v = uint64(b[0])<<0 | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ return v, 8
+}
+
+// SizeFixed64 returns the encoded size of a fixed64, which is always 8.
+func SizeFixed64() int {
+ return 8
+}
+
+// AppendBytes appends v to b as a length-prefixed bytes value.
+func AppendBytes(b []byte, v []byte) []byte {
+ return append(AppendVarint(b, uint64(len(v))), v...)
+}
+
+// ConsumeBytes parses b as a length-prefixed bytes value, reporting its length.
+// This returns a negative length upon an error (see ParseError).
+func ConsumeBytes(b []byte) (v []byte, n int) {
+ m, n := ConsumeVarint(b)
+ if n < 0 {
+ return nil, n // forward error code
+ }
+ if m > uint64(len(b[n:])) {
+ return nil, errCodeTruncated
+ }
+ return b[n:][:m], n + int(m)
+}
+
+// SizeBytes returns the encoded size of a length-prefixed bytes value,
+// given only the length.
+func SizeBytes(n int) int {
+ return SizeVarint(uint64(n)) + n
+}
+
+// AppendString appends v to b as a length-prefixed bytes value.
+func AppendString(b []byte, v string) []byte {
+ return append(AppendVarint(b, uint64(len(v))), v...)
+}
+
+// ConsumeString parses b as a length-prefixed bytes value, reporting its length.
+// This returns a negative length upon an error (see ParseError).
+func ConsumeString(b []byte) (v string, n int) {
+ bb, n := ConsumeBytes(b)
+ return string(bb), n
+}
+
+// AppendGroup appends v to b as group value, with a trailing end group marker.
+// The value v must not contain the end marker.
+func AppendGroup(b []byte, num Number, v []byte) []byte {
+ return AppendVarint(append(b, v...), EncodeTag(num, EndGroupType))
+}
+
+// ConsumeGroup parses b as a group value until the trailing end group marker,
+// and verifies that the end marker matches the provided num. The value v
+// does not contain the end marker, while the length does contain the end marker.
+// This returns a negative length upon an error (see ParseError).
+func ConsumeGroup(num Number, b []byte) (v []byte, n int) {
+ n = ConsumeFieldValue(num, StartGroupType, b)
+ if n < 0 {
+ return nil, n // forward error code
+ }
+ b = b[:n]
+
+ // Truncate off end group marker, but need to handle denormalized varints.
+ // Assuming end marker is never 0 (which is always the case since
+ // EndGroupType is non-zero), we can truncate all trailing bytes where the
+ // lower 7 bits are all zero (implying that the varint is denormalized).
+ for len(b) > 0 && b[len(b)-1]&0x7f == 0 {
+ b = b[:len(b)-1]
+ }
+ b = b[:len(b)-SizeTag(num)]
+ return b, n
+}
+
+// SizeGroup returns the encoded size of a group, given only the length.
+func SizeGroup(num Number, n int) int {
+ return n + SizeTag(num)
+}
+
+// DecodeTag decodes the field Number and wire Type from its unified form.
+// The Number is -1 if the decoded field number overflows int32.
+// Other than overflow, this does not check for field number validity.
+func DecodeTag(x uint64) (Number, Type) {
+ // NOTE: MessageSet allows for larger field numbers than normal.
+ if x>>3 > uint64(math.MaxInt32) {
+ return -1, 0
+ }
+ return Number(x >> 3), Type(x & 7)
+}
+
+// EncodeTag encodes the field Number and wire Type into its unified form.
+func EncodeTag(num Number, typ Type) uint64 {
+ return uint64(num)<<3 | uint64(typ&7)
+}
+
+// DecodeZigZag decodes a zig-zag-encoded uint64 as an int64.
+// Input: {…, 5, 3, 1, 0, 2, 4, 6, …}
+// Output: {…, -3, -2, -1, 0, +1, +2, +3, …}
+func DecodeZigZag(x uint64) int64 {
+ return int64(x>>1) ^ int64(x)<<63>>63
+}
+
+// EncodeZigZag encodes an int64 as a zig-zag-encoded uint64.
+// Input: {…, -3, -2, -1, 0, +1, +2, +3, …}
+// Output: {…, 5, 3, 1, 0, 2, 4, 6, …}
+func EncodeZigZag(x int64) uint64 {
+ return uint64(x<<1) ^ uint64(x>>63)
+}
+
+// DecodeBool decodes a uint64 as a bool.
+// Input: { 0, 1, 2, …}
+// Output: {false, true, true, …}
+func DecodeBool(x uint64) bool {
+ return x != 0
+}
+
+// EncodeBool encodes a bool as a uint64.
+// Input: {false, true}
+// Output: { 0, 1}
+func EncodeBool(x bool) uint64 {
+ if x {
+ return 1
+ }
+ return 0
+}
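A brief sketch of the append/consume pairing and the zig-zag mapping documented above; the field numbers and values are arbitrary.

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Append two records: field 1 as a varint, field 2 as length-prefixed bytes.
	var b []byte
	b = protowire.AppendTag(b, 1, protowire.VarintType)
	b = protowire.AppendVarint(b, 150)
	b = protowire.AppendTag(b, 2, protowire.BytesType)
	b = protowire.AppendString(b, "hi")

	// Walk the records back out; negative lengths signal parse errors.
	for len(b) > 0 {
		num, typ, n := protowire.ConsumeTag(b)
		if n < 0 {
			panic(protowire.ParseError(n))
		}
		b = b[n:]
		m := protowire.ConsumeFieldValue(num, typ, b)
		if m < 0 {
			panic(protowire.ParseError(m))
		}
		fmt.Printf("field %d, type %d, value % x\n", num, typ, b[:m])
		b = b[m:]
	}

	// Zig-zag encoding maps small negative integers onto small unsigned varints.
	fmt.Println(protowire.EncodeZigZag(-3)) // 5
	fmt.Println(protowire.DecodeZigZag(5))  // -3
}
```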
diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
new file mode 100644
index 0000000..360c633
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
@@ -0,0 +1,318 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package descfmt provides functionality to format descriptors.
+package descfmt
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+
+ "google.golang.org/protobuf/internal/detrand"
+ "google.golang.org/protobuf/internal/pragma"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type list interface {
+ Len() int
+ pragma.DoNotImplement
+}
+
+func FormatList(s fmt.State, r rune, vs list) {
+ io.WriteString(s, formatListOpt(vs, true, r == 'v' && (s.Flag('+') || s.Flag('#'))))
+}
+func formatListOpt(vs list, isRoot, allowMulti bool) string {
+ start, end := "[", "]"
+ if isRoot {
+ var name string
+ switch vs.(type) {
+ case pref.Names:
+ name = "Names"
+ case pref.FieldNumbers:
+ name = "FieldNumbers"
+ case pref.FieldRanges:
+ name = "FieldRanges"
+ case pref.EnumRanges:
+ name = "EnumRanges"
+ case pref.FileImports:
+ name = "FileImports"
+ case pref.Descriptor:
+ name = reflect.ValueOf(vs).MethodByName("Get").Type().Out(0).Name() + "s"
+ default:
+ name = reflect.ValueOf(vs).Elem().Type().Name()
+ }
+ start, end = name+"{", "}"
+ }
+
+ var ss []string
+ switch vs := vs.(type) {
+ case pref.Names:
+ for i := 0; i < vs.Len(); i++ {
+ ss = append(ss, fmt.Sprint(vs.Get(i)))
+ }
+ return start + joinStrings(ss, false) + end
+ case pref.FieldNumbers:
+ for i := 0; i < vs.Len(); i++ {
+ ss = append(ss, fmt.Sprint(vs.Get(i)))
+ }
+ return start + joinStrings(ss, false) + end
+ case pref.FieldRanges:
+ for i := 0; i < vs.Len(); i++ {
+ r := vs.Get(i)
+ if r[0]+1 == r[1] {
+ ss = append(ss, fmt.Sprintf("%d", r[0]))
+ } else {
+				ss = append(ss, fmt.Sprintf("%d:%d", r[0], r[1])) // field ranges are end exclusive
+ }
+ }
+ return start + joinStrings(ss, false) + end
+ case pref.EnumRanges:
+ for i := 0; i < vs.Len(); i++ {
+ r := vs.Get(i)
+ if r[0] == r[1] {
+ ss = append(ss, fmt.Sprintf("%d", r[0]))
+ } else {
+ ss = append(ss, fmt.Sprintf("%d:%d", r[0], int64(r[1])+1)) // enum ranges are end inclusive
+ }
+ }
+ return start + joinStrings(ss, false) + end
+ case pref.FileImports:
+ for i := 0; i < vs.Len(); i++ {
+ var rs records
+ rs.Append(reflect.ValueOf(vs.Get(i)), "Path", "Package", "IsPublic", "IsWeak")
+ ss = append(ss, "{"+rs.Join()+"}")
+ }
+ return start + joinStrings(ss, allowMulti) + end
+ default:
+ _, isEnumValue := vs.(pref.EnumValueDescriptors)
+ for i := 0; i < vs.Len(); i++ {
+ m := reflect.ValueOf(vs).MethodByName("Get")
+ v := m.Call([]reflect.Value{reflect.ValueOf(i)})[0].Interface()
+ ss = append(ss, formatDescOpt(v.(pref.Descriptor), false, allowMulti && !isEnumValue))
+ }
+ return start + joinStrings(ss, allowMulti && isEnumValue) + end
+ }
+}
+
+// descriptorAccessors is a list of accessors to print for each descriptor.
+//
+// Do not print all accessors since some contain redundant information,
+// while others are pointers that we do not want to follow since the descriptor
+// is actually a cyclic graph.
+//
+// Using a list allows us to print the accessors in a sensible order.
+var descriptorAccessors = map[reflect.Type][]string{
+ reflect.TypeOf((*pref.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"},
+ reflect.TypeOf((*pref.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"},
+ reflect.TypeOf((*pref.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"},
+ reflect.TypeOf((*pref.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt
+ reflect.TypeOf((*pref.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"},
+ reflect.TypeOf((*pref.EnumValueDescriptor)(nil)).Elem(): {"Number"},
+ reflect.TypeOf((*pref.ServiceDescriptor)(nil)).Elem(): {"Methods"},
+ reflect.TypeOf((*pref.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"},
+}
+
+func FormatDesc(s fmt.State, r rune, t pref.Descriptor) {
+ io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#'))))
+}
+func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string {
+ rv := reflect.ValueOf(t)
+ rt := rv.MethodByName("ProtoType").Type().In(0)
+
+ start, end := "{", "}"
+ if isRoot {
+ start = rt.Name() + "{"
+ }
+
+ _, isFile := t.(pref.FileDescriptor)
+ rs := records{allowMulti: allowMulti}
+ if t.IsPlaceholder() {
+ if isFile {
+ rs.Append(rv, "Path", "Package", "IsPlaceholder")
+ } else {
+ rs.Append(rv, "FullName", "IsPlaceholder")
+ }
+ } else {
+ switch {
+ case isFile:
+ rs.Append(rv, "Syntax")
+ case isRoot:
+ rs.Append(rv, "Syntax", "FullName")
+ default:
+ rs.Append(rv, "Name")
+ }
+ switch t := t.(type) {
+ case pref.FieldDescriptor:
+ for _, s := range descriptorAccessors[rt] {
+ switch s {
+ case "MapKey":
+ if k := t.MapKey(); k != nil {
+ rs.recs = append(rs.recs, [2]string{"MapKey", k.Kind().String()})
+ }
+ case "MapValue":
+ if v := t.MapValue(); v != nil {
+ switch v.Kind() {
+ case pref.EnumKind:
+ rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Enum().FullName())})
+ case pref.MessageKind, pref.GroupKind:
+ rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Message().FullName())})
+ default:
+ rs.recs = append(rs.recs, [2]string{"MapValue", v.Kind().String()})
+ }
+ }
+ case "ContainingOneof":
+ if od := t.ContainingOneof(); od != nil {
+ rs.recs = append(rs.recs, [2]string{"Oneof", string(od.Name())})
+ }
+ case "ContainingMessage":
+ if t.IsExtension() {
+ rs.recs = append(rs.recs, [2]string{"Extendee", string(t.ContainingMessage().FullName())})
+ }
+ case "Message":
+ if !t.IsMap() {
+ rs.Append(rv, s)
+ }
+ default:
+ rs.Append(rv, s)
+ }
+ }
+ case pref.OneofDescriptor:
+ var ss []string
+ fs := t.Fields()
+ for i := 0; i < fs.Len(); i++ {
+ ss = append(ss, string(fs.Get(i).Name()))
+ }
+ if len(ss) > 0 {
+ rs.recs = append(rs.recs, [2]string{"Fields", "[" + joinStrings(ss, false) + "]"})
+ }
+ default:
+ rs.Append(rv, descriptorAccessors[rt]...)
+ }
+ if rv.MethodByName("GoType").IsValid() {
+ rs.Append(rv, "GoType")
+ }
+ }
+ return start + rs.Join() + end
+}
+
+type records struct {
+ recs [][2]string
+ allowMulti bool
+}
+
+func (rs *records) Append(v reflect.Value, accessors ...string) {
+ for _, a := range accessors {
+ var rv reflect.Value
+ if m := v.MethodByName(a); m.IsValid() {
+ rv = m.Call(nil)[0]
+ }
+ if v.Kind() == reflect.Struct && !rv.IsValid() {
+ rv = v.FieldByName(a)
+ }
+ if !rv.IsValid() {
+ panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a))
+ }
+ if _, ok := rv.Interface().(pref.Value); ok {
+ rv = rv.MethodByName("Interface").Call(nil)[0]
+ if !rv.IsNil() {
+ rv = rv.Elem()
+ }
+ }
+
+ // Ignore zero values.
+ var isZero bool
+ switch rv.Kind() {
+ case reflect.Interface, reflect.Slice:
+ isZero = rv.IsNil()
+ case reflect.Bool:
+ isZero = rv.Bool() == false
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ isZero = rv.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ isZero = rv.Uint() == 0
+ case reflect.String:
+ isZero = rv.String() == ""
+ }
+ if n, ok := rv.Interface().(list); ok {
+ isZero = n.Len() == 0
+ }
+ if isZero {
+ continue
+ }
+
+ // Format the value.
+ var s string
+ v := rv.Interface()
+ switch v := v.(type) {
+ case list:
+ s = formatListOpt(v, false, rs.allowMulti)
+ case pref.FieldDescriptor, pref.OneofDescriptor, pref.EnumValueDescriptor, pref.MethodDescriptor:
+ s = string(v.(pref.Descriptor).Name())
+ case pref.Descriptor:
+ s = string(v.FullName())
+ case string:
+ s = strconv.Quote(v)
+ case []byte:
+ s = fmt.Sprintf("%q", v)
+ default:
+ s = fmt.Sprint(v)
+ }
+ rs.recs = append(rs.recs, [2]string{a, s})
+ }
+}
+
+func (rs *records) Join() string {
+ var ss []string
+
+ // In single line mode, simply join all records with commas.
+ if !rs.allowMulti {
+ for _, r := range rs.recs {
+ ss = append(ss, r[0]+formatColon(0)+r[1])
+ }
+ return joinStrings(ss, false)
+ }
+
+	// In multiline mode, align single-line records for more readable output.
+ var maxLen int
+ flush := func(i int) {
+ for _, r := range rs.recs[len(ss):i] {
+ ss = append(ss, r[0]+formatColon(maxLen-len(r[0]))+r[1])
+ }
+ maxLen = 0
+ }
+ for i, r := range rs.recs {
+ if isMulti := strings.Contains(r[1], "\n"); isMulti {
+ flush(i)
+ ss = append(ss, r[0]+formatColon(0)+strings.Join(strings.Split(r[1], "\n"), "\n\t"))
+ } else if maxLen < len(r[0]) {
+ maxLen = len(r[0])
+ }
+ }
+ flush(len(rs.recs))
+ return joinStrings(ss, true)
+}
+
+func formatColon(padding int) string {
+ // Deliberately introduce instability into the debug output to
+ // discourage users from performing string comparisons.
+ // This provides us flexibility to change the output in the future.
+ if detrand.Bool() {
+ return ":" + strings.Repeat(" ", 1+padding) // use non-breaking spaces (U+00a0)
+ } else {
+ return ":" + strings.Repeat(" ", 1+padding) // use regular spaces (U+0020)
+ }
+}
+
+func joinStrings(ss []string, isMulti bool) string {
+ if len(ss) == 0 {
+ return ""
+ }
+ if isMulti {
+ return "\n\t" + strings.Join(ss, "\n\t") + "\n"
+ }
+ return strings.Join(ss, ", ")
+}
diff --git a/vendor/google.golang.org/protobuf/internal/descopts/options.go b/vendor/google.golang.org/protobuf/internal/descopts/options.go
new file mode 100644
index 0000000..8401be8
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/descopts/options.go
@@ -0,0 +1,29 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package descopts contains the nil pointers to concrete descriptor options.
+//
+// This package exists as a form of reverse dependency injection so that certain
+// packages (e.g., internal/filedesc and internal/filetype) can avoid a direct
+// dependency on the descriptor proto package.
+package descopts
+
+import pref "google.golang.org/protobuf/reflect/protoreflect"
+
+// These variables are set by the init function in descriptor.pb.go via logic
+// in internal/filetype. In other words, so long as the descriptor proto package
+// is linked in, these variables will be populated.
+//
+// Each variable is populated with a nil pointer to the options struct.
+var (
+ File pref.ProtoMessage
+ Enum pref.ProtoMessage
+ EnumValue pref.ProtoMessage
+ Message pref.ProtoMessage
+ Field pref.ProtoMessage
+ Oneof pref.ProtoMessage
+ ExtensionRange pref.ProtoMessage
+ Service pref.ProtoMessage
+ Method pref.ProtoMessage
+)
diff --git a/vendor/google.golang.org/protobuf/internal/detrand/rand.go b/vendor/google.golang.org/protobuf/internal/detrand/rand.go
new file mode 100644
index 0000000..49c8676
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/detrand/rand.go
@@ -0,0 +1,69 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package detrand provides deterministically random functionality.
+//
+// The pseudo-randomness of these functions is seeded by the program binary
+// itself and guarantees that the output does not change within a program,
+// while ensuring that the output is unstable across different builds.
+package detrand
+
+import (
+ "encoding/binary"
+ "hash/fnv"
+ "os"
+)
+
+// Disable disables detrand such that all functions return the zero value.
+// This function is not concurrent-safe and must be called during program init.
+func Disable() {
+ randSeed = 0
+}
+
+// Bool returns a deterministically random boolean.
+func Bool() bool {
+ return randSeed%2 == 1
+}
+
+// Intn returns a deterministically random integer between 0 and n-1, inclusive.
+func Intn(n int) int {
+ if n <= 0 {
+ panic("must be positive")
+ }
+ return int(randSeed % uint64(n))
+}
+
+// randSeed is a best-effort at an approximate hash of the Go binary.
+var randSeed = binaryHash()
+
+func binaryHash() uint64 {
+ // Open the Go binary.
+ s, err := os.Executable()
+ if err != nil {
+ return 0
+ }
+ f, err := os.Open(s)
+ if err != nil {
+ return 0
+ }
+ defer f.Close()
+
+ // Hash the size and several samples of the Go binary.
+ const numSamples = 8
+ var buf [64]byte
+ h := fnv.New64()
+ fi, err := f.Stat()
+ if err != nil {
+ return 0
+ }
+ binary.LittleEndian.PutUint64(buf[:8], uint64(fi.Size()))
+ h.Write(buf[:8])
+ for i := int64(0); i < numSamples; i++ {
+ if _, err := f.ReadAt(buf[:], i*fi.Size()/numSamples); err != nil {
+ return 0
+ }
+ h.Write(buf[:])
+ }
+ return h.Sum64()
+}
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go b/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go
new file mode 100644
index 0000000..fdd9b13
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go
@@ -0,0 +1,213 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package defval marshals and unmarshals textual forms of default values.
+//
+// This package handles both the form historically used in Go struct field tags
+// and also the form used by google.protobuf.FieldDescriptorProto.default_value
+// since they differ in superficial ways.
+package defval
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+
+ ptext "google.golang.org/protobuf/internal/encoding/text"
+ errors "google.golang.org/protobuf/internal/errors"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// Format is the serialization format used to represent the default value.
+type Format int
+
+const (
+ _ Format = iota
+
+ // Descriptor uses the serialization format that protoc uses with the
+ // google.protobuf.FieldDescriptorProto.default_value field.
+ Descriptor
+
+ // GoTag uses the historical serialization format in Go struct field tags.
+ GoTag
+)
+
+// Unmarshal deserializes the default string s according to the given kind k.
+// When k is an enum, a list of enum value descriptors must be provided.
+func Unmarshal(s string, k pref.Kind, evs pref.EnumValueDescriptors, f Format) (pref.Value, pref.EnumValueDescriptor, error) {
+ switch k {
+ case pref.BoolKind:
+ if f == GoTag {
+ switch s {
+ case "1":
+ return pref.ValueOfBool(true), nil, nil
+ case "0":
+ return pref.ValueOfBool(false), nil, nil
+ }
+ } else {
+ switch s {
+ case "true":
+ return pref.ValueOfBool(true), nil, nil
+ case "false":
+ return pref.ValueOfBool(false), nil, nil
+ }
+ }
+ case pref.EnumKind:
+ if f == GoTag {
+ // Go tags use the numeric form of the enum value.
+ if n, err := strconv.ParseInt(s, 10, 32); err == nil {
+ if ev := evs.ByNumber(pref.EnumNumber(n)); ev != nil {
+ return pref.ValueOfEnum(ev.Number()), ev, nil
+ }
+ }
+ } else {
+			// Descriptor default_value uses the enum identifier.
+ ev := evs.ByName(pref.Name(s))
+ if ev != nil {
+ return pref.ValueOfEnum(ev.Number()), ev, nil
+ }
+ }
+ case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind:
+ if v, err := strconv.ParseInt(s, 10, 32); err == nil {
+ return pref.ValueOfInt32(int32(v)), nil, nil
+ }
+ case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
+ if v, err := strconv.ParseInt(s, 10, 64); err == nil {
+ return pref.ValueOfInt64(int64(v)), nil, nil
+ }
+ case pref.Uint32Kind, pref.Fixed32Kind:
+ if v, err := strconv.ParseUint(s, 10, 32); err == nil {
+ return pref.ValueOfUint32(uint32(v)), nil, nil
+ }
+ case pref.Uint64Kind, pref.Fixed64Kind:
+ if v, err := strconv.ParseUint(s, 10, 64); err == nil {
+ return pref.ValueOfUint64(uint64(v)), nil, nil
+ }
+ case pref.FloatKind, pref.DoubleKind:
+ var v float64
+ var err error
+ switch s {
+ case "-inf":
+ v = math.Inf(-1)
+ case "inf":
+ v = math.Inf(+1)
+ case "nan":
+ v = math.NaN()
+ default:
+ v, err = strconv.ParseFloat(s, 64)
+ }
+ if err == nil {
+ if k == pref.FloatKind {
+ return pref.ValueOfFloat32(float32(v)), nil, nil
+ } else {
+ return pref.ValueOfFloat64(float64(v)), nil, nil
+ }
+ }
+ case pref.StringKind:
+ // String values are already unescaped and can be used as is.
+ return pref.ValueOfString(s), nil, nil
+ case pref.BytesKind:
+ if b, ok := unmarshalBytes(s); ok {
+ return pref.ValueOfBytes(b), nil, nil
+ }
+ }
+ return pref.Value{}, nil, errors.New("could not parse value for %v: %q", k, s)
+}
+
+// Marshal serializes v as the default string according to the given kind k.
+// When specifying the Descriptor format for an enum kind, the associated
+// enum value descriptor must be provided.
+func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) (string, error) {
+ switch k {
+ case pref.BoolKind:
+ if f == GoTag {
+ if v.Bool() {
+ return "1", nil
+ } else {
+ return "0", nil
+ }
+ } else {
+ if v.Bool() {
+ return "true", nil
+ } else {
+ return "false", nil
+ }
+ }
+ case pref.EnumKind:
+ if f == GoTag {
+ return strconv.FormatInt(int64(v.Enum()), 10), nil
+ } else {
+ return string(ev.Name()), nil
+ }
+ case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind, pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
+ return strconv.FormatInt(v.Int(), 10), nil
+ case pref.Uint32Kind, pref.Fixed32Kind, pref.Uint64Kind, pref.Fixed64Kind:
+ return strconv.FormatUint(v.Uint(), 10), nil
+ case pref.FloatKind, pref.DoubleKind:
+ f := v.Float()
+ switch {
+ case math.IsInf(f, -1):
+ return "-inf", nil
+ case math.IsInf(f, +1):
+ return "inf", nil
+ case math.IsNaN(f):
+ return "nan", nil
+ default:
+ if k == pref.FloatKind {
+ return strconv.FormatFloat(f, 'g', -1, 32), nil
+ } else {
+ return strconv.FormatFloat(f, 'g', -1, 64), nil
+ }
+ }
+ case pref.StringKind:
+ // String values are serialized as is without any escaping.
+ return v.String(), nil
+ case pref.BytesKind:
+ if s, ok := marshalBytes(v.Bytes()); ok {
+ return s, nil
+ }
+ }
+ return "", errors.New("could not format value for %v: %v", k, v)
+}
+
+// unmarshalBytes deserializes bytes by applying C unescaping.
+func unmarshalBytes(s string) ([]byte, bool) {
+ // Bytes values use the same escaping as the text format,
+ // however they lack the surrounding double quotes.
+ v, err := ptext.UnmarshalString(`"` + s + `"`)
+ if err != nil {
+ return nil, false
+ }
+ return []byte(v), true
+}
+
+// marshalBytes serializes bytes by using C escaping.
+// To match the exact output of protoc, this is identical to the
+// CEscape function in strutil.cc of the protoc source code.
+func marshalBytes(b []byte) (string, bool) {
+ var s []byte
+ for _, c := range b {
+ switch c {
+ case '\n':
+ s = append(s, `\n`...)
+ case '\r':
+ s = append(s, `\r`...)
+ case '\t':
+ s = append(s, `\t`...)
+ case '"':
+ s = append(s, `\"`...)
+ case '\'':
+ s = append(s, `\'`...)
+ case '\\':
+ s = append(s, `\\`...)
+ default:
+ if printableASCII := c >= 0x20 && c <= 0x7e; printableASCII {
+ s = append(s, c)
+ } else {
+ s = append(s, fmt.Sprintf(`\%03o`, c)...)
+ }
+ }
+ }
+ return string(s), true
+}
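For reference, this is the kind of legacy struct tag whose def= payload is parsed with the GoTag format above; the field name and number are made up for illustration.

```go
package example

// Bool defaults in the GoTag format are "1"/"0" rather than "true"/"false";
// the descriptor default_value for the same field would be "true" or "false".
type legacyMessage struct {
	Active *bool `protobuf:"varint,1,opt,name=active,def=1"`
}
```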
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go
new file mode 100644
index 0000000..c1866f3
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go
@@ -0,0 +1,241 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package messageset encodes and decodes the obsolete MessageSet wire format.
+package messageset
+
+import (
+ "math"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/errors"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// The MessageSet wire format is equivalent to a message defined as follows,
+// where each Item defines an extension field with a field number of 'type_id'
+// and content of 'message'. MessageSet extensions must be non-repeated message
+// fields.
+//
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// }
+// }
+const (
+ FieldItem = protowire.Number(1)
+ FieldTypeID = protowire.Number(2)
+ FieldMessage = protowire.Number(3)
+)
+
+// ExtensionName is the field name for extensions of MessageSet.
+//
+// A valid MessageSet extension must be of the form:
+// message MyMessage {
+// extend proto2.bridge.MessageSet {
+// optional MyMessage message_set_extension = 1234;
+// }
+// ...
+// }
+const ExtensionName = "message_set_extension"
+
+// IsMessageSet returns whether the message uses the MessageSet wire format.
+func IsMessageSet(md pref.MessageDescriptor) bool {
+ xmd, ok := md.(interface{ IsMessageSet() bool })
+ return ok && xmd.IsMessageSet()
+}
+
+// IsMessageSetExtension reports whether this field properly extends a MessageSet.
+func IsMessageSetExtension(fd pref.FieldDescriptor) bool {
+ switch {
+ case fd.Name() != ExtensionName:
+ return false
+ case !IsMessageSet(fd.ContainingMessage()):
+ return false
+ case fd.FullName().Parent() != fd.Message().FullName():
+ return false
+ }
+ return true
+}
+
+// SizeField returns the size of a MessageSet item field containing an extension
+// with the given field number, not counting the contents of the message subfield.
+func SizeField(num protowire.Number) int {
+ return 2*protowire.SizeTag(FieldItem) + protowire.SizeTag(FieldTypeID) + protowire.SizeVarint(uint64(num))
+}
+
+// Unmarshal parses a MessageSet.
+//
+// It calls fn with the type ID and value of each item in the MessageSet.
+// Unknown fields are discarded.
+//
+// If wantLen is true, the item values include the varint length prefix.
+// This is ugly, but simplifies the fast-path decoder in internal/impl.
+func Unmarshal(b []byte, wantLen bool, fn func(typeID protowire.Number, value []byte) error) error {
+ for len(b) > 0 {
+ num, wtyp, n := protowire.ConsumeTag(b)
+ if n < 0 {
+ return protowire.ParseError(n)
+ }
+ b = b[n:]
+ if num != FieldItem || wtyp != protowire.StartGroupType {
+ n := protowire.ConsumeFieldValue(num, wtyp, b)
+ if n < 0 {
+ return protowire.ParseError(n)
+ }
+ b = b[n:]
+ continue
+ }
+ typeID, value, n, err := ConsumeFieldValue(b, wantLen)
+ if err != nil {
+ return err
+ }
+ b = b[n:]
+ if typeID == 0 {
+ continue
+ }
+ if err := fn(typeID, value); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ConsumeFieldValue parses b as a MessageSet item field value until and including
+// the trailing end group marker. It assumes the start group tag has already been parsed.
+// It returns the contents of the type_id and message subfields and the total
+// item length.
+//
+// If wantLen is true, the returned message value includes the length prefix.
+func ConsumeFieldValue(b []byte, wantLen bool) (typeid protowire.Number, message []byte, n int, err error) {
+ ilen := len(b)
+ for {
+ num, wtyp, n := protowire.ConsumeTag(b)
+ if n < 0 {
+ return 0, nil, 0, protowire.ParseError(n)
+ }
+ b = b[n:]
+ switch {
+ case num == FieldItem && wtyp == protowire.EndGroupType:
+ if wantLen && len(message) == 0 {
+ // The message field was missing, which should never happen.
+ // Be prepared for this case anyway.
+ message = protowire.AppendVarint(message, 0)
+ }
+ return typeid, message, ilen - len(b), nil
+ case num == FieldTypeID && wtyp == protowire.VarintType:
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, nil, 0, protowire.ParseError(n)
+ }
+ b = b[n:]
+ if v < 1 || v > math.MaxInt32 {
+ return 0, nil, 0, errors.New("invalid type_id in message set")
+ }
+ typeid = protowire.Number(v)
+ case num == FieldMessage && wtyp == protowire.BytesType:
+ m, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, nil, 0, protowire.ParseError(n)
+ }
+ if message == nil {
+ if wantLen {
+ message = b[:n:n]
+ } else {
+ message = m[:len(m):len(m)]
+ }
+ } else {
+ // This case should never happen in practice, but handle it for
+ // correctness: The MessageSet item contains multiple message
+ // fields, which need to be merged.
+ //
+ // In the case where we're returning the length, this becomes
+ // quite inefficient since we need to strip the length off
+ // the existing data and reconstruct it with the combined length.
+ if wantLen {
+ _, nn := protowire.ConsumeVarint(message)
+ m0 := message[nn:]
+ message = nil
+ message = protowire.AppendVarint(message, uint64(len(m0)+len(m)))
+ message = append(message, m0...)
+ message = append(message, m...)
+ } else {
+ message = append(message, m...)
+ }
+ }
+ b = b[n:]
+ default:
+ // We have no place to put it, so we just ignore unknown fields.
+ n := protowire.ConsumeFieldValue(num, wtyp, b)
+ if n < 0 {
+ return 0, nil, 0, protowire.ParseError(n)
+ }
+ b = b[n:]
+ }
+ }
+}
+
+// AppendFieldStart appends the start of a MessageSet item field containing
+// an extension with the given number. The caller must add the message
+// subfield (including the tag).
+func AppendFieldStart(b []byte, num protowire.Number) []byte {
+ b = protowire.AppendTag(b, FieldItem, protowire.StartGroupType)
+ b = protowire.AppendTag(b, FieldTypeID, protowire.VarintType)
+ b = protowire.AppendVarint(b, uint64(num))
+ return b
+}
+
+// AppendFieldEnd appends the trailing end group marker for a MessageSet item field.
+func AppendFieldEnd(b []byte) []byte {
+ return protowire.AppendTag(b, FieldItem, protowire.EndGroupType)
+}
+
+// SizeUnknown returns the size of an unknown fields section in MessageSet format.
+//
+// See AppendUnknown.
+func SizeUnknown(unknown []byte) (size int) {
+ for len(unknown) > 0 {
+ num, typ, n := protowire.ConsumeTag(unknown)
+ if n < 0 || typ != protowire.BytesType {
+ return 0
+ }
+ unknown = unknown[n:]
+ _, n = protowire.ConsumeBytes(unknown)
+ if n < 0 {
+ return 0
+ }
+ unknown = unknown[n:]
+ size += SizeField(num) + protowire.SizeTag(FieldMessage) + n
+ }
+ return size
+}
+
+// AppendUnknown appends unknown fields to b in MessageSet format.
+//
+// For historic reasons, unresolved items in a MessageSet are stored in a
+// message's unknown fields section in non-MessageSet format. That is, an
+// unknown item with typeID T and value V appears in the unknown fields as
+// a field with number T and value V.
+//
+// This function converts the unknown fields back into MessageSet form.
+func AppendUnknown(b, unknown []byte) ([]byte, error) {
+ for len(unknown) > 0 {
+ num, typ, n := protowire.ConsumeTag(unknown)
+ if n < 0 || typ != protowire.BytesType {
+ return nil, errors.New("invalid data in message set unknown fields")
+ }
+ unknown = unknown[n:]
+ _, n = protowire.ConsumeBytes(unknown)
+ if n < 0 {
+ return nil, errors.New("invalid data in message set unknown fields")
+ }
+ b = AppendFieldStart(b, num)
+ b = protowire.AppendTag(b, FieldMessage, protowire.BytesType)
+ b = append(b, unknown[:n]...)
+ b = AppendFieldEnd(b)
+ unknown = unknown[n:]
+ }
+ return b, nil
+}
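A sketch of the wire layout described at the top of this file, built with the public protowire package; the type_id and payload bytes are arbitrary.

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	payload := []byte{0x08, 0x01} // some already-encoded extension message

	// One MessageSet item: group #1 wrapping type_id (#2) and message (#3),
	// mirroring AppendFieldStart/AppendFieldEnd above.
	var b []byte
	b = protowire.AppendTag(b, 1, protowire.StartGroupType)
	b = protowire.AppendTag(b, 2, protowire.VarintType)
	b = protowire.AppendVarint(b, 12345) // extension field number as type_id
	b = protowire.AppendTag(b, 3, protowire.BytesType)
	b = protowire.AppendBytes(b, payload)
	b = protowire.AppendTag(b, 1, protowire.EndGroupType)

	fmt.Printf("% x\n", b)
}
```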
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
new file mode 100644
index 0000000..38f1931
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
@@ -0,0 +1,207 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package tag marshals and unmarshals the legacy struct tags as generated
+// by historical versions of protoc-gen-go.
+package tag
+
+import (
+ "reflect"
+ "strconv"
+ "strings"
+
+ defval "google.golang.org/protobuf/internal/encoding/defval"
+ fdesc "google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/internal/strs"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+var byteType = reflect.TypeOf(byte(0))
+
+// Unmarshal decodes the tag into a prototype.Field.
+//
+// The goType is needed to determine the original protoreflect.Kind since the
+// tag does not record sufficient information to determine that.
+// The type is the underlying field type (e.g., a repeated field may be
+// represented by []T, but the Go type passed in is just T).
+// A list of enum value descriptors must be provided for enum fields.
+// This does not populate the Enum or Message (except for weak message).
+//
+// This function is a best effort attempt; parsing errors are ignored.
+func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) pref.FieldDescriptor {
+ f := new(fdesc.Field)
+ f.L0.ParentFile = fdesc.SurrogateProto2
+ for len(tag) > 0 {
+ i := strings.IndexByte(tag, ',')
+ if i < 0 {
+ i = len(tag)
+ }
+ switch s := tag[:i]; {
+ case strings.HasPrefix(s, "name="):
+ f.L0.FullName = pref.FullName(s[len("name="):])
+ case strings.Trim(s, "0123456789") == "":
+ n, _ := strconv.ParseUint(s, 10, 32)
+ f.L1.Number = pref.FieldNumber(n)
+ case s == "opt":
+ f.L1.Cardinality = pref.Optional
+ case s == "req":
+ f.L1.Cardinality = pref.Required
+ case s == "rep":
+ f.L1.Cardinality = pref.Repeated
+ case s == "varint":
+ switch goType.Kind() {
+ case reflect.Bool:
+ f.L1.Kind = pref.BoolKind
+ case reflect.Int32:
+ f.L1.Kind = pref.Int32Kind
+ case reflect.Int64:
+ f.L1.Kind = pref.Int64Kind
+ case reflect.Uint32:
+ f.L1.Kind = pref.Uint32Kind
+ case reflect.Uint64:
+ f.L1.Kind = pref.Uint64Kind
+ }
+ case s == "zigzag32":
+ if goType.Kind() == reflect.Int32 {
+ f.L1.Kind = pref.Sint32Kind
+ }
+ case s == "zigzag64":
+ if goType.Kind() == reflect.Int64 {
+ f.L1.Kind = pref.Sint64Kind
+ }
+ case s == "fixed32":
+ switch goType.Kind() {
+ case reflect.Int32:
+ f.L1.Kind = pref.Sfixed32Kind
+ case reflect.Uint32:
+ f.L1.Kind = pref.Fixed32Kind
+ case reflect.Float32:
+ f.L1.Kind = pref.FloatKind
+ }
+ case s == "fixed64":
+ switch goType.Kind() {
+ case reflect.Int64:
+ f.L1.Kind = pref.Sfixed64Kind
+ case reflect.Uint64:
+ f.L1.Kind = pref.Fixed64Kind
+ case reflect.Float64:
+ f.L1.Kind = pref.DoubleKind
+ }
+ case s == "bytes":
+ switch {
+ case goType.Kind() == reflect.String:
+ f.L1.Kind = pref.StringKind
+ case goType.Kind() == reflect.Slice && goType.Elem() == byteType:
+ f.L1.Kind = pref.BytesKind
+ default:
+ f.L1.Kind = pref.MessageKind
+ }
+ case s == "group":
+ f.L1.Kind = pref.GroupKind
+ case strings.HasPrefix(s, "enum="):
+ f.L1.Kind = pref.EnumKind
+ case strings.HasPrefix(s, "json="):
+ jsonName := s[len("json="):]
+ if jsonName != strs.JSONCamelCase(string(f.L0.FullName.Name())) {
+ f.L1.StringName.InitJSON(jsonName)
+ }
+ case s == "packed":
+ f.L1.HasPacked = true
+ f.L1.IsPacked = true
+ case strings.HasPrefix(s, "weak="):
+ f.L1.IsWeak = true
+ f.L1.Message = fdesc.PlaceholderMessage(pref.FullName(s[len("weak="):]))
+ case strings.HasPrefix(s, "def="):
+ // The default tag is special in that everything afterwards is the
+ // default regardless of the presence of commas.
+ s, i = tag[len("def="):], len(tag)
+ v, ev, _ := defval.Unmarshal(s, f.L1.Kind, evs, defval.GoTag)
+ f.L1.Default = fdesc.DefaultValue(v, ev)
+ case s == "proto3":
+ f.L0.ParentFile = fdesc.SurrogateProto3
+ }
+ tag = strings.TrimPrefix(tag[i:], ",")
+ }
+
+ // The generator uses the group message name instead of the field name.
+ // We obtain the real field name by lowercasing the group name.
+ if f.L1.Kind == pref.GroupKind {
+ f.L0.FullName = pref.FullName(strings.ToLower(string(f.L0.FullName)))
+ }
+ return f
+}
+
+// Marshal encodes the protoreflect.FieldDescriptor as a tag.
+//
+// The enumName must be provided if the kind is an enum.
+// Historically, the formulation of the enum "name" was the proto package
+// dot-concatenated with the generated Go identifier for the enum type.
+// Depending on the context in which Marshal is called, there are different
+// ways through which that information is determined. As such, it is the
+// caller's responsibility to provide that information.
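+//
+// For example (illustrative), a proto3 int32 field named foo_bar with field
+// number 1 would typically marshal to the tag
+// "varint,1,opt,name=foo_bar,json=fooBar,proto3".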
+func Marshal(fd pref.FieldDescriptor, enumName string) string {
+ var tag []string
+ switch fd.Kind() {
+ case pref.BoolKind, pref.EnumKind, pref.Int32Kind, pref.Uint32Kind, pref.Int64Kind, pref.Uint64Kind:
+ tag = append(tag, "varint")
+ case pref.Sint32Kind:
+ tag = append(tag, "zigzag32")
+ case pref.Sint64Kind:
+ tag = append(tag, "zigzag64")
+ case pref.Sfixed32Kind, pref.Fixed32Kind, pref.FloatKind:
+ tag = append(tag, "fixed32")
+ case pref.Sfixed64Kind, pref.Fixed64Kind, pref.DoubleKind:
+ tag = append(tag, "fixed64")
+ case pref.StringKind, pref.BytesKind, pref.MessageKind:
+ tag = append(tag, "bytes")
+ case pref.GroupKind:
+ tag = append(tag, "group")
+ }
+ tag = append(tag, strconv.Itoa(int(fd.Number())))
+ switch fd.Cardinality() {
+ case pref.Optional:
+ tag = append(tag, "opt")
+ case pref.Required:
+ tag = append(tag, "req")
+ case pref.Repeated:
+ tag = append(tag, "rep")
+ }
+ if fd.IsPacked() {
+ tag = append(tag, "packed")
+ }
+ name := string(fd.Name())
+ if fd.Kind() == pref.GroupKind {
+ // The name of the FieldDescriptor for a group field is
+ // lowercased. To find the original capitalization, we
+ // look in the field's MessageType.
+ name = string(fd.Message().Name())
+ }
+ tag = append(tag, "name="+name)
+ if jsonName := fd.JSONName(); jsonName != "" && jsonName != name && !fd.IsExtension() {
+ // NOTE: The jsonName != name condition is suspect, but it preserves
+ // the exact same semantics from the previous generator.
+ tag = append(tag, "json="+jsonName)
+ }
+ if fd.IsWeak() {
+ tag = append(tag, "weak="+string(fd.Message().FullName()))
+ }
+ // The previous implementation does not tag extension fields as proto3,
+ // even when the field is defined in a proto3 file. Match that behavior
+ // for consistency.
+ if fd.Syntax() == pref.Proto3 && !fd.IsExtension() {
+ tag = append(tag, "proto3")
+ }
+ if fd.Kind() == pref.EnumKind && enumName != "" {
+ tag = append(tag, "enum="+enumName)
+ }
+ if fd.ContainingOneof() != nil {
+ tag = append(tag, "oneof")
+ }
+ // This must appear last in the tag, since commas in strings aren't escaped.
+ if fd.HasDefault() {
+ def, _ := defval.Marshal(fd.Default(), fd.DefaultEnumValue(), fd.Kind(), defval.GoTag)
+ tag = append(tag, "def="+def)
+ }
+ return strings.Join(tag, ",")
+}
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
new file mode 100644
index 0000000..eb10ea1
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
@@ -0,0 +1,665 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package text
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "regexp"
+ "strconv"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/internal/errors"
+)
+
+// Decoder is a token-based textproto decoder.
+type Decoder struct {
+ // lastCall is last method called, either readCall or peekCall.
+ // Initial value is readCall.
+ lastCall call
+
+ // lastToken contains the last read token.
+ lastToken Token
+
+ // lastErr contains the last read error.
+ lastErr error
+
+ // openStack is a stack containing the byte characters for MessageOpen and
+ // ListOpen kinds. The top of stack represents the message or the list that
+ // the current token is nested in. An empty stack means the current token is
+ // at the top level message. The characters '{' and '<' both represent the
+ // MessageOpen kind.
+ openStack []byte
+
+ // orig is used in reporting line and column.
+ orig []byte
+ // in contains the unconsumed input.
+ in []byte
+}
+
+// NewDecoder returns a Decoder to read the given []byte.
+func NewDecoder(b []byte) *Decoder {
+ return &Decoder{orig: b, in: b}
+}
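+
+// As an illustrative example of the token stream, decoding the input
+//   foo: "hello" bar { baz: 1 }
+// yields roughly the tokens
+//   Name(foo), Scalar("hello"), Name(bar), MessageOpen,
+//   Name(baz), Scalar(1), MessageClose, EOF
+// with commas and semicolons between fields consumed internally and never
+// returned by Read.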
+
+// ErrUnexpectedEOF means that EOF was encountered in the middle of the input.
+var ErrUnexpectedEOF = errors.New("%v", io.ErrUnexpectedEOF)
+
+// call specifies which Decoder method was invoked.
+type call uint8
+
+const (
+ readCall call = iota
+ peekCall
+)
+
+// Peek looks ahead and returns the next token and error without advancing a read.
+func (d *Decoder) Peek() (Token, error) {
+ defer func() { d.lastCall = peekCall }()
+ if d.lastCall == readCall {
+ d.lastToken, d.lastErr = d.Read()
+ }
+ return d.lastToken, d.lastErr
+}
+
+// Read returns the next token.
+// It will return an error if there is no valid token.
+func (d *Decoder) Read() (Token, error) {
+ defer func() { d.lastCall = readCall }()
+ if d.lastCall == peekCall {
+ return d.lastToken, d.lastErr
+ }
+
+ tok, err := d.parseNext(d.lastToken.kind)
+ if err != nil {
+ return Token{}, err
+ }
+
+ switch tok.kind {
+ case comma, semicolon:
+ tok, err = d.parseNext(tok.kind)
+ if err != nil {
+ return Token{}, err
+ }
+ }
+ d.lastToken = tok
+ return tok, nil
+}
+
+const (
+ mismatchedFmt = "mismatched close character %q"
+ unexpectedFmt = "unexpected character %q"
+)
+
+// parseNext parses the next Token based on given last kind.
+func (d *Decoder) parseNext(lastKind Kind) (Token, error) {
+ // Trim leading spaces.
+ d.consume(0)
+ isEOF := false
+ if len(d.in) == 0 {
+ isEOF = true
+ }
+
+ switch lastKind {
+ case EOF:
+ return d.consumeToken(EOF, 0, 0), nil
+
+ case bof:
+ // Start of top level message. Next token can be EOF or Name.
+ if isEOF {
+ return d.consumeToken(EOF, 0, 0), nil
+ }
+ return d.parseFieldName()
+
+ case Name:
+ // Next token can be MessageOpen, ListOpen or Scalar.
+ if isEOF {
+ return Token{}, ErrUnexpectedEOF
+ }
+ switch ch := d.in[0]; ch {
+ case '{', '<':
+ d.pushOpenStack(ch)
+ return d.consumeToken(MessageOpen, 1, 0), nil
+ case '[':
+ d.pushOpenStack(ch)
+ return d.consumeToken(ListOpen, 1, 0), nil
+ default:
+ return d.parseScalar()
+ }
+
+ case Scalar:
+ openKind, closeCh := d.currentOpenKind()
+ switch openKind {
+ case bof:
+ // Top level message.
+ // Next token can be EOF, comma, semicolon or Name.
+ if isEOF {
+ return d.consumeToken(EOF, 0, 0), nil
+ }
+ switch d.in[0] {
+ case ',':
+ return d.consumeToken(comma, 1, 0), nil
+ case ';':
+ return d.consumeToken(semicolon, 1, 0), nil
+ default:
+ return d.parseFieldName()
+ }
+
+ case MessageOpen:
+ // Next token can be MessageClose, comma, semicolon or Name.
+ if isEOF {
+ return Token{}, ErrUnexpectedEOF
+ }
+ switch ch := d.in[0]; ch {
+ case closeCh:
+ d.popOpenStack()
+ return d.consumeToken(MessageClose, 1, 0), nil
+ case otherCloseChar[closeCh]:
+ return Token{}, d.newSyntaxError(mismatchedFmt, ch)
+ case ',':
+ return d.consumeToken(comma, 1, 0), nil
+ case ';':
+ return d.consumeToken(semicolon, 1, 0), nil
+ default:
+ return d.parseFieldName()
+ }
+
+ case ListOpen:
+ // Next token can be ListClose or comma.
+ if isEOF {
+ return Token{}, ErrUnexpectedEOF
+ }
+ switch ch := d.in[0]; ch {
+ case ']':
+ d.popOpenStack()
+ return d.consumeToken(ListClose, 1, 0), nil
+ case ',':
+ return d.consumeToken(comma, 1, 0), nil
+ default:
+ return Token{}, d.newSyntaxError(unexpectedFmt, ch)
+ }
+ }
+
+ case MessageOpen:
+ // Next token can be MessageClose or Name.
+ if isEOF {
+ return Token{}, ErrUnexpectedEOF
+ }
+ _, closeCh := d.currentOpenKind()
+ switch ch := d.in[0]; ch {
+ case closeCh:
+ d.popOpenStack()
+ return d.consumeToken(MessageClose, 1, 0), nil
+ case otherCloseChar[closeCh]:
+ return Token{}, d.newSyntaxError(mismatchedFmt, ch)
+ default:
+ return d.parseFieldName()
+ }
+
+ case MessageClose:
+ openKind, closeCh := d.currentOpenKind()
+ switch openKind {
+ case bof:
+ // Top level message.
+ // Next token can be EOF, comma, semicolon or Name.
+ if isEOF {
+ return d.consumeToken(EOF, 0, 0), nil
+ }
+ switch ch := d.in[0]; ch {
+ case ',':
+ return d.consumeToken(comma, 1, 0), nil
+ case ';':
+ return d.consumeToken(semicolon, 1, 0), nil
+ default:
+ return d.parseFieldName()
+ }
+
+ case MessageOpen:
+ // Next token can be MessageClose, comma, semicolon or Name.
+ if isEOF {
+ return Token{}, ErrUnexpectedEOF
+ }
+ switch ch := d.in[0]; ch {
+ case closeCh:
+ d.popOpenStack()
+ return d.consumeToken(MessageClose, 1, 0), nil
+ case otherCloseChar[closeCh]:
+ return Token{}, d.newSyntaxError(mismatchedFmt, ch)
+ case ',':
+ return d.consumeToken(comma, 1, 0), nil
+ case ';':
+ return d.consumeToken(semicolon, 1, 0), nil
+ default:
+ return d.parseFieldName()
+ }
+
+ case ListOpen:
+ // Next token can be ListClose or comma
+ if isEOF {
+ return Token{}, ErrUnexpectedEOF
+ }
+ switch ch := d.in[0]; ch {
+ case closeCh:
+ d.popOpenStack()
+ return d.consumeToken(ListClose, 1, 0), nil
+ case ',':
+ return d.consumeToken(comma, 1, 0), nil
+ default:
+ return Token{}, d.newSyntaxError(unexpectedFmt, ch)
+ }
+ }
+
+ case ListOpen:
+ // Next token can be ListClose, MessageStart or Scalar.
+ if isEOF {
+ return Token{}, ErrUnexpectedEOF
+ }
+ switch ch := d.in[0]; ch {
+ case ']':
+ d.popOpenStack()
+ return d.consumeToken(ListClose, 1, 0), nil
+ case '{', '<':
+ d.pushOpenStack(ch)
+ return d.consumeToken(MessageOpen, 1, 0), nil
+ default:
+ return d.parseScalar()
+ }
+
+ case ListClose:
+ openKind, closeCh := d.currentOpenKind()
+ switch openKind {
+ case bof:
+ // Top level message.
+ // Next token can be EOF, comma, semicolon or Name.
+ if isEOF {
+ return d.consumeToken(EOF, 0, 0), nil
+ }
+ switch ch := d.in[0]; ch {
+ case ',':
+ return d.consumeToken(comma, 1, 0), nil
+ case ';':
+ return d.consumeToken(semicolon, 1, 0), nil
+ default:
+ return d.parseFieldName()
+ }
+
+ case MessageOpen:
+ // Next token can be MessageClose, comma, semicolon or Name.
+ if isEOF {
+ return Token{}, ErrUnexpectedEOF
+ }
+ switch ch := d.in[0]; ch {
+ case closeCh:
+ d.popOpenStack()
+ return d.consumeToken(MessageClose, 1, 0), nil
+ case otherCloseChar[closeCh]:
+ return Token{}, d.newSyntaxError(mismatchedFmt, ch)
+ case ',':
+ return d.consumeToken(comma, 1, 0), nil
+ case ';':
+ return d.consumeToken(semicolon, 1, 0), nil
+ default:
+ return d.parseFieldName()
+ }
+
+ default:
+ // It is not possible to have this case. Let it panic below.
+ }
+
+ case comma, semicolon:
+ openKind, closeCh := d.currentOpenKind()
+ switch openKind {
+ case bof:
+ // Top level message. Next token can be EOF or Name.
+ if isEOF {
+ return d.consumeToken(EOF, 0, 0), nil
+ }
+ return d.parseFieldName()
+
+ case MessageOpen:
+ // Next token can be MessageClose or Name.
+ if isEOF {
+ return Token{}, ErrUnexpectedEOF
+ }
+ switch ch := d.in[0]; ch {
+ case closeCh:
+ d.popOpenStack()
+ return d.consumeToken(MessageClose, 1, 0), nil
+ case otherCloseChar[closeCh]:
+ return Token{}, d.newSyntaxError(mismatchedFmt, ch)
+ default:
+ return d.parseFieldName()
+ }
+
+ case ListOpen:
+ if lastKind == semicolon {
+ // It is not possible to have this case as logic here
+ // should not have produced a semicolon Token when inside a
+ // list. Let it panic below.
+ break
+ }
+ // Next token can be MessageOpen or Scalar.
+ if isEOF {
+ return Token{}, ErrUnexpectedEOF
+ }
+ switch ch := d.in[0]; ch {
+ case '{', '<':
+ d.pushOpenStack(ch)
+ return d.consumeToken(MessageOpen, 1, 0), nil
+ default:
+ return d.parseScalar()
+ }
+ }
+ }
+
+ line, column := d.Position(len(d.orig) - len(d.in))
+ panic(fmt.Sprintf("Decoder.parseNext: bug at handling line %d:%d with lastKind=%v", line, column, lastKind))
+}
+
+var otherCloseChar = map[byte]byte{
+ '}': '>',
+ '>': '}',
+}
+
+// currentOpenKind indicates whether current position is inside a message, list
+// or top-level message by returning MessageOpen, ListOpen or bof respectively.
+// If the returned kind is either a MessageOpen or ListOpen, it also returns the
+// corresponding closing character.
+func (d *Decoder) currentOpenKind() (Kind, byte) {
+ if len(d.openStack) == 0 {
+ return bof, 0
+ }
+ openCh := d.openStack[len(d.openStack)-1]
+ switch openCh {
+ case '{':
+ return MessageOpen, '}'
+ case '<':
+ return MessageOpen, '>'
+ case '[':
+ return ListOpen, ']'
+ }
+ panic(fmt.Sprintf("Decoder: openStack contains invalid byte %s", string(openCh)))
+}
+
+func (d *Decoder) pushOpenStack(ch byte) {
+ d.openStack = append(d.openStack, ch)
+}
+
+func (d *Decoder) popOpenStack() {
+ d.openStack = d.openStack[:len(d.openStack)-1]
+}
+
+// parseFieldName parses field name and separator.
+func (d *Decoder) parseFieldName() (tok Token, err error) {
+ defer func() {
+ if err == nil && d.tryConsumeChar(':') {
+ tok.attrs |= hasSeparator
+ }
+ }()
+
+ // Extension or Any type URL.
+ if d.in[0] == '[' {
+ return d.parseTypeName()
+ }
+
+ // Identifier.
+ if size := parseIdent(d.in, false); size > 0 {
+ return d.consumeToken(Name, size, uint8(IdentName)), nil
+ }
+
+ // Field number. Identify if input is a valid number that is not negative
+ // and is a decimal integer within the 32-bit range.
+ if num := parseNumber(d.in); num.size > 0 {
+ if !num.neg && num.kind == numDec {
+ if _, err := strconv.ParseInt(string(d.in[:num.size]), 10, 32); err == nil {
+ return d.consumeToken(Name, num.size, uint8(FieldNumber)), nil
+ }
+ }
+ return Token{}, d.newSyntaxError("invalid field number: %s", d.in[:num.size])
+ }
+
+ return Token{}, d.newSyntaxError("invalid field name: %s", errRegexp.Find(d.in))
+}
+
+// parseTypeName parses Any type URL or extension field name. The name is
+// enclosed in [ and ] characters. The C++ parser does not handle many legal URL
+// strings. This implementation is more liberal and allows for the pattern
+// `^[-_a-zA-Z0-9]+([./][-_a-zA-Z0-9]+)*`. Whitespace and comments are allowed
+// in between [ ], '.', '/' and the sub names.
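+//
+// For example, both [my.pkg.ext_field] and
+// [type.googleapis.com/my.pkg.SomeMessage] would be accepted (illustrative
+// names).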
+func (d *Decoder) parseTypeName() (Token, error) {
+ startPos := len(d.orig) - len(d.in)
+ // Use alias s to advance first in order to use d.in for error handling.
+ // Caller already checks for [ as first character.
+ s := consume(d.in[1:], 0)
+ if len(s) == 0 {
+ return Token{}, ErrUnexpectedEOF
+ }
+
+ var name []byte
+ for len(s) > 0 && isTypeNameChar(s[0]) {
+ name = append(name, s[0])
+ s = s[1:]
+ }
+ s = consume(s, 0)
+
+ var closed bool
+ for len(s) > 0 && !closed {
+ switch {
+ case s[0] == ']':
+ s = s[1:]
+ closed = true
+
+ case s[0] == '/', s[0] == '.':
+ if len(name) > 0 && (name[len(name)-1] == '/' || name[len(name)-1] == '.') {
+ return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s",
+ d.orig[startPos:len(d.orig)-len(s)+1])
+ }
+ name = append(name, s[0])
+ s = s[1:]
+ s = consume(s, 0)
+ for len(s) > 0 && isTypeNameChar(s[0]) {
+ name = append(name, s[0])
+ s = s[1:]
+ }
+ s = consume(s, 0)
+
+ default:
+ return Token{}, d.newSyntaxError(
+ "invalid type URL/extension field name: %s", d.orig[startPos:len(d.orig)-len(s)+1])
+ }
+ }
+
+ if !closed {
+ return Token{}, ErrUnexpectedEOF
+ }
+
+ // First character cannot be '.'. Last character cannot be '.' or '/'.
+ size := len(name)
+ if size == 0 || name[0] == '.' || name[size-1] == '.' || name[size-1] == '/' {
+ return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s",
+ d.orig[startPos:len(d.orig)-len(s)])
+ }
+
+ d.in = s
+ endPos := len(d.orig) - len(d.in)
+ d.consume(0)
+
+ return Token{
+ kind: Name,
+ attrs: uint8(TypeName),
+ pos: startPos,
+ raw: d.orig[startPos:endPos],
+ str: string(name),
+ }, nil
+}
+
+func isTypeNameChar(b byte) bool {
+ return (b == '-' || b == '_' ||
+ ('0' <= b && b <= '9') ||
+ ('a' <= b && b <= 'z') ||
+ ('A' <= b && b <= 'Z'))
+}
+
+func isWhiteSpace(b byte) bool {
+ switch b {
+ case ' ', '\n', '\r', '\t':
+ return true
+ default:
+ return false
+ }
+}
+
+// parseIdent parses an unquoted proto identifier and returns size.
+// If allowNeg is true, it allows '-' to be the first character in the
+// identifier. This is used when parsing literal values like -infinity, etc.
+// Regular expression matches an identifier: `^[_a-zA-Z][_a-zA-Z0-9]*`
+func parseIdent(input []byte, allowNeg bool) int {
+ var size int
+
+ s := input
+ if len(s) == 0 {
+ return 0
+ }
+
+ if allowNeg && s[0] == '-' {
+ s = s[1:]
+ size++
+ if len(s) == 0 {
+ return 0
+ }
+ }
+
+ switch {
+ case s[0] == '_',
+ 'a' <= s[0] && s[0] <= 'z',
+ 'A' <= s[0] && s[0] <= 'Z':
+ s = s[1:]
+ size++
+ default:
+ return 0
+ }
+
+ for len(s) > 0 && (s[0] == '_' ||
+ 'a' <= s[0] && s[0] <= 'z' ||
+ 'A' <= s[0] && s[0] <= 'Z' ||
+ '0' <= s[0] && s[0] <= '9') {
+ s = s[1:]
+ size++
+ }
+
+ if len(s) > 0 && !isDelim(s[0]) {
+ return 0
+ }
+
+ return size
+}
+
+// parseScalar parses for a string, literal or number value.
+func (d *Decoder) parseScalar() (Token, error) {
+ if d.in[0] == '"' || d.in[0] == '\'' {
+ return d.parseStringValue()
+ }
+
+ if tok, ok := d.parseLiteralValue(); ok {
+ return tok, nil
+ }
+
+ if tok, ok := d.parseNumberValue(); ok {
+ return tok, nil
+ }
+
+ return Token{}, d.newSyntaxError("invalid scalar value: %s", errRegexp.Find(d.in))
+}
+
+// parseLiteralValue parses a literal value. A literal value is used for
+// bools, special floats and enums. This function simply identifies that the
+// field value is a literal.
+func (d *Decoder) parseLiteralValue() (Token, bool) {
+ size := parseIdent(d.in, true)
+ if size == 0 {
+ return Token{}, false
+ }
+ return d.consumeToken(Scalar, size, literalValue), true
+}
+
+// consumeToken constructs a Token of the given Kind from d.in and consumes
+// size bytes from it.
+func (d *Decoder) consumeToken(kind Kind, size int, attrs uint8) Token {
+ // Important to compute raw and pos before consuming.
+ tok := Token{
+ kind: kind,
+ attrs: attrs,
+ pos: len(d.orig) - len(d.in),
+ raw: d.in[:size],
+ }
+ d.consume(size)
+ return tok
+}
+
+// newSyntaxError returns a syntax error with line and column information for
+// current position.
+func (d *Decoder) newSyntaxError(f string, x ...interface{}) error {
+ e := errors.New(f, x...)
+ line, column := d.Position(len(d.orig) - len(d.in))
+ return errors.New("syntax error (line %d:%d): %v", line, column, e)
+}
+
+// Position returns line and column number of given index of the original input.
+// It will panic if index is out of range.
+func (d *Decoder) Position(idx int) (line int, column int) {
+ b := d.orig[:idx]
+ line = bytes.Count(b, []byte("\n")) + 1
+ if i := bytes.LastIndexByte(b, '\n'); i >= 0 {
+ b = b[i+1:]
+ }
+ column = utf8.RuneCount(b) + 1 // ignore multi-rune characters
+ return line, column
+}
+
+func (d *Decoder) tryConsumeChar(c byte) bool {
+ if len(d.in) > 0 && d.in[0] == c {
+ d.consume(1)
+ return true
+ }
+ return false
+}
+
+// consume consumes n bytes of input and any subsequent whitespace or comments.
+func (d *Decoder) consume(n int) {
+ d.in = consume(d.in, n)
+ return
+}
+
+// consume consumes n bytes of input and any subsequent whitespace or comments.
+func consume(b []byte, n int) []byte {
+ b = b[n:]
+ for len(b) > 0 {
+ switch b[0] {
+ case ' ', '\n', '\r', '\t':
+ b = b[1:]
+ case '#':
+ if i := bytes.IndexByte(b, '\n'); i >= 0 {
+ b = b[i+len("\n"):]
+ } else {
+ b = nil
+ }
+ default:
+ return b
+ }
+ }
+ return b
+}
+
+// Any sequence that looks like a non-delimiter (for error reporting).
+var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9\/]+|.)`)
+
+// isDelim returns true if given byte is a delimiter character.
+func isDelim(c byte) bool {
+ return !(c == '-' || c == '+' || c == '.' || c == '_' ||
+ ('a' <= c && c <= 'z') ||
+ ('A' <= c && c <= 'Z') ||
+ ('0' <= c && c <= '9'))
+}
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go
new file mode 100644
index 0000000..f2d90b7
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go
@@ -0,0 +1,190 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package text
+
+// parseNumberValue parses a number from the input and returns a Token object.
+func (d *Decoder) parseNumberValue() (Token, bool) {
+ in := d.in
+ num := parseNumber(in)
+ if num.size == 0 {
+ return Token{}, false
+ }
+ numAttrs := num.kind
+ if num.neg {
+ numAttrs |= isNegative
+ }
+ strSize := num.size
+ last := num.size - 1
+ if num.kind == numFloat && (d.in[last] == 'f' || d.in[last] == 'F') {
+ strSize = last
+ }
+ tok := Token{
+ kind: Scalar,
+ attrs: numberValue,
+ pos: len(d.orig) - len(d.in),
+ raw: d.in[:num.size],
+ str: string(d.in[:strSize]),
+ numAttrs: numAttrs,
+ }
+ d.consume(num.size)
+ return tok, true
+}
+
+const (
+ numDec uint8 = (1 << iota) / 2
+ numHex
+ numOct
+ numFloat
+)
+
+// number is the result of parsing out a valid number from parseNumber. It
+// contains data for doing float or integer conversion via the strconv package
+// in conjunction with the input bytes.
+type number struct {
+ kind uint8
+ neg bool
+ size int
+}
+
+// parseNumber constructs a number object from given input. It allows for the
+// following patterns:
+// integer: ^-?([1-9][0-9]*|0[xX][0-9a-fA-F]+|0[0-7]*)
+// float: ^-?((0|[1-9][0-9]*)?([.][0-9]*)?([eE][+-]?[0-9]+)?[fF]?)
+// It also returns the number of parsed bytes for the given number, 0 if it is
+// not a number.
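+//
+// For example (illustrative), -123, 0x1F, and 017 parse as integers, while
+// 1.5e-3 and 2f parse as floats.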
+func parseNumber(input []byte) number {
+ kind := numDec
+ var size int
+ var neg bool
+
+ s := input
+ if len(s) == 0 {
+ return number{}
+ }
+
+ // Optional -
+ if s[0] == '-' {
+ neg = true
+ s = s[1:]
+ size++
+ if len(s) == 0 {
+ return number{}
+ }
+ }
+
+ // C++ allows for whitespace and comments in between the negative sign and
+ // the rest of the number. This logic currently does not but is consistent
+ // with v1.
+
+ switch {
+ case s[0] == '0':
+ if len(s) > 1 {
+ switch {
+ case s[1] == 'x' || s[1] == 'X':
+ // Parse as hex number.
+ kind = numHex
+ n := 2
+ s = s[2:]
+ for len(s) > 0 && (('0' <= s[0] && s[0] <= '9') ||
+ ('a' <= s[0] && s[0] <= 'f') ||
+ ('A' <= s[0] && s[0] <= 'F')) {
+ s = s[1:]
+ n++
+ }
+ if n == 2 {
+ return number{}
+ }
+ size += n
+
+ case '0' <= s[1] && s[1] <= '7':
+ // Parse as octal number.
+ kind = numOct
+ n := 2
+ s = s[2:]
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '7' {
+ s = s[1:]
+ n++
+ }
+ size += n
+ }
+
+ if kind&(numHex|numOct) > 0 {
+ if len(s) > 0 && !isDelim(s[0]) {
+ return number{}
+ }
+ return number{kind: kind, neg: neg, size: size}
+ }
+ }
+ s = s[1:]
+ size++
+
+ case '1' <= s[0] && s[0] <= '9':
+ n := 1
+ s = s[1:]
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ n++
+ }
+ size += n
+
+ case s[0] == '.':
+ // Set kind to numFloat to signify the intent to parse as a float, and
+ // that other digits must follow the '.'.
+ kind = numFloat
+
+ default:
+ return number{}
+ }
+
+ // . followed by 0 or more digits.
+ if len(s) > 0 && s[0] == '.' {
+ n := 1
+ s = s[1:]
+ // If decimal point was before any digits, it should be followed by
+ // other digits.
+ if len(s) == 0 && kind == numFloat {
+ return number{}
+ }
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ n++
+ }
+ size += n
+ kind = numFloat
+ }
+
+ // e or E followed by an optional - or + and 1 or more digits.
+ if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
+ kind = numFloat
+ s = s[1:]
+ n := 1
+ if s[0] == '+' || s[0] == '-' {
+ s = s[1:]
+ n++
+ if len(s) == 0 {
+ return number{}
+ }
+ }
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ n++
+ }
+ size += n
+ }
+
+ // Optional suffix f or F for floats.
+ if len(s) > 0 && (s[0] == 'f' || s[0] == 'F') {
+ kind = numFloat
+ s = s[1:]
+ size++
+ }
+
+ // Check that next byte is a delimiter or it is at the end.
+ if len(s) > 0 && !isDelim(s[0]) {
+ return number{}
+ }
+
+ return number{kind: kind, neg: neg, size: size}
+}
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go
new file mode 100644
index 0000000..d4d3490
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go
@@ -0,0 +1,161 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package text
+
+import (
+ "bytes"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf16"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/internal/strs"
+)
+
+// parseStringValue parses string field token.
+// This differs from parseString since the text format allows
+// multiple back-to-back string literals where they are semantically treated
+// as a single large string with all values concatenated.
+//
+// E.g., `"foo" "bar" "baz"` => "foobarbaz"
+func (d *Decoder) parseStringValue() (Token, error) {
+ // Note that the ending quote is sufficient to unambiguously mark the end
+ // of a string. Thus, the text grammar does not require intervening
+ // whitespace or control characters in-between strings.
+ // Thus, the following is valid:
+ // `"foo"'bar'"baz"` => "foobarbaz"
+ in0 := d.in
+ var ss []string
+ for len(d.in) > 0 && (d.in[0] == '"' || d.in[0] == '\'') {
+ s, err := d.parseString()
+ if err != nil {
+ return Token{}, err
+ }
+ ss = append(ss, s)
+ }
+ // d.in already points to the end of the value at this point.
+ return Token{
+ kind: Scalar,
+ attrs: stringValue,
+ pos: len(d.orig) - len(in0),
+ raw: in0[:len(in0)-len(d.in)],
+ str: strings.Join(ss, ""),
+ }, nil
+}
+
+// parseString parses a string value enclosed in " or '.
+func (d *Decoder) parseString() (string, error) {
+ in := d.in
+ if len(in) == 0 {
+ return "", ErrUnexpectedEOF
+ }
+ quote := in[0]
+ in = in[1:]
+ i := indexNeedEscapeInBytes(in)
+ in, out := in[i:], in[:i:i] // set cap to prevent mutations
+ for len(in) > 0 {
+ switch r, n := utf8.DecodeRune(in); {
+ case r == utf8.RuneError && n == 1:
+ return "", d.newSyntaxError("invalid UTF-8 detected")
+ case r == 0 || r == '\n':
+ return "", d.newSyntaxError("invalid character %q in string", r)
+ case r == rune(quote):
+ in = in[1:]
+ d.consume(len(d.in) - len(in))
+ return string(out), nil
+ case r == '\\':
+ if len(in) < 2 {
+ return "", ErrUnexpectedEOF
+ }
+ switch r := in[1]; r {
+ case '"', '\'', '\\', '?':
+ in, out = in[2:], append(out, r)
+ case 'a':
+ in, out = in[2:], append(out, '\a')
+ case 'b':
+ in, out = in[2:], append(out, '\b')
+ case 'n':
+ in, out = in[2:], append(out, '\n')
+ case 'r':
+ in, out = in[2:], append(out, '\r')
+ case 't':
+ in, out = in[2:], append(out, '\t')
+ case 'v':
+ in, out = in[2:], append(out, '\v')
+ case 'f':
+ in, out = in[2:], append(out, '\f')
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ // One, two, or three octal characters.
+ n := len(in[1:]) - len(bytes.TrimLeft(in[1:], "01234567"))
+ if n > 3 {
+ n = 3
+ }
+ v, err := strconv.ParseUint(string(in[1:1+n]), 8, 8)
+ if err != nil {
+ return "", d.newSyntaxError("invalid octal escape code %q in string", in[:1+n])
+ }
+ in, out = in[1+n:], append(out, byte(v))
+ case 'x':
+ // One or two hexadecimal characters.
+ n := len(in[2:]) - len(bytes.TrimLeft(in[2:], "0123456789abcdefABCDEF"))
+ if n > 2 {
+ n = 2
+ }
+ v, err := strconv.ParseUint(string(in[2:2+n]), 16, 8)
+ if err != nil {
+ return "", d.newSyntaxError("invalid hex escape code %q in string", in[:2+n])
+ }
+ in, out = in[2+n:], append(out, byte(v))
+ case 'u', 'U':
+ // Four or eight hexadecimal characters
+ n := 6
+ if r == 'U' {
+ n = 10
+ }
+ if len(in) < n {
+ return "", ErrUnexpectedEOF
+ }
+ v, err := strconv.ParseUint(string(in[2:n]), 16, 32)
+ if utf8.MaxRune < v || err != nil {
+ return "", d.newSyntaxError("invalid Unicode escape code %q in string", in[:n])
+ }
+ in = in[n:]
+
+ r := rune(v)
+ if utf16.IsSurrogate(r) {
+ if len(in) < 6 {
+ return "", ErrUnexpectedEOF
+ }
+ v, err := strconv.ParseUint(string(in[2:6]), 16, 16)
+ r = utf16.DecodeRune(r, rune(v))
+ if in[0] != '\\' || in[1] != 'u' || r == unicode.ReplacementChar || err != nil {
+ return "", d.newSyntaxError("invalid Unicode escape code %q in string", in[:6])
+ }
+ in = in[6:]
+ }
+ out = append(out, string(r)...)
+ default:
+ return "", d.newSyntaxError("invalid escape code %q in string", in[:2])
+ }
+ default:
+ i := indexNeedEscapeInBytes(in[n:])
+ in, out = in[n+i:], append(out, in[:n+i]...)
+ }
+ }
+ return "", ErrUnexpectedEOF
+}
+
+// indexNeedEscapeInBytes returns the index of the character that needs
+// escaping. If no characters need escaping, this returns the input length.
+func indexNeedEscapeInBytes(b []byte) int { return indexNeedEscapeInString(strs.UnsafeString(b)) }
+
+// UnmarshalString returns an unescaped string given a textproto string value.
+// String value needs to contain single or double quotes. This is only used by
+// internal/encoding/defval package for unmarshaling bytes.
+func UnmarshalString(s string) (string, error) {
+ d := NewDecoder([]byte(s))
+ return d.parseString()
+}
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go
new file mode 100644
index 0000000..83d2b0d
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go
@@ -0,0 +1,373 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package text
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "google.golang.org/protobuf/internal/flags"
+)
+
+// Kind represents a token kind expressible in the textproto format.
+type Kind uint8
+
+// Kind values.
+const (
+ Invalid Kind = iota
+ EOF
+ Name // Name indicates the field name.
+ Scalar // Scalar are scalar values, e.g. "string", 47, ENUM_LITERAL, true.
+ MessageOpen
+ MessageClose
+ ListOpen
+ ListClose
+
+ // comma and semi-colon are only for parsing in between values and should not be exposed.
+ comma
+ semicolon
+
+ // bof indicates beginning of file, which is the default token
+ // kind at the beginning of parsing.
+ bof = Invalid
+)
+
+func (t Kind) String() string {
+ switch t {
+ case Invalid:
+ return ""
+ case EOF:
+ return "eof"
+ case Scalar:
+ return "scalar"
+ case Name:
+ return "name"
+ case MessageOpen:
+ return "{"
+ case MessageClose:
+ return "}"
+ case ListOpen:
+ return "["
+ case ListClose:
+ return "]"
+ case comma:
+ return ","
+ case semicolon:
+ return ";"
+ default:
+ return fmt.Sprintf("", uint8(t))
+ }
+}
+
+// NameKind represents different types of field names.
+type NameKind uint8
+
+// NameKind values.
+const (
+ IdentName NameKind = iota + 1
+ TypeName
+ FieldNumber
+)
+
+func (t NameKind) String() string {
+ switch t {
+ case IdentName:
+ return "IdentName"
+ case TypeName:
+ return "TypeName"
+ case FieldNumber:
+ return "FieldNumber"
+ default:
+ return fmt.Sprintf("", uint8(t))
+ }
+}
+
+// Bit mask in Token.attrs to indicate if a Name token is followed by the
+// separator char ':'. The field name separator char is optional for message
+// field or repeated message field, but required for all other types. Decoder
+// simply indicates whether a Name token is followed by separator or not. It is
+// up to the prototext package to validate.
+const hasSeparator = 1 << 7
+
+// Scalar value types.
+const (
+ numberValue = iota + 1
+ stringValue
+ literalValue
+)
+
+// Bit mask in Token.numAttrs to indicate that the number is negative.
+const isNegative = 1 << 7
+
+// Token provides a parsed token kind and value. Values are provided by the
+// different accessor methods.
+type Token struct {
+ // Kind of the Token object.
+ kind Kind
+ // attrs contains metadata for the following Kinds:
+ // Name: hasSeparator bit and one of NameKind.
+ // Scalar: one of numberValue, stringValue, literalValue.
+ attrs uint8
+ // numAttrs contains metadata for numberValue:
+ // - highest bit is whether negative or positive.
+ // - lower bits indicate one of numDec, numHex, numOct, numFloat.
+ numAttrs uint8
+ // pos provides the position of the token in the original input.
+ pos int
+ // raw bytes of the serialized token.
+ // This is a subslice into the original input.
+ raw []byte
+ // str contains parsed string for the following:
+ // - stringValue of Scalar kind
+ // - numberValue of Scalar kind
+ // - TypeName of Name kind
+ str string
+}
+
+// Kind returns the token kind.
+func (t Token) Kind() Kind {
+ return t.kind
+}
+
+// RawString returns the read value in string.
+func (t Token) RawString() string {
+ return string(t.raw)
+}
+
+// Pos returns the token position from the input.
+func (t Token) Pos() int {
+ return t.pos
+}
+
+// NameKind returns IdentName, TypeName or FieldNumber.
+// It panics if type is not Name.
+func (t Token) NameKind() NameKind {
+ if t.kind == Name {
+ return NameKind(t.attrs &^ hasSeparator)
+ }
+ panic(fmt.Sprintf("Token is not a Name type: %s", t.kind))
+}
+
+// HasSeparator returns true if the field name is followed by the separator char
+// ':', else false. It panics if type is not Name.
+func (t Token) HasSeparator() bool {
+ if t.kind == Name {
+ return t.attrs&hasSeparator != 0
+ }
+ panic(fmt.Sprintf("Token is not a Name type: %s", t.kind))
+}
+
+// IdentName returns the value for IdentName type.
+func (t Token) IdentName() string {
+ if t.kind == Name && t.attrs&uint8(IdentName) != 0 {
+ return string(t.raw)
+ }
+ panic(fmt.Sprintf("Token is not an IdentName: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator)))
+}
+
+// TypeName returns the value for TypeName type.
+func (t Token) TypeName() string {
+ if t.kind == Name && t.attrs&uint8(TypeName) != 0 {
+ return t.str
+ }
+ panic(fmt.Sprintf("Token is not a TypeName: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator)))
+}
+
+// FieldNumber returns the value for FieldNumber type. It returns a
+// non-negative int32 value. The caller still needs to validate that it is
+// within the valid field number range.
+func (t Token) FieldNumber() int32 {
+ if t.kind != Name || t.attrs&uint8(FieldNumber) == 0 {
+ panic(fmt.Sprintf("Token is not a FieldNumber: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator)))
+ }
+ // The following should not return an error, as it has already been called right
+ // before this Token was constructed.
+ num, _ := strconv.ParseInt(string(t.raw), 10, 32)
+ return int32(num)
+}
+
+// String returns the string value for a Scalar type.
+func (t Token) String() (string, bool) {
+ if t.kind != Scalar || t.attrs != stringValue {
+ return "", false
+ }
+ return t.str, true
+}
+
+// Enum returns the literal value for a Scalar type for use as enum literals.
+func (t Token) Enum() (string, bool) {
+ if t.kind != Scalar || t.attrs != literalValue || (len(t.raw) > 0 && t.raw[0] == '-') {
+ return "", false
+ }
+ return string(t.raw), true
+}
+
+// Bool returns the bool value for a Scalar type.
+func (t Token) Bool() (bool, bool) {
+ if t.kind != Scalar {
+ return false, false
+ }
+ switch t.attrs {
+ case literalValue:
+ if b, ok := boolLits[string(t.raw)]; ok {
+ return b, true
+ }
+ case numberValue:
+ // Unsigned integer representation of 0 or 1 is permitted: 00, 0x0, 01,
+ // 0x1, etc.
+ n, err := strconv.ParseUint(t.str, 0, 64)
+ if err == nil {
+ switch n {
+ case 0:
+ return false, true
+ case 1:
+ return true, true
+ }
+ }
+ }
+ return false, false
+}
+
+// These exact boolean literals are the ones supported in C++.
+var boolLits = map[string]bool{
+ "t": true,
+ "true": true,
+ "True": true,
+ "f": false,
+ "false": false,
+ "False": false,
+}
+
+// Uint64 returns the uint64 value for a Scalar type.
+func (t Token) Uint64() (uint64, bool) {
+ if t.kind != Scalar || t.attrs != numberValue ||
+ t.numAttrs&isNegative > 0 || t.numAttrs&numFloat > 0 {
+ return 0, false
+ }
+ n, err := strconv.ParseUint(t.str, 0, 64)
+ if err != nil {
+ return 0, false
+ }
+ return n, true
+}
+
+// Uint32 returns the uint32 value for a Scalar type.
+func (t Token) Uint32() (uint32, bool) {
+ if t.kind != Scalar || t.attrs != numberValue ||
+ t.numAttrs&isNegative > 0 || t.numAttrs&numFloat > 0 {
+ return 0, false
+ }
+ n, err := strconv.ParseUint(t.str, 0, 32)
+ if err != nil {
+ return 0, false
+ }
+ return uint32(n), true
+}
+
+// Int64 returns the int64 value for a Scalar type.
+func (t Token) Int64() (int64, bool) {
+ if t.kind != Scalar || t.attrs != numberValue || t.numAttrs&numFloat > 0 {
+ return 0, false
+ }
+ if n, err := strconv.ParseInt(t.str, 0, 64); err == nil {
+ return n, true
+ }
+ // C++ accepts large positive hex numbers as negative values.
+ // This feature is here for proto1 backwards compatibility purposes.
+ if flags.ProtoLegacy && (t.numAttrs == numHex) {
+ if n, err := strconv.ParseUint(t.str, 0, 64); err == nil {
+ return int64(n), true
+ }
+ }
+ return 0, false
+}
+
+// Int32 returns the int32 value for a Scalar type.
+func (t Token) Int32() (int32, bool) {
+ if t.kind != Scalar || t.attrs != numberValue || t.numAttrs&numFloat > 0 {
+ return 0, false
+ }
+ if n, err := strconv.ParseInt(t.str, 0, 32); err == nil {
+ return int32(n), true
+ }
+ // C++ accepts large positive hex numbers as negative values.
+ // This feature is here for proto1 backwards compatibility purposes.
+ if flags.ProtoLegacy && (t.numAttrs == numHex) {
+ if n, err := strconv.ParseUint(t.str, 0, 32); err == nil {
+ return int32(n), true
+ }
+ }
+ return 0, false
+}
+
+// Float64 returns the float64 value for a Scalar type.
+func (t Token) Float64() (float64, bool) {
+ if t.kind != Scalar {
+ return 0, false
+ }
+ switch t.attrs {
+ case literalValue:
+ if f, ok := floatLits[strings.ToLower(string(t.raw))]; ok {
+ return f, true
+ }
+ case numberValue:
+ n, err := strconv.ParseFloat(t.str, 64)
+ if err == nil {
+ return n, true
+ }
+ nerr := err.(*strconv.NumError)
+ if nerr.Err == strconv.ErrRange {
+ return n, true
+ }
+ }
+ return 0, false
+}
+
+// Float32 returns the float32 value for a Scalar type.
+func (t Token) Float32() (float32, bool) {
+ if t.kind != Scalar {
+ return 0, false
+ }
+ switch t.attrs {
+ case literalValue:
+ if f, ok := floatLits[strings.ToLower(string(t.raw))]; ok {
+ return float32(f), true
+ }
+ case numberValue:
+ n, err := strconv.ParseFloat(t.str, 64)
+ if err == nil {
+ // Overflows are treated as (-)infinity.
+ return float32(n), true
+ }
+ nerr := err.(*strconv.NumError)
+ if nerr.Err == strconv.ErrRange {
+ return float32(n), true
+ }
+ }
+ return 0, false
+}
+
+// These are the supported float literals; C++ permits case-insensitive
+// variants of these.
+var floatLits = map[string]float64{
+ "nan": math.NaN(),
+ "inf": math.Inf(1),
+ "infinity": math.Inf(1),
+ "-inf": math.Inf(-1),
+ "-infinity": math.Inf(-1),
+}
+
+// TokenEquals returns true if given Tokens are equal, else false.
+func TokenEquals(x, y Token) bool {
+ return x.kind == y.kind &&
+ x.attrs == y.attrs &&
+ x.numAttrs == y.numAttrs &&
+ x.pos == y.pos &&
+ bytes.Equal(x.raw, y.raw) &&
+ x.str == y.str
+}
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go b/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go
new file mode 100644
index 0000000..0ce8d6f
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go
@@ -0,0 +1,29 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package text implements the text format for protocol buffers.
+// This package has no semantic understanding for protocol buffers and is only
+// a parser and composer for the format.
+//
+// There is no formal specification for the protobuf text format; as such, the
+// C++ implementation (see google::protobuf::TextFormat) is the reference
+// implementation of the text format.
+//
+// This package is neither a superset nor a subset of the C++ implementation.
+// This implementation permits a more liberal grammar in some cases to be
+// backwards compatible with the historical Go implementation.
+// Parsing behavior unique to Go should not be added in the future.
+// Some grammars allowed by the C++ implementation are deliberately
+// not implemented here because they are considered a bug by the protobuf team
+// and should not be replicated.
+//
+// The Go implementation should implement a sufficient amount of the C++
+// grammar such that the default text serialization by C++ can be parsed by Go.
+// However, just because the C++ parser accepts some input does not mean that
+// the Go implementation should as well.
+//
+// The text format is almost a superset of JSON except:
+// * message keys are not quoted strings, but identifiers
+// * the top-level value must be a message without the delimiters
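+//
+// For example (illustrative), a message that would appear in JSON as
+//   {"foo": 1, "bar": {"baz": "hello"}}
+// is written in the text format as
+//   foo: 1 bar { baz: "hello" }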
+package text
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go
new file mode 100644
index 0000000..aa66bdd
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go
@@ -0,0 +1,265 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package text
+
+import (
+ "math"
+ "math/bits"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/internal/detrand"
+ "google.golang.org/protobuf/internal/errors"
+)
+
+// encType represents an encoding type.
+type encType uint8
+
+const (
+ _ encType = (1 << iota) / 2
+ name
+ scalar
+ messageOpen
+ messageClose
+)
+
+// Encoder provides methods to write out textproto constructs and values. The user is
+// responsible for producing valid sequences of constructs and values.
+type Encoder struct {
+ encoderState
+
+ indent string
+ delims [2]byte
+ outputASCII bool
+}
+
+type encoderState struct {
+ lastType encType
+ indents []byte
+ out []byte
+}
+
+// NewEncoder returns an Encoder.
+//
+// If indent is a non-empty string, it causes every entry in a List or Message
+// to be preceded by the indent and trailed by a newline.
+//
+// If delims is not the zero value, it controls the delimiter characters used
+// for messages (e.g., "{}" vs "<>").
+//
+// If outputASCII is true, strings will be serialized in such a way that
+// multi-byte UTF-8 sequences are escaped. This property ensures that the
+// overall output is ASCII (as opposed to UTF-8).
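+//
+// For example (an illustrative call), NewEncoder("  ", [2]byte{'<', '>'}, false)
+// returns an encoder that writes multi-line output indented by two spaces and
+// uses angle brackets as message delimiters.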
+func NewEncoder(indent string, delims [2]byte, outputASCII bool) (*Encoder, error) {
+ e := &Encoder{}
+ if len(indent) > 0 {
+ if strings.Trim(indent, " \t") != "" {
+ return nil, errors.New("indent may only be composed of space and tab characters")
+ }
+ e.indent = indent
+ }
+ switch delims {
+ case [2]byte{0, 0}:
+ e.delims = [2]byte{'{', '}'}
+ case [2]byte{'{', '}'}, [2]byte{'<', '>'}:
+ e.delims = delims
+ default:
+ return nil, errors.New("delimiters may only be \"{}\" or \"<>\"")
+ }
+ e.outputASCII = outputASCII
+
+ return e, nil
+}
+
+// Bytes returns the content of the written bytes.
+func (e *Encoder) Bytes() []byte {
+ return e.out
+}
+
+// StartMessage writes out the '{' or '<' symbol.
+func (e *Encoder) StartMessage() {
+ e.prepareNext(messageOpen)
+ e.out = append(e.out, e.delims[0])
+}
+
+// EndMessage writes out the '}' or '>' symbol.
+func (e *Encoder) EndMessage() {
+ e.prepareNext(messageClose)
+ e.out = append(e.out, e.delims[1])
+}
+
+// WriteName writes out the field name and the separator ':'.
+func (e *Encoder) WriteName(s string) {
+ e.prepareNext(name)
+ e.out = append(e.out, s...)
+ e.out = append(e.out, ':')
+}
+
+// WriteBool writes out the given boolean value.
+func (e *Encoder) WriteBool(b bool) {
+ if b {
+ e.WriteLiteral("true")
+ } else {
+ e.WriteLiteral("false")
+ }
+}
+
+// WriteString writes out the given string value.
+func (e *Encoder) WriteString(s string) {
+ e.prepareNext(scalar)
+ e.out = appendString(e.out, s, e.outputASCII)
+}
+
+func appendString(out []byte, in string, outputASCII bool) []byte {
+ out = append(out, '"')
+ i := indexNeedEscapeInString(in)
+ in, out = in[i:], append(out, in[:i]...)
+ for len(in) > 0 {
+ switch r, n := utf8.DecodeRuneInString(in); {
+ case r == utf8.RuneError && n == 1:
+ // We do not report invalid UTF-8 because strings in the text format
+ // are used to represent both the proto string and bytes type.
+ r = rune(in[0])
+ fallthrough
+ case r < ' ' || r == '"' || r == '\\' || r == 0x7f:
+ out = append(out, '\\')
+ switch r {
+ case '"', '\\':
+ out = append(out, byte(r))
+ case '\n':
+ out = append(out, 'n')
+ case '\r':
+ out = append(out, 'r')
+ case '\t':
+ out = append(out, 't')
+ default:
+ out = append(out, 'x')
+ out = append(out, "00"[1+(bits.Len32(uint32(r))-1)/4:]...)
+ out = strconv.AppendUint(out, uint64(r), 16)
+ }
+ in = in[n:]
+ case r >= utf8.RuneSelf && (outputASCII || r <= 0x009f):
+ out = append(out, '\\')
+ if r <= math.MaxUint16 {
+ out = append(out, 'u')
+ out = append(out, "0000"[1+(bits.Len32(uint32(r))-1)/4:]...)
+ out = strconv.AppendUint(out, uint64(r), 16)
+ } else {
+ out = append(out, 'U')
+ out = append(out, "00000000"[1+(bits.Len32(uint32(r))-1)/4:]...)
+ out = strconv.AppendUint(out, uint64(r), 16)
+ }
+ in = in[n:]
+ default:
+ i := indexNeedEscapeInString(in[n:])
+ in, out = in[n+i:], append(out, in[:n+i]...)
+ }
+ }
+ out = append(out, '"')
+ return out
+}
+
+// indexNeedEscapeInString returns the index of the character that needs
+// escaping. If no characters need escaping, this returns the input length.
+func indexNeedEscapeInString(s string) int {
+ for i := 0; i < len(s); i++ {
+ if c := s[i]; c < ' ' || c == '"' || c == '\'' || c == '\\' || c >= 0x7f {
+ return i
+ }
+ }
+ return len(s)
+}
+
+// WriteFloat writes out the given float value for given bitSize.
+func (e *Encoder) WriteFloat(n float64, bitSize int) {
+ e.prepareNext(scalar)
+ e.out = appendFloat(e.out, n, bitSize)
+}
+
+func appendFloat(out []byte, n float64, bitSize int) []byte {
+ switch {
+ case math.IsNaN(n):
+ return append(out, "nan"...)
+ case math.IsInf(n, +1):
+ return append(out, "inf"...)
+ case math.IsInf(n, -1):
+ return append(out, "-inf"...)
+ default:
+ return strconv.AppendFloat(out, n, 'g', -1, bitSize)
+ }
+}
+
+// WriteInt writes out the given signed integer value.
+func (e *Encoder) WriteInt(n int64) {
+ e.prepareNext(scalar)
+ e.out = append(e.out, strconv.FormatInt(n, 10)...)
+}
+
+// WriteUint writes out the given unsigned integer value.
+func (e *Encoder) WriteUint(n uint64) {
+ e.prepareNext(scalar)
+ e.out = append(e.out, strconv.FormatUint(n, 10)...)
+}
+
+// WriteLiteral writes out the given string as a literal value without quotes.
+// This is used for writing enum literal strings.
+func (e *Encoder) WriteLiteral(s string) {
+ e.prepareNext(scalar)
+ e.out = append(e.out, s...)
+}
+
+// prepareNext adds possible space and indentation for the next value based
+// on last encType and indent option. It also updates e.lastType to next.
+func (e *Encoder) prepareNext(next encType) {
+ defer func() {
+ e.lastType = next
+ }()
+
+ // Single line.
+ if len(e.indent) == 0 {
+ // Add space after each field before the next one.
+ if e.lastType&(scalar|messageClose) != 0 && next == name {
+ e.out = append(e.out, ' ')
+ // Add a random extra space to make output unstable.
+ if detrand.Bool() {
+ e.out = append(e.out, ' ')
+ }
+ }
+ return
+ }
+
+ // Multi-line.
+ switch {
+ case e.lastType == name:
+ e.out = append(e.out, ' ')
+ // Add a random extra space after name: to make output unstable.
+ if detrand.Bool() {
+ e.out = append(e.out, ' ')
+ }
+
+ case e.lastType == messageOpen && next != messageClose:
+ e.indents = append(e.indents, e.indent...)
+ e.out = append(e.out, '\n')
+ e.out = append(e.out, e.indents...)
+
+ case e.lastType&(scalar|messageClose) != 0:
+ if next == messageClose {
+ e.indents = e.indents[:len(e.indents)-len(e.indent)]
+ }
+ e.out = append(e.out, '\n')
+ e.out = append(e.out, e.indents...)
+ }
+}
+
+// Snapshot returns the current snapshot for use in Reset.
+func (e *Encoder) Snapshot() encoderState {
+ return e.encoderState
+}
+
+// Reset resets the Encoder to the given encoderState from a Snapshot.
+func (e *Encoder) Reset(es encoderState) {
+ e.encoderState = es
+}
diff --git a/vendor/google.golang.org/protobuf/internal/errors/errors.go b/vendor/google.golang.org/protobuf/internal/errors/errors.go
new file mode 100644
index 0000000..20c17b3
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/errors/errors.go
@@ -0,0 +1,89 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package errors implements functions to manipulate errors.
+package errors
+
+import (
+ "errors"
+ "fmt"
+
+ "google.golang.org/protobuf/internal/detrand"
+)
+
+// Error is a sentinel matching all errors produced by this package.
+var Error = errors.New("protobuf error")
+
+// New formats a string according to the format specifier and arguments and
+// returns an error that has a "proto" prefix.
+func New(f string, x ...interface{}) error {
+ return &prefixError{s: format(f, x...)}
+}
+
+type prefixError struct{ s string }
+
+var prefix = func() string {
+ // Deliberately introduce instability into the error message string to
+ // discourage users from performing error string comparisons.
+ if detrand.Bool() {
+ return "proto: " // use non-breaking spaces (U+00a0)
+ } else {
+ return "proto: " // use regular spaces (U+0020)
+ }
+}()
+
+func (e *prefixError) Error() string {
+ return prefix + e.s
+}
+
+func (e *prefixError) Unwrap() error {
+ return Error
+}
+
+// Wrap returns an error that has a "proto" prefix, the formatted string described
+// by the format specifier and arguments, and a suffix of err. The error wraps err.
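+//
+// For example (illustrative), Wrap(io.EOF, "reading field %q", "foo") produces
+// an error whose message is `proto: reading field "foo": EOF` and which
+// unwraps to io.EOF.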
+func Wrap(err error, f string, x ...interface{}) error {
+ return &wrapError{
+ s: format(f, x...),
+ err: err,
+ }
+}
+
+type wrapError struct {
+ s string
+ err error
+}
+
+func (e *wrapError) Error() string {
+ return format("%v%v: %v", prefix, e.s, e.err)
+}
+
+func (e *wrapError) Unwrap() error {
+ return e.err
+}
+
+func (e *wrapError) Is(target error) bool {
+ return target == Error
+}
+
+func format(f string, x ...interface{}) string {
+ // avoid "proto: " prefix when chaining
+ for i := 0; i < len(x); i++ {
+ switch e := x[i].(type) {
+ case *prefixError:
+ x[i] = e.s
+ case *wrapError:
+ x[i] = format("%v: %v", e.s, e.err)
+ }
+ }
+ return fmt.Sprintf(f, x...)
+}
+
+func InvalidUTF8(name string) error {
+ return New("field %v contains invalid UTF-8", name)
+}
+
+func RequiredNotSet(name string) error {
+ return New("required field %v not set", name)
+}
diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go
new file mode 100644
index 0000000..f90e909
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go
@@ -0,0 +1,39 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.13
+
+package errors
+
+import "reflect"
+
+// Is is a copy of Go 1.13's errors.Is for use with older Go versions.
+func Is(err, target error) bool {
+ if target == nil {
+ return err == target
+ }
+
+ isComparable := reflect.TypeOf(target).Comparable()
+ for {
+ if isComparable && err == target {
+ return true
+ }
+ if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) {
+ return true
+ }
+ if err = unwrap(err); err == nil {
+ return false
+ }
+ }
+}
+
+func unwrap(err error) error {
+ u, ok := err.(interface {
+ Unwrap() error
+ })
+ if !ok {
+ return nil
+ }
+ return u.Unwrap()
+}
diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go
new file mode 100644
index 0000000..dc05f41
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go
@@ -0,0 +1,12 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.13
+
+package errors
+
+import "errors"
+
+// Is is errors.Is.
+func Is(err, target error) bool { return errors.Is(err, target) }
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/build.go b/vendor/google.golang.org/protobuf/internal/filedesc/build.go
new file mode 100644
index 0000000..b293b69
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/build.go
@@ -0,0 +1,158 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package filedesc provides functionality for constructing descriptors.
+//
+// The types in this package implement interfaces in the protoreflect package
+// related to protobuf descripriptors.
+package filedesc
+
+import (
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/genid"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ preg "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+// Builder constructs a protoreflect.FileDescriptor from the raw descriptor.
+type Builder struct {
+ // GoPackagePath is the Go package path that is invoking this builder.
+ GoPackagePath string
+
+ // RawDescriptor is the wire-encoded bytes of FileDescriptorProto
+ // and must be populated.
+ RawDescriptor []byte
+
+ // NumEnums is the total number of enums declared in the file.
+ NumEnums int32
+ // NumMessages is the total number of messages declared in the file.
+ // It includes the implicit message declarations for map entries.
+ NumMessages int32
+ // NumExtensions is the total number of extensions declared in the file.
+ NumExtensions int32
+ // NumServices is the total number of services declared in the file.
+ NumServices int32
+
+ // TypeResolver resolves extension field types for descriptor options.
+ // If nil, it uses protoregistry.GlobalTypes.
+ TypeResolver interface {
+ preg.ExtensionTypeResolver
+ }
+
+ // FileRegistry is used to look up file, enum, and message dependencies.
+ // Once constructed, the file descriptor is registered here.
+ // If nil, it uses protoregistry.GlobalFiles.
+ FileRegistry interface {
+ FindFileByPath(string) (protoreflect.FileDescriptor, error)
+ FindDescriptorByName(pref.FullName) (pref.Descriptor, error)
+ RegisterFile(pref.FileDescriptor) error
+ }
+}
+
+// resolverByIndex is an interface Builder.FileRegistry may implement.
+// If so, it permits looking up an enum or message dependency based on the
+// sub-list and element index into filetype.Builder.DependencyIndexes.
+type resolverByIndex interface {
+ FindEnumByIndex(int32, int32, []Enum, []Message) pref.EnumDescriptor
+ FindMessageByIndex(int32, int32, []Enum, []Message) pref.MessageDescriptor
+}
+
+// Indexes of each sub-list in filetype.Builder.DependencyIndexes.
+const (
+ listFieldDeps int32 = iota
+ listExtTargets
+ listExtDeps
+ listMethInDeps
+ listMethOutDeps
+)
+
+// Out is the output of the Builder.
+type Out struct {
+ File pref.FileDescriptor
+
+ // Enums is all enum descriptors in "flattened ordering".
+ Enums []Enum
+ // Messages is all message descriptors in "flattened ordering".
+ // It includes the implicit message declarations for map entries.
+ Messages []Message
+ // Extensions is all extension descriptors in "flattened ordering".
+ Extensions []Extension
+ // Services is all service descriptors in "flattened ordering".
+ Services []Service
+}
+
+// Build constructs a FileDescriptor given the parameters set in Builder.
+// It assumes that the inputs are well-formed and panics if any inconsistencies
+// are encountered.
+//
+// If NumEnums+NumMessages+NumExtensions+NumServices is zero,
+// then Build automatically derives them from the raw descriptor.
+func (db Builder) Build() (out Out) {
+ // Populate the counts if uninitialized.
+ if db.NumEnums+db.NumMessages+db.NumExtensions+db.NumServices == 0 {
+ db.unmarshalCounts(db.RawDescriptor, true)
+ }
+
+ // Initialize resolvers and registries if unpopulated.
+ if db.TypeResolver == nil {
+ db.TypeResolver = preg.GlobalTypes
+ }
+ if db.FileRegistry == nil {
+ db.FileRegistry = preg.GlobalFiles
+ }
+
+ fd := newRawFile(db)
+ out.File = fd
+ out.Enums = fd.allEnums
+ out.Messages = fd.allMessages
+ out.Extensions = fd.allExtensions
+ out.Services = fd.allServices
+
+ if err := db.FileRegistry.RegisterFile(fd); err != nil {
+ panic(err)
+ }
+ return out
+}
+
+// unmarshalCounts counts the number of enum, message, extension, and service
+// declarations in the raw message, which is either a FileDescriptorProto
+// or a MessageDescriptorProto depending on whether isFile is set.
+func (db *Builder) unmarshalCounts(b []byte, isFile bool) {
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ if isFile {
+ switch num {
+ case genid.FileDescriptorProto_EnumType_field_number:
+ db.NumEnums++
+ case genid.FileDescriptorProto_MessageType_field_number:
+ db.unmarshalCounts(v, false)
+ db.NumMessages++
+ case genid.FileDescriptorProto_Extension_field_number:
+ db.NumExtensions++
+ case genid.FileDescriptorProto_Service_field_number:
+ db.NumServices++
+ }
+ } else {
+ switch num {
+ case genid.DescriptorProto_EnumType_field_number:
+ db.NumEnums++
+ case genid.DescriptorProto_NestedType_field_number:
+ db.unmarshalCounts(v, false)
+ db.NumMessages++
+ case genid.DescriptorProto_Extension_field_number:
+ db.NumExtensions++
+ }
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+}
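Illustrative only, not part of the patch: a hedged sketch of how Builder is typically driven (in real generated code this plumbing goes through internal/filetype). The raw descriptor bytes, the function name, and the Go package path below are assumptions; the filedesc and protoreflect imports are elided, and filedesc is an internal package, so it is only importable from within the protobuf module.

// file_example_proto_rawDesc is assumed to hold the wire-encoded
// FileDescriptorProto for some example.proto.
var file_example_proto_rawDesc = []byte{ /* wire-encoded FileDescriptorProto */ }

func buildExampleFile() protoreflect.FileDescriptor {
	out := filedesc.Builder{
		GoPackagePath: "example.com/gen/examplepb", // assumed import path of the generated package
		RawDescriptor: file_example_proto_rawDesc,
		// Counts left at zero: Build derives them from the raw descriptor
		// via unmarshalCounts before constructing the file.
	}.Build()
	return out.File // Build has also registered the file with protoregistry.GlobalFiles
}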
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
new file mode 100644
index 0000000..98ab142
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
@@ -0,0 +1,631 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package filedesc
+
+import (
+ "bytes"
+ "fmt"
+ "sync"
+ "sync/atomic"
+
+ "google.golang.org/protobuf/internal/descfmt"
+ "google.golang.org/protobuf/internal/descopts"
+ "google.golang.org/protobuf/internal/encoding/defval"
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/internal/genid"
+ "google.golang.org/protobuf/internal/pragma"
+ "google.golang.org/protobuf/internal/strs"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+// The types in this file may have a suffix:
+// • L0: Contains fields common to all descriptors (except File) and
+// must be initialized up front.
+// • L1: Contains fields specific to a descriptor and
+// must be initialized up front.
+// • L2: Contains fields that are lazily initialized when constructing
+// from the raw file descriptor. When constructing as a literal, the L2
+// fields must be initialized up front.
+//
+// The types are exported so that packages like reflect/protodesc can
+// directly construct descriptors.
+
+type (
+ File struct {
+ fileRaw
+ L1 FileL1
+
+ once uint32 // atomically set if L2 is valid
+ mu sync.Mutex // protects L2
+ L2 *FileL2
+ }
+ FileL1 struct {
+ Syntax pref.Syntax
+ Path string
+ Package pref.FullName
+
+ Enums Enums
+ Messages Messages
+ Extensions Extensions
+ Services Services
+ }
+ FileL2 struct {
+ Options func() pref.ProtoMessage
+ Imports FileImports
+ Locations SourceLocations
+ }
+)
+
+func (fd *File) ParentFile() pref.FileDescriptor { return fd }
+func (fd *File) Parent() pref.Descriptor { return nil }
+func (fd *File) Index() int { return 0 }
+func (fd *File) Syntax() pref.Syntax { return fd.L1.Syntax }
+func (fd *File) Name() pref.Name { return fd.L1.Package.Name() }
+func (fd *File) FullName() pref.FullName { return fd.L1.Package }
+func (fd *File) IsPlaceholder() bool { return false }
+func (fd *File) Options() pref.ProtoMessage {
+ if f := fd.lazyInit().Options; f != nil {
+ return f()
+ }
+ return descopts.File
+}
+func (fd *File) Path() string { return fd.L1.Path }
+func (fd *File) Package() pref.FullName { return fd.L1.Package }
+func (fd *File) Imports() pref.FileImports { return &fd.lazyInit().Imports }
+func (fd *File) Enums() pref.EnumDescriptors { return &fd.L1.Enums }
+func (fd *File) Messages() pref.MessageDescriptors { return &fd.L1.Messages }
+func (fd *File) Extensions() pref.ExtensionDescriptors { return &fd.L1.Extensions }
+func (fd *File) Services() pref.ServiceDescriptors { return &fd.L1.Services }
+func (fd *File) SourceLocations() pref.SourceLocations { return &fd.lazyInit().Locations }
+func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) }
+func (fd *File) ProtoType(pref.FileDescriptor) {}
+func (fd *File) ProtoInternal(pragma.DoNotImplement) {}
+
+func (fd *File) lazyInit() *FileL2 {
+ if atomic.LoadUint32(&fd.once) == 0 {
+ fd.lazyInitOnce()
+ }
+ return fd.L2
+}
+
+func (fd *File) lazyInitOnce() {
+ fd.mu.Lock()
+ if fd.L2 == nil {
+ fd.lazyRawInit() // recursively initializes all L2 structures
+ }
+ atomic.StoreUint32(&fd.once, 1)
+ fd.mu.Unlock()
+}
+
+// GoPackagePath is a pseudo-internal API for determining the Go package path
+// that this file descriptor is declared in.
+//
+// WARNING: This method is exempt from the compatibility promise and may be
+// removed in the future without warning.
+func (fd *File) GoPackagePath() string {
+ return fd.builder.GoPackagePath
+}
+
+type (
+ Enum struct {
+ Base
+ L1 EnumL1
+ L2 *EnumL2 // protected by fileDesc.once
+ }
+ EnumL1 struct {
+ eagerValues bool // controls whether EnumL2.Values is already populated
+ }
+ EnumL2 struct {
+ Options func() pref.ProtoMessage
+ Values EnumValues
+ ReservedNames Names
+ ReservedRanges EnumRanges
+ }
+
+ EnumValue struct {
+ Base
+ L1 EnumValueL1
+ }
+ EnumValueL1 struct {
+ Options func() pref.ProtoMessage
+ Number pref.EnumNumber
+ }
+)
+
+func (ed *Enum) Options() pref.ProtoMessage {
+ if f := ed.lazyInit().Options; f != nil {
+ return f()
+ }
+ return descopts.Enum
+}
+func (ed *Enum) Values() pref.EnumValueDescriptors {
+ if ed.L1.eagerValues {
+ return &ed.L2.Values
+ }
+ return &ed.lazyInit().Values
+}
+func (ed *Enum) ReservedNames() pref.Names { return &ed.lazyInit().ReservedNames }
+func (ed *Enum) ReservedRanges() pref.EnumRanges { return &ed.lazyInit().ReservedRanges }
+func (ed *Enum) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) }
+func (ed *Enum) ProtoType(pref.EnumDescriptor) {}
+func (ed *Enum) lazyInit() *EnumL2 {
+ ed.L0.ParentFile.lazyInit() // implicitly initializes L2
+ return ed.L2
+}
+
+func (ed *EnumValue) Options() pref.ProtoMessage {
+ if f := ed.L1.Options; f != nil {
+ return f()
+ }
+ return descopts.EnumValue
+}
+func (ed *EnumValue) Number() pref.EnumNumber { return ed.L1.Number }
+func (ed *EnumValue) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) }
+func (ed *EnumValue) ProtoType(pref.EnumValueDescriptor) {}
+
+type (
+ Message struct {
+ Base
+ L1 MessageL1
+ L2 *MessageL2 // protected by fileDesc.once
+ }
+ MessageL1 struct {
+ Enums Enums
+ Messages Messages
+ Extensions Extensions
+ IsMapEntry bool // promoted from google.protobuf.MessageOptions
+ IsMessageSet bool // promoted from google.protobuf.MessageOptions
+ }
+ MessageL2 struct {
+ Options func() pref.ProtoMessage
+ Fields Fields
+ Oneofs Oneofs
+ ReservedNames Names
+ ReservedRanges FieldRanges
+ RequiredNumbers FieldNumbers // must be consistent with Fields.Cardinality
+ ExtensionRanges FieldRanges
+ ExtensionRangeOptions []func() pref.ProtoMessage // must be same length as ExtensionRanges
+ }
+
+ Field struct {
+ Base
+ L1 FieldL1
+ }
+ FieldL1 struct {
+ Options func() pref.ProtoMessage
+ Number pref.FieldNumber
+ Cardinality pref.Cardinality // must be consistent with Message.RequiredNumbers
+ Kind pref.Kind
+ StringName stringName
+ IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
+ IsWeak bool // promoted from google.protobuf.FieldOptions
+ HasPacked bool // promoted from google.protobuf.FieldOptions
+ IsPacked bool // promoted from google.protobuf.FieldOptions
+ HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions
+ EnforceUTF8 bool // promoted from google.protobuf.FieldOptions
+ Default defaultValue
+ ContainingOneof pref.OneofDescriptor // must be consistent with Message.Oneofs.Fields
+ Enum pref.EnumDescriptor
+ Message pref.MessageDescriptor
+ }
+
+ Oneof struct {
+ Base
+ L1 OneofL1
+ }
+ OneofL1 struct {
+ Options func() pref.ProtoMessage
+ Fields OneofFields // must be consistent with Message.Fields.ContainingOneof
+ }
+)
+
+func (md *Message) Options() pref.ProtoMessage {
+ if f := md.lazyInit().Options; f != nil {
+ return f()
+ }
+ return descopts.Message
+}
+func (md *Message) IsMapEntry() bool { return md.L1.IsMapEntry }
+func (md *Message) Fields() pref.FieldDescriptors { return &md.lazyInit().Fields }
+func (md *Message) Oneofs() pref.OneofDescriptors { return &md.lazyInit().Oneofs }
+func (md *Message) ReservedNames() pref.Names { return &md.lazyInit().ReservedNames }
+func (md *Message) ReservedRanges() pref.FieldRanges { return &md.lazyInit().ReservedRanges }
+func (md *Message) RequiredNumbers() pref.FieldNumbers { return &md.lazyInit().RequiredNumbers }
+func (md *Message) ExtensionRanges() pref.FieldRanges { return &md.lazyInit().ExtensionRanges }
+func (md *Message) ExtensionRangeOptions(i int) pref.ProtoMessage {
+ if f := md.lazyInit().ExtensionRangeOptions[i]; f != nil {
+ return f()
+ }
+ return descopts.ExtensionRange
+}
+func (md *Message) Enums() pref.EnumDescriptors { return &md.L1.Enums }
+func (md *Message) Messages() pref.MessageDescriptors { return &md.L1.Messages }
+func (md *Message) Extensions() pref.ExtensionDescriptors { return &md.L1.Extensions }
+func (md *Message) ProtoType(pref.MessageDescriptor) {}
+func (md *Message) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) }
+func (md *Message) lazyInit() *MessageL2 {
+ md.L0.ParentFile.lazyInit() // implicitly initializes L2
+ return md.L2
+}
+
+// IsMessageSet is a pseudo-internal API for checking whether a message
+// should serialize in the proto1 message format.
+//
+// WARNING: This method is exempt from the compatibility promise and may be
+// removed in the future without warning.
+func (md *Message) IsMessageSet() bool {
+ return md.L1.IsMessageSet
+}
+
+func (fd *Field) Options() pref.ProtoMessage {
+ if f := fd.L1.Options; f != nil {
+ return f()
+ }
+ return descopts.Field
+}
+func (fd *Field) Number() pref.FieldNumber { return fd.L1.Number }
+func (fd *Field) Cardinality() pref.Cardinality { return fd.L1.Cardinality }
+func (fd *Field) Kind() pref.Kind { return fd.L1.Kind }
+func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON }
+func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) }
+func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) }
+func (fd *Field) HasPresence() bool {
+ return fd.L1.Cardinality != pref.Repeated && (fd.L0.ParentFile.L1.Syntax == pref.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil)
+}
+func (fd *Field) HasOptionalKeyword() bool {
+ return (fd.L0.ParentFile.L1.Syntax == pref.Proto2 && fd.L1.Cardinality == pref.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional
+}
+func (fd *Field) IsPacked() bool {
+ if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != pref.Proto2 && fd.L1.Cardinality == pref.Repeated {
+ switch fd.L1.Kind {
+ case pref.StringKind, pref.BytesKind, pref.MessageKind, pref.GroupKind:
+ default:
+ return true
+ }
+ }
+ return fd.L1.IsPacked
+}
+func (fd *Field) IsExtension() bool { return false }
+func (fd *Field) IsWeak() bool { return fd.L1.IsWeak }
+func (fd *Field) IsList() bool { return fd.Cardinality() == pref.Repeated && !fd.IsMap() }
+func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() }
+func (fd *Field) MapKey() pref.FieldDescriptor {
+ if !fd.IsMap() {
+ return nil
+ }
+ return fd.Message().Fields().ByNumber(genid.MapEntry_Key_field_number)
+}
+func (fd *Field) MapValue() pref.FieldDescriptor {
+ if !fd.IsMap() {
+ return nil
+ }
+ return fd.Message().Fields().ByNumber(genid.MapEntry_Value_field_number)
+}
+func (fd *Field) HasDefault() bool { return fd.L1.Default.has }
+func (fd *Field) Default() pref.Value { return fd.L1.Default.get(fd) }
+func (fd *Field) DefaultEnumValue() pref.EnumValueDescriptor { return fd.L1.Default.enum }
+func (fd *Field) ContainingOneof() pref.OneofDescriptor { return fd.L1.ContainingOneof }
+func (fd *Field) ContainingMessage() pref.MessageDescriptor {
+ return fd.L0.Parent.(pref.MessageDescriptor)
+}
+func (fd *Field) Enum() pref.EnumDescriptor {
+ return fd.L1.Enum
+}
+func (fd *Field) Message() pref.MessageDescriptor {
+ if fd.L1.IsWeak {
+ if d, _ := protoregistry.GlobalFiles.FindDescriptorByName(fd.L1.Message.FullName()); d != nil {
+ return d.(pref.MessageDescriptor)
+ }
+ }
+ return fd.L1.Message
+}
+func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) }
+func (fd *Field) ProtoType(pref.FieldDescriptor) {}
+
+// EnforceUTF8 is a pseudo-internal API to determine whether to enforce UTF-8
+// validation for the string field. This exists for Google-internal use only
+// since proto3 did not enforce UTF-8 validity prior to the open-source release.
+// If this method does not exist, the default is to enforce valid UTF-8.
+//
+// WARNING: This method is exempt from the compatibility promise and may be
+// removed in the future without warning.
+func (fd *Field) EnforceUTF8() bool {
+ if fd.L1.HasEnforceUTF8 {
+ return fd.L1.EnforceUTF8
+ }
+ return fd.L0.ParentFile.L1.Syntax == pref.Proto3
+}
+
+func (od *Oneof) IsSynthetic() bool {
+ return od.L0.ParentFile.L1.Syntax == pref.Proto3 && len(od.L1.Fields.List) == 1 && od.L1.Fields.List[0].HasOptionalKeyword()
+}
+func (od *Oneof) Options() pref.ProtoMessage {
+ if f := od.L1.Options; f != nil {
+ return f()
+ }
+ return descopts.Oneof
+}
+func (od *Oneof) Fields() pref.FieldDescriptors { return &od.L1.Fields }
+func (od *Oneof) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, od) }
+func (od *Oneof) ProtoType(pref.OneofDescriptor) {}
+
+type (
+ Extension struct {
+ Base
+ L1 ExtensionL1
+ L2 *ExtensionL2 // protected by fileDesc.once
+ }
+ ExtensionL1 struct {
+ Number pref.FieldNumber
+ Extendee pref.MessageDescriptor
+ Cardinality pref.Cardinality
+ Kind pref.Kind
+ }
+ ExtensionL2 struct {
+ Options func() pref.ProtoMessage
+ StringName stringName
+ IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
+ IsPacked bool // promoted from google.protobuf.FieldOptions
+ Default defaultValue
+ Enum pref.EnumDescriptor
+ Message pref.MessageDescriptor
+ }
+)
+
+func (xd *Extension) Options() pref.ProtoMessage {
+ if f := xd.lazyInit().Options; f != nil {
+ return f()
+ }
+ return descopts.Field
+}
+func (xd *Extension) Number() pref.FieldNumber { return xd.L1.Number }
+func (xd *Extension) Cardinality() pref.Cardinality { return xd.L1.Cardinality }
+func (xd *Extension) Kind() pref.Kind { return xd.L1.Kind }
+func (xd *Extension) HasJSONName() bool { return xd.lazyInit().StringName.hasJSON }
+func (xd *Extension) JSONName() string { return xd.lazyInit().StringName.getJSON(xd) }
+func (xd *Extension) TextName() string { return xd.lazyInit().StringName.getText(xd) }
+func (xd *Extension) HasPresence() bool { return xd.L1.Cardinality != pref.Repeated }
+func (xd *Extension) HasOptionalKeyword() bool {
+ return (xd.L0.ParentFile.L1.Syntax == pref.Proto2 && xd.L1.Cardinality == pref.Optional) || xd.lazyInit().IsProto3Optional
+}
+func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked }
+func (xd *Extension) IsExtension() bool { return true }
+func (xd *Extension) IsWeak() bool { return false }
+func (xd *Extension) IsList() bool { return xd.Cardinality() == pref.Repeated }
+func (xd *Extension) IsMap() bool { return false }
+func (xd *Extension) MapKey() pref.FieldDescriptor { return nil }
+func (xd *Extension) MapValue() pref.FieldDescriptor { return nil }
+func (xd *Extension) HasDefault() bool { return xd.lazyInit().Default.has }
+func (xd *Extension) Default() pref.Value { return xd.lazyInit().Default.get(xd) }
+func (xd *Extension) DefaultEnumValue() pref.EnumValueDescriptor { return xd.lazyInit().Default.enum }
+func (xd *Extension) ContainingOneof() pref.OneofDescriptor { return nil }
+func (xd *Extension) ContainingMessage() pref.MessageDescriptor { return xd.L1.Extendee }
+func (xd *Extension) Enum() pref.EnumDescriptor { return xd.lazyInit().Enum }
+func (xd *Extension) Message() pref.MessageDescriptor { return xd.lazyInit().Message }
+func (xd *Extension) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, xd) }
+func (xd *Extension) ProtoType(pref.FieldDescriptor) {}
+func (xd *Extension) ProtoInternal(pragma.DoNotImplement) {}
+func (xd *Extension) lazyInit() *ExtensionL2 {
+ xd.L0.ParentFile.lazyInit() // implicitly initializes L2
+ return xd.L2
+}
+
+type (
+ Service struct {
+ Base
+ L1 ServiceL1
+ L2 *ServiceL2 // protected by fileDesc.once
+ }
+ ServiceL1 struct{}
+ ServiceL2 struct {
+ Options func() pref.ProtoMessage
+ Methods Methods
+ }
+
+ Method struct {
+ Base
+ L1 MethodL1
+ }
+ MethodL1 struct {
+ Options func() pref.ProtoMessage
+ Input pref.MessageDescriptor
+ Output pref.MessageDescriptor
+ IsStreamingClient bool
+ IsStreamingServer bool
+ }
+)
+
+func (sd *Service) Options() pref.ProtoMessage {
+ if f := sd.lazyInit().Options; f != nil {
+ return f()
+ }
+ return descopts.Service
+}
+func (sd *Service) Methods() pref.MethodDescriptors { return &sd.lazyInit().Methods }
+func (sd *Service) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, sd) }
+func (sd *Service) ProtoType(pref.ServiceDescriptor) {}
+func (sd *Service) ProtoInternal(pragma.DoNotImplement) {}
+func (sd *Service) lazyInit() *ServiceL2 {
+ sd.L0.ParentFile.lazyInit() // implicitly initializes L2
+ return sd.L2
+}
+
+func (md *Method) Options() pref.ProtoMessage {
+ if f := md.L1.Options; f != nil {
+ return f()
+ }
+ return descopts.Method
+}
+func (md *Method) Input() pref.MessageDescriptor { return md.L1.Input }
+func (md *Method) Output() pref.MessageDescriptor { return md.L1.Output }
+func (md *Method) IsStreamingClient() bool { return md.L1.IsStreamingClient }
+func (md *Method) IsStreamingServer() bool { return md.L1.IsStreamingServer }
+func (md *Method) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) }
+func (md *Method) ProtoType(pref.MethodDescriptor) {}
+func (md *Method) ProtoInternal(pragma.DoNotImplement) {}
+
+// Surrogate files can be used to create standalone descriptors
+// where the syntax is the only information derived from the parent file.
+var (
+ SurrogateProto2 = &File{L1: FileL1{Syntax: pref.Proto2}, L2: &FileL2{}}
+ SurrogateProto3 = &File{L1: FileL1{Syntax: pref.Proto3}, L2: &FileL2{}}
+)
+
+type (
+ Base struct {
+ L0 BaseL0
+ }
+ BaseL0 struct {
+ FullName pref.FullName // must be populated
+ ParentFile *File // must be populated
+ Parent pref.Descriptor
+ Index int
+ }
+)
+
+func (d *Base) Name() pref.Name { return d.L0.FullName.Name() }
+func (d *Base) FullName() pref.FullName { return d.L0.FullName }
+func (d *Base) ParentFile() pref.FileDescriptor {
+ if d.L0.ParentFile == SurrogateProto2 || d.L0.ParentFile == SurrogateProto3 {
+ return nil // surrogate files are not real parents
+ }
+ return d.L0.ParentFile
+}
+func (d *Base) Parent() pref.Descriptor { return d.L0.Parent }
+func (d *Base) Index() int { return d.L0.Index }
+func (d *Base) Syntax() pref.Syntax { return d.L0.ParentFile.Syntax() }
+func (d *Base) IsPlaceholder() bool { return false }
+func (d *Base) ProtoInternal(pragma.DoNotImplement) {}
+
+type stringName struct {
+ hasJSON bool
+ once sync.Once
+ nameJSON string
+ nameText string
+}
+
+// InitJSON initializes the name. It is exported for use by other internal packages.
+func (s *stringName) InitJSON(name string) {
+ s.hasJSON = true
+ s.nameJSON = name
+}
+
+func (s *stringName) lazyInit(fd pref.FieldDescriptor) *stringName {
+ s.once.Do(func() {
+ if fd.IsExtension() {
+ // For extensions, JSON and text are formatted the same way.
+ var name string
+ if messageset.IsMessageSetExtension(fd) {
+ name = string("[" + fd.FullName().Parent() + "]")
+ } else {
+ name = string("[" + fd.FullName() + "]")
+ }
+ s.nameJSON = name
+ s.nameText = name
+ } else {
+ // Format the JSON name.
+ if !s.hasJSON {
+ s.nameJSON = strs.JSONCamelCase(string(fd.Name()))
+ }
+
+ // Format the text name.
+ s.nameText = string(fd.Name())
+ if fd.Kind() == pref.GroupKind {
+ s.nameText = string(fd.Message().Name())
+ }
+ }
+ })
+ return s
+}
+
+func (s *stringName) getJSON(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameJSON }
+func (s *stringName) getText(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameText }
+
+func DefaultValue(v pref.Value, ev pref.EnumValueDescriptor) defaultValue {
+ dv := defaultValue{has: v.IsValid(), val: v, enum: ev}
+ if b, ok := v.Interface().([]byte); ok {
+ // Store a copy of the default bytes, so that we can detect
+ // accidental mutations of the original value.
+ dv.bytes = append([]byte(nil), b...)
+ }
+ return dv
+}
+
+func unmarshalDefault(b []byte, k pref.Kind, pf *File, ed pref.EnumDescriptor) defaultValue {
+ var evs pref.EnumValueDescriptors
+ if k == pref.EnumKind {
+ // If the enum is declared within the same file, be careful not to
+ // blindly call the Values method, lest we bind ourselves in a deadlock.
+ if e, ok := ed.(*Enum); ok && e.L0.ParentFile == pf {
+ evs = &e.L2.Values
+ } else {
+ evs = ed.Values()
+ }
+
+ // If we are unable to resolve the enum dependency, use a placeholder
+ // enum value since we will not be able to parse the default value.
+ if ed.IsPlaceholder() && pref.Name(b).IsValid() {
+ v := pref.ValueOfEnum(0)
+ ev := PlaceholderEnumValue(ed.FullName().Parent().Append(pref.Name(b)))
+ return DefaultValue(v, ev)
+ }
+ }
+
+ v, ev, err := defval.Unmarshal(string(b), k, evs, defval.Descriptor)
+ if err != nil {
+ panic(err)
+ }
+ return DefaultValue(v, ev)
+}
+
+type defaultValue struct {
+ has bool
+ val pref.Value
+ enum pref.EnumValueDescriptor
+ bytes []byte
+}
+
+func (dv *defaultValue) get(fd pref.FieldDescriptor) pref.Value {
+ // Return the zero value as the default if unpopulated.
+ if !dv.has {
+ if fd.Cardinality() == pref.Repeated {
+ return pref.Value{}
+ }
+ switch fd.Kind() {
+ case pref.BoolKind:
+ return pref.ValueOfBool(false)
+ case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind:
+ return pref.ValueOfInt32(0)
+ case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
+ return pref.ValueOfInt64(0)
+ case pref.Uint32Kind, pref.Fixed32Kind:
+ return pref.ValueOfUint32(0)
+ case pref.Uint64Kind, pref.Fixed64Kind:
+ return pref.ValueOfUint64(0)
+ case pref.FloatKind:
+ return pref.ValueOfFloat32(0)
+ case pref.DoubleKind:
+ return pref.ValueOfFloat64(0)
+ case pref.StringKind:
+ return pref.ValueOfString("")
+ case pref.BytesKind:
+ return pref.ValueOfBytes(nil)
+ case pref.EnumKind:
+ if evs := fd.Enum().Values(); evs.Len() > 0 {
+ return pref.ValueOfEnum(evs.Get(0).Number())
+ }
+ return pref.ValueOfEnum(0)
+ }
+ }
+
+ if len(dv.bytes) > 0 && !bytes.Equal(dv.bytes, dv.val.Bytes()) {
+ // TODO: Avoid panic if we're running with the race detector
+ // and instead spawn a goroutine that periodically resets
+ // this value back to the original to induce a race.
+ panic(fmt.Sprintf("detected mutation on the default bytes for %v", fd.FullName()))
+ }
+ return dv.val
+}
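The File type above guards its lazily built L2 state with a double-checked scheme: an atomic flag keeps the common path lock-free, while a mutex serializes the one-time construction. A distilled, self-contained sketch of that pattern (hypothetical names; assumes the usual sync and sync/atomic imports):

type lazyThing struct {
	once uint32     // atomically set once data is valid, like File.once
	mu   sync.Mutex // serializes the slow path, like File.mu
	data *payload
}

type payload struct{ /* expensive-to-build state */ }

func (t *lazyThing) get() *payload {
	if atomic.LoadUint32(&t.once) == 0 { // fast path: most calls never take the lock
		t.mu.Lock()
		if t.data == nil {
			t.data = &payload{} // construction runs at most once
		}
		atomic.StoreUint32(&t.once, 1)
		t.mu.Unlock()
	}
	return t.data
}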
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
new file mode 100644
index 0000000..66e1fee
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
@@ -0,0 +1,471 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package filedesc
+
+import (
+ "sync"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/genid"
+ "google.golang.org/protobuf/internal/strs"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// fileRaw is a data struct used when initializing a file descriptor from
+// a raw FileDescriptorProto.
+type fileRaw struct {
+ builder Builder
+ allEnums []Enum
+ allMessages []Message
+ allExtensions []Extension
+ allServices []Service
+}
+
+func newRawFile(db Builder) *File {
+ fd := &File{fileRaw: fileRaw{builder: db}}
+ fd.initDecls(db.NumEnums, db.NumMessages, db.NumExtensions, db.NumServices)
+ fd.unmarshalSeed(db.RawDescriptor)
+
+ // Extended message targets are eagerly resolved since registration
+ // needs this information at program init time.
+ for i := range fd.allExtensions {
+ xd := &fd.allExtensions[i]
+ xd.L1.Extendee = fd.resolveMessageDependency(xd.L1.Extendee, listExtTargets, int32(i))
+ }
+
+ fd.checkDecls()
+ return fd
+}
+
+// initDecls pre-allocates slices for the exact number of enums, messages
+// (including map entries), extensions, and services declared in the proto file.
+// This is done to avoid regrowing the slice, which would change the address
+// for any previously seen declaration.
+//
+// The alloc methods "allocate" slices by pulling from the capacity.
+func (fd *File) initDecls(numEnums, numMessages, numExtensions, numServices int32) {
+ fd.allEnums = make([]Enum, 0, numEnums)
+ fd.allMessages = make([]Message, 0, numMessages)
+ fd.allExtensions = make([]Extension, 0, numExtensions)
+ fd.allServices = make([]Service, 0, numServices)
+}
+
+func (fd *File) allocEnums(n int) []Enum {
+ total := len(fd.allEnums)
+ es := fd.allEnums[total : total+n]
+ fd.allEnums = fd.allEnums[:total+n]
+ return es
+}
+func (fd *File) allocMessages(n int) []Message {
+ total := len(fd.allMessages)
+ ms := fd.allMessages[total : total+n]
+ fd.allMessages = fd.allMessages[:total+n]
+ return ms
+}
+func (fd *File) allocExtensions(n int) []Extension {
+ total := len(fd.allExtensions)
+ xs := fd.allExtensions[total : total+n]
+ fd.allExtensions = fd.allExtensions[:total+n]
+ return xs
+}
+func (fd *File) allocServices(n int) []Service {
+ total := len(fd.allServices)
+ xs := fd.allServices[total : total+n]
+ fd.allServices = fd.allServices[:total+n]
+ return xs
+}
+
+// checkDecls performs a sanity check that the expected number of
+// declarations matches the number found in the descriptor proto.
+func (fd *File) checkDecls() {
+ switch {
+ case len(fd.allEnums) != cap(fd.allEnums):
+ case len(fd.allMessages) != cap(fd.allMessages):
+ case len(fd.allExtensions) != cap(fd.allExtensions):
+ case len(fd.allServices) != cap(fd.allServices):
+ default:
+ return
+ }
+ panic("mismatching cardinality")
+}
+
+func (fd *File) unmarshalSeed(b []byte) {
+ sb := getBuilder()
+ defer putBuilder(sb)
+
+ var prevField pref.FieldNumber
+ var numEnums, numMessages, numExtensions, numServices int
+ var posEnums, posMessages, posExtensions, posServices int
+ b0 := b
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case genid.FileDescriptorProto_Syntax_field_number:
+ switch string(v) {
+ case "proto2":
+ fd.L1.Syntax = pref.Proto2
+ case "proto3":
+ fd.L1.Syntax = pref.Proto3
+ default:
+ panic("invalid syntax")
+ }
+ case genid.FileDescriptorProto_Name_field_number:
+ fd.L1.Path = sb.MakeString(v)
+ case genid.FileDescriptorProto_Package_field_number:
+ fd.L1.Package = pref.FullName(sb.MakeString(v))
+ case genid.FileDescriptorProto_EnumType_field_number:
+ if prevField != genid.FileDescriptorProto_EnumType_field_number {
+ if numEnums > 0 {
+ panic("non-contiguous repeated field")
+ }
+ posEnums = len(b0) - len(b) - n - m
+ }
+ numEnums++
+ case genid.FileDescriptorProto_MessageType_field_number:
+ if prevField != genid.FileDescriptorProto_MessageType_field_number {
+ if numMessages > 0 {
+ panic("non-contiguous repeated field")
+ }
+ posMessages = len(b0) - len(b) - n - m
+ }
+ numMessages++
+ case genid.FileDescriptorProto_Extension_field_number:
+ if prevField != genid.FileDescriptorProto_Extension_field_number {
+ if numExtensions > 0 {
+ panic("non-contiguous repeated field")
+ }
+ posExtensions = len(b0) - len(b) - n - m
+ }
+ numExtensions++
+ case genid.FileDescriptorProto_Service_field_number:
+ if prevField != genid.FileDescriptorProto_Service_field_number {
+ if numServices > 0 {
+ panic("non-contiguous repeated field")
+ }
+ posServices = len(b0) - len(b) - n - m
+ }
+ numServices++
+ }
+ prevField = num
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ prevField = -1 // ignore known field numbers of unknown wire type
+ }
+ }
+
+ // If syntax is missing, it is assumed to be proto2.
+ if fd.L1.Syntax == 0 {
+ fd.L1.Syntax = pref.Proto2
+ }
+
+ // Must allocate all declarations before parsing each descriptor type
+ // to ensure we handle all descriptors in "flattened ordering".
+ if numEnums > 0 {
+ fd.L1.Enums.List = fd.allocEnums(numEnums)
+ }
+ if numMessages > 0 {
+ fd.L1.Messages.List = fd.allocMessages(numMessages)
+ }
+ if numExtensions > 0 {
+ fd.L1.Extensions.List = fd.allocExtensions(numExtensions)
+ }
+ if numServices > 0 {
+ fd.L1.Services.List = fd.allocServices(numServices)
+ }
+
+ if numEnums > 0 {
+ b := b0[posEnums:]
+ for i := range fd.L1.Enums.List {
+ _, n := protowire.ConsumeVarint(b)
+ v, m := protowire.ConsumeBytes(b[n:])
+ fd.L1.Enums.List[i].unmarshalSeed(v, sb, fd, fd, i)
+ b = b[n+m:]
+ }
+ }
+ if numMessages > 0 {
+ b := b0[posMessages:]
+ for i := range fd.L1.Messages.List {
+ _, n := protowire.ConsumeVarint(b)
+ v, m := protowire.ConsumeBytes(b[n:])
+ fd.L1.Messages.List[i].unmarshalSeed(v, sb, fd, fd, i)
+ b = b[n+m:]
+ }
+ }
+ if numExtensions > 0 {
+ b := b0[posExtensions:]
+ for i := range fd.L1.Extensions.List {
+ _, n := protowire.ConsumeVarint(b)
+ v, m := protowire.ConsumeBytes(b[n:])
+ fd.L1.Extensions.List[i].unmarshalSeed(v, sb, fd, fd, i)
+ b = b[n+m:]
+ }
+ }
+ if numServices > 0 {
+ b := b0[posServices:]
+ for i := range fd.L1.Services.List {
+ _, n := protowire.ConsumeVarint(b)
+ v, m := protowire.ConsumeBytes(b[n:])
+ fd.L1.Services.List[i].unmarshalSeed(v, sb, fd, fd, i)
+ b = b[n+m:]
+ }
+ }
+}
+
+func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
+ ed.L0.ParentFile = pf
+ ed.L0.Parent = pd
+ ed.L0.Index = i
+
+ var numValues int
+ for b := b; len(b) > 0; {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case genid.EnumDescriptorProto_Name_field_number:
+ ed.L0.FullName = appendFullName(sb, pd.FullName(), v)
+ case genid.EnumDescriptorProto_Value_field_number:
+ numValues++
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+
+ // Only construct enum value descriptors for top-level enums since
+ // they are needed for registration.
+ if pd != pf {
+ return
+ }
+ ed.L1.eagerValues = true
+ ed.L2 = new(EnumL2)
+ ed.L2.Values.List = make([]EnumValue, numValues)
+ for i := 0; len(b) > 0; {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case genid.EnumDescriptorProto_Value_field_number:
+ ed.L2.Values.List[i].unmarshalFull(v, sb, pf, ed, i)
+ i++
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+}
+
+func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
+ md.L0.ParentFile = pf
+ md.L0.Parent = pd
+ md.L0.Index = i
+
+ var prevField pref.FieldNumber
+ var numEnums, numMessages, numExtensions int
+ var posEnums, posMessages, posExtensions int
+ b0 := b
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case genid.DescriptorProto_Name_field_number:
+ md.L0.FullName = appendFullName(sb, pd.FullName(), v)
+ case genid.DescriptorProto_EnumType_field_number:
+ if prevField != genid.DescriptorProto_EnumType_field_number {
+ if numEnums > 0 {
+ panic("non-contiguous repeated field")
+ }
+ posEnums = len(b0) - len(b) - n - m
+ }
+ numEnums++
+ case genid.DescriptorProto_NestedType_field_number:
+ if prevField != genid.DescriptorProto_NestedType_field_number {
+ if numMessages > 0 {
+ panic("non-contiguous repeated field")
+ }
+ posMessages = len(b0) - len(b) - n - m
+ }
+ numMessages++
+ case genid.DescriptorProto_Extension_field_number:
+ if prevField != genid.DescriptorProto_Extension_field_number {
+ if numExtensions > 0 {
+ panic("non-contiguous repeated field")
+ }
+ posExtensions = len(b0) - len(b) - n - m
+ }
+ numExtensions++
+ case genid.DescriptorProto_Options_field_number:
+ md.unmarshalSeedOptions(v)
+ }
+ prevField = num
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ prevField = -1 // ignore known field numbers of unknown wire type
+ }
+ }
+
+ // Must allocate all declarations before parsing each descriptor type
+ // to ensure we handle all descriptors in "flattened ordering".
+ if numEnums > 0 {
+ md.L1.Enums.List = pf.allocEnums(numEnums)
+ }
+ if numMessages > 0 {
+ md.L1.Messages.List = pf.allocMessages(numMessages)
+ }
+ if numExtensions > 0 {
+ md.L1.Extensions.List = pf.allocExtensions(numExtensions)
+ }
+
+ if numEnums > 0 {
+ b := b0[posEnums:]
+ for i := range md.L1.Enums.List {
+ _, n := protowire.ConsumeVarint(b)
+ v, m := protowire.ConsumeBytes(b[n:])
+ md.L1.Enums.List[i].unmarshalSeed(v, sb, pf, md, i)
+ b = b[n+m:]
+ }
+ }
+ if numMessages > 0 {
+ b := b0[posMessages:]
+ for i := range md.L1.Messages.List {
+ _, n := protowire.ConsumeVarint(b)
+ v, m := protowire.ConsumeBytes(b[n:])
+ md.L1.Messages.List[i].unmarshalSeed(v, sb, pf, md, i)
+ b = b[n+m:]
+ }
+ }
+ if numExtensions > 0 {
+ b := b0[posExtensions:]
+ for i := range md.L1.Extensions.List {
+ _, n := protowire.ConsumeVarint(b)
+ v, m := protowire.ConsumeBytes(b[n:])
+ md.L1.Extensions.List[i].unmarshalSeed(v, sb, pf, md, i)
+ b = b[n+m:]
+ }
+ }
+}
+
+func (md *Message) unmarshalSeedOptions(b []byte) {
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case genid.MessageOptions_MapEntry_field_number:
+ md.L1.IsMapEntry = protowire.DecodeBool(v)
+ case genid.MessageOptions_MessageSetWireFormat_field_number:
+ md.L1.IsMessageSet = protowire.DecodeBool(v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+}
+
+func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
+ xd.L0.ParentFile = pf
+ xd.L0.Parent = pd
+ xd.L0.Index = i
+
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case genid.FieldDescriptorProto_Number_field_number:
+ xd.L1.Number = pref.FieldNumber(v)
+ case genid.FieldDescriptorProto_Label_field_number:
+ xd.L1.Cardinality = pref.Cardinality(v)
+ case genid.FieldDescriptorProto_Type_field_number:
+ xd.L1.Kind = pref.Kind(v)
+ }
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case genid.FieldDescriptorProto_Name_field_number:
+ xd.L0.FullName = appendFullName(sb, pd.FullName(), v)
+ case genid.FieldDescriptorProto_Extendee_field_number:
+ xd.L1.Extendee = PlaceholderMessage(makeFullName(sb, v))
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+}
+
+func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
+ sd.L0.ParentFile = pf
+ sd.L0.Parent = pd
+ sd.L0.Index = i
+
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case genid.ServiceDescriptorProto_Name_field_number:
+ sd.L0.FullName = appendFullName(sb, pd.FullName(), v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+}
+
+var nameBuilderPool = sync.Pool{
+ New: func() interface{} { return new(strs.Builder) },
+}
+
+func getBuilder() *strs.Builder {
+ return nameBuilderPool.Get().(*strs.Builder)
+}
+func putBuilder(b *strs.Builder) {
+ nameBuilderPool.Put(b)
+}
+
+// makeFullName converts b to a protoreflect.FullName,
+// where b must start with a leading dot.
+func makeFullName(sb *strs.Builder, b []byte) pref.FullName {
+ if len(b) == 0 || b[0] != '.' {
+ panic("name reference must be fully qualified")
+ }
+ return pref.FullName(sb.MakeString(b[1:]))
+}
+
+func appendFullName(sb *strs.Builder, prefix pref.FullName, suffix []byte) pref.FullName {
+ return sb.AppendFullName(prefix, pref.Name(strs.UnsafeString(suffix)))
+}
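The allocEnums, allocMessages, allocExtensions, and allocServices helpers above all hand out sub-slices of a backing array whose capacity was fixed by initDecls, so the addresses of previously returned elements never change. A small, hypothetical sketch of that "allocate from capacity" idea in isolation (assumes a fmt import; decl stands in for Enum, Message, and the rest):

type decl struct{ name string } // stand-in for Enum, Message, Extension, Service

func main() {
	all := make([]decl, 0, 3)     // capacity fixed up front, as in initDecls
	alloc := func(n int) []decl { // mirrors (*File).allocEnums and friends
		total := len(all)
		sub := all[total : total+n]
		all = all[:total+n]
		return sub
	}
	fileLevel := alloc(2) // e.g. two file-level declarations
	nested := alloc(1)    // e.g. one declaration nested inside a message
	fileLevel[0].name, fileLevel[1].name, nested[0].name = "A", "B", "C"
	fmt.Println(len(all) == cap(all)) // true; checkDecls panics when this does not hold
}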
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
new file mode 100644
index 0000000..198451e
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
@@ -0,0 +1,704 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package filedesc
+
+import (
+ "reflect"
+ "sync"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/descopts"
+ "google.golang.org/protobuf/internal/genid"
+ "google.golang.org/protobuf/internal/strs"
+ "google.golang.org/protobuf/proto"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+func (fd *File) lazyRawInit() {
+ fd.unmarshalFull(fd.builder.RawDescriptor)
+ fd.resolveMessages()
+ fd.resolveExtensions()
+ fd.resolveServices()
+}
+
+func (file *File) resolveMessages() {
+ var depIdx int32
+ for i := range file.allMessages {
+ md := &file.allMessages[i]
+
+ // Resolve message field dependencies.
+ for j := range md.L2.Fields.List {
+ fd := &md.L2.Fields.List[j]
+
+ // Weak fields are resolved upon actual use.
+ if fd.L1.IsWeak {
+ continue
+ }
+
+ // Resolve message field dependency.
+ switch fd.L1.Kind {
+ case pref.EnumKind:
+ fd.L1.Enum = file.resolveEnumDependency(fd.L1.Enum, listFieldDeps, depIdx)
+ depIdx++
+ case pref.MessageKind, pref.GroupKind:
+ fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx)
+ depIdx++
+ }
+
+ // Default is resolved here since it depends on Enum being resolved.
+ if v := fd.L1.Default.val; v.IsValid() {
+ fd.L1.Default = unmarshalDefault(v.Bytes(), fd.L1.Kind, file, fd.L1.Enum)
+ }
+ }
+ }
+}
+
+func (file *File) resolveExtensions() {
+ var depIdx int32
+ for i := range file.allExtensions {
+ xd := &file.allExtensions[i]
+
+ // Resolve extension field dependency.
+ switch xd.L1.Kind {
+ case pref.EnumKind:
+ xd.L2.Enum = file.resolveEnumDependency(xd.L2.Enum, listExtDeps, depIdx)
+ depIdx++
+ case pref.MessageKind, pref.GroupKind:
+ xd.L2.Message = file.resolveMessageDependency(xd.L2.Message, listExtDeps, depIdx)
+ depIdx++
+ }
+
+ // Default is resolved here since it depends on Enum being resolved.
+ if v := xd.L2.Default.val; v.IsValid() {
+ xd.L2.Default = unmarshalDefault(v.Bytes(), xd.L1.Kind, file, xd.L2.Enum)
+ }
+ }
+}
+
+func (file *File) resolveServices() {
+ var depIdx int32
+ for i := range file.allServices {
+ sd := &file.allServices[i]
+
+ // Resolve method dependencies.
+ for j := range sd.L2.Methods.List {
+ md := &sd.L2.Methods.List[j]
+ md.L1.Input = file.resolveMessageDependency(md.L1.Input, listMethInDeps, depIdx)
+ md.L1.Output = file.resolveMessageDependency(md.L1.Output, listMethOutDeps, depIdx)
+ depIdx++
+ }
+ }
+}
+
+func (file *File) resolveEnumDependency(ed pref.EnumDescriptor, i, j int32) pref.EnumDescriptor {
+ r := file.builder.FileRegistry
+ if r, ok := r.(resolverByIndex); ok {
+ if ed2 := r.FindEnumByIndex(i, j, file.allEnums, file.allMessages); ed2 != nil {
+ return ed2
+ }
+ }
+ for i := range file.allEnums {
+ if ed2 := &file.allEnums[i]; ed2.L0.FullName == ed.FullName() {
+ return ed2
+ }
+ }
+ if d, _ := r.FindDescriptorByName(ed.FullName()); d != nil {
+ return d.(pref.EnumDescriptor)
+ }
+ return ed
+}
+
+func (file *File) resolveMessageDependency(md pref.MessageDescriptor, i, j int32) pref.MessageDescriptor {
+ r := file.builder.FileRegistry
+ if r, ok := r.(resolverByIndex); ok {
+ if md2 := r.FindMessageByIndex(i, j, file.allEnums, file.allMessages); md2 != nil {
+ return md2
+ }
+ }
+ for i := range file.allMessages {
+ if md2 := &file.allMessages[i]; md2.L0.FullName == md.FullName() {
+ return md2
+ }
+ }
+ if d, _ := r.FindDescriptorByName(md.FullName()); d != nil {
+ return d.(pref.MessageDescriptor)
+ }
+ return md
+}
+
+func (fd *File) unmarshalFull(b []byte) {
+ sb := getBuilder()
+ defer putBuilder(sb)
+
+ var enumIdx, messageIdx, extensionIdx, serviceIdx int
+ var rawOptions []byte
+ fd.L2 = new(FileL2)
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case genid.FileDescriptorProto_PublicDependency_field_number:
+ fd.L2.Imports[v].IsPublic = true
+ case genid.FileDescriptorProto_WeakDependency_field_number:
+ fd.L2.Imports[v].IsWeak = true
+ }
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case genid.FileDescriptorProto_Dependency_field_number:
+ path := sb.MakeString(v)
+ imp, _ := fd.builder.FileRegistry.FindFileByPath(path)
+ if imp == nil {
+ imp = PlaceholderFile(path)
+ }
+ fd.L2.Imports = append(fd.L2.Imports, pref.FileImport{FileDescriptor: imp})
+ case genid.FileDescriptorProto_EnumType_field_number:
+ fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb)
+ enumIdx++
+ case genid.FileDescriptorProto_MessageType_field_number:
+ fd.L1.Messages.List[messageIdx].unmarshalFull(v, sb)
+ messageIdx++
+ case genid.FileDescriptorProto_Extension_field_number:
+ fd.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb)
+ extensionIdx++
+ case genid.FileDescriptorProto_Service_field_number:
+ fd.L1.Services.List[serviceIdx].unmarshalFull(v, sb)
+ serviceIdx++
+ case genid.FileDescriptorProto_Options_field_number:
+ rawOptions = appendOptions(rawOptions, v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ fd.L2.Options = fd.builder.optionsUnmarshaler(&descopts.File, rawOptions)
+}
+
+func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) {
+ var rawValues [][]byte
+ var rawOptions []byte
+ if !ed.L1.eagerValues {
+ ed.L2 = new(EnumL2)
+ }
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case genid.EnumDescriptorProto_Value_field_number:
+ rawValues = append(rawValues, v)
+ case genid.EnumDescriptorProto_ReservedName_field_number:
+ ed.L2.ReservedNames.List = append(ed.L2.ReservedNames.List, pref.Name(sb.MakeString(v)))
+ case genid.EnumDescriptorProto_ReservedRange_field_number:
+ ed.L2.ReservedRanges.List = append(ed.L2.ReservedRanges.List, unmarshalEnumReservedRange(v))
+ case genid.EnumDescriptorProto_Options_field_number:
+ rawOptions = appendOptions(rawOptions, v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ if !ed.L1.eagerValues && len(rawValues) > 0 {
+ ed.L2.Values.List = make([]EnumValue, len(rawValues))
+ for i, b := range rawValues {
+ ed.L2.Values.List[i].unmarshalFull(b, sb, ed.L0.ParentFile, ed, i)
+ }
+ }
+ ed.L2.Options = ed.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Enum, rawOptions)
+}
+
+func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) {
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case genid.EnumDescriptorProto_EnumReservedRange_Start_field_number:
+ r[0] = pref.EnumNumber(v)
+ case genid.EnumDescriptorProto_EnumReservedRange_End_field_number:
+ r[1] = pref.EnumNumber(v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ return r
+}
+
+func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
+ vd.L0.ParentFile = pf
+ vd.L0.Parent = pd
+ vd.L0.Index = i
+
+ var rawOptions []byte
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case genid.EnumValueDescriptorProto_Number_field_number:
+ vd.L1.Number = pref.EnumNumber(v)
+ }
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case genid.EnumValueDescriptorProto_Name_field_number:
+ // NOTE: Enum values are in the same scope as the enum parent.
+ vd.L0.FullName = appendFullName(sb, pd.Parent().FullName(), v)
+ case genid.EnumValueDescriptorProto_Options_field_number:
+ rawOptions = appendOptions(rawOptions, v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ vd.L1.Options = pf.builder.optionsUnmarshaler(&descopts.EnumValue, rawOptions)
+}
+
+func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) {
+ var rawFields, rawOneofs [][]byte
+ var enumIdx, messageIdx, extensionIdx int
+ var rawOptions []byte
+ md.L2 = new(MessageL2)
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case genid.DescriptorProto_Field_field_number:
+ rawFields = append(rawFields, v)
+ case genid.DescriptorProto_OneofDecl_field_number:
+ rawOneofs = append(rawOneofs, v)
+ case genid.DescriptorProto_ReservedName_field_number:
+ md.L2.ReservedNames.List = append(md.L2.ReservedNames.List, pref.Name(sb.MakeString(v)))
+ case genid.DescriptorProto_ReservedRange_field_number:
+ md.L2.ReservedRanges.List = append(md.L2.ReservedRanges.List, unmarshalMessageReservedRange(v))
+ case genid.DescriptorProto_ExtensionRange_field_number:
+ r, rawOptions := unmarshalMessageExtensionRange(v)
+ opts := md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.ExtensionRange, rawOptions)
+ md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, r)
+ md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, opts)
+ case genid.DescriptorProto_EnumType_field_number:
+ md.L1.Enums.List[enumIdx].unmarshalFull(v, sb)
+ enumIdx++
+ case genid.DescriptorProto_NestedType_field_number:
+ md.L1.Messages.List[messageIdx].unmarshalFull(v, sb)
+ messageIdx++
+ case genid.DescriptorProto_Extension_field_number:
+ md.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb)
+ extensionIdx++
+ case genid.DescriptorProto_Options_field_number:
+ md.unmarshalOptions(v)
+ rawOptions = appendOptions(rawOptions, v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ if len(rawFields) > 0 || len(rawOneofs) > 0 {
+ md.L2.Fields.List = make([]Field, len(rawFields))
+ md.L2.Oneofs.List = make([]Oneof, len(rawOneofs))
+ for i, b := range rawFields {
+ fd := &md.L2.Fields.List[i]
+ fd.unmarshalFull(b, sb, md.L0.ParentFile, md, i)
+ if fd.L1.Cardinality == pref.Required {
+ md.L2.RequiredNumbers.List = append(md.L2.RequiredNumbers.List, fd.L1.Number)
+ }
+ }
+ for i, b := range rawOneofs {
+ od := &md.L2.Oneofs.List[i]
+ od.unmarshalFull(b, sb, md.L0.ParentFile, md, i)
+ }
+ }
+ md.L2.Options = md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Message, rawOptions)
+}
+
+func (md *Message) unmarshalOptions(b []byte) {
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case genid.MessageOptions_MapEntry_field_number:
+ md.L1.IsMapEntry = protowire.DecodeBool(v)
+ case genid.MessageOptions_MessageSetWireFormat_field_number:
+ md.L1.IsMessageSet = protowire.DecodeBool(v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+}
+
+func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) {
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case genid.DescriptorProto_ReservedRange_Start_field_number:
+ r[0] = pref.FieldNumber(v)
+ case genid.DescriptorProto_ReservedRange_End_field_number:
+ r[1] = pref.FieldNumber(v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ return r
+}
+
+func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions []byte) {
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case genid.DescriptorProto_ExtensionRange_Start_field_number:
+ r[0] = pref.FieldNumber(v)
+ case genid.DescriptorProto_ExtensionRange_End_field_number:
+ r[1] = pref.FieldNumber(v)
+ }
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case genid.DescriptorProto_ExtensionRange_Options_field_number:
+ rawOptions = appendOptions(rawOptions, v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ return r, rawOptions
+}
+
+func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
+ fd.L0.ParentFile = pf
+ fd.L0.Parent = pd
+ fd.L0.Index = i
+
+ var rawTypeName []byte
+ var rawOptions []byte
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case genid.FieldDescriptorProto_Number_field_number:
+ fd.L1.Number = pref.FieldNumber(v)
+ case genid.FieldDescriptorProto_Label_field_number:
+ fd.L1.Cardinality = pref.Cardinality(v)
+ case genid.FieldDescriptorProto_Type_field_number:
+ fd.L1.Kind = pref.Kind(v)
+ case genid.FieldDescriptorProto_OneofIndex_field_number:
+ // In Message.unmarshalFull, we allocate slices for both
+ // the field and oneof descriptors before unmarshaling either
+ // of them. This ensures pointers to slice elements are stable.
+ od := &pd.(*Message).L2.Oneofs.List[v]
+ od.L1.Fields.List = append(od.L1.Fields.List, fd)
+ if fd.L1.ContainingOneof != nil {
+ panic("oneof type already set")
+ }
+ fd.L1.ContainingOneof = od
+ case genid.FieldDescriptorProto_Proto3Optional_field_number:
+ fd.L1.IsProto3Optional = protowire.DecodeBool(v)
+ }
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case genid.FieldDescriptorProto_Name_field_number:
+ fd.L0.FullName = appendFullName(sb, pd.FullName(), v)
+ case genid.FieldDescriptorProto_JsonName_field_number:
+ fd.L1.StringName.InitJSON(sb.MakeString(v))
+ case genid.FieldDescriptorProto_DefaultValue_field_number:
+ fd.L1.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages
+ case genid.FieldDescriptorProto_TypeName_field_number:
+ rawTypeName = v
+ case genid.FieldDescriptorProto_Options_field_number:
+ fd.unmarshalOptions(v)
+ rawOptions = appendOptions(rawOptions, v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ if rawTypeName != nil {
+ name := makeFullName(sb, rawTypeName)
+ switch fd.L1.Kind {
+ case pref.EnumKind:
+ fd.L1.Enum = PlaceholderEnum(name)
+ case pref.MessageKind, pref.GroupKind:
+ fd.L1.Message = PlaceholderMessage(name)
+ }
+ }
+ fd.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Field, rawOptions)
+}
+
+func (fd *Field) unmarshalOptions(b []byte) {
+ const FieldOptions_EnforceUTF8 = 13
+
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case genid.FieldOptions_Packed_field_number:
+ fd.L1.HasPacked = true
+ fd.L1.IsPacked = protowire.DecodeBool(v)
+ case genid.FieldOptions_Weak_field_number:
+ fd.L1.IsWeak = protowire.DecodeBool(v)
+ case FieldOptions_EnforceUTF8:
+ fd.L1.HasEnforceUTF8 = true
+ fd.L1.EnforceUTF8 = protowire.DecodeBool(v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+}
+
+func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
+ od.L0.ParentFile = pf
+ od.L0.Parent = pd
+ od.L0.Index = i
+
+ var rawOptions []byte
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case genid.OneofDescriptorProto_Name_field_number:
+ od.L0.FullName = appendFullName(sb, pd.FullName(), v)
+ case genid.OneofDescriptorProto_Options_field_number:
+ rawOptions = appendOptions(rawOptions, v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ od.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Oneof, rawOptions)
+}
+
+func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) {
+ var rawTypeName []byte
+ var rawOptions []byte
+ xd.L2 = new(ExtensionL2)
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case genid.FieldDescriptorProto_Proto3Optional_field_number:
+ xd.L2.IsProto3Optional = protowire.DecodeBool(v)
+ }
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case genid.FieldDescriptorProto_JsonName_field_number:
+ xd.L2.StringName.InitJSON(sb.MakeString(v))
+ case genid.FieldDescriptorProto_DefaultValue_field_number:
+ xd.L2.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveExtensions
+ case genid.FieldDescriptorProto_TypeName_field_number:
+ rawTypeName = v
+ case genid.FieldDescriptorProto_Options_field_number:
+ xd.unmarshalOptions(v)
+ rawOptions = appendOptions(rawOptions, v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ if rawTypeName != nil {
+ name := makeFullName(sb, rawTypeName)
+ switch xd.L1.Kind {
+ case pref.EnumKind:
+ xd.L2.Enum = PlaceholderEnum(name)
+ case pref.MessageKind, pref.GroupKind:
+ xd.L2.Message = PlaceholderMessage(name)
+ }
+ }
+ xd.L2.Options = xd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Field, rawOptions)
+}
+
+func (xd *Extension) unmarshalOptions(b []byte) {
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case genid.FieldOptions_Packed_field_number:
+ xd.L2.IsPacked = protowire.DecodeBool(v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+}
+
+func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) {
+ var rawMethods [][]byte
+ var rawOptions []byte
+ sd.L2 = new(ServiceL2)
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case genid.ServiceDescriptorProto_Method_field_number:
+ rawMethods = append(rawMethods, v)
+ case genid.ServiceDescriptorProto_Options_field_number:
+ rawOptions = appendOptions(rawOptions, v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ if len(rawMethods) > 0 {
+ sd.L2.Methods.List = make([]Method, len(rawMethods))
+ for i, b := range rawMethods {
+ sd.L2.Methods.List[i].unmarshalFull(b, sb, sd.L0.ParentFile, sd, i)
+ }
+ }
+ sd.L2.Options = sd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Service, rawOptions)
+}
+
+func (md *Method) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
+ md.L0.ParentFile = pf
+ md.L0.Parent = pd
+ md.L0.Index = i
+
+ var rawOptions []byte
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case genid.MethodDescriptorProto_ClientStreaming_field_number:
+ md.L1.IsStreamingClient = protowire.DecodeBool(v)
+ case genid.MethodDescriptorProto_ServerStreaming_field_number:
+ md.L1.IsStreamingServer = protowire.DecodeBool(v)
+ }
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case genid.MethodDescriptorProto_Name_field_number:
+ md.L0.FullName = appendFullName(sb, pd.FullName(), v)
+ case genid.MethodDescriptorProto_InputType_field_number:
+ md.L1.Input = PlaceholderMessage(makeFullName(sb, v))
+ case genid.MethodDescriptorProto_OutputType_field_number:
+ md.L1.Output = PlaceholderMessage(makeFullName(sb, v))
+ case genid.MethodDescriptorProto_Options_field_number:
+ rawOptions = appendOptions(rawOptions, v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ md.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Method, rawOptions)
+}
+
+// appendOptions appends src to dst, where the returned slice is never nil.
+// This is necessary to distinguish between empty and unpopulated options.
+func appendOptions(dst, src []byte) []byte {
+ if dst == nil {
+ dst = []byte{}
+ }
+ return append(dst, src...)
+}
+
+// optionsUnmarshaler constructs a lazy unmarshal function for an options message.
+//
+// The type of message to unmarshal to is passed as a pointer since the
+// vars in descopts may not yet be populated at the time this function is called.
+func (db *Builder) optionsUnmarshaler(p *pref.ProtoMessage, b []byte) func() pref.ProtoMessage {
+ if b == nil {
+ return nil
+ }
+ var opts pref.ProtoMessage
+ var once sync.Once
+ return func() pref.ProtoMessage {
+ once.Do(func() {
+ if *p == nil {
+ panic("Descriptor.Options called without importing the descriptor package")
+ }
+ opts = reflect.New(reflect.TypeOf(*p).Elem()).Interface().(pref.ProtoMessage)
+ if err := (proto.UnmarshalOptions{
+ AllowPartial: true,
+ Resolver: db.TypeResolver,
+ }).Unmarshal(b, opts); err != nil {
+ panic(err)
+ }
+ })
+ return opts
+ }
+}
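+
+// optionsExample is an illustrative sketch added by the editor; it is not
+// part of the vendored upstream file. It shows the intended behavior of a
+// function returned by optionsUnmarshaler: the raw option bytes are decoded
+// exactly once (guarded by sync.Once) on the first call, and the same
+// message is returned on every later call. A nil function means no options
+// were present on the wire; the panic above fires only when the descopts
+// vars were never populated, i.e. the descriptor package was never imported.
+func optionsExample(fn func() pref.ProtoMessage) pref.ProtoMessage {
+ if fn == nil {
+ return nil // no options were present
+ }
+ first := fn() // triggers the one-time proto.Unmarshal
+ second := fn() // cheap; returns the cached message
+ _ = second
+ return first
+}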
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go
new file mode 100644
index 0000000..aa294ff
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go
@@ -0,0 +1,450 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package filedesc
+
+import (
+ "fmt"
+ "math"
+ "sort"
+ "sync"
+
+ "google.golang.org/protobuf/internal/genid"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/descfmt"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/pragma"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type FileImports []pref.FileImport
+
+func (p *FileImports) Len() int { return len(*p) }
+func (p *FileImports) Get(i int) pref.FileImport { return (*p)[i] }
+func (p *FileImports) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
+func (p *FileImports) ProtoInternal(pragma.DoNotImplement) {}
+
+type Names struct {
+ List []pref.Name
+ once sync.Once
+ has map[pref.Name]int // protected by once
+}
+
+func (p *Names) Len() int { return len(p.List) }
+func (p *Names) Get(i int) pref.Name { return p.List[i] }
+func (p *Names) Has(s pref.Name) bool { return p.lazyInit().has[s] > 0 }
+func (p *Names) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
+func (p *Names) ProtoInternal(pragma.DoNotImplement) {}
+func (p *Names) lazyInit() *Names {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ p.has = make(map[pref.Name]int, len(p.List))
+ for _, s := range p.List {
+ p.has[s] = p.has[s] + 1
+ }
+ }
+ })
+ return p
+}
+
+// CheckValid reports any errors with the set of names with an error message
+// that completes the sentence: "names is invalid because it has ..."
+func (p *Names) CheckValid() error {
+ for s, n := range p.lazyInit().has {
+ switch {
+ case n > 1:
+ return errors.New("duplicate name: %q", s)
+ case false && !s.IsValid():
+ // NOTE: The C++ implementation does not validate the identifier.
+ // See https://github.com/protocolbuffers/protobuf/issues/6335.
+ return errors.New("invalid name: %q", s)
+ }
+ }
+ return nil
+}
+
+type EnumRanges struct {
+ List [][2]pref.EnumNumber // start inclusive; end inclusive
+ once sync.Once
+ sorted [][2]pref.EnumNumber // protected by once
+}
+
+func (p *EnumRanges) Len() int { return len(p.List) }
+func (p *EnumRanges) Get(i int) [2]pref.EnumNumber { return p.List[i] }
+func (p *EnumRanges) Has(n pref.EnumNumber) bool {
+ for ls := p.lazyInit().sorted; len(ls) > 0; {
+ i := len(ls) / 2
+ switch r := enumRange(ls[i]); {
+ case n < r.Start():
+ ls = ls[:i] // search lower
+ case n > r.End():
+ ls = ls[i+1:] // search upper
+ default:
+ return true
+ }
+ }
+ return false
+}
+func (p *EnumRanges) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
+func (p *EnumRanges) ProtoInternal(pragma.DoNotImplement) {}
+func (p *EnumRanges) lazyInit() *EnumRanges {
+ p.once.Do(func() {
+ p.sorted = append(p.sorted, p.List...)
+ sort.Slice(p.sorted, func(i, j int) bool {
+ return p.sorted[i][0] < p.sorted[j][0]
+ })
+ })
+ return p
+}
+
+// CheckValid reports any errors with the set of ranges with an error message
+// that completes the sentence: "ranges is invalid because it has ..."
+func (p *EnumRanges) CheckValid() error {
+ var rp enumRange
+ for i, r := range p.lazyInit().sorted {
+ r := enumRange(r)
+ switch {
+ case !(r.Start() <= r.End()):
+ return errors.New("invalid range: %v", r)
+ case !(rp.End() < r.Start()) && i > 0:
+ return errors.New("overlapping ranges: %v with %v", rp, r)
+ }
+ rp = r
+ }
+ return nil
+}
+
+type enumRange [2]protoreflect.EnumNumber
+
+func (r enumRange) Start() protoreflect.EnumNumber { return r[0] } // inclusive
+func (r enumRange) End() protoreflect.EnumNumber { return r[1] } // inclusive
+func (r enumRange) String() string {
+ if r.Start() == r.End() {
+ return fmt.Sprintf("%d", r.Start())
+ }
+ return fmt.Sprintf("%d to %d", r.Start(), r.End())
+}
+
+type FieldRanges struct {
+ List [][2]pref.FieldNumber // start inclusive; end exclusive
+ once sync.Once
+ sorted [][2]pref.FieldNumber // protected by once
+}
+
+func (p *FieldRanges) Len() int { return len(p.List) }
+func (p *FieldRanges) Get(i int) [2]pref.FieldNumber { return p.List[i] }
+func (p *FieldRanges) Has(n pref.FieldNumber) bool {
+ for ls := p.lazyInit().sorted; len(ls) > 0; {
+ i := len(ls) / 2
+ switch r := fieldRange(ls[i]); {
+ case n < r.Start():
+ ls = ls[:i] // search lower
+ case n > r.End():
+ ls = ls[i+1:] // search upper
+ default:
+ return true
+ }
+ }
+ return false
+}
+func (p *FieldRanges) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
+func (p *FieldRanges) ProtoInternal(pragma.DoNotImplement) {}
+func (p *FieldRanges) lazyInit() *FieldRanges {
+ p.once.Do(func() {
+ p.sorted = append(p.sorted, p.List...)
+ sort.Slice(p.sorted, func(i, j int) bool {
+ return p.sorted[i][0] < p.sorted[j][0]
+ })
+ })
+ return p
+}
+
+// CheckValid reports any errors with the set of ranges with an error message
+// that completes the sentence: "ranges is invalid because it has ..."
+func (p *FieldRanges) CheckValid(isMessageSet bool) error {
+ var rp fieldRange
+ for i, r := range p.lazyInit().sorted {
+ r := fieldRange(r)
+ switch {
+ case !isValidFieldNumber(r.Start(), isMessageSet):
+ return errors.New("invalid field number: %d", r.Start())
+ case !isValidFieldNumber(r.End(), isMessageSet):
+ return errors.New("invalid field number: %d", r.End())
+ case !(r.Start() <= r.End()):
+ return errors.New("invalid range: %v", r)
+ case !(rp.End() < r.Start()) && i > 0:
+ return errors.New("overlapping ranges: %v with %v", rp, r)
+ }
+ rp = r
+ }
+ return nil
+}
+
+// isValidFieldNumber reports whether the field number is valid.
+// Unlike the FieldNumber.IsValid method, it allows ranges that cover the
+// reserved number range.
+func isValidFieldNumber(n protoreflect.FieldNumber, isMessageSet bool) bool {
+ return protowire.MinValidNumber <= n && (n <= protowire.MaxValidNumber || isMessageSet)
+}
+
+// CheckOverlap reports an error if p and q overlap.
+func (p *FieldRanges) CheckOverlap(q *FieldRanges) error {
+ rps := p.lazyInit().sorted
+ rqs := q.lazyInit().sorted
+ for pi, qi := 0, 0; pi < len(rps) && qi < len(rqs); {
+ rp := fieldRange(rps[pi])
+ rq := fieldRange(rqs[qi])
+ if !(rp.End() < rq.Start() || rq.End() < rp.Start()) {
+ return errors.New("overlapping ranges: %v with %v", rp, rq)
+ }
+ if rp.Start() < rq.Start() {
+ pi++
+ } else {
+ qi++
+ }
+ }
+ return nil
+}
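+
+// checkOverlapExample is an illustrative sketch added by the editor; it is
+// not part of the vendored upstream file. FieldRanges stores [start, end)
+// pairs, so an extension range covering fields 100-199 is encoded as
+// {100, 200}; it overlaps a reserved range holding only field 150 (encoded
+// as {150, 151}), and CheckOverlap reports that as an error.
+func checkOverlapExample() error {
+ ext := &FieldRanges{List: [][2]pref.FieldNumber{{100, 200}}}
+ res := &FieldRanges{List: [][2]pref.FieldNumber{{150, 151}}}
+ return ext.CheckOverlap(res) // non-nil: overlapping ranges 100 to 199 with 150
+}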
+
+type fieldRange [2]protoreflect.FieldNumber
+
+func (r fieldRange) Start() protoreflect.FieldNumber { return r[0] } // inclusive
+func (r fieldRange) End() protoreflect.FieldNumber { return r[1] - 1 } // inclusive
+func (r fieldRange) String() string {
+ if r.Start() == r.End() {
+ return fmt.Sprintf("%d", r.Start())
+ }
+ return fmt.Sprintf("%d to %d", r.Start(), r.End())
+}
+
+type FieldNumbers struct {
+ List []pref.FieldNumber
+ once sync.Once
+ has map[pref.FieldNumber]struct{} // protected by once
+}
+
+func (p *FieldNumbers) Len() int { return len(p.List) }
+func (p *FieldNumbers) Get(i int) pref.FieldNumber { return p.List[i] }
+func (p *FieldNumbers) Has(n pref.FieldNumber) bool {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ p.has = make(map[pref.FieldNumber]struct{}, len(p.List))
+ for _, n := range p.List {
+ p.has[n] = struct{}{}
+ }
+ }
+ })
+ _, ok := p.has[n]
+ return ok
+}
+func (p *FieldNumbers) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
+func (p *FieldNumbers) ProtoInternal(pragma.DoNotImplement) {}
+
+type OneofFields struct {
+ List []pref.FieldDescriptor
+ once sync.Once
+ byName map[pref.Name]pref.FieldDescriptor // protected by once
+ byJSON map[string]pref.FieldDescriptor // protected by once
+ byText map[string]pref.FieldDescriptor // protected by once
+ byNum map[pref.FieldNumber]pref.FieldDescriptor // protected by once
+}
+
+func (p *OneofFields) Len() int { return len(p.List) }
+func (p *OneofFields) Get(i int) pref.FieldDescriptor { return p.List[i] }
+func (p *OneofFields) ByName(s pref.Name) pref.FieldDescriptor { return p.lazyInit().byName[s] }
+func (p *OneofFields) ByJSONName(s string) pref.FieldDescriptor { return p.lazyInit().byJSON[s] }
+func (p *OneofFields) ByTextName(s string) pref.FieldDescriptor { return p.lazyInit().byText[s] }
+func (p *OneofFields) ByNumber(n pref.FieldNumber) pref.FieldDescriptor { return p.lazyInit().byNum[n] }
+func (p *OneofFields) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
+func (p *OneofFields) ProtoInternal(pragma.DoNotImplement) {}
+
+func (p *OneofFields) lazyInit() *OneofFields {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ p.byName = make(map[pref.Name]pref.FieldDescriptor, len(p.List))
+ p.byJSON = make(map[string]pref.FieldDescriptor, len(p.List))
+ p.byText = make(map[string]pref.FieldDescriptor, len(p.List))
+ p.byNum = make(map[pref.FieldNumber]pref.FieldDescriptor, len(p.List))
+ for _, f := range p.List {
+ // Field names and numbers are guaranteed to be unique.
+ p.byName[f.Name()] = f
+ p.byJSON[f.JSONName()] = f
+ p.byText[f.TextName()] = f
+ p.byNum[f.Number()] = f
+ }
+ }
+ })
+ return p
+}
+
+type SourceLocations struct {
+ // List is a list of SourceLocations.
+ // The SourceLocation.Next field does not need to be populated
+ // as it will be lazily populated upon first need.
+ List []pref.SourceLocation
+
+ // File is the parent file descriptor that these locations are relative to.
+ // If non-nil, ByDescriptor verifies that the provided descriptor
+ // is a child of this file descriptor.
+ File pref.FileDescriptor
+
+ once sync.Once
+ byPath map[pathKey]int
+}
+
+func (p *SourceLocations) Len() int { return len(p.List) }
+func (p *SourceLocations) Get(i int) pref.SourceLocation { return p.lazyInit().List[i] }
+func (p *SourceLocations) byKey(k pathKey) pref.SourceLocation {
+ if i, ok := p.lazyInit().byPath[k]; ok {
+ return p.List[i]
+ }
+ return pref.SourceLocation{}
+}
+func (p *SourceLocations) ByPath(path pref.SourcePath) pref.SourceLocation {
+ return p.byKey(newPathKey(path))
+}
+func (p *SourceLocations) ByDescriptor(desc pref.Descriptor) pref.SourceLocation {
+ if p.File != nil && desc != nil && p.File != desc.ParentFile() {
+ return pref.SourceLocation{} // mismatching parent files
+ }
+ var pathArr [16]int32
+ path := pathArr[:0]
+ for {
+ switch desc.(type) {
+ case pref.FileDescriptor:
+ // Reverse the path since it was constructed in reverse.
+ for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 {
+ path[i], path[j] = path[j], path[i]
+ }
+ return p.byKey(newPathKey(path))
+ case pref.MessageDescriptor:
+ path = append(path, int32(desc.Index()))
+ desc = desc.Parent()
+ switch desc.(type) {
+ case pref.FileDescriptor:
+ path = append(path, int32(genid.FileDescriptorProto_MessageType_field_number))
+ case pref.MessageDescriptor:
+ path = append(path, int32(genid.DescriptorProto_NestedType_field_number))
+ default:
+ return pref.SourceLocation{}
+ }
+ case pref.FieldDescriptor:
+ isExtension := desc.(pref.FieldDescriptor).IsExtension()
+ path = append(path, int32(desc.Index()))
+ desc = desc.Parent()
+ if isExtension {
+ switch desc.(type) {
+ case pref.FileDescriptor:
+ path = append(path, int32(genid.FileDescriptorProto_Extension_field_number))
+ case pref.MessageDescriptor:
+ path = append(path, int32(genid.DescriptorProto_Extension_field_number))
+ default:
+ return pref.SourceLocation{}
+ }
+ } else {
+ switch desc.(type) {
+ case pref.MessageDescriptor:
+ path = append(path, int32(genid.DescriptorProto_Field_field_number))
+ default:
+ return pref.SourceLocation{}
+ }
+ }
+ case pref.OneofDescriptor:
+ path = append(path, int32(desc.Index()))
+ desc = desc.Parent()
+ switch desc.(type) {
+ case pref.MessageDescriptor:
+ path = append(path, int32(genid.DescriptorProto_OneofDecl_field_number))
+ default:
+ return pref.SourceLocation{}
+ }
+ case pref.EnumDescriptor:
+ path = append(path, int32(desc.Index()))
+ desc = desc.Parent()
+ switch desc.(type) {
+ case pref.FileDescriptor:
+ path = append(path, int32(genid.FileDescriptorProto_EnumType_field_number))
+ case pref.MessageDescriptor:
+ path = append(path, int32(genid.DescriptorProto_EnumType_field_number))
+ default:
+ return pref.SourceLocation{}
+ }
+ case pref.EnumValueDescriptor:
+ path = append(path, int32(desc.Index()))
+ desc = desc.Parent()
+ switch desc.(type) {
+ case pref.EnumDescriptor:
+ path = append(path, int32(genid.EnumDescriptorProto_Value_field_number))
+ default:
+ return pref.SourceLocation{}
+ }
+ case pref.ServiceDescriptor:
+ path = append(path, int32(desc.Index()))
+ desc = desc.Parent()
+ switch desc.(type) {
+ case pref.FileDescriptor:
+ path = append(path, int32(genid.FileDescriptorProto_Service_field_number))
+ default:
+ return pref.SourceLocation{}
+ }
+ case pref.MethodDescriptor:
+ path = append(path, int32(desc.Index()))
+ desc = desc.Parent()
+ switch desc.(type) {
+ case pref.ServiceDescriptor:
+ path = append(path, int32(genid.ServiceDescriptorProto_Method_field_number))
+ default:
+ return pref.SourceLocation{}
+ }
+ default:
+ return pref.SourceLocation{}
+ }
+ }
+}
+func (p *SourceLocations) lazyInit() *SourceLocations {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ // Collect all the indexes for a given path.
+ pathIdxs := make(map[pathKey][]int, len(p.List))
+ for i, l := range p.List {
+ k := newPathKey(l.Path)
+ pathIdxs[k] = append(pathIdxs[k], i)
+ }
+
+ // Update the next index for all locations.
+ p.byPath = make(map[pathKey]int, len(p.List))
+ for k, idxs := range pathIdxs {
+ for i := 0; i < len(idxs)-1; i++ {
+ p.List[idxs[i]].Next = idxs[i+1]
+ }
+ p.List[idxs[len(idxs)-1]].Next = 0
+ p.byPath[k] = idxs[0] // record the first location for this path
+ }
+ }
+ })
+ return p
+}
+func (p *SourceLocations) ProtoInternal(pragma.DoNotImplement) {}
+
+// pathKey is a comparable representation of protoreflect.SourcePath.
+type pathKey struct {
+ arr [16]uint8 // first n-1 path segments; last element is the length
+ str string // used if the path does not fit in arr
+}
+
+func newPathKey(p pref.SourcePath) (k pathKey) {
+ if len(p) < len(k.arr) {
+ for i, ps := range p {
+ if ps < 0 || math.MaxUint8 <= ps {
+ return pathKey{str: p.String()}
+ }
+ k.arr[i] = uint8(ps)
+ }
+ k.arr[len(k.arr)-1] = uint8(len(p))
+ return k
+ }
+ return pathKey{str: p.String()}
+}
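+
+// pathKeyExample is an illustrative sketch added by the editor; it is not
+// part of the vendored upstream file. A short source path such as
+// [4, 0, 2, 1] (i.e. message_type[0].field[1] of a file) fits entirely in
+// the fixed 16-byte array: the leading bytes hold the segments and the last
+// byte records the length, so the key is comparable and can be used as a
+// map key without allocating a string.
+func pathKeyExample() pathKey {
+ path := pref.SourcePath{4, 0, 2, 1} // .message_type[0].field[1]
+ return newPathKey(path)
+}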
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go
new file mode 100644
index 0000000..30db19f
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go
@@ -0,0 +1,356 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-types. DO NOT EDIT.
+
+package filedesc
+
+import (
+ "fmt"
+ "sync"
+
+ "google.golang.org/protobuf/internal/descfmt"
+ "google.golang.org/protobuf/internal/pragma"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type Enums struct {
+ List []Enum
+ once sync.Once
+ byName map[protoreflect.Name]*Enum // protected by once
+}
+
+func (p *Enums) Len() int {
+ return len(p.List)
+}
+func (p *Enums) Get(i int) protoreflect.EnumDescriptor {
+ return &p.List[i]
+}
+func (p *Enums) ByName(s protoreflect.Name) protoreflect.EnumDescriptor {
+ if d := p.lazyInit().byName[s]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *Enums) Format(s fmt.State, r rune) {
+ descfmt.FormatList(s, r, p)
+}
+func (p *Enums) ProtoInternal(pragma.DoNotImplement) {}
+func (p *Enums) lazyInit() *Enums {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ p.byName = make(map[protoreflect.Name]*Enum, len(p.List))
+ for i := range p.List {
+ d := &p.List[i]
+ if _, ok := p.byName[d.Name()]; !ok {
+ p.byName[d.Name()] = d
+ }
+ }
+ }
+ })
+ return p
+}
+
+type EnumValues struct {
+ List []EnumValue
+ once sync.Once
+ byName map[protoreflect.Name]*EnumValue // protected by once
+ byNum map[protoreflect.EnumNumber]*EnumValue // protected by once
+}
+
+func (p *EnumValues) Len() int {
+ return len(p.List)
+}
+func (p *EnumValues) Get(i int) protoreflect.EnumValueDescriptor {
+ return &p.List[i]
+}
+func (p *EnumValues) ByName(s protoreflect.Name) protoreflect.EnumValueDescriptor {
+ if d := p.lazyInit().byName[s]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *EnumValues) ByNumber(n protoreflect.EnumNumber) protoreflect.EnumValueDescriptor {
+ if d := p.lazyInit().byNum[n]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *EnumValues) Format(s fmt.State, r rune) {
+ descfmt.FormatList(s, r, p)
+}
+func (p *EnumValues) ProtoInternal(pragma.DoNotImplement) {}
+func (p *EnumValues) lazyInit() *EnumValues {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ p.byName = make(map[protoreflect.Name]*EnumValue, len(p.List))
+ p.byNum = make(map[protoreflect.EnumNumber]*EnumValue, len(p.List))
+ for i := range p.List {
+ d := &p.List[i]
+ if _, ok := p.byName[d.Name()]; !ok {
+ p.byName[d.Name()] = d
+ }
+ if _, ok := p.byNum[d.Number()]; !ok {
+ p.byNum[d.Number()] = d
+ }
+ }
+ }
+ })
+ return p
+}
+
+type Messages struct {
+ List []Message
+ once sync.Once
+ byName map[protoreflect.Name]*Message // protected by once
+}
+
+func (p *Messages) Len() int {
+ return len(p.List)
+}
+func (p *Messages) Get(i int) protoreflect.MessageDescriptor {
+ return &p.List[i]
+}
+func (p *Messages) ByName(s protoreflect.Name) protoreflect.MessageDescriptor {
+ if d := p.lazyInit().byName[s]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *Messages) Format(s fmt.State, r rune) {
+ descfmt.FormatList(s, r, p)
+}
+func (p *Messages) ProtoInternal(pragma.DoNotImplement) {}
+func (p *Messages) lazyInit() *Messages {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ p.byName = make(map[protoreflect.Name]*Message, len(p.List))
+ for i := range p.List {
+ d := &p.List[i]
+ if _, ok := p.byName[d.Name()]; !ok {
+ p.byName[d.Name()] = d
+ }
+ }
+ }
+ })
+ return p
+}
+
+type Fields struct {
+ List []Field
+ once sync.Once
+ byName map[protoreflect.Name]*Field // protected by once
+ byJSON map[string]*Field // protected by once
+ byText map[string]*Field // protected by once
+ byNum map[protoreflect.FieldNumber]*Field // protected by once
+}
+
+func (p *Fields) Len() int {
+ return len(p.List)
+}
+func (p *Fields) Get(i int) protoreflect.FieldDescriptor {
+ return &p.List[i]
+}
+func (p *Fields) ByName(s protoreflect.Name) protoreflect.FieldDescriptor {
+ if d := p.lazyInit().byName[s]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *Fields) ByJSONName(s string) protoreflect.FieldDescriptor {
+ if d := p.lazyInit().byJSON[s]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *Fields) ByTextName(s string) protoreflect.FieldDescriptor {
+ if d := p.lazyInit().byText[s]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *Fields) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor {
+ if d := p.lazyInit().byNum[n]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *Fields) Format(s fmt.State, r rune) {
+ descfmt.FormatList(s, r, p)
+}
+func (p *Fields) ProtoInternal(pragma.DoNotImplement) {}
+func (p *Fields) lazyInit() *Fields {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ p.byName = make(map[protoreflect.Name]*Field, len(p.List))
+ p.byJSON = make(map[string]*Field, len(p.List))
+ p.byText = make(map[string]*Field, len(p.List))
+ p.byNum = make(map[protoreflect.FieldNumber]*Field, len(p.List))
+ for i := range p.List {
+ d := &p.List[i]
+ if _, ok := p.byName[d.Name()]; !ok {
+ p.byName[d.Name()] = d
+ }
+ if _, ok := p.byJSON[d.JSONName()]; !ok {
+ p.byJSON[d.JSONName()] = d
+ }
+ if _, ok := p.byText[d.TextName()]; !ok {
+ p.byText[d.TextName()] = d
+ }
+ if _, ok := p.byNum[d.Number()]; !ok {
+ p.byNum[d.Number()] = d
+ }
+ }
+ }
+ })
+ return p
+}
+
+type Oneofs struct {
+ List []Oneof
+ once sync.Once
+ byName map[protoreflect.Name]*Oneof // protected by once
+}
+
+func (p *Oneofs) Len() int {
+ return len(p.List)
+}
+func (p *Oneofs) Get(i int) protoreflect.OneofDescriptor {
+ return &p.List[i]
+}
+func (p *Oneofs) ByName(s protoreflect.Name) protoreflect.OneofDescriptor {
+ if d := p.lazyInit().byName[s]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *Oneofs) Format(s fmt.State, r rune) {
+ descfmt.FormatList(s, r, p)
+}
+func (p *Oneofs) ProtoInternal(pragma.DoNotImplement) {}
+func (p *Oneofs) lazyInit() *Oneofs {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ p.byName = make(map[protoreflect.Name]*Oneof, len(p.List))
+ for i := range p.List {
+ d := &p.List[i]
+ if _, ok := p.byName[d.Name()]; !ok {
+ p.byName[d.Name()] = d
+ }
+ }
+ }
+ })
+ return p
+}
+
+type Extensions struct {
+ List []Extension
+ once sync.Once
+ byName map[protoreflect.Name]*Extension // protected by once
+}
+
+func (p *Extensions) Len() int {
+ return len(p.List)
+}
+func (p *Extensions) Get(i int) protoreflect.ExtensionDescriptor {
+ return &p.List[i]
+}
+func (p *Extensions) ByName(s protoreflect.Name) protoreflect.ExtensionDescriptor {
+ if d := p.lazyInit().byName[s]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *Extensions) Format(s fmt.State, r rune) {
+ descfmt.FormatList(s, r, p)
+}
+func (p *Extensions) ProtoInternal(pragma.DoNotImplement) {}
+func (p *Extensions) lazyInit() *Extensions {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ p.byName = make(map[protoreflect.Name]*Extension, len(p.List))
+ for i := range p.List {
+ d := &p.List[i]
+ if _, ok := p.byName[d.Name()]; !ok {
+ p.byName[d.Name()] = d
+ }
+ }
+ }
+ })
+ return p
+}
+
+type Services struct {
+ List []Service
+ once sync.Once
+ byName map[protoreflect.Name]*Service // protected by once
+}
+
+func (p *Services) Len() int {
+ return len(p.List)
+}
+func (p *Services) Get(i int) protoreflect.ServiceDescriptor {
+ return &p.List[i]
+}
+func (p *Services) ByName(s protoreflect.Name) protoreflect.ServiceDescriptor {
+ if d := p.lazyInit().byName[s]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *Services) Format(s fmt.State, r rune) {
+ descfmt.FormatList(s, r, p)
+}
+func (p *Services) ProtoInternal(pragma.DoNotImplement) {}
+func (p *Services) lazyInit() *Services {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ p.byName = make(map[protoreflect.Name]*Service, len(p.List))
+ for i := range p.List {
+ d := &p.List[i]
+ if _, ok := p.byName[d.Name()]; !ok {
+ p.byName[d.Name()] = d
+ }
+ }
+ }
+ })
+ return p
+}
+
+type Methods struct {
+ List []Method
+ once sync.Once
+ byName map[protoreflect.Name]*Method // protected by once
+}
+
+func (p *Methods) Len() int {
+ return len(p.List)
+}
+func (p *Methods) Get(i int) protoreflect.MethodDescriptor {
+ return &p.List[i]
+}
+func (p *Methods) ByName(s protoreflect.Name) protoreflect.MethodDescriptor {
+ if d := p.lazyInit().byName[s]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *Methods) Format(s fmt.State, r rune) {
+ descfmt.FormatList(s, r, p)
+}
+func (p *Methods) ProtoInternal(pragma.DoNotImplement) {}
+func (p *Methods) lazyInit() *Methods {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ p.byName = make(map[protoreflect.Name]*Method, len(p.List))
+ for i := range p.List {
+ d := &p.List[i]
+ if _, ok := p.byName[d.Name()]; !ok {
+ p.byName[d.Name()] = d
+ }
+ }
+ }
+ })
+ return p
+}
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go
new file mode 100644
index 0000000..dbf2c60
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go
@@ -0,0 +1,107 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package filedesc
+
+import (
+ "google.golang.org/protobuf/internal/descopts"
+ "google.golang.org/protobuf/internal/pragma"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+var (
+ emptyNames = new(Names)
+ emptyEnumRanges = new(EnumRanges)
+ emptyFieldRanges = new(FieldRanges)
+ emptyFieldNumbers = new(FieldNumbers)
+ emptySourceLocations = new(SourceLocations)
+
+ emptyFiles = new(FileImports)
+ emptyMessages = new(Messages)
+ emptyFields = new(Fields)
+ emptyOneofs = new(Oneofs)
+ emptyEnums = new(Enums)
+ emptyEnumValues = new(EnumValues)
+ emptyExtensions = new(Extensions)
+ emptyServices = new(Services)
+)
+
+// PlaceholderFile is a placeholder, representing only the file path.
+type PlaceholderFile string
+
+func (f PlaceholderFile) ParentFile() pref.FileDescriptor { return f }
+func (f PlaceholderFile) Parent() pref.Descriptor { return nil }
+func (f PlaceholderFile) Index() int { return 0 }
+func (f PlaceholderFile) Syntax() pref.Syntax { return 0 }
+func (f PlaceholderFile) Name() pref.Name { return "" }
+func (f PlaceholderFile) FullName() pref.FullName { return "" }
+func (f PlaceholderFile) IsPlaceholder() bool { return true }
+func (f PlaceholderFile) Options() pref.ProtoMessage { return descopts.File }
+func (f PlaceholderFile) Path() string { return string(f) }
+func (f PlaceholderFile) Package() pref.FullName { return "" }
+func (f PlaceholderFile) Imports() pref.FileImports { return emptyFiles }
+func (f PlaceholderFile) Messages() pref.MessageDescriptors { return emptyMessages }
+func (f PlaceholderFile) Enums() pref.EnumDescriptors { return emptyEnums }
+func (f PlaceholderFile) Extensions() pref.ExtensionDescriptors { return emptyExtensions }
+func (f PlaceholderFile) Services() pref.ServiceDescriptors { return emptyServices }
+func (f PlaceholderFile) SourceLocations() pref.SourceLocations { return emptySourceLocations }
+func (f PlaceholderFile) ProtoType(pref.FileDescriptor) { return }
+func (f PlaceholderFile) ProtoInternal(pragma.DoNotImplement) { return }
+
+// PlaceholderEnum is a placeholder, representing only the full name.
+type PlaceholderEnum pref.FullName
+
+func (e PlaceholderEnum) ParentFile() pref.FileDescriptor { return nil }
+func (e PlaceholderEnum) Parent() pref.Descriptor { return nil }
+func (e PlaceholderEnum) Index() int { return 0 }
+func (e PlaceholderEnum) Syntax() pref.Syntax { return 0 }
+func (e PlaceholderEnum) Name() pref.Name { return pref.FullName(e).Name() }
+func (e PlaceholderEnum) FullName() pref.FullName { return pref.FullName(e) }
+func (e PlaceholderEnum) IsPlaceholder() bool { return true }
+func (e PlaceholderEnum) Options() pref.ProtoMessage { return descopts.Enum }
+func (e PlaceholderEnum) Values() pref.EnumValueDescriptors { return emptyEnumValues }
+func (e PlaceholderEnum) ReservedNames() pref.Names { return emptyNames }
+func (e PlaceholderEnum) ReservedRanges() pref.EnumRanges { return emptyEnumRanges }
+func (e PlaceholderEnum) ProtoType(pref.EnumDescriptor) { return }
+func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return }
+
+// PlaceholderEnumValue is a placeholder, representing only the full name.
+type PlaceholderEnumValue pref.FullName
+
+func (e PlaceholderEnumValue) ParentFile() pref.FileDescriptor { return nil }
+func (e PlaceholderEnumValue) Parent() pref.Descriptor { return nil }
+func (e PlaceholderEnumValue) Index() int { return 0 }
+func (e PlaceholderEnumValue) Syntax() pref.Syntax { return 0 }
+func (e PlaceholderEnumValue) Name() pref.Name { return pref.FullName(e).Name() }
+func (e PlaceholderEnumValue) FullName() pref.FullName { return pref.FullName(e) }
+func (e PlaceholderEnumValue) IsPlaceholder() bool { return true }
+func (e PlaceholderEnumValue) Options() pref.ProtoMessage { return descopts.EnumValue }
+func (e PlaceholderEnumValue) Number() pref.EnumNumber { return 0 }
+func (e PlaceholderEnumValue) ProtoType(pref.EnumValueDescriptor) { return }
+func (e PlaceholderEnumValue) ProtoInternal(pragma.DoNotImplement) { return }
+
+// PlaceholderMessage is a placeholder, representing only the full name.
+type PlaceholderMessage pref.FullName
+
+func (m PlaceholderMessage) ParentFile() pref.FileDescriptor { return nil }
+func (m PlaceholderMessage) Parent() pref.Descriptor { return nil }
+func (m PlaceholderMessage) Index() int { return 0 }
+func (m PlaceholderMessage) Syntax() pref.Syntax { return 0 }
+func (m PlaceholderMessage) Name() pref.Name { return pref.FullName(m).Name() }
+func (m PlaceholderMessage) FullName() pref.FullName { return pref.FullName(m) }
+func (m PlaceholderMessage) IsPlaceholder() bool { return true }
+func (m PlaceholderMessage) Options() pref.ProtoMessage { return descopts.Message }
+func (m PlaceholderMessage) IsMapEntry() bool { return false }
+func (m PlaceholderMessage) Fields() pref.FieldDescriptors { return emptyFields }
+func (m PlaceholderMessage) Oneofs() pref.OneofDescriptors { return emptyOneofs }
+func (m PlaceholderMessage) ReservedNames() pref.Names { return emptyNames }
+func (m PlaceholderMessage) ReservedRanges() pref.FieldRanges { return emptyFieldRanges }
+func (m PlaceholderMessage) RequiredNumbers() pref.FieldNumbers { return emptyFieldNumbers }
+func (m PlaceholderMessage) ExtensionRanges() pref.FieldRanges { return emptyFieldRanges }
+func (m PlaceholderMessage) ExtensionRangeOptions(int) pref.ProtoMessage { panic("index out of range") }
+func (m PlaceholderMessage) Messages() pref.MessageDescriptors { return emptyMessages }
+func (m PlaceholderMessage) Enums() pref.EnumDescriptors { return emptyEnums }
+func (m PlaceholderMessage) Extensions() pref.ExtensionDescriptors { return emptyExtensions }
+func (m PlaceholderMessage) ProtoType(pref.MessageDescriptor) { return }
+func (m PlaceholderMessage) ProtoInternal(pragma.DoNotImplement) { return }
diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go
new file mode 100644
index 0000000..0a0dd35
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go
@@ -0,0 +1,297 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package filetype provides functionality for wrapping descriptors
+// with Go type information.
+package filetype
+
+import (
+ "reflect"
+
+ "google.golang.org/protobuf/internal/descopts"
+ fdesc "google.golang.org/protobuf/internal/filedesc"
+ pimpl "google.golang.org/protobuf/internal/impl"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ preg "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+// Builder constructs type descriptors from a raw file descriptor
+// and associated Go types for each enum and message declaration.
+//
+//
+// Flattened Ordering
+//
+// The protobuf type system represents declarations as a tree. Certain nodes in
+// the tree require us either to associate them with a concrete Go type or to
+// resolve a dependency, which is information that must be provided separately
+// since it cannot be derived from the file descriptor alone.
+//
+// However, representing a tree as Go literals is difficult to do in a space-
+// and time-efficient way. Thus, we store the declarations as a flattened list
+// of objects where the serialization order from the tree-based form is important.
+//
+// The "flattened ordering" is defined as a tree traversal of all enum, message,
+// extension, and service declarations using the following algorithm:
+//
+// def VisitFileDecls(fd):
+// for e in fd.Enums: yield e
+// for m in fd.Messages: yield m
+// for x in fd.Extensions: yield x
+// for s in fd.Services: yield s
+// for m in fd.Messages: yield from VisitMessageDecls(m)
+//
+// def VisitMessageDecls(md):
+// for e in md.Enums: yield e
+// for m in md.Messages: yield m
+// for x in md.Extensions: yield x
+// for m in md.Messages: yield from VisitMessageDecls(m)
+//
+// The traversal starts at the root file descriptor and yields each direct
+// declaration within each node before traversing into sub-declarations
+// that children themselves may have.
+type Builder struct {
+ // File is the underlying file descriptor builder.
+ File fdesc.Builder
+
+ // GoTypes is a unique set of the Go types for all declarations and
+ // dependencies. Each type is represented as a zero value of the Go type.
+ //
+ // Declarations are Go types generated for enums and messages directly
+ // declared (not publicly imported) in the proto source file.
+ // Messages for map entries are accounted for, but represented by nil.
+ // Enum declarations in "flattened ordering" come first, followed by
+ // message declarations in "flattened ordering".
+ //
+ // Dependencies are Go types for enums or messages referenced by
+ // message fields (excluding weak fields), for parent extended messages of
+ // extension fields, for enums or messages referenced by extension fields,
+ // and for input and output messages referenced by service methods.
+ // Dependencies must come after declarations, but the ordering of
+ // dependencies themselves is unspecified.
+ GoTypes []interface{}
+
+ // DependencyIndexes is an ordered list of indexes into GoTypes for the
+ // dependencies of messages, extensions, or services.
+ //
+ // There are 5 sub-lists in "flattened ordering" concatenated back-to-back:
+ // 0. Message field dependencies: list of the enum or message type
+ // referred to by every message field.
+ // 1. Extension field targets: list of the extended parent message of
+ // every extension.
+ // 2. Extension field dependencies: list of the enum or message type
+ // referred to by every extension field.
+ // 3. Service method inputs: list of the input message type
+ // referred to by every service method.
+ // 4. Service method outputs: list of the output message type
+ // referred to by every service method.
+ //
+ // The offset into DependencyIndexes for the start of each sub-list
+ // is appended to the end in reverse order.
+ DependencyIndexes []int32
+
+ // EnumInfos is a list of enum infos in "flattened ordering".
+ EnumInfos []pimpl.EnumInfo
+
+ // MessageInfos is a list of message infos in "flattened ordering".
+ // If provided, the GoType and PBType for each element are populated.
+ //
+ // Requirement: len(MessageInfos) == len(Build.Messages)
+ MessageInfos []pimpl.MessageInfo
+
+ // ExtensionInfos is a list of extension infos in "flattened ordering".
+ // Each element is initialized and registered with the protoregistry package.
+ //
+ // Requirement: len(ExtensionInfos) == len(Build.Extensions)
+ ExtensionInfos []pimpl.ExtensionInfo
+
+ // TypeRegistry is the registry to register each type descriptor.
+ // If nil, it uses protoregistry.GlobalTypes.
+ TypeRegistry interface {
+ RegisterMessage(pref.MessageType) error
+ RegisterEnum(pref.EnumType) error
+ RegisterExtension(pref.ExtensionType) error
+ }
+}
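+
+// visitFileDecls and visitMessageDecls are an illustrative sketch added by
+// the editor; they are not part of the vendored upstream file. They mirror
+// the "flattened ordering" pseudocode in the Builder documentation above
+// using protoreflect descriptors: each node yields its direct declarations
+// before recursing into nested messages.
+func visitFileDecls(fd pref.FileDescriptor, yield func(pref.Descriptor)) {
+ for i := 0; i < fd.Enums().Len(); i++ {
+ yield(fd.Enums().Get(i))
+ }
+ for i := 0; i < fd.Messages().Len(); i++ {
+ yield(fd.Messages().Get(i))
+ }
+ for i := 0; i < fd.Extensions().Len(); i++ {
+ yield(fd.Extensions().Get(i))
+ }
+ for i := 0; i < fd.Services().Len(); i++ {
+ yield(fd.Services().Get(i))
+ }
+ for i := 0; i < fd.Messages().Len(); i++ {
+ visitMessageDecls(fd.Messages().Get(i), yield)
+ }
+}
+
+func visitMessageDecls(md pref.MessageDescriptor, yield func(pref.Descriptor)) {
+ for i := 0; i < md.Enums().Len(); i++ {
+ yield(md.Enums().Get(i))
+ }
+ for i := 0; i < md.Messages().Len(); i++ {
+ yield(md.Messages().Get(i))
+ }
+ for i := 0; i < md.Extensions().Len(); i++ {
+ yield(md.Extensions().Get(i))
+ }
+ for i := 0; i < md.Messages().Len(); i++ {
+ visitMessageDecls(md.Messages().Get(i), yield)
+ }
+}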
+
+// Out is the output of the builder.
+type Out struct {
+ File pref.FileDescriptor
+}
+
+func (tb Builder) Build() (out Out) {
+ // Replace the resolver with one that resolves dependencies by index,
+ // which is faster and more reliable than relying on the global registry.
+ if tb.File.FileRegistry == nil {
+ tb.File.FileRegistry = preg.GlobalFiles
+ }
+ tb.File.FileRegistry = &resolverByIndex{
+ goTypes: tb.GoTypes,
+ depIdxs: tb.DependencyIndexes,
+ fileRegistry: tb.File.FileRegistry,
+ }
+
+ // Initialize registry if unpopulated.
+ if tb.TypeRegistry == nil {
+ tb.TypeRegistry = preg.GlobalTypes
+ }
+
+ fbOut := tb.File.Build()
+ out.File = fbOut.File
+
+ // Process enums.
+ enumGoTypes := tb.GoTypes[:len(fbOut.Enums)]
+ if len(tb.EnumInfos) != len(fbOut.Enums) {
+ panic("mismatching enum lengths")
+ }
+ if len(fbOut.Enums) > 0 {
+ for i := range fbOut.Enums {
+ tb.EnumInfos[i] = pimpl.EnumInfo{
+ GoReflectType: reflect.TypeOf(enumGoTypes[i]),
+ Desc: &fbOut.Enums[i],
+ }
+ // Register enum types.
+ if err := tb.TypeRegistry.RegisterEnum(&tb.EnumInfos[i]); err != nil {
+ panic(err)
+ }
+ }
+ }
+
+ // Process messages.
+ messageGoTypes := tb.GoTypes[len(fbOut.Enums):][:len(fbOut.Messages)]
+ if len(tb.MessageInfos) != len(fbOut.Messages) {
+ panic("mismatching message lengths")
+ }
+ if len(fbOut.Messages) > 0 {
+ for i := range fbOut.Messages {
+ if messageGoTypes[i] == nil {
+ continue // skip map entry
+ }
+
+ tb.MessageInfos[i].GoReflectType = reflect.TypeOf(messageGoTypes[i])
+ tb.MessageInfos[i].Desc = &fbOut.Messages[i]
+
+ // Register message types.
+ if err := tb.TypeRegistry.RegisterMessage(&tb.MessageInfos[i]); err != nil {
+ panic(err)
+ }
+ }
+
+ // As a special-case for descriptor.proto,
+ // locally register concrete message type for the options.
+ if out.File.Path() == "google/protobuf/descriptor.proto" && out.File.Package() == "google.protobuf" {
+ for i := range fbOut.Messages {
+ switch fbOut.Messages[i].Name() {
+ case "FileOptions":
+ descopts.File = messageGoTypes[i].(pref.ProtoMessage)
+ case "EnumOptions":
+ descopts.Enum = messageGoTypes[i].(pref.ProtoMessage)
+ case "EnumValueOptions":
+ descopts.EnumValue = messageGoTypes[i].(pref.ProtoMessage)
+ case "MessageOptions":
+ descopts.Message = messageGoTypes[i].(pref.ProtoMessage)
+ case "FieldOptions":
+ descopts.Field = messageGoTypes[i].(pref.ProtoMessage)
+ case "OneofOptions":
+ descopts.Oneof = messageGoTypes[i].(pref.ProtoMessage)
+ case "ExtensionRangeOptions":
+ descopts.ExtensionRange = messageGoTypes[i].(pref.ProtoMessage)
+ case "ServiceOptions":
+ descopts.Service = messageGoTypes[i].(pref.ProtoMessage)
+ case "MethodOptions":
+ descopts.Method = messageGoTypes[i].(pref.ProtoMessage)
+ }
+ }
+ }
+ }
+
+ // Process extensions.
+ if len(tb.ExtensionInfos) != len(fbOut.Extensions) {
+ panic("mismatching extension lengths")
+ }
+ var depIdx int32
+ for i := range fbOut.Extensions {
+ // For enum and message kinds, determine the referent Go type so
+ // that we can construct their constructors.
+ const listExtDeps = 2
+ var goType reflect.Type
+ switch fbOut.Extensions[i].L1.Kind {
+ case pref.EnumKind:
+ j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx)
+ goType = reflect.TypeOf(tb.GoTypes[j])
+ depIdx++
+ case pref.MessageKind, pref.GroupKind:
+ j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx)
+ goType = reflect.TypeOf(tb.GoTypes[j])
+ depIdx++
+ default:
+ goType = goTypeForPBKind[fbOut.Extensions[i].L1.Kind]
+ }
+ if fbOut.Extensions[i].IsList() {
+ goType = reflect.SliceOf(goType)
+ }
+
+ pimpl.InitExtensionInfo(&tb.ExtensionInfos[i], &fbOut.Extensions[i], goType)
+
+ // Register extension types.
+ if err := tb.TypeRegistry.RegisterExtension(&tb.ExtensionInfos[i]); err != nil {
+ panic(err)
+ }
+ }
+
+ return out
+}
+
+var goTypeForPBKind = map[pref.Kind]reflect.Type{
+ pref.BoolKind: reflect.TypeOf(bool(false)),
+ pref.Int32Kind: reflect.TypeOf(int32(0)),
+ pref.Sint32Kind: reflect.TypeOf(int32(0)),
+ pref.Sfixed32Kind: reflect.TypeOf(int32(0)),
+ pref.Int64Kind: reflect.TypeOf(int64(0)),
+ pref.Sint64Kind: reflect.TypeOf(int64(0)),
+ pref.Sfixed64Kind: reflect.TypeOf(int64(0)),
+ pref.Uint32Kind: reflect.TypeOf(uint32(0)),
+ pref.Fixed32Kind: reflect.TypeOf(uint32(0)),
+ pref.Uint64Kind: reflect.TypeOf(uint64(0)),
+ pref.Fixed64Kind: reflect.TypeOf(uint64(0)),
+ pref.FloatKind: reflect.TypeOf(float32(0)),
+ pref.DoubleKind: reflect.TypeOf(float64(0)),
+ pref.StringKind: reflect.TypeOf(string("")),
+ pref.BytesKind: reflect.TypeOf([]byte(nil)),
+}
+
+type depIdxs []int32
+
+// Get retrieves the jth element of the ith sub-list.
+func (x depIdxs) Get(i, j int32) int32 {
+ return x[x[int32(len(x))-i-1]+j]
+}
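+
+// depIdxsExample is an illustrative sketch added by the editor; it is not
+// part of the vendored upstream file. Suppose a hypothetical file has two
+// message-field dependencies and one extension target: the data region is
+// [5, 7, 3] and the start offsets of sub-lists 4, 3, 2, 1, 0 are appended in
+// reverse order, so Get(0, 1) resolves to GoTypes index 7 and Get(1, 0) to
+// GoTypes index 3.
+func depIdxsExample() (int32, int32) {
+ x := depIdxs{
+ 5, 7, // sub-list 0: message field dependencies (starts at offset 0)
+ 3, // sub-list 1: extension field targets (starts at offset 2)
+ 3, 3, 3, 2, 0, // start offsets of sub-lists 4, 3, 2, 1, 0
+ }
+ return x.Get(0, 1), x.Get(1, 0) // 7, 3
+}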
+
+type (
+ resolverByIndex struct {
+ goTypes []interface{}
+ depIdxs depIdxs
+ fileRegistry
+ }
+ fileRegistry interface {
+ FindFileByPath(string) (pref.FileDescriptor, error)
+ FindDescriptorByName(pref.FullName) (pref.Descriptor, error)
+ RegisterFile(pref.FileDescriptor) error
+ }
+)
+
+func (r *resolverByIndex) FindEnumByIndex(i, j int32, es []fdesc.Enum, ms []fdesc.Message) pref.EnumDescriptor {
+ if depIdx := int(r.depIdxs.Get(i, j)); depIdx < len(es)+len(ms) {
+ return &es[depIdx]
+ } else {
+ return pimpl.Export{}.EnumDescriptorOf(r.goTypes[depIdx])
+ }
+}
+
+func (r *resolverByIndex) FindMessageByIndex(i, j int32, es []fdesc.Enum, ms []fdesc.Message) pref.MessageDescriptor {
+ if depIdx := int(r.depIdxs.Get(i, j)); depIdx < len(es)+len(ms) {
+ return &ms[depIdx-len(es)]
+ } else {
+ return pimpl.Export{}.MessageDescriptorOf(r.goTypes[depIdx])
+ }
+}
diff --git a/vendor/google.golang.org/protobuf/internal/flags/flags.go b/vendor/google.golang.org/protobuf/internal/flags/flags.go
new file mode 100644
index 0000000..58372dd
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/flags/flags.go
@@ -0,0 +1,24 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package flags provides a set of flags controlled by build tags.
+package flags
+
+// ProtoLegacy specifies whether to enable support for legacy functionality
+// such as MessageSets, weak fields, and various other obscure behavior
+// that is necessary to maintain backwards compatibility with proto1 or
+// the pre-release variants of proto2 and proto3.
+//
+// This is disabled by default unless built with the "protolegacy" tag.
+//
+// WARNING: The compatibility agreement covers nothing provided by this flag.
+// As such, functionality may suddenly be removed or changed at our discretion.
+const ProtoLegacy = protoLegacy
+
+// LazyUnmarshalExtensions specifies whether to lazily unmarshal extensions.
+//
+// Lazy extension unmarshaling validates the contents of message-valued
+// extension fields at unmarshal time, but defers creating the message
+// structure until the extension is first accessed.
+const LazyUnmarshalExtensions = ProtoLegacy
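+
+// legacyGateExample is an illustrative sketch added by the editor; it is not
+// part of the vendored upstream file. ProtoLegacy is a compile-time constant
+// selected by the build-tag files below, so guarded branches are removed
+// entirely from default builds; enabling them requires rebuilding with
+// `go build -tags protolegacy ./...`.
+func legacyGateExample() string {
+ if ProtoLegacy {
+ return "legacy behavior (MessageSets, weak fields, ...) enabled"
+ }
+ return "legacy behavior disabled (default)"
+}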
diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go
new file mode 100644
index 0000000..a72995f
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go
@@ -0,0 +1,9 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !protolegacy
+
+package flags
+
+const protoLegacy = false
diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go
new file mode 100644
index 0000000..772e2f0
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go
@@ -0,0 +1,9 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build protolegacy
+
+package flags
+
+const protoLegacy = true
diff --git a/vendor/google.golang.org/protobuf/internal/genid/any_gen.go b/vendor/google.golang.org/protobuf/internal/genid/any_gen.go
new file mode 100644
index 0000000..e6f7d47
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/any_gen.go
@@ -0,0 +1,34 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package genid
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+const File_google_protobuf_any_proto = "google/protobuf/any.proto"
+
+// Names for google.protobuf.Any.
+const (
+ Any_message_name protoreflect.Name = "Any"
+ Any_message_fullname protoreflect.FullName = "google.protobuf.Any"
+)
+
+// Field names for google.protobuf.Any.
+const (
+ Any_TypeUrl_field_name protoreflect.Name = "type_url"
+ Any_Value_field_name protoreflect.Name = "value"
+
+ Any_TypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Any.type_url"
+ Any_Value_field_fullname protoreflect.FullName = "google.protobuf.Any.value"
+)
+
+// Field numbers for google.protobuf.Any.
+const (
+ Any_TypeUrl_field_number protoreflect.FieldNumber = 1
+ Any_Value_field_number protoreflect.FieldNumber = 2
+)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go
new file mode 100644
index 0000000..df8f918
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go
@@ -0,0 +1,106 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package genid
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+const File_google_protobuf_api_proto = "google/protobuf/api.proto"
+
+// Names for google.protobuf.Api.
+const (
+ Api_message_name protoreflect.Name = "Api"
+ Api_message_fullname protoreflect.FullName = "google.protobuf.Api"
+)
+
+// Field names for google.protobuf.Api.
+const (
+ Api_Name_field_name protoreflect.Name = "name"
+ Api_Methods_field_name protoreflect.Name = "methods"
+ Api_Options_field_name protoreflect.Name = "options"
+ Api_Version_field_name protoreflect.Name = "version"
+ Api_SourceContext_field_name protoreflect.Name = "source_context"
+ Api_Mixins_field_name protoreflect.Name = "mixins"
+ Api_Syntax_field_name protoreflect.Name = "syntax"
+
+ Api_Name_field_fullname protoreflect.FullName = "google.protobuf.Api.name"
+ Api_Methods_field_fullname protoreflect.FullName = "google.protobuf.Api.methods"
+ Api_Options_field_fullname protoreflect.FullName = "google.protobuf.Api.options"
+ Api_Version_field_fullname protoreflect.FullName = "google.protobuf.Api.version"
+ Api_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Api.source_context"
+ Api_Mixins_field_fullname protoreflect.FullName = "google.protobuf.Api.mixins"
+ Api_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Api.syntax"
+)
+
+// Field numbers for google.protobuf.Api.
+const (
+ Api_Name_field_number protoreflect.FieldNumber = 1
+ Api_Methods_field_number protoreflect.FieldNumber = 2
+ Api_Options_field_number protoreflect.FieldNumber = 3
+ Api_Version_field_number protoreflect.FieldNumber = 4
+ Api_SourceContext_field_number protoreflect.FieldNumber = 5
+ Api_Mixins_field_number protoreflect.FieldNumber = 6
+ Api_Syntax_field_number protoreflect.FieldNumber = 7
+)
+
+// Names for google.protobuf.Method.
+const (
+ Method_message_name protoreflect.Name = "Method"
+ Method_message_fullname protoreflect.FullName = "google.protobuf.Method"
+)
+
+// Field names for google.protobuf.Method.
+const (
+ Method_Name_field_name protoreflect.Name = "name"
+ Method_RequestTypeUrl_field_name protoreflect.Name = "request_type_url"
+ Method_RequestStreaming_field_name protoreflect.Name = "request_streaming"
+ Method_ResponseTypeUrl_field_name protoreflect.Name = "response_type_url"
+ Method_ResponseStreaming_field_name protoreflect.Name = "response_streaming"
+ Method_Options_field_name protoreflect.Name = "options"
+ Method_Syntax_field_name protoreflect.Name = "syntax"
+
+ Method_Name_field_fullname protoreflect.FullName = "google.protobuf.Method.name"
+ Method_RequestTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.request_type_url"
+ Method_RequestStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.request_streaming"
+ Method_ResponseTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.response_type_url"
+ Method_ResponseStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.response_streaming"
+ Method_Options_field_fullname protoreflect.FullName = "google.protobuf.Method.options"
+ Method_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Method.syntax"
+)
+
+// Field numbers for google.protobuf.Method.
+const (
+ Method_Name_field_number protoreflect.FieldNumber = 1
+ Method_RequestTypeUrl_field_number protoreflect.FieldNumber = 2
+ Method_RequestStreaming_field_number protoreflect.FieldNumber = 3
+ Method_ResponseTypeUrl_field_number protoreflect.FieldNumber = 4
+ Method_ResponseStreaming_field_number protoreflect.FieldNumber = 5
+ Method_Options_field_number protoreflect.FieldNumber = 6
+ Method_Syntax_field_number protoreflect.FieldNumber = 7
+)
+
+// Names for google.protobuf.Mixin.
+const (
+ Mixin_message_name protoreflect.Name = "Mixin"
+ Mixin_message_fullname protoreflect.FullName = "google.protobuf.Mixin"
+)
+
+// Field names for google.protobuf.Mixin.
+const (
+ Mixin_Name_field_name protoreflect.Name = "name"
+ Mixin_Root_field_name protoreflect.Name = "root"
+
+ Mixin_Name_field_fullname protoreflect.FullName = "google.protobuf.Mixin.name"
+ Mixin_Root_field_fullname protoreflect.FullName = "google.protobuf.Mixin.root"
+)
+
+// Field numbers for google.protobuf.Mixin.
+const (
+ Mixin_Name_field_number protoreflect.FieldNumber = 1
+ Mixin_Root_field_number protoreflect.FieldNumber = 2
+)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
new file mode 100644
index 0000000..e3cdf1c
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
@@ -0,0 +1,829 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package genid
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+const File_google_protobuf_descriptor_proto = "google/protobuf/descriptor.proto"
+
+// Names for google.protobuf.FileDescriptorSet.
+const (
+ FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet"
+ FileDescriptorSet_message_fullname protoreflect.FullName = "google.protobuf.FileDescriptorSet"
+)
+
+// Field names for google.protobuf.FileDescriptorSet.
+const (
+ FileDescriptorSet_File_field_name protoreflect.Name = "file"
+
+ FileDescriptorSet_File_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorSet.file"
+)
+
+// Field numbers for google.protobuf.FileDescriptorSet.
+const (
+ FileDescriptorSet_File_field_number protoreflect.FieldNumber = 1
+)
+
+// Names for google.protobuf.FileDescriptorProto.
+const (
+ FileDescriptorProto_message_name protoreflect.Name = "FileDescriptorProto"
+ FileDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto"
+)
+
+// Field names for google.protobuf.FileDescriptorProto.
+const (
+ FileDescriptorProto_Name_field_name protoreflect.Name = "name"
+ FileDescriptorProto_Package_field_name protoreflect.Name = "package"
+ FileDescriptorProto_Dependency_field_name protoreflect.Name = "dependency"
+ FileDescriptorProto_PublicDependency_field_name protoreflect.Name = "public_dependency"
+ FileDescriptorProto_WeakDependency_field_name protoreflect.Name = "weak_dependency"
+ FileDescriptorProto_MessageType_field_name protoreflect.Name = "message_type"
+ FileDescriptorProto_EnumType_field_name protoreflect.Name = "enum_type"
+ FileDescriptorProto_Service_field_name protoreflect.Name = "service"
+ FileDescriptorProto_Extension_field_name protoreflect.Name = "extension"
+ FileDescriptorProto_Options_field_name protoreflect.Name = "options"
+ FileDescriptorProto_SourceCodeInfo_field_name protoreflect.Name = "source_code_info"
+ FileDescriptorProto_Syntax_field_name protoreflect.Name = "syntax"
+
+ FileDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.name"
+ FileDescriptorProto_Package_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.package"
+ FileDescriptorProto_Dependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.dependency"
+ FileDescriptorProto_PublicDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.public_dependency"
+ FileDescriptorProto_WeakDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.weak_dependency"
+ FileDescriptorProto_MessageType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.message_type"
+ FileDescriptorProto_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.enum_type"
+ FileDescriptorProto_Service_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.service"
+ FileDescriptorProto_Extension_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.extension"
+ FileDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.options"
+ FileDescriptorProto_SourceCodeInfo_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.source_code_info"
+ FileDescriptorProto_Syntax_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.syntax"
+)
+
+// Field numbers for google.protobuf.FileDescriptorProto.
+const (
+ FileDescriptorProto_Name_field_number protoreflect.FieldNumber = 1
+ FileDescriptorProto_Package_field_number protoreflect.FieldNumber = 2
+ FileDescriptorProto_Dependency_field_number protoreflect.FieldNumber = 3
+ FileDescriptorProto_PublicDependency_field_number protoreflect.FieldNumber = 10
+ FileDescriptorProto_WeakDependency_field_number protoreflect.FieldNumber = 11
+ FileDescriptorProto_MessageType_field_number protoreflect.FieldNumber = 4
+ FileDescriptorProto_EnumType_field_number protoreflect.FieldNumber = 5
+ FileDescriptorProto_Service_field_number protoreflect.FieldNumber = 6
+ FileDescriptorProto_Extension_field_number protoreflect.FieldNumber = 7
+ FileDescriptorProto_Options_field_number protoreflect.FieldNumber = 8
+ FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9
+ FileDescriptorProto_Syntax_field_number protoreflect.FieldNumber = 12
+)
+
+// Names for google.protobuf.DescriptorProto.
+const (
+ DescriptorProto_message_name protoreflect.Name = "DescriptorProto"
+ DescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto"
+)
+
+// Field names for google.protobuf.DescriptorProto.
+const (
+ DescriptorProto_Name_field_name protoreflect.Name = "name"
+ DescriptorProto_Field_field_name protoreflect.Name = "field"
+ DescriptorProto_Extension_field_name protoreflect.Name = "extension"
+ DescriptorProto_NestedType_field_name protoreflect.Name = "nested_type"
+ DescriptorProto_EnumType_field_name protoreflect.Name = "enum_type"
+ DescriptorProto_ExtensionRange_field_name protoreflect.Name = "extension_range"
+ DescriptorProto_OneofDecl_field_name protoreflect.Name = "oneof_decl"
+ DescriptorProto_Options_field_name protoreflect.Name = "options"
+ DescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range"
+ DescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name"
+
+ DescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.name"
+ DescriptorProto_Field_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.field"
+ DescriptorProto_Extension_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.extension"
+ DescriptorProto_NestedType_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.nested_type"
+ DescriptorProto_EnumType_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.enum_type"
+ DescriptorProto_ExtensionRange_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.extension_range"
+ DescriptorProto_OneofDecl_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.oneof_decl"
+ DescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.options"
+ DescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_range"
+ DescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_name"
+)
+
+// Field numbers for google.protobuf.DescriptorProto.
+const (
+ DescriptorProto_Name_field_number protoreflect.FieldNumber = 1
+ DescriptorProto_Field_field_number protoreflect.FieldNumber = 2
+ DescriptorProto_Extension_field_number protoreflect.FieldNumber = 6
+ DescriptorProto_NestedType_field_number protoreflect.FieldNumber = 3
+ DescriptorProto_EnumType_field_number protoreflect.FieldNumber = 4
+ DescriptorProto_ExtensionRange_field_number protoreflect.FieldNumber = 5
+ DescriptorProto_OneofDecl_field_number protoreflect.FieldNumber = 8
+ DescriptorProto_Options_field_number protoreflect.FieldNumber = 7
+ DescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 9
+ DescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 10
+)
+
+// Names for google.protobuf.DescriptorProto.ExtensionRange.
+const (
+ DescriptorProto_ExtensionRange_message_name protoreflect.Name = "ExtensionRange"
+ DescriptorProto_ExtensionRange_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange"
+)
+
+// Field names for google.protobuf.DescriptorProto.ExtensionRange.
+const (
+ DescriptorProto_ExtensionRange_Start_field_name protoreflect.Name = "start"
+ DescriptorProto_ExtensionRange_End_field_name protoreflect.Name = "end"
+ DescriptorProto_ExtensionRange_Options_field_name protoreflect.Name = "options"
+
+ DescriptorProto_ExtensionRange_Start_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.start"
+ DescriptorProto_ExtensionRange_End_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.end"
+ DescriptorProto_ExtensionRange_Options_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.options"
+)
+
+// Field numbers for google.protobuf.DescriptorProto.ExtensionRange.
+const (
+ DescriptorProto_ExtensionRange_Start_field_number protoreflect.FieldNumber = 1
+ DescriptorProto_ExtensionRange_End_field_number protoreflect.FieldNumber = 2
+ DescriptorProto_ExtensionRange_Options_field_number protoreflect.FieldNumber = 3
+)
+
+// Names for google.protobuf.DescriptorProto.ReservedRange.
+const (
+ DescriptorProto_ReservedRange_message_name protoreflect.Name = "ReservedRange"
+ DescriptorProto_ReservedRange_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange"
+)
+
+// Field names for google.protobuf.DescriptorProto.ReservedRange.
+const (
+ DescriptorProto_ReservedRange_Start_field_name protoreflect.Name = "start"
+ DescriptorProto_ReservedRange_End_field_name protoreflect.Name = "end"
+
+ DescriptorProto_ReservedRange_Start_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange.start"
+ DescriptorProto_ReservedRange_End_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange.end"
+)
+
+// Field numbers for google.protobuf.DescriptorProto.ReservedRange.
+const (
+ DescriptorProto_ReservedRange_Start_field_number protoreflect.FieldNumber = 1
+ DescriptorProto_ReservedRange_End_field_number protoreflect.FieldNumber = 2
+)
+
+// Names for google.protobuf.ExtensionRangeOptions.
+const (
+ ExtensionRangeOptions_message_name protoreflect.Name = "ExtensionRangeOptions"
+ ExtensionRangeOptions_message_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions"
+)
+
+// Field names for google.protobuf.ExtensionRangeOptions.
+const (
+ ExtensionRangeOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
+
+ ExtensionRangeOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.uninterpreted_option"
+)
+
+// Field numbers for google.protobuf.ExtensionRangeOptions.
+const (
+ ExtensionRangeOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
+)
+
+// Names for google.protobuf.FieldDescriptorProto.
+const (
+ FieldDescriptorProto_message_name protoreflect.Name = "FieldDescriptorProto"
+ FieldDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto"
+)
+
+// Field names for google.protobuf.FieldDescriptorProto.
+const (
+ FieldDescriptorProto_Name_field_name protoreflect.Name = "name"
+ FieldDescriptorProto_Number_field_name protoreflect.Name = "number"
+ FieldDescriptorProto_Label_field_name protoreflect.Name = "label"
+ FieldDescriptorProto_Type_field_name protoreflect.Name = "type"
+ FieldDescriptorProto_TypeName_field_name protoreflect.Name = "type_name"
+ FieldDescriptorProto_Extendee_field_name protoreflect.Name = "extendee"
+ FieldDescriptorProto_DefaultValue_field_name protoreflect.Name = "default_value"
+ FieldDescriptorProto_OneofIndex_field_name protoreflect.Name = "oneof_index"
+ FieldDescriptorProto_JsonName_field_name protoreflect.Name = "json_name"
+ FieldDescriptorProto_Options_field_name protoreflect.Name = "options"
+ FieldDescriptorProto_Proto3Optional_field_name protoreflect.Name = "proto3_optional"
+
+ FieldDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.name"
+ FieldDescriptorProto_Number_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.number"
+ FieldDescriptorProto_Label_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.label"
+ FieldDescriptorProto_Type_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.type"
+ FieldDescriptorProto_TypeName_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.type_name"
+ FieldDescriptorProto_Extendee_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.extendee"
+ FieldDescriptorProto_DefaultValue_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.default_value"
+ FieldDescriptorProto_OneofIndex_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.oneof_index"
+ FieldDescriptorProto_JsonName_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.json_name"
+ FieldDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.options"
+ FieldDescriptorProto_Proto3Optional_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.proto3_optional"
+)
+
+// Field numbers for google.protobuf.FieldDescriptorProto.
+const (
+ FieldDescriptorProto_Name_field_number protoreflect.FieldNumber = 1
+ FieldDescriptorProto_Number_field_number protoreflect.FieldNumber = 3
+ FieldDescriptorProto_Label_field_number protoreflect.FieldNumber = 4
+ FieldDescriptorProto_Type_field_number protoreflect.FieldNumber = 5
+ FieldDescriptorProto_TypeName_field_number protoreflect.FieldNumber = 6
+ FieldDescriptorProto_Extendee_field_number protoreflect.FieldNumber = 2
+ FieldDescriptorProto_DefaultValue_field_number protoreflect.FieldNumber = 7
+ FieldDescriptorProto_OneofIndex_field_number protoreflect.FieldNumber = 9
+ FieldDescriptorProto_JsonName_field_number protoreflect.FieldNumber = 10
+ FieldDescriptorProto_Options_field_number protoreflect.FieldNumber = 8
+ FieldDescriptorProto_Proto3Optional_field_number protoreflect.FieldNumber = 17
+)
+
+// Full and short names for google.protobuf.FieldDescriptorProto.Type.
+const (
+ FieldDescriptorProto_Type_enum_fullname = "google.protobuf.FieldDescriptorProto.Type"
+ FieldDescriptorProto_Type_enum_name = "Type"
+)
+
+// Full and short names for google.protobuf.FieldDescriptorProto.Label.
+const (
+ FieldDescriptorProto_Label_enum_fullname = "google.protobuf.FieldDescriptorProto.Label"
+ FieldDescriptorProto_Label_enum_name = "Label"
+)
+
+// Names for google.protobuf.OneofDescriptorProto.
+const (
+ OneofDescriptorProto_message_name protoreflect.Name = "OneofDescriptorProto"
+ OneofDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto"
+)
+
+// Field names for google.protobuf.OneofDescriptorProto.
+const (
+ OneofDescriptorProto_Name_field_name protoreflect.Name = "name"
+ OneofDescriptorProto_Options_field_name protoreflect.Name = "options"
+
+ OneofDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto.name"
+ OneofDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto.options"
+)
+
+// Field numbers for google.protobuf.OneofDescriptorProto.
+const (
+ OneofDescriptorProto_Name_field_number protoreflect.FieldNumber = 1
+ OneofDescriptorProto_Options_field_number protoreflect.FieldNumber = 2
+)
+
+// Names for google.protobuf.EnumDescriptorProto.
+const (
+ EnumDescriptorProto_message_name protoreflect.Name = "EnumDescriptorProto"
+ EnumDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto"
+)
+
+// Field names for google.protobuf.EnumDescriptorProto.
+const (
+ EnumDescriptorProto_Name_field_name protoreflect.Name = "name"
+ EnumDescriptorProto_Value_field_name protoreflect.Name = "value"
+ EnumDescriptorProto_Options_field_name protoreflect.Name = "options"
+ EnumDescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range"
+ EnumDescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name"
+
+ EnumDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.name"
+ EnumDescriptorProto_Value_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.value"
+ EnumDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.options"
+ EnumDescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_range"
+ EnumDescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_name"
+)
+
+// Field numbers for google.protobuf.EnumDescriptorProto.
+const (
+ EnumDescriptorProto_Name_field_number protoreflect.FieldNumber = 1
+ EnumDescriptorProto_Value_field_number protoreflect.FieldNumber = 2
+ EnumDescriptorProto_Options_field_number protoreflect.FieldNumber = 3
+ EnumDescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 4
+ EnumDescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 5
+)
+
+// Names for google.protobuf.EnumDescriptorProto.EnumReservedRange.
+const (
+ EnumDescriptorProto_EnumReservedRange_message_name protoreflect.Name = "EnumReservedRange"
+ EnumDescriptorProto_EnumReservedRange_message_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange"
+)
+
+// Field names for google.protobuf.EnumDescriptorProto.EnumReservedRange.
+const (
+ EnumDescriptorProto_EnumReservedRange_Start_field_name protoreflect.Name = "start"
+ EnumDescriptorProto_EnumReservedRange_End_field_name protoreflect.Name = "end"
+
+ EnumDescriptorProto_EnumReservedRange_Start_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange.start"
+ EnumDescriptorProto_EnumReservedRange_End_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange.end"
+)
+
+// Field numbers for google.protobuf.EnumDescriptorProto.EnumReservedRange.
+const (
+ EnumDescriptorProto_EnumReservedRange_Start_field_number protoreflect.FieldNumber = 1
+ EnumDescriptorProto_EnumReservedRange_End_field_number protoreflect.FieldNumber = 2
+)
+
+// Names for google.protobuf.EnumValueDescriptorProto.
+const (
+ EnumValueDescriptorProto_message_name protoreflect.Name = "EnumValueDescriptorProto"
+ EnumValueDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto"
+)
+
+// Field names for google.protobuf.EnumValueDescriptorProto.
+const (
+ EnumValueDescriptorProto_Name_field_name protoreflect.Name = "name"
+ EnumValueDescriptorProto_Number_field_name protoreflect.Name = "number"
+ EnumValueDescriptorProto_Options_field_name protoreflect.Name = "options"
+
+ EnumValueDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.name"
+ EnumValueDescriptorProto_Number_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.number"
+ EnumValueDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.options"
+)
+
+// Field numbers for google.protobuf.EnumValueDescriptorProto.
+const (
+ EnumValueDescriptorProto_Name_field_number protoreflect.FieldNumber = 1
+ EnumValueDescriptorProto_Number_field_number protoreflect.FieldNumber = 2
+ EnumValueDescriptorProto_Options_field_number protoreflect.FieldNumber = 3
+)
+
+// Names for google.protobuf.ServiceDescriptorProto.
+const (
+ ServiceDescriptorProto_message_name protoreflect.Name = "ServiceDescriptorProto"
+ ServiceDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto"
+)
+
+// Field names for google.protobuf.ServiceDescriptorProto.
+const (
+ ServiceDescriptorProto_Name_field_name protoreflect.Name = "name"
+ ServiceDescriptorProto_Method_field_name protoreflect.Name = "method"
+ ServiceDescriptorProto_Options_field_name protoreflect.Name = "options"
+
+ ServiceDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.name"
+ ServiceDescriptorProto_Method_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.method"
+ ServiceDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.options"
+)
+
+// Field numbers for google.protobuf.ServiceDescriptorProto.
+const (
+ ServiceDescriptorProto_Name_field_number protoreflect.FieldNumber = 1
+ ServiceDescriptorProto_Method_field_number protoreflect.FieldNumber = 2
+ ServiceDescriptorProto_Options_field_number protoreflect.FieldNumber = 3
+)
+
+// Names for google.protobuf.MethodDescriptorProto.
+const (
+ MethodDescriptorProto_message_name protoreflect.Name = "MethodDescriptorProto"
+ MethodDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto"
+)
+
+// Field names for google.protobuf.MethodDescriptorProto.
+const (
+ MethodDescriptorProto_Name_field_name protoreflect.Name = "name"
+ MethodDescriptorProto_InputType_field_name protoreflect.Name = "input_type"
+ MethodDescriptorProto_OutputType_field_name protoreflect.Name = "output_type"
+ MethodDescriptorProto_Options_field_name protoreflect.Name = "options"
+ MethodDescriptorProto_ClientStreaming_field_name protoreflect.Name = "client_streaming"
+ MethodDescriptorProto_ServerStreaming_field_name protoreflect.Name = "server_streaming"
+
+ MethodDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.name"
+ MethodDescriptorProto_InputType_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.input_type"
+ MethodDescriptorProto_OutputType_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.output_type"
+ MethodDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.options"
+ MethodDescriptorProto_ClientStreaming_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.client_streaming"
+ MethodDescriptorProto_ServerStreaming_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.server_streaming"
+)
+
+// Field numbers for google.protobuf.MethodDescriptorProto.
+const (
+ MethodDescriptorProto_Name_field_number protoreflect.FieldNumber = 1
+ MethodDescriptorProto_InputType_field_number protoreflect.FieldNumber = 2
+ MethodDescriptorProto_OutputType_field_number protoreflect.FieldNumber = 3
+ MethodDescriptorProto_Options_field_number protoreflect.FieldNumber = 4
+ MethodDescriptorProto_ClientStreaming_field_number protoreflect.FieldNumber = 5
+ MethodDescriptorProto_ServerStreaming_field_number protoreflect.FieldNumber = 6
+)
+
+// Names for google.protobuf.FileOptions.
+const (
+ FileOptions_message_name protoreflect.Name = "FileOptions"
+ FileOptions_message_fullname protoreflect.FullName = "google.protobuf.FileOptions"
+)
+
+// Field names for google.protobuf.FileOptions.
+const (
+ FileOptions_JavaPackage_field_name protoreflect.Name = "java_package"
+ FileOptions_JavaOuterClassname_field_name protoreflect.Name = "java_outer_classname"
+ FileOptions_JavaMultipleFiles_field_name protoreflect.Name = "java_multiple_files"
+ FileOptions_JavaGenerateEqualsAndHash_field_name protoreflect.Name = "java_generate_equals_and_hash"
+ FileOptions_JavaStringCheckUtf8_field_name protoreflect.Name = "java_string_check_utf8"
+ FileOptions_OptimizeFor_field_name protoreflect.Name = "optimize_for"
+ FileOptions_GoPackage_field_name protoreflect.Name = "go_package"
+ FileOptions_CcGenericServices_field_name protoreflect.Name = "cc_generic_services"
+ FileOptions_JavaGenericServices_field_name protoreflect.Name = "java_generic_services"
+ FileOptions_PyGenericServices_field_name protoreflect.Name = "py_generic_services"
+ FileOptions_PhpGenericServices_field_name protoreflect.Name = "php_generic_services"
+ FileOptions_Deprecated_field_name protoreflect.Name = "deprecated"
+ FileOptions_CcEnableArenas_field_name protoreflect.Name = "cc_enable_arenas"
+ FileOptions_ObjcClassPrefix_field_name protoreflect.Name = "objc_class_prefix"
+ FileOptions_CsharpNamespace_field_name protoreflect.Name = "csharp_namespace"
+ FileOptions_SwiftPrefix_field_name protoreflect.Name = "swift_prefix"
+ FileOptions_PhpClassPrefix_field_name protoreflect.Name = "php_class_prefix"
+ FileOptions_PhpNamespace_field_name protoreflect.Name = "php_namespace"
+ FileOptions_PhpMetadataNamespace_field_name protoreflect.Name = "php_metadata_namespace"
+ FileOptions_RubyPackage_field_name protoreflect.Name = "ruby_package"
+ FileOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
+
+ FileOptions_JavaPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_package"
+ FileOptions_JavaOuterClassname_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_outer_classname"
+ FileOptions_JavaMultipleFiles_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_multiple_files"
+ FileOptions_JavaGenerateEqualsAndHash_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generate_equals_and_hash"
+ FileOptions_JavaStringCheckUtf8_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_string_check_utf8"
+ FileOptions_OptimizeFor_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.optimize_for"
+ FileOptions_GoPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.go_package"
+ FileOptions_CcGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_generic_services"
+ FileOptions_JavaGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generic_services"
+ FileOptions_PyGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.py_generic_services"
+ FileOptions_PhpGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_generic_services"
+ FileOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.deprecated"
+ FileOptions_CcEnableArenas_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_enable_arenas"
+ FileOptions_ObjcClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.objc_class_prefix"
+ FileOptions_CsharpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.csharp_namespace"
+ FileOptions_SwiftPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.swift_prefix"
+ FileOptions_PhpClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_class_prefix"
+ FileOptions_PhpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_namespace"
+ FileOptions_PhpMetadataNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_metadata_namespace"
+ FileOptions_RubyPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.ruby_package"
+ FileOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.uninterpreted_option"
+)
+
+// Field numbers for google.protobuf.FileOptions.
+const (
+ FileOptions_JavaPackage_field_number protoreflect.FieldNumber = 1
+ FileOptions_JavaOuterClassname_field_number protoreflect.FieldNumber = 8
+ FileOptions_JavaMultipleFiles_field_number protoreflect.FieldNumber = 10
+ FileOptions_JavaGenerateEqualsAndHash_field_number protoreflect.FieldNumber = 20
+ FileOptions_JavaStringCheckUtf8_field_number protoreflect.FieldNumber = 27
+ FileOptions_OptimizeFor_field_number protoreflect.FieldNumber = 9
+ FileOptions_GoPackage_field_number protoreflect.FieldNumber = 11
+ FileOptions_CcGenericServices_field_number protoreflect.FieldNumber = 16
+ FileOptions_JavaGenericServices_field_number protoreflect.FieldNumber = 17
+ FileOptions_PyGenericServices_field_number protoreflect.FieldNumber = 18
+ FileOptions_PhpGenericServices_field_number protoreflect.FieldNumber = 42
+ FileOptions_Deprecated_field_number protoreflect.FieldNumber = 23
+ FileOptions_CcEnableArenas_field_number protoreflect.FieldNumber = 31
+ FileOptions_ObjcClassPrefix_field_number protoreflect.FieldNumber = 36
+ FileOptions_CsharpNamespace_field_number protoreflect.FieldNumber = 37
+ FileOptions_SwiftPrefix_field_number protoreflect.FieldNumber = 39
+ FileOptions_PhpClassPrefix_field_number protoreflect.FieldNumber = 40
+ FileOptions_PhpNamespace_field_number protoreflect.FieldNumber = 41
+ FileOptions_PhpMetadataNamespace_field_number protoreflect.FieldNumber = 44
+ FileOptions_RubyPackage_field_number protoreflect.FieldNumber = 45
+ FileOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
+)
+
+// Full and short names for google.protobuf.FileOptions.OptimizeMode.
+const (
+ FileOptions_OptimizeMode_enum_fullname = "google.protobuf.FileOptions.OptimizeMode"
+ FileOptions_OptimizeMode_enum_name = "OptimizeMode"
+)
+
+// Names for google.protobuf.MessageOptions.
+const (
+ MessageOptions_message_name protoreflect.Name = "MessageOptions"
+ MessageOptions_message_fullname protoreflect.FullName = "google.protobuf.MessageOptions"
+)
+
+// Field names for google.protobuf.MessageOptions.
+const (
+ MessageOptions_MessageSetWireFormat_field_name protoreflect.Name = "message_set_wire_format"
+ MessageOptions_NoStandardDescriptorAccessor_field_name protoreflect.Name = "no_standard_descriptor_accessor"
+ MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated"
+ MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry"
+ MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
+
+ MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format"
+ MessageOptions_NoStandardDescriptorAccessor_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.no_standard_descriptor_accessor"
+ MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated"
+ MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry"
+ MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option"
+)
+
+// Field numbers for google.protobuf.MessageOptions.
+const (
+ MessageOptions_MessageSetWireFormat_field_number protoreflect.FieldNumber = 1
+ MessageOptions_NoStandardDescriptorAccessor_field_number protoreflect.FieldNumber = 2
+ MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3
+ MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7
+ MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
+)
+
+// Names for google.protobuf.FieldOptions.
+const (
+ FieldOptions_message_name protoreflect.Name = "FieldOptions"
+ FieldOptions_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions"
+)
+
+// Field names for google.protobuf.FieldOptions.
+const (
+ FieldOptions_Ctype_field_name protoreflect.Name = "ctype"
+ FieldOptions_Packed_field_name protoreflect.Name = "packed"
+ FieldOptions_Jstype_field_name protoreflect.Name = "jstype"
+ FieldOptions_Lazy_field_name protoreflect.Name = "lazy"
+ FieldOptions_Deprecated_field_name protoreflect.Name = "deprecated"
+ FieldOptions_Weak_field_name protoreflect.Name = "weak"
+ FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
+
+ FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype"
+ FieldOptions_Packed_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.packed"
+ FieldOptions_Jstype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.jstype"
+ FieldOptions_Lazy_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.lazy"
+ FieldOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.deprecated"
+ FieldOptions_Weak_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.weak"
+ FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option"
+)
+
+// Field numbers for google.protobuf.FieldOptions.
+const (
+ FieldOptions_Ctype_field_number protoreflect.FieldNumber = 1
+ FieldOptions_Packed_field_number protoreflect.FieldNumber = 2
+ FieldOptions_Jstype_field_number protoreflect.FieldNumber = 6
+ FieldOptions_Lazy_field_number protoreflect.FieldNumber = 5
+ FieldOptions_Deprecated_field_number protoreflect.FieldNumber = 3
+ FieldOptions_Weak_field_number protoreflect.FieldNumber = 10
+ FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
+)
+
+// Full and short names for google.protobuf.FieldOptions.CType.
+const (
+ FieldOptions_CType_enum_fullname = "google.protobuf.FieldOptions.CType"
+ FieldOptions_CType_enum_name = "CType"
+)
+
+// Full and short names for google.protobuf.FieldOptions.JSType.
+const (
+ FieldOptions_JSType_enum_fullname = "google.protobuf.FieldOptions.JSType"
+ FieldOptions_JSType_enum_name = "JSType"
+)
+
+// Names for google.protobuf.OneofOptions.
+const (
+ OneofOptions_message_name protoreflect.Name = "OneofOptions"
+ OneofOptions_message_fullname protoreflect.FullName = "google.protobuf.OneofOptions"
+)
+
+// Field names for google.protobuf.OneofOptions.
+const (
+ OneofOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
+
+ OneofOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.uninterpreted_option"
+)
+
+// Field numbers for google.protobuf.OneofOptions.
+const (
+ OneofOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
+)
+
+// Names for google.protobuf.EnumOptions.
+const (
+ EnumOptions_message_name protoreflect.Name = "EnumOptions"
+ EnumOptions_message_fullname protoreflect.FullName = "google.protobuf.EnumOptions"
+)
+
+// Field names for google.protobuf.EnumOptions.
+const (
+ EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias"
+ EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated"
+ EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
+
+ EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias"
+ EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated"
+ EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option"
+)
+
+// Field numbers for google.protobuf.EnumOptions.
+const (
+ EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2
+ EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3
+ EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
+)
+
+// Names for google.protobuf.EnumValueOptions.
+const (
+ EnumValueOptions_message_name protoreflect.Name = "EnumValueOptions"
+ EnumValueOptions_message_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions"
+)
+
+// Field names for google.protobuf.EnumValueOptions.
+const (
+ EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated"
+ EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
+
+ EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated"
+ EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option"
+)
+
+// Field numbers for google.protobuf.EnumValueOptions.
+const (
+ EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1
+ EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
+)
+
+// Names for google.protobuf.ServiceOptions.
+const (
+ ServiceOptions_message_name protoreflect.Name = "ServiceOptions"
+ ServiceOptions_message_fullname protoreflect.FullName = "google.protobuf.ServiceOptions"
+)
+
+// Field names for google.protobuf.ServiceOptions.
+const (
+ ServiceOptions_Deprecated_field_name protoreflect.Name = "deprecated"
+ ServiceOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
+
+ ServiceOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.deprecated"
+ ServiceOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.uninterpreted_option"
+)
+
+// Field numbers for google.protobuf.ServiceOptions.
+const (
+ ServiceOptions_Deprecated_field_number protoreflect.FieldNumber = 33
+ ServiceOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
+)
+
+// Names for google.protobuf.MethodOptions.
+const (
+ MethodOptions_message_name protoreflect.Name = "MethodOptions"
+ MethodOptions_message_fullname protoreflect.FullName = "google.protobuf.MethodOptions"
+)
+
+// Field names for google.protobuf.MethodOptions.
+const (
+ MethodOptions_Deprecated_field_name protoreflect.Name = "deprecated"
+ MethodOptions_IdempotencyLevel_field_name protoreflect.Name = "idempotency_level"
+ MethodOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
+
+ MethodOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.deprecated"
+ MethodOptions_IdempotencyLevel_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.idempotency_level"
+ MethodOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.uninterpreted_option"
+)
+
+// Field numbers for google.protobuf.MethodOptions.
+const (
+ MethodOptions_Deprecated_field_number protoreflect.FieldNumber = 33
+ MethodOptions_IdempotencyLevel_field_number protoreflect.FieldNumber = 34
+ MethodOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
+)
+
+// Full and short names for google.protobuf.MethodOptions.IdempotencyLevel.
+const (
+ MethodOptions_IdempotencyLevel_enum_fullname = "google.protobuf.MethodOptions.IdempotencyLevel"
+ MethodOptions_IdempotencyLevel_enum_name = "IdempotencyLevel"
+)
+
+// Names for google.protobuf.UninterpretedOption.
+const (
+ UninterpretedOption_message_name protoreflect.Name = "UninterpretedOption"
+ UninterpretedOption_message_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption"
+)
+
+// Field names for google.protobuf.UninterpretedOption.
+const (
+ UninterpretedOption_Name_field_name protoreflect.Name = "name"
+ UninterpretedOption_IdentifierValue_field_name protoreflect.Name = "identifier_value"
+ UninterpretedOption_PositiveIntValue_field_name protoreflect.Name = "positive_int_value"
+ UninterpretedOption_NegativeIntValue_field_name protoreflect.Name = "negative_int_value"
+ UninterpretedOption_DoubleValue_field_name protoreflect.Name = "double_value"
+ UninterpretedOption_StringValue_field_name protoreflect.Name = "string_value"
+ UninterpretedOption_AggregateValue_field_name protoreflect.Name = "aggregate_value"
+
+ UninterpretedOption_Name_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.name"
+ UninterpretedOption_IdentifierValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.identifier_value"
+ UninterpretedOption_PositiveIntValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.positive_int_value"
+ UninterpretedOption_NegativeIntValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.negative_int_value"
+ UninterpretedOption_DoubleValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.double_value"
+ UninterpretedOption_StringValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.string_value"
+ UninterpretedOption_AggregateValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.aggregate_value"
+)
+
+// Field numbers for google.protobuf.UninterpretedOption.
+const (
+ UninterpretedOption_Name_field_number protoreflect.FieldNumber = 2
+ UninterpretedOption_IdentifierValue_field_number protoreflect.FieldNumber = 3
+ UninterpretedOption_PositiveIntValue_field_number protoreflect.FieldNumber = 4
+ UninterpretedOption_NegativeIntValue_field_number protoreflect.FieldNumber = 5
+ UninterpretedOption_DoubleValue_field_number protoreflect.FieldNumber = 6
+ UninterpretedOption_StringValue_field_number protoreflect.FieldNumber = 7
+ UninterpretedOption_AggregateValue_field_number protoreflect.FieldNumber = 8
+)
+
+// Names for google.protobuf.UninterpretedOption.NamePart.
+const (
+ UninterpretedOption_NamePart_message_name protoreflect.Name = "NamePart"
+ UninterpretedOption_NamePart_message_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart"
+)
+
+// Field names for google.protobuf.UninterpretedOption.NamePart.
+const (
+ UninterpretedOption_NamePart_NamePart_field_name protoreflect.Name = "name_part"
+ UninterpretedOption_NamePart_IsExtension_field_name protoreflect.Name = "is_extension"
+
+ UninterpretedOption_NamePart_NamePart_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart.name_part"
+ UninterpretedOption_NamePart_IsExtension_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart.is_extension"
+)
+
+// Field numbers for google.protobuf.UninterpretedOption.NamePart.
+const (
+ UninterpretedOption_NamePart_NamePart_field_number protoreflect.FieldNumber = 1
+ UninterpretedOption_NamePart_IsExtension_field_number protoreflect.FieldNumber = 2
+)
+
+// Names for google.protobuf.SourceCodeInfo.
+const (
+ SourceCodeInfo_message_name protoreflect.Name = "SourceCodeInfo"
+ SourceCodeInfo_message_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo"
+)
+
+// Field names for google.protobuf.SourceCodeInfo.
+const (
+ SourceCodeInfo_Location_field_name protoreflect.Name = "location"
+
+ SourceCodeInfo_Location_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.location"
+)
+
+// Field numbers for google.protobuf.SourceCodeInfo.
+const (
+ SourceCodeInfo_Location_field_number protoreflect.FieldNumber = 1
+)
+
+// Names for google.protobuf.SourceCodeInfo.Location.
+const (
+ SourceCodeInfo_Location_message_name protoreflect.Name = "Location"
+ SourceCodeInfo_Location_message_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location"
+)
+
+// Field names for google.protobuf.SourceCodeInfo.Location.
+const (
+ SourceCodeInfo_Location_Path_field_name protoreflect.Name = "path"
+ SourceCodeInfo_Location_Span_field_name protoreflect.Name = "span"
+ SourceCodeInfo_Location_LeadingComments_field_name protoreflect.Name = "leading_comments"
+ SourceCodeInfo_Location_TrailingComments_field_name protoreflect.Name = "trailing_comments"
+ SourceCodeInfo_Location_LeadingDetachedComments_field_name protoreflect.Name = "leading_detached_comments"
+
+ SourceCodeInfo_Location_Path_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.path"
+ SourceCodeInfo_Location_Span_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.span"
+ SourceCodeInfo_Location_LeadingComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.leading_comments"
+ SourceCodeInfo_Location_TrailingComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.trailing_comments"
+ SourceCodeInfo_Location_LeadingDetachedComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.leading_detached_comments"
+)
+
+// Field numbers for google.protobuf.SourceCodeInfo.Location.
+const (
+ SourceCodeInfo_Location_Path_field_number protoreflect.FieldNumber = 1
+ SourceCodeInfo_Location_Span_field_number protoreflect.FieldNumber = 2
+ SourceCodeInfo_Location_LeadingComments_field_number protoreflect.FieldNumber = 3
+ SourceCodeInfo_Location_TrailingComments_field_number protoreflect.FieldNumber = 4
+ SourceCodeInfo_Location_LeadingDetachedComments_field_number protoreflect.FieldNumber = 6
+)
+
+// Names for google.protobuf.GeneratedCodeInfo.
+const (
+ GeneratedCodeInfo_message_name protoreflect.Name = "GeneratedCodeInfo"
+ GeneratedCodeInfo_message_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo"
+)
+
+// Field names for google.protobuf.GeneratedCodeInfo.
+const (
+ GeneratedCodeInfo_Annotation_field_name protoreflect.Name = "annotation"
+
+ GeneratedCodeInfo_Annotation_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.annotation"
+)
+
+// Field numbers for google.protobuf.GeneratedCodeInfo.
+const (
+ GeneratedCodeInfo_Annotation_field_number protoreflect.FieldNumber = 1
+)
+
+// Names for google.protobuf.GeneratedCodeInfo.Annotation.
+const (
+ GeneratedCodeInfo_Annotation_message_name protoreflect.Name = "Annotation"
+ GeneratedCodeInfo_Annotation_message_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation"
+)
+
+// Field names for google.protobuf.GeneratedCodeInfo.Annotation.
+const (
+ GeneratedCodeInfo_Annotation_Path_field_name protoreflect.Name = "path"
+ GeneratedCodeInfo_Annotation_SourceFile_field_name protoreflect.Name = "source_file"
+ GeneratedCodeInfo_Annotation_Begin_field_name protoreflect.Name = "begin"
+ GeneratedCodeInfo_Annotation_End_field_name protoreflect.Name = "end"
+
+ GeneratedCodeInfo_Annotation_Path_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.path"
+ GeneratedCodeInfo_Annotation_SourceFile_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.source_file"
+ GeneratedCodeInfo_Annotation_Begin_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.begin"
+ GeneratedCodeInfo_Annotation_End_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.end"
+)
+
+// Field numbers for google.protobuf.GeneratedCodeInfo.Annotation.
+const (
+ GeneratedCodeInfo_Annotation_Path_field_number protoreflect.FieldNumber = 1
+ GeneratedCodeInfo_Annotation_SourceFile_field_number protoreflect.FieldNumber = 2
+ GeneratedCodeInfo_Annotation_Begin_field_number protoreflect.FieldNumber = 3
+ GeneratedCodeInfo_Annotation_End_field_number protoreflect.FieldNumber = 4
+)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/doc.go b/vendor/google.golang.org/protobuf/internal/genid/doc.go
new file mode 100644
index 0000000..45ccd01
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/doc.go
@@ -0,0 +1,11 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package genid contains constants for declarations in descriptor.proto
+// and the well-known types.
+package genid
+
+import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+
+const GoogleProtobuf_package protoreflect.FullName = "google.protobuf"
diff --git a/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go b/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go
new file mode 100644
index 0000000..b070ef4
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go
@@ -0,0 +1,34 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package genid
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+const File_google_protobuf_duration_proto = "google/protobuf/duration.proto"
+
+// Names for google.protobuf.Duration.
+const (
+ Duration_message_name protoreflect.Name = "Duration"
+ Duration_message_fullname protoreflect.FullName = "google.protobuf.Duration"
+)
+
+// Field names for google.protobuf.Duration.
+const (
+ Duration_Seconds_field_name protoreflect.Name = "seconds"
+ Duration_Nanos_field_name protoreflect.Name = "nanos"
+
+ Duration_Seconds_field_fullname protoreflect.FullName = "google.protobuf.Duration.seconds"
+ Duration_Nanos_field_fullname protoreflect.FullName = "google.protobuf.Duration.nanos"
+)
+
+// Field numbers for google.protobuf.Duration.
+const (
+ Duration_Seconds_field_number protoreflect.FieldNumber = 1
+ Duration_Nanos_field_number protoreflect.FieldNumber = 2
+)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go b/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go
new file mode 100644
index 0000000..762abb3
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go
@@ -0,0 +1,19 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package genid
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+const File_google_protobuf_empty_proto = "google/protobuf/empty.proto"
+
+// Names for google.protobuf.Empty.
+const (
+ Empty_message_name protoreflect.Name = "Empty"
+ Empty_message_fullname protoreflect.FullName = "google.protobuf.Empty"
+)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go b/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go
new file mode 100644
index 0000000..70bed45
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go
@@ -0,0 +1,31 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package genid
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+const File_google_protobuf_field_mask_proto = "google/protobuf/field_mask.proto"
+
+// Names for google.protobuf.FieldMask.
+const (
+ FieldMask_message_name protoreflect.Name = "FieldMask"
+ FieldMask_message_fullname protoreflect.FullName = "google.protobuf.FieldMask"
+)
+
+// Field names for google.protobuf.FieldMask.
+const (
+ FieldMask_Paths_field_name protoreflect.Name = "paths"
+
+ FieldMask_Paths_field_fullname protoreflect.FullName = "google.protobuf.FieldMask.paths"
+)
+
+// Field numbers for google.protobuf.FieldMask.
+const (
+ FieldMask_Paths_field_number protoreflect.FieldNumber = 1
+)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/goname.go b/vendor/google.golang.org/protobuf/internal/genid/goname.go
new file mode 100644
index 0000000..693d2e9
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/goname.go
@@ -0,0 +1,25 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package genid
+
+// Go names of implementation-specific struct fields in generated messages.
+const (
+ State_goname = "state"
+
+ SizeCache_goname = "sizeCache"
+ SizeCacheA_goname = "XXX_sizecache"
+
+ WeakFields_goname = "weakFields"
+ WeakFieldsA_goname = "XXX_weak"
+
+ UnknownFields_goname = "unknownFields"
+ UnknownFieldsA_goname = "XXX_unrecognized"
+
+ ExtensionFields_goname = "extensionFields"
+ ExtensionFieldsA_goname = "XXX_InternalExtensions"
+ ExtensionFieldsB_goname = "XXX_extensions"
+
+ WeakFieldPrefix_goname = "XXX_weak_"
+)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
new file mode 100644
index 0000000..8f9ea02
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
@@ -0,0 +1,16 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package genid
+
+import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+
+// Generic field names and numbers for synthetic map entry messages.
+const (
+ MapEntry_Key_field_name protoreflect.Name = "key"
+ MapEntry_Value_field_name protoreflect.Name = "value"
+
+ MapEntry_Key_field_number protoreflect.FieldNumber = 1
+ MapEntry_Value_field_number protoreflect.FieldNumber = 2
+)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go b/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go
new file mode 100644
index 0000000..3e99ae1
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go
@@ -0,0 +1,31 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package genid
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+const File_google_protobuf_source_context_proto = "google/protobuf/source_context.proto"
+
+// Names for google.protobuf.SourceContext.
+const (
+ SourceContext_message_name protoreflect.Name = "SourceContext"
+ SourceContext_message_fullname protoreflect.FullName = "google.protobuf.SourceContext"
+)
+
+// Field names for google.protobuf.SourceContext.
+const (
+ SourceContext_FileName_field_name protoreflect.Name = "file_name"
+
+ SourceContext_FileName_field_fullname protoreflect.FullName = "google.protobuf.SourceContext.file_name"
+)
+
+// Field numbers for google.protobuf.SourceContext.
+const (
+ SourceContext_FileName_field_number protoreflect.FieldNumber = 1
+)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go
new file mode 100644
index 0000000..1a38944
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go
@@ -0,0 +1,116 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package genid
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+const File_google_protobuf_struct_proto = "google/protobuf/struct.proto"
+
+// Full and short names for google.protobuf.NullValue.
+const (
+ NullValue_enum_fullname = "google.protobuf.NullValue"
+ NullValue_enum_name = "NullValue"
+)
+
+// Names for google.protobuf.Struct.
+const (
+ Struct_message_name protoreflect.Name = "Struct"
+ Struct_message_fullname protoreflect.FullName = "google.protobuf.Struct"
+)
+
+// Field names for google.protobuf.Struct.
+const (
+ Struct_Fields_field_name protoreflect.Name = "fields"
+
+ Struct_Fields_field_fullname protoreflect.FullName = "google.protobuf.Struct.fields"
+)
+
+// Field numbers for google.protobuf.Struct.
+const (
+ Struct_Fields_field_number protoreflect.FieldNumber = 1
+)
+
+// Names for google.protobuf.Struct.FieldsEntry.
+const (
+ Struct_FieldsEntry_message_name protoreflect.Name = "FieldsEntry"
+ Struct_FieldsEntry_message_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry"
+)
+
+// Field names for google.protobuf.Struct.FieldsEntry.
+const (
+ Struct_FieldsEntry_Key_field_name protoreflect.Name = "key"
+ Struct_FieldsEntry_Value_field_name protoreflect.Name = "value"
+
+ Struct_FieldsEntry_Key_field_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry.key"
+ Struct_FieldsEntry_Value_field_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry.value"
+)
+
+// Field numbers for google.protobuf.Struct.FieldsEntry.
+const (
+ Struct_FieldsEntry_Key_field_number protoreflect.FieldNumber = 1
+ Struct_FieldsEntry_Value_field_number protoreflect.FieldNumber = 2
+)
+
+// Names for google.protobuf.Value.
+const (
+ Value_message_name protoreflect.Name = "Value"
+ Value_message_fullname protoreflect.FullName = "google.protobuf.Value"
+)
+
+// Field names for google.protobuf.Value.
+const (
+ Value_NullValue_field_name protoreflect.Name = "null_value"
+ Value_NumberValue_field_name protoreflect.Name = "number_value"
+ Value_StringValue_field_name protoreflect.Name = "string_value"
+ Value_BoolValue_field_name protoreflect.Name = "bool_value"
+ Value_StructValue_field_name protoreflect.Name = "struct_value"
+ Value_ListValue_field_name protoreflect.Name = "list_value"
+
+ Value_NullValue_field_fullname protoreflect.FullName = "google.protobuf.Value.null_value"
+ Value_NumberValue_field_fullname protoreflect.FullName = "google.protobuf.Value.number_value"
+ Value_StringValue_field_fullname protoreflect.FullName = "google.protobuf.Value.string_value"
+ Value_BoolValue_field_fullname protoreflect.FullName = "google.protobuf.Value.bool_value"
+ Value_StructValue_field_fullname protoreflect.FullName = "google.protobuf.Value.struct_value"
+ Value_ListValue_field_fullname protoreflect.FullName = "google.protobuf.Value.list_value"
+)
+
+// Field numbers for google.protobuf.Value.
+const (
+ Value_NullValue_field_number protoreflect.FieldNumber = 1
+ Value_NumberValue_field_number protoreflect.FieldNumber = 2
+ Value_StringValue_field_number protoreflect.FieldNumber = 3
+ Value_BoolValue_field_number protoreflect.FieldNumber = 4
+ Value_StructValue_field_number protoreflect.FieldNumber = 5
+ Value_ListValue_field_number protoreflect.FieldNumber = 6
+)
+
+// Oneof names for google.protobuf.Value.
+const (
+ Value_Kind_oneof_name protoreflect.Name = "kind"
+
+ Value_Kind_oneof_fullname protoreflect.FullName = "google.protobuf.Value.kind"
+)
+
+// Names for google.protobuf.ListValue.
+const (
+ ListValue_message_name protoreflect.Name = "ListValue"
+ ListValue_message_fullname protoreflect.FullName = "google.protobuf.ListValue"
+)
+
+// Field names for google.protobuf.ListValue.
+const (
+ ListValue_Values_field_name protoreflect.Name = "values"
+
+ ListValue_Values_field_fullname protoreflect.FullName = "google.protobuf.ListValue.values"
+)
+
+// Field numbers for google.protobuf.ListValue.
+const (
+ ListValue_Values_field_number protoreflect.FieldNumber = 1
+)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go b/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go
new file mode 100644
index 0000000..f5cd563
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go
@@ -0,0 +1,34 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package genid
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+const File_google_protobuf_timestamp_proto = "google/protobuf/timestamp.proto"
+
+// Names for google.protobuf.Timestamp.
+const (
+ Timestamp_message_name protoreflect.Name = "Timestamp"
+ Timestamp_message_fullname protoreflect.FullName = "google.protobuf.Timestamp"
+)
+
+// Field names for google.protobuf.Timestamp.
+const (
+ Timestamp_Seconds_field_name protoreflect.Name = "seconds"
+ Timestamp_Nanos_field_name protoreflect.Name = "nanos"
+
+ Timestamp_Seconds_field_fullname protoreflect.FullName = "google.protobuf.Timestamp.seconds"
+ Timestamp_Nanos_field_fullname protoreflect.FullName = "google.protobuf.Timestamp.nanos"
+)
+
+// Field numbers for google.protobuf.Timestamp.
+const (
+ Timestamp_Seconds_field_number protoreflect.FieldNumber = 1
+ Timestamp_Nanos_field_number protoreflect.FieldNumber = 2
+)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go
new file mode 100644
index 0000000..3bc7101
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go
@@ -0,0 +1,184 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package genid
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+const File_google_protobuf_type_proto = "google/protobuf/type.proto"
+
+// Full and short names for google.protobuf.Syntax.
+const (
+ Syntax_enum_fullname = "google.protobuf.Syntax"
+ Syntax_enum_name = "Syntax"
+)
+
+// Names for google.protobuf.Type.
+const (
+ Type_message_name protoreflect.Name = "Type"
+ Type_message_fullname protoreflect.FullName = "google.protobuf.Type"
+)
+
+// Field names for google.protobuf.Type.
+const (
+ Type_Name_field_name protoreflect.Name = "name"
+ Type_Fields_field_name protoreflect.Name = "fields"
+ Type_Oneofs_field_name protoreflect.Name = "oneofs"
+ Type_Options_field_name protoreflect.Name = "options"
+ Type_SourceContext_field_name protoreflect.Name = "source_context"
+ Type_Syntax_field_name protoreflect.Name = "syntax"
+
+ Type_Name_field_fullname protoreflect.FullName = "google.protobuf.Type.name"
+ Type_Fields_field_fullname protoreflect.FullName = "google.protobuf.Type.fields"
+ Type_Oneofs_field_fullname protoreflect.FullName = "google.protobuf.Type.oneofs"
+ Type_Options_field_fullname protoreflect.FullName = "google.protobuf.Type.options"
+ Type_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Type.source_context"
+ Type_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Type.syntax"
+)
+
+// Field numbers for google.protobuf.Type.
+const (
+ Type_Name_field_number protoreflect.FieldNumber = 1
+ Type_Fields_field_number protoreflect.FieldNumber = 2
+ Type_Oneofs_field_number protoreflect.FieldNumber = 3
+ Type_Options_field_number protoreflect.FieldNumber = 4
+ Type_SourceContext_field_number protoreflect.FieldNumber = 5
+ Type_Syntax_field_number protoreflect.FieldNumber = 6
+)
+
+// Names for google.protobuf.Field.
+const (
+ Field_message_name protoreflect.Name = "Field"
+ Field_message_fullname protoreflect.FullName = "google.protobuf.Field"
+)
+
+// Field names for google.protobuf.Field.
+const (
+ Field_Kind_field_name protoreflect.Name = "kind"
+ Field_Cardinality_field_name protoreflect.Name = "cardinality"
+ Field_Number_field_name protoreflect.Name = "number"
+ Field_Name_field_name protoreflect.Name = "name"
+ Field_TypeUrl_field_name protoreflect.Name = "type_url"
+ Field_OneofIndex_field_name protoreflect.Name = "oneof_index"
+ Field_Packed_field_name protoreflect.Name = "packed"
+ Field_Options_field_name protoreflect.Name = "options"
+ Field_JsonName_field_name protoreflect.Name = "json_name"
+ Field_DefaultValue_field_name protoreflect.Name = "default_value"
+
+ Field_Kind_field_fullname protoreflect.FullName = "google.protobuf.Field.kind"
+ Field_Cardinality_field_fullname protoreflect.FullName = "google.protobuf.Field.cardinality"
+ Field_Number_field_fullname protoreflect.FullName = "google.protobuf.Field.number"
+ Field_Name_field_fullname protoreflect.FullName = "google.protobuf.Field.name"
+ Field_TypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Field.type_url"
+ Field_OneofIndex_field_fullname protoreflect.FullName = "google.protobuf.Field.oneof_index"
+ Field_Packed_field_fullname protoreflect.FullName = "google.protobuf.Field.packed"
+ Field_Options_field_fullname protoreflect.FullName = "google.protobuf.Field.options"
+ Field_JsonName_field_fullname protoreflect.FullName = "google.protobuf.Field.json_name"
+ Field_DefaultValue_field_fullname protoreflect.FullName = "google.protobuf.Field.default_value"
+)
+
+// Field numbers for google.protobuf.Field.
+const (
+ Field_Kind_field_number protoreflect.FieldNumber = 1
+ Field_Cardinality_field_number protoreflect.FieldNumber = 2
+ Field_Number_field_number protoreflect.FieldNumber = 3
+ Field_Name_field_number protoreflect.FieldNumber = 4
+ Field_TypeUrl_field_number protoreflect.FieldNumber = 6
+ Field_OneofIndex_field_number protoreflect.FieldNumber = 7
+ Field_Packed_field_number protoreflect.FieldNumber = 8
+ Field_Options_field_number protoreflect.FieldNumber = 9
+ Field_JsonName_field_number protoreflect.FieldNumber = 10
+ Field_DefaultValue_field_number protoreflect.FieldNumber = 11
+)
+
+// Full and short names for google.protobuf.Field.Kind.
+const (
+ Field_Kind_enum_fullname = "google.protobuf.Field.Kind"
+ Field_Kind_enum_name = "Kind"
+)
+
+// Full and short names for google.protobuf.Field.Cardinality.
+const (
+ Field_Cardinality_enum_fullname = "google.protobuf.Field.Cardinality"
+ Field_Cardinality_enum_name = "Cardinality"
+)
+
+// Names for google.protobuf.Enum.
+const (
+ Enum_message_name protoreflect.Name = "Enum"
+ Enum_message_fullname protoreflect.FullName = "google.protobuf.Enum"
+)
+
+// Field names for google.protobuf.Enum.
+const (
+ Enum_Name_field_name protoreflect.Name = "name"
+ Enum_Enumvalue_field_name protoreflect.Name = "enumvalue"
+ Enum_Options_field_name protoreflect.Name = "options"
+ Enum_SourceContext_field_name protoreflect.Name = "source_context"
+ Enum_Syntax_field_name protoreflect.Name = "syntax"
+
+ Enum_Name_field_fullname protoreflect.FullName = "google.protobuf.Enum.name"
+ Enum_Enumvalue_field_fullname protoreflect.FullName = "google.protobuf.Enum.enumvalue"
+ Enum_Options_field_fullname protoreflect.FullName = "google.protobuf.Enum.options"
+ Enum_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Enum.source_context"
+ Enum_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Enum.syntax"
+)
+
+// Field numbers for google.protobuf.Enum.
+const (
+ Enum_Name_field_number protoreflect.FieldNumber = 1
+ Enum_Enumvalue_field_number protoreflect.FieldNumber = 2
+ Enum_Options_field_number protoreflect.FieldNumber = 3
+ Enum_SourceContext_field_number protoreflect.FieldNumber = 4
+ Enum_Syntax_field_number protoreflect.FieldNumber = 5
+)
+
+// Names for google.protobuf.EnumValue.
+const (
+ EnumValue_message_name protoreflect.Name = "EnumValue"
+ EnumValue_message_fullname protoreflect.FullName = "google.protobuf.EnumValue"
+)
+
+// Field names for google.protobuf.EnumValue.
+const (
+ EnumValue_Name_field_name protoreflect.Name = "name"
+ EnumValue_Number_field_name protoreflect.Name = "number"
+ EnumValue_Options_field_name protoreflect.Name = "options"
+
+ EnumValue_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.name"
+ EnumValue_Number_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.number"
+ EnumValue_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.options"
+)
+
+// Field numbers for google.protobuf.EnumValue.
+const (
+ EnumValue_Name_field_number protoreflect.FieldNumber = 1
+ EnumValue_Number_field_number protoreflect.FieldNumber = 2
+ EnumValue_Options_field_number protoreflect.FieldNumber = 3
+)
+
+// Names for google.protobuf.Option.
+const (
+ Option_message_name protoreflect.Name = "Option"
+ Option_message_fullname protoreflect.FullName = "google.protobuf.Option"
+)
+
+// Field names for google.protobuf.Option.
+const (
+ Option_Name_field_name protoreflect.Name = "name"
+ Option_Value_field_name protoreflect.Name = "value"
+
+ Option_Name_field_fullname protoreflect.FullName = "google.protobuf.Option.name"
+ Option_Value_field_fullname protoreflect.FullName = "google.protobuf.Option.value"
+)
+
+// Field numbers for google.protobuf.Option.
+const (
+ Option_Name_field_number protoreflect.FieldNumber = 1
+ Option_Value_field_number protoreflect.FieldNumber = 2
+)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
new file mode 100644
index 0000000..429384b
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
@@ -0,0 +1,13 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package genid
+
+import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+
+// Generic field name and number for messages in wrappers.proto.
+const (
+ WrapperValue_Value_field_name protoreflect.Name = "value"
+ WrapperValue_Value_field_number protoreflect.FieldNumber = 1
+)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go
new file mode 100644
index 0000000..72527d2
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go
@@ -0,0 +1,175 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package genid
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+const File_google_protobuf_wrappers_proto = "google/protobuf/wrappers.proto"
+
+// Names for google.protobuf.DoubleValue.
+const (
+ DoubleValue_message_name protoreflect.Name = "DoubleValue"
+ DoubleValue_message_fullname protoreflect.FullName = "google.protobuf.DoubleValue"
+)
+
+// Field names for google.protobuf.DoubleValue.
+const (
+ DoubleValue_Value_field_name protoreflect.Name = "value"
+
+ DoubleValue_Value_field_fullname protoreflect.FullName = "google.protobuf.DoubleValue.value"
+)
+
+// Field numbers for google.protobuf.DoubleValue.
+const (
+ DoubleValue_Value_field_number protoreflect.FieldNumber = 1
+)
+
+// Names for google.protobuf.FloatValue.
+const (
+ FloatValue_message_name protoreflect.Name = "FloatValue"
+ FloatValue_message_fullname protoreflect.FullName = "google.protobuf.FloatValue"
+)
+
+// Field names for google.protobuf.FloatValue.
+const (
+ FloatValue_Value_field_name protoreflect.Name = "value"
+
+ FloatValue_Value_field_fullname protoreflect.FullName = "google.protobuf.FloatValue.value"
+)
+
+// Field numbers for google.protobuf.FloatValue.
+const (
+ FloatValue_Value_field_number protoreflect.FieldNumber = 1
+)
+
+// Names for google.protobuf.Int64Value.
+const (
+ Int64Value_message_name protoreflect.Name = "Int64Value"
+ Int64Value_message_fullname protoreflect.FullName = "google.protobuf.Int64Value"
+)
+
+// Field names for google.protobuf.Int64Value.
+const (
+ Int64Value_Value_field_name protoreflect.Name = "value"
+
+ Int64Value_Value_field_fullname protoreflect.FullName = "google.protobuf.Int64Value.value"
+)
+
+// Field numbers for google.protobuf.Int64Value.
+const (
+ Int64Value_Value_field_number protoreflect.FieldNumber = 1
+)
+
+// Names for google.protobuf.UInt64Value.
+const (
+ UInt64Value_message_name protoreflect.Name = "UInt64Value"
+ UInt64Value_message_fullname protoreflect.FullName = "google.protobuf.UInt64Value"
+)
+
+// Field names for google.protobuf.UInt64Value.
+const (
+ UInt64Value_Value_field_name protoreflect.Name = "value"
+
+ UInt64Value_Value_field_fullname protoreflect.FullName = "google.protobuf.UInt64Value.value"
+)
+
+// Field numbers for google.protobuf.UInt64Value.
+const (
+ UInt64Value_Value_field_number protoreflect.FieldNumber = 1
+)
+
+// Names for google.protobuf.Int32Value.
+const (
+ Int32Value_message_name protoreflect.Name = "Int32Value"
+ Int32Value_message_fullname protoreflect.FullName = "google.protobuf.Int32Value"
+)
+
+// Field names for google.protobuf.Int32Value.
+const (
+ Int32Value_Value_field_name protoreflect.Name = "value"
+
+ Int32Value_Value_field_fullname protoreflect.FullName = "google.protobuf.Int32Value.value"
+)
+
+// Field numbers for google.protobuf.Int32Value.
+const (
+ Int32Value_Value_field_number protoreflect.FieldNumber = 1
+)
+
+// Names for google.protobuf.UInt32Value.
+const (
+ UInt32Value_message_name protoreflect.Name = "UInt32Value"
+ UInt32Value_message_fullname protoreflect.FullName = "google.protobuf.UInt32Value"
+)
+
+// Field names for google.protobuf.UInt32Value.
+const (
+ UInt32Value_Value_field_name protoreflect.Name = "value"
+
+ UInt32Value_Value_field_fullname protoreflect.FullName = "google.protobuf.UInt32Value.value"
+)
+
+// Field numbers for google.protobuf.UInt32Value.
+const (
+ UInt32Value_Value_field_number protoreflect.FieldNumber = 1
+)
+
+// Names for google.protobuf.BoolValue.
+const (
+ BoolValue_message_name protoreflect.Name = "BoolValue"
+ BoolValue_message_fullname protoreflect.FullName = "google.protobuf.BoolValue"
+)
+
+// Field names for google.protobuf.BoolValue.
+const (
+ BoolValue_Value_field_name protoreflect.Name = "value"
+
+ BoolValue_Value_field_fullname protoreflect.FullName = "google.protobuf.BoolValue.value"
+)
+
+// Field numbers for google.protobuf.BoolValue.
+const (
+ BoolValue_Value_field_number protoreflect.FieldNumber = 1
+)
+
+// Names for google.protobuf.StringValue.
+const (
+ StringValue_message_name protoreflect.Name = "StringValue"
+ StringValue_message_fullname protoreflect.FullName = "google.protobuf.StringValue"
+)
+
+// Field names for google.protobuf.StringValue.
+const (
+ StringValue_Value_field_name protoreflect.Name = "value"
+
+ StringValue_Value_field_fullname protoreflect.FullName = "google.protobuf.StringValue.value"
+)
+
+// Field numbers for google.protobuf.StringValue.
+const (
+ StringValue_Value_field_number protoreflect.FieldNumber = 1
+)
+
+// Names for google.protobuf.BytesValue.
+const (
+ BytesValue_message_name protoreflect.Name = "BytesValue"
+ BytesValue_message_fullname protoreflect.FullName = "google.protobuf.BytesValue"
+)
+
+// Field names for google.protobuf.BytesValue.
+const (
+ BytesValue_Value_field_name protoreflect.Name = "value"
+
+ BytesValue_Value_field_fullname protoreflect.FullName = "google.protobuf.BytesValue.value"
+)
+
+// Field numbers for google.protobuf.BytesValue.
+const (
+ BytesValue_Value_field_number protoreflect.FieldNumber = 1
+)
diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go
new file mode 100644
index 0000000..abee5f3
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go
@@ -0,0 +1,177 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+
+ "google.golang.org/protobuf/encoding/prototext"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/proto"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// Export is a zero-length named type that exists only to export a set of
+// functions that we do not want to appear in godoc.
+type Export struct{}
+
+// NewError formats a string according to the format specifier and arguments and
+// returns an error that has a "proto" prefix.
+func (Export) NewError(f string, x ...interface{}) error {
+ return errors.New(f, x...)
+}
+
+// enum is any enum type generated by protoc-gen-go
+// and must be a named int32 type.
+type enum = interface{}
+
+// EnumOf returns the protoreflect.Enum interface over e.
+// It returns nil if e is nil.
+func (Export) EnumOf(e enum) pref.Enum {
+ switch e := e.(type) {
+ case nil:
+ return nil
+ case pref.Enum:
+ return e
+ default:
+ return legacyWrapEnum(reflect.ValueOf(e))
+ }
+}
+
+// EnumDescriptorOf returns the protoreflect.EnumDescriptor for e.
+// It returns nil if e is nil.
+func (Export) EnumDescriptorOf(e enum) pref.EnumDescriptor {
+ switch e := e.(type) {
+ case nil:
+ return nil
+ case pref.Enum:
+ return e.Descriptor()
+ default:
+ return LegacyLoadEnumDesc(reflect.TypeOf(e))
+ }
+}
+
+// EnumTypeOf returns the protoreflect.EnumType for e.
+// It returns nil if e is nil.
+func (Export) EnumTypeOf(e enum) pref.EnumType {
+ switch e := e.(type) {
+ case nil:
+ return nil
+ case pref.Enum:
+ return e.Type()
+ default:
+ return legacyLoadEnumType(reflect.TypeOf(e))
+ }
+}
+
+// EnumStringOf returns the enum value as a string, either as the name if
+// the number is resolvable, or the number formatted as a string.
+func (Export) EnumStringOf(ed pref.EnumDescriptor, n pref.EnumNumber) string {
+ ev := ed.Values().ByNumber(n)
+ if ev != nil {
+ return string(ev.Name())
+ }
+ return strconv.Itoa(int(n))
+}
+
+// message is any message type generated by protoc-gen-go
+// and must be a pointer to a named struct type.
+type message = interface{}
+
+// legacyMessageWrapper wraps a v2 message as a v1 message.
+type legacyMessageWrapper struct{ m pref.ProtoMessage }
+
+func (m legacyMessageWrapper) Reset() { proto.Reset(m.m) }
+func (m legacyMessageWrapper) String() string { return Export{}.MessageStringOf(m.m) }
+func (m legacyMessageWrapper) ProtoMessage() {}
+
+// ProtoMessageV1Of converts either a v1 or v2 message to a v1 message.
+// It returns nil if m is nil.
+func (Export) ProtoMessageV1Of(m message) piface.MessageV1 {
+ switch mv := m.(type) {
+ case nil:
+ return nil
+ case piface.MessageV1:
+ return mv
+ case unwrapper:
+ return Export{}.ProtoMessageV1Of(mv.protoUnwrap())
+ case pref.ProtoMessage:
+ return legacyMessageWrapper{mv}
+ default:
+ panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m))
+ }
+}
+
+func (Export) protoMessageV2Of(m message) pref.ProtoMessage {
+ switch mv := m.(type) {
+ case nil:
+ return nil
+ case pref.ProtoMessage:
+ return mv
+ case legacyMessageWrapper:
+ return mv.m
+ case piface.MessageV1:
+ return nil
+ default:
+ panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m))
+ }
+}
+
+// ProtoMessageV2Of converts either a v1 or v2 message to a v2 message.
+// It returns nil if m is nil.
+func (Export) ProtoMessageV2Of(m message) pref.ProtoMessage {
+ if m == nil {
+ return nil
+ }
+ if mv := (Export{}).protoMessageV2Of(m); mv != nil {
+ return mv
+ }
+ return legacyWrapMessage(reflect.ValueOf(m)).Interface()
+}
+
+// MessageOf returns the protoreflect.Message interface over m.
+// It returns nil if m is nil.
+func (Export) MessageOf(m message) pref.Message {
+ if m == nil {
+ return nil
+ }
+ if mv := (Export{}).protoMessageV2Of(m); mv != nil {
+ return mv.ProtoReflect()
+ }
+ return legacyWrapMessage(reflect.ValueOf(m))
+}
+
+// MessageDescriptorOf returns the protoreflect.MessageDescriptor for m.
+// It returns nil if m is nil.
+func (Export) MessageDescriptorOf(m message) pref.MessageDescriptor {
+ if m == nil {
+ return nil
+ }
+ if mv := (Export{}).protoMessageV2Of(m); mv != nil {
+ return mv.ProtoReflect().Descriptor()
+ }
+ return LegacyLoadMessageDesc(reflect.TypeOf(m))
+}
+
+// MessageTypeOf returns the protoreflect.MessageType for m.
+// It returns nil if m is nil.
+func (Export) MessageTypeOf(m message) pref.MessageType {
+ if m == nil {
+ return nil
+ }
+ if mv := (Export{}).protoMessageV2Of(m); mv != nil {
+ return mv.ProtoReflect().Type()
+ }
+ return legacyLoadMessageType(reflect.TypeOf(m), "")
+}
+
+// MessageStringOf returns the message value as a string,
+// which is the message serialized in the protobuf text format.
+func (Export) MessageStringOf(m pref.ProtoMessage) string {
+ return prototext.MarshalOptions{Multiline: false}.Format(m)
+}
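The Export conversions between the v1 (github.com/golang/protobuf) and v2 (google.golang.org/protobuf) message representations are normally reached from application code through the public protoadapt package, which wraps this internal type. A minimal sketch, using the well-known emptypb message purely as a concrete example:

package main

import (
	"fmt"

	"google.golang.org/protobuf/protoadapt"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	m2 := &emptypb.Empty{}           // a v2 message
	m1 := protoadapt.MessageV1Of(m2) // view it through the v1 interface
	back := protoadapt.MessageV2Of(m1)

	// Prints "google.protobuf.Empty": the round trip preserves the message.
	fmt.Println(back.ProtoReflect().Descriptor().FullName())
}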
diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
new file mode 100644
index 0000000..b82341e
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
@@ -0,0 +1,141 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "sync"
+
+ "google.golang.org/protobuf/internal/errors"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+func (mi *MessageInfo) checkInitialized(in piface.CheckInitializedInput) (piface.CheckInitializedOutput, error) {
+ var p pointer
+ if ms, ok := in.Message.(*messageState); ok {
+ p = ms.pointer()
+ } else {
+ p = in.Message.(*messageReflectWrapper).pointer()
+ }
+ return piface.CheckInitializedOutput{}, mi.checkInitializedPointer(p)
+}
+
+func (mi *MessageInfo) checkInitializedPointer(p pointer) error {
+ mi.init()
+ if !mi.needsInitCheck {
+ return nil
+ }
+ if p.IsNil() {
+ for _, f := range mi.orderedCoderFields {
+ if f.isRequired {
+ return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName()))
+ }
+ }
+ return nil
+ }
+ if mi.extensionOffset.IsValid() {
+ e := p.Apply(mi.extensionOffset).Extensions()
+ if err := mi.isInitExtensions(e); err != nil {
+ return err
+ }
+ }
+ for _, f := range mi.orderedCoderFields {
+ if !f.isRequired && f.funcs.isInit == nil {
+ continue
+ }
+ fptr := p.Apply(f.offset)
+ if f.isPointer && fptr.Elem().IsNil() {
+ if f.isRequired {
+ return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName()))
+ }
+ continue
+ }
+ if f.funcs.isInit == nil {
+ continue
+ }
+ if err := f.funcs.isInit(fptr, f); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (mi *MessageInfo) isInitExtensions(ext *map[int32]ExtensionField) error {
+ if ext == nil {
+ return nil
+ }
+ for _, x := range *ext {
+ ei := getExtensionFieldInfo(x.Type())
+ if ei.funcs.isInit == nil {
+ continue
+ }
+ v := x.Value()
+ if !v.IsValid() {
+ continue
+ }
+ if err := ei.funcs.isInit(v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+var (
+ needsInitCheckMu sync.Mutex
+ needsInitCheckMap sync.Map
+)
+
+// needsInitCheck reports whether a message needs to be checked for partial initialization.
+//
+// It returns true if the message transitively includes any required or extension fields.
+func needsInitCheck(md pref.MessageDescriptor) bool {
+ if v, ok := needsInitCheckMap.Load(md); ok {
+ if has, ok := v.(bool); ok {
+ return has
+ }
+ }
+ needsInitCheckMu.Lock()
+ defer needsInitCheckMu.Unlock()
+ return needsInitCheckLocked(md)
+}
+
+func needsInitCheckLocked(md pref.MessageDescriptor) (has bool) {
+ if v, ok := needsInitCheckMap.Load(md); ok {
+ // If has is true, we've previously determined that this message
+ // needs init checks.
+ //
+ // If has is false, we've previously determined that it can never
+ // be uninitialized.
+ //
+ // If has is not a bool, we've just encountered a cycle in the
+ // message graph. In this case, it is safe to return false: If
+ // the message does have required fields, we'll detect them later
+ // in the graph traversal.
+ has, ok := v.(bool)
+ return ok && has
+ }
+ needsInitCheckMap.Store(md, struct{}{}) // avoid cycles while descending into this message
+ defer func() {
+ needsInitCheckMap.Store(md, has)
+ }()
+ if md.RequiredNumbers().Len() > 0 {
+ return true
+ }
+ if md.ExtensionRanges().Len() > 0 {
+ return true
+ }
+ for i := 0; i < md.Fields().Len(); i++ {
+ fd := md.Fields().Get(i)
+ // Map keys are never messages, so just consider the map value.
+ if fd.IsMap() {
+ fd = fd.MapValue()
+ }
+ fmd := fd.Message()
+ if fmd != nil && needsInitCheckLocked(fmd) {
+ return true
+ }
+ }
+ return false
+}
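The public entry point for this initialization machinery is proto.CheckInitialized. A minimal sketch using descriptorpb.UninterpretedOption_NamePart, one of the few well-known messages that declares proto2 required fields:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// NamePart declares two required fields, so an empty value fails the
	// initialization check implemented above.
	m := &descriptorpb.UninterpretedOption_NamePart{}
	fmt.Println(proto.CheckInitialized(m)) // reports the unset required fields

	m.NamePart = proto.String("foo")
	m.IsExtension = proto.Bool(false)
	fmt.Println(proto.CheckInitialized(m)) // <nil>
}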
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
new file mode 100644
index 0000000..08d3517
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
@@ -0,0 +1,223 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "sync"
+ "sync/atomic"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/errors"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type extensionFieldInfo struct {
+ wiretag uint64
+ tagsize int
+ unmarshalNeedsValue bool
+ funcs valueCoderFuncs
+ validation validationInfo
+}
+
+var legacyExtensionFieldInfoCache sync.Map // map[protoreflect.ExtensionType]*extensionFieldInfo
+
+func getExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo {
+ if xi, ok := xt.(*ExtensionInfo); ok {
+ xi.lazyInit()
+ return xi.info
+ }
+ return legacyLoadExtensionFieldInfo(xt)
+}
+
+// legacyLoadExtensionFieldInfo dynamically loads a *ExtensionInfo for xt.
+func legacyLoadExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo {
+ if xi, ok := legacyExtensionFieldInfoCache.Load(xt); ok {
+ return xi.(*extensionFieldInfo)
+ }
+ e := makeExtensionFieldInfo(xt.TypeDescriptor())
+ if e, ok := legacyMessageTypeCache.LoadOrStore(xt, e); ok {
+ return e.(*extensionFieldInfo)
+ }
+ return e
+}
+
+func makeExtensionFieldInfo(xd pref.ExtensionDescriptor) *extensionFieldInfo {
+ var wiretag uint64
+ if !xd.IsPacked() {
+ wiretag = protowire.EncodeTag(xd.Number(), wireTypes[xd.Kind()])
+ } else {
+ wiretag = protowire.EncodeTag(xd.Number(), protowire.BytesType)
+ }
+ e := &extensionFieldInfo{
+ wiretag: wiretag,
+ tagsize: protowire.SizeVarint(wiretag),
+ funcs: encoderFuncsForValue(xd),
+ }
+ // Does the unmarshal function need a value passed to it?
+ // This is true for composite types, where we pass in a message, list, or map to fill in,
+ // and for enums, where we pass in a prototype value to specify the concrete enum type.
+ switch xd.Kind() {
+ case pref.MessageKind, pref.GroupKind, pref.EnumKind:
+ e.unmarshalNeedsValue = true
+ default:
+ if xd.Cardinality() == pref.Repeated {
+ e.unmarshalNeedsValue = true
+ }
+ }
+ return e
+}
+
+type lazyExtensionValue struct {
+ atomicOnce uint32 // atomically set if value is valid
+ mu sync.Mutex
+ xi *extensionFieldInfo
+ value pref.Value
+ b []byte
+ fn func() pref.Value
+}
+
+type ExtensionField struct {
+ typ pref.ExtensionType
+
+ // value is either the value of GetValue,
+ // or a *lazyExtensionValue that then returns the value of GetValue.
+ value pref.Value
+ lazy *lazyExtensionValue
+}
+
+func (f *ExtensionField) appendLazyBytes(xt pref.ExtensionType, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, b []byte) {
+ if f.lazy == nil {
+ f.lazy = &lazyExtensionValue{xi: xi}
+ }
+ f.typ = xt
+ f.lazy.xi = xi
+ f.lazy.b = protowire.AppendTag(f.lazy.b, num, wtyp)
+ f.lazy.b = append(f.lazy.b, b...)
+}
+
+func (f *ExtensionField) canLazy(xt pref.ExtensionType) bool {
+ if f.typ == nil {
+ return true
+ }
+ if f.typ == xt && f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0 {
+ return true
+ }
+ return false
+}
+
+func (f *ExtensionField) lazyInit() {
+ f.lazy.mu.Lock()
+ defer f.lazy.mu.Unlock()
+ if atomic.LoadUint32(&f.lazy.atomicOnce) == 1 {
+ return
+ }
+ if f.lazy.xi != nil {
+ b := f.lazy.b
+ val := f.typ.New()
+ for len(b) > 0 {
+ var tag uint64
+ if b[0] < 0x80 {
+ tag = uint64(b[0])
+ b = b[1:]
+ } else if len(b) >= 2 && b[1] < 128 {
+ tag = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ b = b[2:]
+ } else {
+ var n int
+ tag, n = protowire.ConsumeVarint(b)
+ if n < 0 {
+ panic(errors.New("bad tag in lazy extension decoding"))
+ }
+ b = b[n:]
+ }
+ num := protowire.Number(tag >> 3)
+ wtyp := protowire.Type(tag & 7)
+ var out unmarshalOutput
+ var err error
+ val, out, err = f.lazy.xi.funcs.unmarshal(b, val, num, wtyp, lazyUnmarshalOptions)
+ if err != nil {
+ panic(errors.New("decode failure in lazy extension decoding: %v", err))
+ }
+ b = b[out.n:]
+ }
+ f.lazy.value = val
+ } else {
+ f.lazy.value = f.lazy.fn()
+ }
+ f.lazy.xi = nil
+ f.lazy.fn = nil
+ f.lazy.b = nil
+ atomic.StoreUint32(&f.lazy.atomicOnce, 1)
+}
+
+// Set sets the type and value of the extension field.
+// This must not be called concurrently.
+func (f *ExtensionField) Set(t pref.ExtensionType, v pref.Value) {
+ f.typ = t
+ f.value = v
+ f.lazy = nil
+}
+
+// SetLazy sets the type and a value that is to be lazily evaluated upon first use.
+// This must not be called concurrently.
+func (f *ExtensionField) SetLazy(t pref.ExtensionType, fn func() pref.Value) {
+ f.typ = t
+ f.lazy = &lazyExtensionValue{fn: fn}
+}
+
+// Value returns the value of the extension field.
+// This may be called concurrently.
+func (f *ExtensionField) Value() pref.Value {
+ if f.lazy != nil {
+ if atomic.LoadUint32(&f.lazy.atomicOnce) == 0 {
+ f.lazyInit()
+ }
+ return f.lazy.value
+ }
+ return f.value
+}
+
+// Type returns the type of the extension field.
+// This may be called concurrently.
+func (f ExtensionField) Type() pref.ExtensionType {
+ return f.typ
+}
+
+// IsSet returns whether the extension field is set.
+// This may be called concurrently.
+func (f ExtensionField) IsSet() bool {
+ return f.typ != nil
+}
+
+// IsLazy reports whether a field is lazily encoded.
+// It is exported for testing.
+func IsLazy(m pref.Message, fd pref.FieldDescriptor) bool {
+ var mi *MessageInfo
+ var p pointer
+ switch m := m.(type) {
+ case *messageState:
+ mi = m.messageInfo()
+ p = m.pointer()
+ case *messageReflectWrapper:
+ mi = m.messageInfo()
+ p = m.pointer()
+ default:
+ return false
+ }
+ xd, ok := fd.(pref.ExtensionTypeDescriptor)
+ if !ok {
+ return false
+ }
+ xt := xd.Type()
+ ext := mi.extensionMap(p)
+ if ext == nil {
+ return false
+ }
+ f, ok := (*ext)[int32(fd.Number())]
+ if !ok {
+ return false
+ }
+ return f.typ == xt && f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0
+}
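lazyInit above decodes one- and two-byte varint tags inline and only falls back to protowire.ConsumeVarint for longer tags. The standalone sketch below shows that same fast path; consumeTagFast is an illustrative name, not part of this package.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// consumeTagFast decodes a field tag, handling the common one- and two-byte
// encodings inline before deferring to the general varint decoder.
func consumeTagFast(b []byte) (num protowire.Number, wtyp protowire.Type, n int) {
	var tag uint64
	switch {
	case len(b) >= 1 && b[0] < 0x80:
		tag, n = uint64(b[0]), 1
	case len(b) >= 2 && b[1] < 0x80:
		tag, n = uint64(b[0]&0x7f)|uint64(b[1])<<7, 2
	default:
		tag, n = protowire.ConsumeVarint(b)
	}
	if n < 0 {
		return 0, 0, n
	}
	return protowire.Number(tag >> 3), protowire.Type(tag & 7), n
}

func main() {
	b := protowire.AppendTag(nil, 1000, protowire.BytesType)
	num, wtyp, n := consumeTagFast(b)
	fmt.Println(num, wtyp, n) // 1000 2 2
}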
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
new file mode 100644
index 0000000..cb4b482
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
@@ -0,0 +1,830 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+ "sync"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/proto"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ preg "google.golang.org/protobuf/reflect/protoregistry"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+type errInvalidUTF8 struct{}
+
+func (errInvalidUTF8) Error() string { return "string field contains invalid UTF-8" }
+func (errInvalidUTF8) InvalidUTF8() bool { return true }
+func (errInvalidUTF8) Unwrap() error { return errors.Error }
+
+// initOneofFieldCoders initializes the fast-path functions for the fields in a oneof.
+//
+// For size, marshal, and isInit operations, functions are set only on the first field
+// in the oneof. The functions are called when the oneof is non-nil, and will dispatch
+// to the appropriate field-specific function as necessary.
+//
+// The unmarshal function is set on each field individually as usual.
+func (mi *MessageInfo) initOneofFieldCoders(od pref.OneofDescriptor, si structInfo) {
+ fs := si.oneofsByName[od.Name()]
+ ft := fs.Type
+ oneofFields := make(map[reflect.Type]*coderFieldInfo)
+ needIsInit := false
+ fields := od.Fields()
+ for i, lim := 0, fields.Len(); i < lim; i++ {
+ fd := od.Fields().Get(i)
+ num := fd.Number()
+ // Make a copy of the original coderFieldInfo for use in unmarshaling.
+ //
+ // oneofFields[oneofType].funcs.marshal is the field-specific marshal function.
+ //
+ // mi.coderFields[num].marshal is set on only the first field in the oneof,
+ // and dispatches to the field-specific marshaler in oneofFields.
+ cf := *mi.coderFields[num]
+ ot := si.oneofWrappersByNumber[num]
+ cf.ft = ot.Field(0).Type
+ cf.mi, cf.funcs = fieldCoder(fd, cf.ft)
+ oneofFields[ot] = &cf
+ if cf.funcs.isInit != nil {
+ needIsInit = true
+ }
+ mi.coderFields[num].funcs.unmarshal = func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
+ var vw reflect.Value // pointer to wrapper type
+ vi := p.AsValueOf(ft).Elem() // oneof field value of interface kind
+ if !vi.IsNil() && !vi.Elem().IsNil() && vi.Elem().Elem().Type() == ot {
+ vw = vi.Elem()
+ } else {
+ vw = reflect.New(ot)
+ }
+ out, err := cf.funcs.unmarshal(b, pointerOfValue(vw).Apply(zeroOffset), wtyp, &cf, opts)
+ if err != nil {
+ return out, err
+ }
+ vi.Set(vw)
+ return out, nil
+ }
+ }
+ getInfo := func(p pointer) (pointer, *coderFieldInfo) {
+ v := p.AsValueOf(ft).Elem()
+ if v.IsNil() {
+ return pointer{}, nil
+ }
+ v = v.Elem() // interface -> *struct
+ if v.IsNil() {
+ return pointer{}, nil
+ }
+ return pointerOfValue(v).Apply(zeroOffset), oneofFields[v.Elem().Type()]
+ }
+ first := mi.coderFields[od.Fields().Get(0).Number()]
+ first.funcs.size = func(p pointer, _ *coderFieldInfo, opts marshalOptions) int {
+ p, info := getInfo(p)
+ if info == nil || info.funcs.size == nil {
+ return 0
+ }
+ return info.funcs.size(p, info, opts)
+ }
+ first.funcs.marshal = func(b []byte, p pointer, _ *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ p, info := getInfo(p)
+ if info == nil || info.funcs.marshal == nil {
+ return b, nil
+ }
+ return info.funcs.marshal(b, p, info, opts)
+ }
+ first.funcs.merge = func(dst, src pointer, _ *coderFieldInfo, opts mergeOptions) {
+ srcp, srcinfo := getInfo(src)
+ if srcinfo == nil || srcinfo.funcs.merge == nil {
+ return
+ }
+ dstp, dstinfo := getInfo(dst)
+ if dstinfo != srcinfo {
+ dst.AsValueOf(ft).Elem().Set(reflect.New(src.AsValueOf(ft).Elem().Elem().Elem().Type()))
+ dstp = pointerOfValue(dst.AsValueOf(ft).Elem().Elem()).Apply(zeroOffset)
+ }
+ srcinfo.funcs.merge(dstp, srcp, srcinfo, opts)
+ }
+ if needIsInit {
+ first.funcs.isInit = func(p pointer, _ *coderFieldInfo) error {
+ p, info := getInfo(p)
+ if info == nil || info.funcs.isInit == nil {
+ return nil
+ }
+ return info.funcs.isInit(p, info)
+ }
+ }
+}
+
+func makeWeakMessageFieldCoder(fd pref.FieldDescriptor) pointerCoderFuncs {
+ var once sync.Once
+ var messageType pref.MessageType
+ lazyInit := func() {
+ once.Do(func() {
+ messageName := fd.Message().FullName()
+ messageType, _ = preg.GlobalTypes.FindMessageByName(messageName)
+ })
+ }
+
+ return pointerCoderFuncs{
+ size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+ m, ok := p.WeakFields().get(f.num)
+ if !ok {
+ return 0
+ }
+ lazyInit()
+ if messageType == nil {
+ panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName()))
+ }
+ return sizeMessage(m, f.tagsize, opts)
+ },
+ marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ m, ok := p.WeakFields().get(f.num)
+ if !ok {
+ return b, nil
+ }
+ lazyInit()
+ if messageType == nil {
+ panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName()))
+ }
+ return appendMessage(b, m, f.wiretag, opts)
+ },
+ unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
+ fs := p.WeakFields()
+ m, ok := fs.get(f.num)
+ if !ok {
+ lazyInit()
+ if messageType == nil {
+ return unmarshalOutput{}, errUnknown
+ }
+ m = messageType.New().Interface()
+ fs.set(f.num, m)
+ }
+ return consumeMessage(b, m, wtyp, opts)
+ },
+ isInit: func(p pointer, f *coderFieldInfo) error {
+ m, ok := p.WeakFields().get(f.num)
+ if !ok {
+ return nil
+ }
+ return proto.CheckInitialized(m)
+ },
+ merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
+ sm, ok := src.WeakFields().get(f.num)
+ if !ok {
+ return
+ }
+ dm, ok := dst.WeakFields().get(f.num)
+ if !ok {
+ lazyInit()
+ if messageType == nil {
+ panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName()))
+ }
+ dm = messageType.New().Interface()
+ dst.WeakFields().set(f.num, dm)
+ }
+ opts.Merge(dm, sm)
+ },
+ }
+}
+
+func makeMessageFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs {
+ if mi := getMessageInfo(ft); mi != nil {
+ funcs := pointerCoderFuncs{
+ size: sizeMessageInfo,
+ marshal: appendMessageInfo,
+ unmarshal: consumeMessageInfo,
+ merge: mergeMessage,
+ }
+ if needsInitCheck(mi.Desc) {
+ funcs.isInit = isInitMessageInfo
+ }
+ return funcs
+ } else {
+ return pointerCoderFuncs{
+ size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+ m := asMessage(p.AsValueOf(ft).Elem())
+ return sizeMessage(m, f.tagsize, opts)
+ },
+ marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ m := asMessage(p.AsValueOf(ft).Elem())
+ return appendMessage(b, m, f.wiretag, opts)
+ },
+ unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
+ mp := p.AsValueOf(ft).Elem()
+ if mp.IsNil() {
+ mp.Set(reflect.New(ft.Elem()))
+ }
+ return consumeMessage(b, asMessage(mp), wtyp, opts)
+ },
+ isInit: func(p pointer, f *coderFieldInfo) error {
+ m := asMessage(p.AsValueOf(ft).Elem())
+ return proto.CheckInitialized(m)
+ },
+ merge: mergeMessage,
+ }
+ }
+}
+
+func sizeMessageInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+ return protowire.SizeBytes(f.mi.sizePointer(p.Elem(), opts)) + f.tagsize
+}
+
+func appendMessageInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(f.mi.sizePointer(p.Elem(), opts)))
+ return f.mi.marshalAppendPointer(b, p.Elem(), opts)
+}
+
+func consumeMessageInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ if p.Elem().IsNil() {
+ p.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
+ }
+ o, err := f.mi.unmarshalPointer(v, p.Elem(), 0, opts)
+ if err != nil {
+ return out, err
+ }
+ out.n = n
+ out.initialized = o.initialized
+ return out, nil
+}
+
+func isInitMessageInfo(p pointer, f *coderFieldInfo) error {
+ return f.mi.checkInitializedPointer(p.Elem())
+}
+
+func sizeMessage(m proto.Message, tagsize int, _ marshalOptions) int {
+ return protowire.SizeBytes(proto.Size(m)) + tagsize
+}
+
+func appendMessage(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, uint64(proto.Size(m)))
+ return opts.Options().MarshalAppend(b, m)
+}
+
+func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
+ Buf: v,
+ Message: m.ProtoReflect(),
+ })
+ if err != nil {
+ return out, err
+ }
+ out.n = n
+ out.initialized = o.Flags&piface.UnmarshalInitialized != 0
+ return out, nil
+}
+
+func sizeMessageValue(v pref.Value, tagsize int, opts marshalOptions) int {
+ m := v.Message().Interface()
+ return sizeMessage(m, tagsize, opts)
+}
+
+func appendMessageValue(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ m := v.Message().Interface()
+ return appendMessage(b, m, wiretag, opts)
+}
+
+func consumeMessageValue(b []byte, v pref.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) {
+ m := v.Message().Interface()
+ out, err := consumeMessage(b, m, wtyp, opts)
+ return v, out, err
+}
+
+func isInitMessageValue(v pref.Value) error {
+ m := v.Message().Interface()
+ return proto.CheckInitialized(m)
+}
+
+var coderMessageValue = valueCoderFuncs{
+ size: sizeMessageValue,
+ marshal: appendMessageValue,
+ unmarshal: consumeMessageValue,
+ isInit: isInitMessageValue,
+ merge: mergeMessageValue,
+}
+
+func sizeGroupValue(v pref.Value, tagsize int, opts marshalOptions) int {
+ m := v.Message().Interface()
+ return sizeGroup(m, tagsize, opts)
+}
+
+func appendGroupValue(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ m := v.Message().Interface()
+ return appendGroup(b, m, wiretag, opts)
+}
+
+func consumeGroupValue(b []byte, v pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) {
+ m := v.Message().Interface()
+ out, err := consumeGroup(b, m, num, wtyp, opts)
+ return v, out, err
+}
+
+var coderGroupValue = valueCoderFuncs{
+ size: sizeGroupValue,
+ marshal: appendGroupValue,
+ unmarshal: consumeGroupValue,
+ isInit: isInitMessageValue,
+ merge: mergeMessageValue,
+}
+
+func makeGroupFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs {
+ num := fd.Number()
+ if mi := getMessageInfo(ft); mi != nil {
+ funcs := pointerCoderFuncs{
+ size: sizeGroupType,
+ marshal: appendGroupType,
+ unmarshal: consumeGroupType,
+ merge: mergeMessage,
+ }
+ if needsInitCheck(mi.Desc) {
+ funcs.isInit = isInitMessageInfo
+ }
+ return funcs
+ } else {
+ return pointerCoderFuncs{
+ size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+ m := asMessage(p.AsValueOf(ft).Elem())
+ return sizeGroup(m, f.tagsize, opts)
+ },
+ marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ m := asMessage(p.AsValueOf(ft).Elem())
+ return appendGroup(b, m, f.wiretag, opts)
+ },
+ unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
+ mp := p.AsValueOf(ft).Elem()
+ if mp.IsNil() {
+ mp.Set(reflect.New(ft.Elem()))
+ }
+ return consumeGroup(b, asMessage(mp), num, wtyp, opts)
+ },
+ isInit: func(p pointer, f *coderFieldInfo) error {
+ m := asMessage(p.AsValueOf(ft).Elem())
+ return proto.CheckInitialized(m)
+ },
+ merge: mergeMessage,
+ }
+ }
+}
+
+func sizeGroupType(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+ return 2*f.tagsize + f.mi.sizePointer(p.Elem(), opts)
+}
+
+func appendGroupType(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, f.wiretag) // start group
+ b, err := f.mi.marshalAppendPointer(b, p.Elem(), opts)
+ b = protowire.AppendVarint(b, f.wiretag+1) // end group
+ return b, err
+}
+
+func consumeGroupType(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.StartGroupType {
+ return out, errUnknown
+ }
+ if p.Elem().IsNil() {
+ p.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
+ }
+ return f.mi.unmarshalPointer(b, p.Elem(), f.num, opts)
+}
+
+func sizeGroup(m proto.Message, tagsize int, _ marshalOptions) int {
+ return 2*tagsize + proto.Size(m)
+}
+
+func appendGroup(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag) // start group
+ b, err := opts.Options().MarshalAppend(b, m)
+ b = protowire.AppendVarint(b, wiretag+1) // end group
+ return b, err
+}
+
+func consumeGroup(b []byte, m proto.Message, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.StartGroupType {
+ return out, errUnknown
+ }
+ b, n := protowire.ConsumeGroup(num, b)
+ if n < 0 {
+ return out, errDecode
+ }
+ o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
+ Buf: b,
+ Message: m.ProtoReflect(),
+ })
+ if err != nil {
+ return out, err
+ }
+ out.n = n
+ out.initialized = o.Flags&piface.UnmarshalInitialized != 0
+ return out, nil
+}
+
+func makeMessageSliceFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs {
+ if mi := getMessageInfo(ft); mi != nil {
+ funcs := pointerCoderFuncs{
+ size: sizeMessageSliceInfo,
+ marshal: appendMessageSliceInfo,
+ unmarshal: consumeMessageSliceInfo,
+ merge: mergeMessageSlice,
+ }
+ if needsInitCheck(mi.Desc) {
+ funcs.isInit = isInitMessageSliceInfo
+ }
+ return funcs
+ }
+ return pointerCoderFuncs{
+ size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+ return sizeMessageSlice(p, ft, f.tagsize, opts)
+ },
+ marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ return appendMessageSlice(b, p, f.wiretag, ft, opts)
+ },
+ unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
+ return consumeMessageSlice(b, p, ft, wtyp, opts)
+ },
+ isInit: func(p pointer, f *coderFieldInfo) error {
+ return isInitMessageSlice(p, ft)
+ },
+ merge: mergeMessageSlice,
+ }
+}
+
+func sizeMessageSliceInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+ s := p.PointerSlice()
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeBytes(f.mi.sizePointer(v, opts)) + f.tagsize
+ }
+ return n
+}
+
+func appendMessageSliceInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := p.PointerSlice()
+ var err error
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ siz := f.mi.sizePointer(v, opts)
+ b = protowire.AppendVarint(b, uint64(siz))
+ b, err = f.mi.marshalAppendPointer(b, v, opts)
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+}
+
+func consumeMessageSliceInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ m := reflect.New(f.mi.GoReflectType.Elem()).Interface()
+ mp := pointerOfIface(m)
+ o, err := f.mi.unmarshalPointer(v, mp, 0, opts)
+ if err != nil {
+ return out, err
+ }
+ p.AppendPointerSlice(mp)
+ out.n = n
+ out.initialized = o.initialized
+ return out, nil
+}
+
+func isInitMessageSliceInfo(p pointer, f *coderFieldInfo) error {
+ s := p.PointerSlice()
+ for _, v := range s {
+ if err := f.mi.checkInitializedPointer(v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, _ marshalOptions) int {
+ s := p.PointerSlice()
+ n := 0
+ for _, v := range s {
+ m := asMessage(v.AsValueOf(goType.Elem()))
+ n += protowire.SizeBytes(proto.Size(m)) + tagsize
+ }
+ return n
+}
+
+func appendMessageSlice(b []byte, p pointer, wiretag uint64, goType reflect.Type, opts marshalOptions) ([]byte, error) {
+ s := p.PointerSlice()
+ var err error
+ for _, v := range s {
+ m := asMessage(v.AsValueOf(goType.Elem()))
+ b = protowire.AppendVarint(b, wiretag)
+ siz := proto.Size(m)
+ b = protowire.AppendVarint(b, uint64(siz))
+ b, err = opts.Options().MarshalAppend(b, m)
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+}
+
+func consumeMessageSlice(b []byte, p pointer, goType reflect.Type, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ mp := reflect.New(goType.Elem())
+ o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
+ Buf: v,
+ Message: asMessage(mp).ProtoReflect(),
+ })
+ if err != nil {
+ return out, err
+ }
+ p.AppendPointerSlice(pointerOfValue(mp))
+ out.n = n
+ out.initialized = o.Flags&piface.UnmarshalInitialized != 0
+ return out, nil
+}
+
+func isInitMessageSlice(p pointer, goType reflect.Type) error {
+ s := p.PointerSlice()
+ for _, v := range s {
+ m := asMessage(v.AsValueOf(goType.Elem()))
+ if err := proto.CheckInitialized(m); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Slices of messages
+
+func sizeMessageSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int {
+ list := listv.List()
+ n := 0
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ m := list.Get(i).Message().Interface()
+ n += protowire.SizeBytes(proto.Size(m)) + tagsize
+ }
+ return n
+}
+
+func appendMessageSliceValue(b []byte, listv pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ mopts := opts.Options()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ m := list.Get(i).Message().Interface()
+ b = protowire.AppendVarint(b, wiretag)
+ siz := proto.Size(m)
+ b = protowire.AppendVarint(b, uint64(siz))
+ var err error
+ b, err = mopts.MarshalAppend(b, m)
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+}
+
+func consumeMessageSliceValue(b []byte, listv pref.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ pref.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp != protowire.BytesType {
+ return pref.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return pref.Value{}, out, errDecode
+ }
+ m := list.NewElement()
+ o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
+ Buf: v,
+ Message: m.Message(),
+ })
+ if err != nil {
+ return pref.Value{}, out, err
+ }
+ list.Append(m)
+ out.n = n
+ out.initialized = o.Flags&piface.UnmarshalInitialized != 0
+ return listv, out, nil
+}
+
+func isInitMessageSliceValue(listv pref.Value) error {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ m := list.Get(i).Message().Interface()
+ if err := proto.CheckInitialized(m); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+var coderMessageSliceValue = valueCoderFuncs{
+ size: sizeMessageSliceValue,
+ marshal: appendMessageSliceValue,
+ unmarshal: consumeMessageSliceValue,
+ isInit: isInitMessageSliceValue,
+ merge: mergeMessageListValue,
+}
+
+func sizeGroupSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int {
+ list := listv.List()
+ n := 0
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ m := list.Get(i).Message().Interface()
+ n += 2*tagsize + proto.Size(m)
+ }
+ return n
+}
+
+func appendGroupSliceValue(b []byte, listv pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ mopts := opts.Options()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ m := list.Get(i).Message().Interface()
+ b = protowire.AppendVarint(b, wiretag) // start group
+ var err error
+ b, err = mopts.MarshalAppend(b, m)
+ if err != nil {
+ return b, err
+ }
+ b = protowire.AppendVarint(b, wiretag+1) // end group
+ }
+ return b, nil
+}
+
+func consumeGroupSliceValue(b []byte, listv pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ pref.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp != protowire.StartGroupType {
+ return pref.Value{}, out, errUnknown
+ }
+ b, n := protowire.ConsumeGroup(num, b)
+ if n < 0 {
+ return pref.Value{}, out, errDecode
+ }
+ m := list.NewElement()
+ o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
+ Buf: b,
+ Message: m.Message(),
+ })
+ if err != nil {
+ return pref.Value{}, out, err
+ }
+ list.Append(m)
+ out.n = n
+ out.initialized = o.Flags&piface.UnmarshalInitialized != 0
+ return listv, out, nil
+}
+
+var coderGroupSliceValue = valueCoderFuncs{
+ size: sizeGroupSliceValue,
+ marshal: appendGroupSliceValue,
+ unmarshal: consumeGroupSliceValue,
+ isInit: isInitMessageSliceValue,
+ merge: mergeMessageListValue,
+}
+
+func makeGroupSliceFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs {
+ num := fd.Number()
+ if mi := getMessageInfo(ft); mi != nil {
+ funcs := pointerCoderFuncs{
+ size: sizeGroupSliceInfo,
+ marshal: appendGroupSliceInfo,
+ unmarshal: consumeGroupSliceInfo,
+ merge: mergeMessageSlice,
+ }
+ if needsInitCheck(mi.Desc) {
+ funcs.isInit = isInitMessageSliceInfo
+ }
+ return funcs
+ }
+ return pointerCoderFuncs{
+ size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+ return sizeGroupSlice(p, ft, f.tagsize, opts)
+ },
+ marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ return appendGroupSlice(b, p, f.wiretag, ft, opts)
+ },
+ unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
+ return consumeGroupSlice(b, p, num, wtyp, ft, opts)
+ },
+ isInit: func(p pointer, f *coderFieldInfo) error {
+ return isInitMessageSlice(p, ft)
+ },
+ merge: mergeMessageSlice,
+ }
+}
+
+func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, _ marshalOptions) int {
+ s := p.PointerSlice()
+ n := 0
+ for _, v := range s {
+ m := asMessage(v.AsValueOf(messageType.Elem()))
+ n += 2*tagsize + proto.Size(m)
+ }
+ return n
+}
+
+func appendGroupSlice(b []byte, p pointer, wiretag uint64, messageType reflect.Type, opts marshalOptions) ([]byte, error) {
+ s := p.PointerSlice()
+ var err error
+ for _, v := range s {
+ m := asMessage(v.AsValueOf(messageType.Elem()))
+ b = protowire.AppendVarint(b, wiretag) // start group
+ b, err = opts.Options().MarshalAppend(b, m)
+ if err != nil {
+ return b, err
+ }
+ b = protowire.AppendVarint(b, wiretag+1) // end group
+ }
+ return b, nil
+}
+
+func consumeGroupSlice(b []byte, p pointer, num protowire.Number, wtyp protowire.Type, goType reflect.Type, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.StartGroupType {
+ return out, errUnknown
+ }
+ b, n := protowire.ConsumeGroup(num, b)
+ if n < 0 {
+ return out, errDecode
+ }
+ mp := reflect.New(goType.Elem())
+ o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
+ Buf: b,
+ Message: asMessage(mp).ProtoReflect(),
+ })
+ if err != nil {
+ return out, err
+ }
+ p.AppendPointerSlice(pointerOfValue(mp))
+ out.n = n
+ out.initialized = o.Flags&piface.UnmarshalInitialized != 0
+ return out, nil
+}
+
+func sizeGroupSliceInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+ s := p.PointerSlice()
+ n := 0
+ for _, v := range s {
+ n += 2*f.tagsize + f.mi.sizePointer(v, opts)
+ }
+ return n
+}
+
+func appendGroupSliceInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := p.PointerSlice()
+ var err error
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag) // start group
+ b, err = f.mi.marshalAppendPointer(b, v, opts)
+ if err != nil {
+ return b, err
+ }
+ b = protowire.AppendVarint(b, f.wiretag+1) // end group
+ }
+ return b, nil
+}
+
+func consumeGroupSliceInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
+ if wtyp != protowire.StartGroupType {
+ return unmarshalOutput{}, errUnknown
+ }
+ m := reflect.New(f.mi.GoReflectType.Elem()).Interface()
+ mp := pointerOfIface(m)
+ out, err := f.mi.unmarshalPointer(b, mp, f.num, opts)
+ if err != nil {
+ return out, err
+ }
+ p.AppendPointerSlice(mp)
+ return out, nil
+}
+
+func asMessage(v reflect.Value) pref.ProtoMessage {
+ if m, ok := v.Interface().(pref.ProtoMessage); ok {
+ return m
+ }
+ return legacyWrapMessage(v).Interface()
+}
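The group coders in this file frame a submessage between a start-group tag and an end-group tag (appending wiretag+1 works because EndGroupType is StartGroupType+1), whereas the message coders use a length prefix. A rough protowire sketch contrasting the two framings:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Submessage payload: field 1 = 42.
	payload := protowire.AppendTag(nil, 1, protowire.VarintType)
	payload = protowire.AppendVarint(payload, 42)

	// Length-prefixed framing, as produced by appendMessage above.
	msg := protowire.AppendTag(nil, 2, protowire.BytesType)
	msg = protowire.AppendBytes(msg, payload)

	// Group framing, as produced by appendGroup: start tag, payload, end tag.
	grp := protowire.AppendTag(nil, 2, protowire.StartGroupType)
	grp = append(grp, payload...)
	grp = protowire.AppendTag(grp, 2, protowire.EndGroupType)

	fmt.Printf("bytes framing: % x\ngroup framing: % x\n", msg, grp)
}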
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go
new file mode 100644
index 0000000..1a509b6
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go
@@ -0,0 +1,5637 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-types. DO NOT EDIT.
+
+package impl
+
+import (
+ "math"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// sizeBool returns the size of wire encoding a bool pointer as a Bool.
+func sizeBool(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ v := *p.Bool()
+ return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v))
+}
+
+// appendBool wire encodes a bool pointer as a Bool.
+func appendBool(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.Bool()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeBool(v))
+ return b, nil
+}
+
+// consumeBool wire decodes a bool pointer as a Bool.
+func consumeBool(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, errDecode
+ }
+ *p.Bool() = protowire.DecodeBool(v)
+ out.n = n
+ return out, nil
+}
+
+var coderBool = pointerCoderFuncs{
+ size: sizeBool,
+ marshal: appendBool,
+ unmarshal: consumeBool,
+ merge: mergeBool,
+}
+
+// sizeBoolNoZero returns the size of wire encoding a bool pointer as a Bool.
+// The zero value is not encoded.
+func sizeBoolNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ v := *p.Bool()
+ if v == false {
+ return 0
+ }
+ return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v))
+}
+
+// appendBoolNoZero wire encodes a bool pointer as a Bool.
+// The zero value is not encoded.
+func appendBoolNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.Bool()
+ if v == false {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeBool(v))
+ return b, nil
+}
+
+var coderBoolNoZero = pointerCoderFuncs{
+ size: sizeBoolNoZero,
+ marshal: appendBoolNoZero,
+ unmarshal: consumeBool,
+ merge: mergeBoolNoZero,
+}
+
+// sizeBoolPtr returns the size of wire encoding a *bool pointer as a Bool.
+// It panics if the pointer is nil.
+func sizeBoolPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ v := **p.BoolPtr()
+ return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v))
+}
+
+// appendBoolPtr wire encodes a *bool pointer as a Bool.
+// It panics if the pointer is nil.
+func appendBoolPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := **p.BoolPtr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeBool(v))
+ return b, nil
+}
+
+// consumeBoolPtr wire decodes a *bool pointer as a Bool.
+func consumeBoolPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, errDecode
+ }
+ vp := p.BoolPtr()
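+ // Lazily allocate the *bool the first time the field is seen; subsequent
+ // occurrences reuse the allocation and overwrite the value.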
+ if *vp == nil {
+ *vp = new(bool)
+ }
+ **vp = protowire.DecodeBool(v)
+ out.n = n
+ return out, nil
+}
+
+var coderBoolPtr = pointerCoderFuncs{
+ size: sizeBoolPtr,
+ marshal: appendBoolPtr,
+ unmarshal: consumeBoolPtr,
+ merge: mergeBoolPtr,
+}
+
+// sizeBoolSlice returns the size of wire encoding a []bool pointer as a repeated Bool.
+func sizeBoolSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := *p.BoolSlice()
+ for _, v := range s {
+ size += f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v))
+ }
+ return size
+}
+
+// appendBoolSlice encodes a []bool pointer as a repeated Bool.
+func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := *p.BoolSlice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeBool(v))
+ }
+ return b, nil
+}
+
+// consumeBoolSlice wire decodes a []bool pointer as a repeated Bool.
+func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.BoolSlice()
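+ // A repeated varint field may arrive packed (one length-delimited record
+ // holding many varints) or unpacked (one varint record per element); both
+ // forms are accepted here.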
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, errDecode
+ }
+ s = append(s, protowire.DecodeBool(v))
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, errDecode
+ }
+ *sp = append(*sp, protowire.DecodeBool(v))
+ out.n = n
+ return out, nil
+}
+
+var coderBoolSlice = pointerCoderFuncs{
+ size: sizeBoolSlice,
+ marshal: appendBoolSlice,
+ unmarshal: consumeBoolSlice,
+ merge: mergeBoolSlice,
+}
+
+// sizeBoolPackedSlice returns the size of wire encoding a []bool pointer as a packed repeated Bool.
+func sizeBoolPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := *p.BoolSlice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(protowire.EncodeBool(v))
+ }
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendBoolPackedSlice encodes a []bool pointer as a packed repeated Bool.
+func appendBoolPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := *p.BoolSlice()
+ if len(s) == 0 {
+ return b, nil
+ }
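+ // Packed encoding: a single tag, then the total payload length in bytes,
+ // then each element as a bare varint with no per-element tag.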
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(protowire.EncodeBool(v))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendVarint(b, protowire.EncodeBool(v))
+ }
+ return b, nil
+}
+
+var coderBoolPackedSlice = pointerCoderFuncs{
+ size: sizeBoolPackedSlice,
+ marshal: appendBoolPackedSlice,
+ unmarshal: consumeBoolSlice,
+ merge: mergeBoolSlice,
+}
+
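+// The *Value coders below mirror the pointer-based coders above, but operate
+// on protoreflect.Value instead of raw field pointers, for fields that are
+// read and written through the reflection API.
+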
+// sizeBoolValue returns the size of wire encoding a bool value as a Bool.
+func sizeBoolValue(v protoreflect.Value, tagsize int, opts marshalOptions) int {
+ return tagsize + protowire.SizeVarint(protowire.EncodeBool(v.Bool()))
+}
+
+// appendBoolValue encodes a bool value as a Bool.
+func appendBoolValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool()))
+ return b, nil
+}
+
+// consumeBoolValue decodes a bool value as a Bool.
+func consumeBoolValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ out.n = n
+ return protoreflect.ValueOfBool(protowire.DecodeBool(v)), out, nil
+}
+
+var coderBoolValue = valueCoderFuncs{
+ size: sizeBoolValue,
+ marshal: appendBoolValue,
+ unmarshal: consumeBoolValue,
+ merge: mergeScalarValue,
+}
+
+// sizeBoolSliceValue returns the size of wire encoding a []bool value as a repeated Bool.
+func sizeBoolSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ size += tagsize + protowire.SizeVarint(protowire.EncodeBool(v.Bool()))
+ }
+ return size
+}
+
+// appendBoolSliceValue encodes a []bool value as a repeated Bool.
+func appendBoolSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool()))
+ }
+ return b, nil
+}
+
+// consumeBoolSliceValue wire decodes a []bool value as a repeated Bool.
+func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v)))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderBoolSliceValue = valueCoderFuncs{
+ size: sizeBoolSliceValue,
+ marshal: appendBoolSliceValue,
+ unmarshal: consumeBoolSliceValue,
+ merge: mergeListValue,
+}
+
+// sizeBoolPackedSliceValue returns the size of wire encoding a []bool value as a packed repeated Bool.
+func sizeBoolPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := 0
+ for i, llen := 0, llen; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(protowire.EncodeBool(v.Bool()))
+ }
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendBoolPackedSliceValue encodes a []bool value as a packed repeated Bool.
+func appendBoolPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := 0
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(protowire.EncodeBool(v.Bool()))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool()))
+ }
+ return b, nil
+}
+
+var coderBoolPackedSliceValue = valueCoderFuncs{
+ size: sizeBoolPackedSliceValue,
+ marshal: appendBoolPackedSliceValue,
+ unmarshal: consumeBoolSliceValue,
+ merge: mergeListValue,
+}
+
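+// Enum fields are encoded as varints of their numeric value; nothing at this
+// layer validates the number against the enum's declared values.
+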
+// sizeEnumValue returns the size of wire encoding a value as a Enum.
+func sizeEnumValue(v protoreflect.Value, tagsize int, opts marshalOptions) int {
+ return tagsize + protowire.SizeVarint(uint64(v.Enum()))
+}
+
+// appendEnumValue encodes a value as a Enum.
+func appendEnumValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, uint64(v.Enum()))
+ return b, nil
+}
+
+// consumeEnumValue decodes a value as a Enum.
+func consumeEnumValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ out.n = n
+ return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), out, nil
+}
+
+var coderEnumValue = valueCoderFuncs{
+ size: sizeEnumValue,
+ marshal: appendEnumValue,
+ unmarshal: consumeEnumValue,
+ merge: mergeScalarValue,
+}
+
+// sizeEnumSliceValue returns the size of wire encoding a [] value as a repeated Enum.
+func sizeEnumSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ size += tagsize + protowire.SizeVarint(uint64(v.Enum()))
+ }
+ return size
+}
+
+// appendEnumSliceValue encodes a [] value as a repeated Enum.
+func appendEnumSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, uint64(v.Enum()))
+ }
+ return b, nil
+}
+
+// consumeEnumSliceValue wire decodes a [] value as a repeated Enum.
+func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderEnumSliceValue = valueCoderFuncs{
+ size: sizeEnumSliceValue,
+ marshal: appendEnumSliceValue,
+ unmarshal: consumeEnumSliceValue,
+ merge: mergeListValue,
+}
+
+// sizeEnumPackedSliceValue returns the size of wire encoding a [] value as a packed repeated Enum.
+func sizeEnumPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := 0
+ for i, llen := 0, llen; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(uint64(v.Enum()))
+ }
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendEnumPackedSliceValue encodes a [] value as a packed repeated Enum.
+func appendEnumPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := 0
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(uint64(v.Enum()))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, uint64(v.Enum()))
+ }
+ return b, nil
+}
+
+var coderEnumPackedSliceValue = valueCoderFuncs{
+ size: sizeEnumPackedSliceValue,
+ marshal: appendEnumPackedSliceValue,
+ unmarshal: consumeEnumSliceValue,
+ merge: mergeListValue,
+}
+
+// sizeInt32 returns the size of wire encoding a int32 pointer as a Int32.
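+// Negative values are sign-extended to 64 bits, so they always occupy 10 bytes
+// on the wire (which is why the zigzag-encoded Sint32 form exists).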
+func sizeInt32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ v := *p.Int32()
+ return f.tagsize + protowire.SizeVarint(uint64(v))
+}
+
+// appendInt32 wire encodes a int32 pointer as a Int32.
+func appendInt32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.Int32()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ return b, nil
+}
+
+// consumeInt32 wire decodes a int32 pointer as a Int32.
+func consumeInt32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, errDecode
+ }
+ *p.Int32() = int32(v)
+ out.n = n
+ return out, nil
+}
+
+var coderInt32 = pointerCoderFuncs{
+ size: sizeInt32,
+ marshal: appendInt32,
+ unmarshal: consumeInt32,
+ merge: mergeInt32,
+}
+
+// sizeInt32NoZero returns the size of wire encoding a int32 pointer as a Int32.
+// The zero value is not encoded.
+func sizeInt32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ v := *p.Int32()
+ if v == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeVarint(uint64(v))
+}
+
+// appendInt32NoZero wire encodes a int32 pointer as a Int32.
+// The zero value is not encoded.
+func appendInt32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.Int32()
+ if v == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ return b, nil
+}
+
+var coderInt32NoZero = pointerCoderFuncs{
+ size: sizeInt32NoZero,
+ marshal: appendInt32NoZero,
+ unmarshal: consumeInt32,
+ merge: mergeInt32NoZero,
+}
+
+// sizeInt32Ptr returns the size of wire encoding a *int32 pointer as a Int32.
+// It panics if the pointer is nil.
+func sizeInt32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ v := **p.Int32Ptr()
+ return f.tagsize + protowire.SizeVarint(uint64(v))
+}
+
+// appendInt32Ptr wire encodes a *int32 pointer as a Int32.
+// It panics if the pointer is nil.
+func appendInt32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := **p.Int32Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ return b, nil
+}
+
+// consumeInt32Ptr wire decodes a *int32 pointer as a Int32.
+func consumeInt32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, errDecode
+ }
+ vp := p.Int32Ptr()
+ if *vp == nil {
+ *vp = new(int32)
+ }
+ **vp = int32(v)
+ out.n = n
+ return out, nil
+}
+
+var coderInt32Ptr = pointerCoderFuncs{
+ size: sizeInt32Ptr,
+ marshal: appendInt32Ptr,
+ unmarshal: consumeInt32Ptr,
+ merge: mergeInt32Ptr,
+}
+
+// sizeInt32Slice returns the size of wire encoding a []int32 pointer as a repeated Int32.
+func sizeInt32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := *p.Int32Slice()
+ for _, v := range s {
+ size += f.tagsize + protowire.SizeVarint(uint64(v))
+ }
+ return size
+}
+
+// appendInt32Slice encodes a []int32 pointer as a repeated Int32.
+func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := *p.Int32Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+
+// consumeInt32Slice wire decodes a []int32 pointer as a repeated Int32.
+func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Int32Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, errDecode
+ }
+ s = append(s, int32(v))
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, errDecode
+ }
+ *sp = append(*sp, int32(v))
+ out.n = n
+ return out, nil
+}
+
+var coderInt32Slice = pointerCoderFuncs{
+ size: sizeInt32Slice,
+ marshal: appendInt32Slice,
+ unmarshal: consumeInt32Slice,
+ merge: mergeInt32Slice,
+}
+
+// sizeInt32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Int32.
+func sizeInt32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := *p.Int32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(uint64(v))
+ }
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendInt32PackedSlice encodes a []int32 pointer as a packed repeated Int32.
+func appendInt32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := *p.Int32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(uint64(v))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+
+var coderInt32PackedSlice = pointerCoderFuncs{
+ size: sizeInt32PackedSlice,
+ marshal: appendInt32PackedSlice,
+ unmarshal: consumeInt32Slice,
+ merge: mergeInt32Slice,
+}
+
+// sizeInt32Value returns the size of wire encoding a int32 value as a Int32.
+func sizeInt32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int {
+ return tagsize + protowire.SizeVarint(uint64(int32(v.Int())))
+}
+
+// appendInt32Value encodes a int32 value as a Int32.
+func appendInt32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, uint64(int32(v.Int())))
+ return b, nil
+}
+
+// consumeInt32Value decodes a int32 value as a Int32.
+func consumeInt32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ out.n = n
+ return protoreflect.ValueOfInt32(int32(v)), out, nil
+}
+
+var coderInt32Value = valueCoderFuncs{
+ size: sizeInt32Value,
+ marshal: appendInt32Value,
+ unmarshal: consumeInt32Value,
+ merge: mergeScalarValue,
+}
+
+// sizeInt32SliceValue returns the size of wire encoding a []int32 value as a repeated Int32.
+func sizeInt32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ size += tagsize + protowire.SizeVarint(uint64(int32(v.Int())))
+ }
+ return size
+}
+
+// appendInt32SliceValue encodes a []int32 value as a repeated Int32.
+func appendInt32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, uint64(int32(v.Int())))
+ }
+ return b, nil
+}
+
+// consumeInt32SliceValue wire decodes a []int32 value as a repeated Int32.
+func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ list.Append(protoreflect.ValueOfInt32(int32(v)))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ list.Append(protoreflect.ValueOfInt32(int32(v)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderInt32SliceValue = valueCoderFuncs{
+ size: sizeInt32SliceValue,
+ marshal: appendInt32SliceValue,
+ unmarshal: consumeInt32SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeInt32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Int32.
+func sizeInt32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := 0
+ for i, llen := 0, llen; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(uint64(int32(v.Int())))
+ }
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendInt32PackedSliceValue encodes a []int32 value as a packed repeated Int32.
+func appendInt32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := 0
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(uint64(int32(v.Int())))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, uint64(int32(v.Int())))
+ }
+ return b, nil
+}
+
+var coderInt32PackedSliceValue = valueCoderFuncs{
+ size: sizeInt32PackedSliceValue,
+ marshal: appendInt32PackedSliceValue,
+ unmarshal: consumeInt32SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeSint32 returns the size of wire encoding a int32 pointer as a Sint32.
+func sizeSint32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ v := *p.Int32()
+ return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v)))
+}
+
+// appendSint32 wire encodes a int32 pointer as a Sint32.
+func appendSint32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.Int32()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v)))
+ return b, nil
+}
+
+// consumeSint32 wire decodes a int32 pointer as a Sint32.
+func consumeSint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, errDecode
+ }
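+ // Truncate to the low 32 bits before zigzag-decoding so an over-long varint
+ // still produces a value within int32 range.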
+ *p.Int32() = int32(protowire.DecodeZigZag(v & math.MaxUint32))
+ out.n = n
+ return out, nil
+}
+
+var coderSint32 = pointerCoderFuncs{
+ size: sizeSint32,
+ marshal: appendSint32,
+ unmarshal: consumeSint32,
+ merge: mergeInt32,
+}
+
+// sizeSint32NoZero returns the size of wire encoding a int32 pointer as a Sint32.
+// The zero value is not encoded.
+func sizeSint32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ v := *p.Int32()
+ if v == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v)))
+}
+
+// appendSint32NoZero wire encodes a int32 pointer as a Sint32.
+// The zero value is not encoded.
+func appendSint32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.Int32()
+ if v == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v)))
+ return b, nil
+}
+
+var coderSint32NoZero = pointerCoderFuncs{
+ size: sizeSint32NoZero,
+ marshal: appendSint32NoZero,
+ unmarshal: consumeSint32,
+ merge: mergeInt32NoZero,
+}
+
+// sizeSint32Ptr returns the size of wire encoding a *int32 pointer as a Sint32.
+// It panics if the pointer is nil.
+func sizeSint32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ v := **p.Int32Ptr()
+ return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v)))
+}
+
+// appendSint32Ptr wire encodes a *int32 pointer as a Sint32.
+// It panics if the pointer is nil.
+func appendSint32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := **p.Int32Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v)))
+ return b, nil
+}
+
+// consumeSint32Ptr wire decodes a *int32 pointer as a Sint32.
+func consumeSint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, errDecode
+ }
+ vp := p.Int32Ptr()
+ if *vp == nil {
+ *vp = new(int32)
+ }
+ **vp = int32(protowire.DecodeZigZag(v & math.MaxUint32))
+ out.n = n
+ return out, nil
+}
+
+var coderSint32Ptr = pointerCoderFuncs{
+ size: sizeSint32Ptr,
+ marshal: appendSint32Ptr,
+ unmarshal: consumeSint32Ptr,
+ merge: mergeInt32Ptr,
+}
+
+// sizeSint32Slice returns the size of wire encoding a []int32 pointer as a repeated Sint32.
+func sizeSint32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := *p.Int32Slice()
+ for _, v := range s {
+ size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v)))
+ }
+ return size
+}
+
+// appendSint32Slice encodes a []int32 pointer as a repeated Sint32.
+func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := *p.Int32Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v)))
+ }
+ return b, nil
+}
+
+// consumeSint32Slice wire decodes a []int32 pointer as a repeated Sint32.
+func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Int32Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, errDecode
+ }
+ s = append(s, int32(protowire.DecodeZigZag(v&math.MaxUint32)))
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, errDecode
+ }
+ *sp = append(*sp, int32(protowire.DecodeZigZag(v&math.MaxUint32)))
+ out.n = n
+ return out, nil
+}
+
+var coderSint32Slice = pointerCoderFuncs{
+ size: sizeSint32Slice,
+ marshal: appendSint32Slice,
+ unmarshal: consumeSint32Slice,
+ merge: mergeInt32Slice,
+}
+
+// sizeSint32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sint32.
+func sizeSint32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := *p.Int32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(protowire.EncodeZigZag(int64(v)))
+ }
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendSint32PackedSlice encodes a []int32 pointer as a packed repeated Sint32.
+func appendSint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := *p.Int32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(protowire.EncodeZigZag(int64(v)))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v)))
+ }
+ return b, nil
+}
+
+var coderSint32PackedSlice = pointerCoderFuncs{
+ size: sizeSint32PackedSlice,
+ marshal: appendSint32PackedSlice,
+ unmarshal: consumeSint32Slice,
+ merge: mergeInt32Slice,
+}
+
+// sizeSint32Value returns the size of wire encoding a int32 value as a Sint32.
+func sizeSint32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int {
+ return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int()))))
+}
+
+// appendSint32Value encodes a int32 value as a Sint32.
+func appendSint32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int()))))
+ return b, nil
+}
+
+// consumeSint32Value decodes a int32 value as a Sint32.
+func consumeSint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ out.n = n
+ return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), out, nil
+}
+
+var coderSint32Value = valueCoderFuncs{
+ size: sizeSint32Value,
+ marshal: appendSint32Value,
+ unmarshal: consumeSint32Value,
+ merge: mergeScalarValue,
+}
+
+// sizeSint32SliceValue returns the size of wire encoding a []int32 value as a repeated Sint32.
+func sizeSint32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ size += tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int()))))
+ }
+ return size
+}
+
+// appendSint32SliceValue encodes a []int32 value as a repeated Sint32.
+func appendSint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int()))))
+ }
+ return b, nil
+}
+
+// consumeSint32SliceValue wire decodes a []int32 value as a repeated Sint32.
+func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderSint32SliceValue = valueCoderFuncs{
+ size: sizeSint32SliceValue,
+ marshal: appendSint32SliceValue,
+ unmarshal: consumeSint32SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeSint32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sint32.
+func sizeSint32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := 0
+ for i, llen := 0, llen; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int()))))
+ }
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendSint32PackedSliceValue encodes a []int32 value as a packed repeated Sint32.
+func appendSint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := 0
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int()))))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int()))))
+ }
+ return b, nil
+}
+
+var coderSint32PackedSliceValue = valueCoderFuncs{
+ size: sizeSint32PackedSliceValue,
+ marshal: appendSint32PackedSliceValue,
+ unmarshal: consumeSint32SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeUint32 returns the size of wire encoding a uint32 pointer as a Uint32.
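+// Unlike int32, uint32 values are zero-extended, so a single value never takes
+// more than 5 bytes on the wire.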
+func sizeUint32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ v := *p.Uint32()
+ return f.tagsize + protowire.SizeVarint(uint64(v))
+}
+
+// appendUint32 wire encodes a uint32 pointer as a Uint32.
+func appendUint32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.Uint32()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ return b, nil
+}
+
+// consumeUint32 wire decodes a uint32 pointer as a Uint32.
+func consumeUint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, errDecode
+ }
+ *p.Uint32() = uint32(v)
+ out.n = n
+ return out, nil
+}
+
+var coderUint32 = pointerCoderFuncs{
+ size: sizeUint32,
+ marshal: appendUint32,
+ unmarshal: consumeUint32,
+ merge: mergeUint32,
+}
+
+// sizeUint32NoZero returns the size of wire encoding a uint32 pointer as a Uint32.
+// The zero value is not encoded.
+func sizeUint32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ v := *p.Uint32()
+ if v == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeVarint(uint64(v))
+}
+
+// appendUint32NoZero wire encodes a uint32 pointer as a Uint32.
+// The zero value is not encoded.
+func appendUint32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.Uint32()
+ if v == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ return b, nil
+}
+
+var coderUint32NoZero = pointerCoderFuncs{
+ size: sizeUint32NoZero,
+ marshal: appendUint32NoZero,
+ unmarshal: consumeUint32,
+ merge: mergeUint32NoZero,
+}
+
+// sizeUint32Ptr returns the size of wire encoding a *uint32 pointer as a Uint32.
+// It panics if the pointer is nil.
+func sizeUint32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ v := **p.Uint32Ptr()
+ return f.tagsize + protowire.SizeVarint(uint64(v))
+}
+
+// appendUint32Ptr wire encodes a *uint32 pointer as a Uint32.
+// It panics if the pointer is nil.
+func appendUint32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := **p.Uint32Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ return b, nil
+}
+
+// consumeUint32Ptr wire decodes a *uint32 pointer as a Uint32.
+func consumeUint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, errDecode
+ }
+ vp := p.Uint32Ptr()
+ if *vp == nil {
+ *vp = new(uint32)
+ }
+ **vp = uint32(v)
+ out.n = n
+ return out, nil
+}
+
+var coderUint32Ptr = pointerCoderFuncs{
+ size: sizeUint32Ptr,
+ marshal: appendUint32Ptr,
+ unmarshal: consumeUint32Ptr,
+ merge: mergeUint32Ptr,
+}
+
+// sizeUint32Slice returns the size of wire encoding a []uint32 pointer as a repeated Uint32.
+func sizeUint32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := *p.Uint32Slice()
+ for _, v := range s {
+ size += f.tagsize + protowire.SizeVarint(uint64(v))
+ }
+ return size
+}
+
+// appendUint32Slice encodes a []uint32 pointer as a repeated Uint32.
+func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := *p.Uint32Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+
+// consumeUint32Slice wire decodes a []uint32 pointer as a repeated Uint32.
+func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Uint32Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, errDecode
+ }
+ s = append(s, uint32(v))
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, errDecode
+ }
+ *sp = append(*sp, uint32(v))
+ out.n = n
+ return out, nil
+}
+
+var coderUint32Slice = pointerCoderFuncs{
+ size: sizeUint32Slice,
+ marshal: appendUint32Slice,
+ unmarshal: consumeUint32Slice,
+ merge: mergeUint32Slice,
+}
+
+// sizeUint32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Uint32.
+func sizeUint32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := *p.Uint32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(uint64(v))
+ }
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendUint32PackedSlice encodes a []uint32 pointer as a packed repeated Uint32.
+func appendUint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := *p.Uint32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(uint64(v))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+
+var coderUint32PackedSlice = pointerCoderFuncs{
+ size: sizeUint32PackedSlice,
+ marshal: appendUint32PackedSlice,
+ unmarshal: consumeUint32Slice,
+ merge: mergeUint32Slice,
+}
+
+// sizeUint32Value returns the size of wire encoding a uint32 value as a Uint32.
+func sizeUint32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int {
+ return tagsize + protowire.SizeVarint(uint64(uint32(v.Uint())))
+}
+
+// appendUint32Value encodes a uint32 value as a Uint32.
+func appendUint32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, uint64(uint32(v.Uint())))
+ return b, nil
+}
+
+// consumeUint32Value decodes a uint32 value as a Uint32.
+func consumeUint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ out.n = n
+ return protoreflect.ValueOfUint32(uint32(v)), out, nil
+}
+
+var coderUint32Value = valueCoderFuncs{
+ size: sizeUint32Value,
+ marshal: appendUint32Value,
+ unmarshal: consumeUint32Value,
+ merge: mergeScalarValue,
+}
+
+// sizeUint32SliceValue returns the size of wire encoding a []uint32 value as a repeated Uint32.
+func sizeUint32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ size += tagsize + protowire.SizeVarint(uint64(uint32(v.Uint())))
+ }
+ return size
+}
+
+// appendUint32SliceValue encodes a []uint32 value as a repeated Uint32.
+func appendUint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, uint64(uint32(v.Uint())))
+ }
+ return b, nil
+}
+
+// consumeUint32SliceValue wire decodes a []uint32 value as a repeated Uint32.
+func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ list.Append(protoreflect.ValueOfUint32(uint32(v)))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ list.Append(protoreflect.ValueOfUint32(uint32(v)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderUint32SliceValue = valueCoderFuncs{
+ size: sizeUint32SliceValue,
+ marshal: appendUint32SliceValue,
+ unmarshal: consumeUint32SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeUint32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Uint32.
+func sizeUint32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := 0
+ for i, llen := 0, llen; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(uint64(uint32(v.Uint())))
+ }
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendUint32PackedSliceValue encodes a []uint32 value as a packed repeated Uint32.
+func appendUint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := 0
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(uint64(uint32(v.Uint())))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, uint64(uint32(v.Uint())))
+ }
+ return b, nil
+}
+
+var coderUint32PackedSliceValue = valueCoderFuncs{
+ size: sizeUint32PackedSliceValue,
+ marshal: appendUint32PackedSliceValue,
+ unmarshal: consumeUint32SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeInt64 returns the size of wire encoding a int64 pointer as a Int64.
+func sizeInt64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ v := *p.Int64()
+ return f.tagsize + protowire.SizeVarint(uint64(v))
+}
+
+// appendInt64 wire encodes a int64 pointer as a Int64.
+func appendInt64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.Int64()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ return b, nil
+}
+
+// consumeInt64 wire decodes a int64 pointer as a Int64.
+func consumeInt64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, errDecode
+ }
+ *p.Int64() = int64(v)
+ out.n = n
+ return out, nil
+}
+
+var coderInt64 = pointerCoderFuncs{
+ size: sizeInt64,
+ marshal: appendInt64,
+ unmarshal: consumeInt64,
+ merge: mergeInt64,
+}
+
+// sizeInt64NoZero returns the size of wire encoding a int64 pointer as a Int64.
+// The zero value is not encoded.
+func sizeInt64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ v := *p.Int64()
+ if v == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeVarint(uint64(v))
+}
+
+// appendInt64NoZero wire encodes a int64 pointer as a Int64.
+// The zero value is not encoded.
+func appendInt64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.Int64()
+ if v == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ return b, nil
+}
+
+var coderInt64NoZero = pointerCoderFuncs{
+ size: sizeInt64NoZero,
+ marshal: appendInt64NoZero,
+ unmarshal: consumeInt64,
+ merge: mergeInt64NoZero,
+}
+
+// sizeInt64Ptr returns the size of wire encoding a *int64 pointer as a Int64.
+// It panics if the pointer is nil.
+func sizeInt64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ v := **p.Int64Ptr()
+ return f.tagsize + protowire.SizeVarint(uint64(v))
+}
+
+// appendInt64Ptr wire encodes a *int64 pointer as a Int64.
+// It panics if the pointer is nil.
+func appendInt64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := **p.Int64Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ return b, nil
+}
+
+// consumeInt64Ptr wire decodes a *int64 pointer as a Int64.
+func consumeInt64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, errDecode
+ }
+ vp := p.Int64Ptr()
+ if *vp == nil {
+ *vp = new(int64)
+ }
+ **vp = int64(v)
+ out.n = n
+ return out, nil
+}
+
+var coderInt64Ptr = pointerCoderFuncs{
+ size: sizeInt64Ptr,
+ marshal: appendInt64Ptr,
+ unmarshal: consumeInt64Ptr,
+ merge: mergeInt64Ptr,
+}
+
+// sizeInt64Slice returns the size of wire encoding a []int64 pointer as a repeated Int64.
+func sizeInt64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := *p.Int64Slice()
+ for _, v := range s {
+ size += f.tagsize + protowire.SizeVarint(uint64(v))
+ }
+ return size
+}
+
+// appendInt64Slice encodes a []int64 pointer as a repeated Int64.
+func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := *p.Int64Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+
+// consumeInt64Slice wire decodes a []int64 pointer as a repeated Int64.
+func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Int64Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, errDecode
+ }
+ s = append(s, int64(v))
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, errDecode
+ }
+ *sp = append(*sp, int64(v))
+ out.n = n
+ return out, nil
+}
+
+var coderInt64Slice = pointerCoderFuncs{
+ size: sizeInt64Slice,
+ marshal: appendInt64Slice,
+ unmarshal: consumeInt64Slice,
+ merge: mergeInt64Slice,
+}
+
+// sizeInt64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Int64.
+func sizeInt64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := *p.Int64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(uint64(v))
+ }
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendInt64PackedSlice encodes a []int64 pointer as a packed repeated Int64.
+func appendInt64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := *p.Int64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(uint64(v))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+
+var coderInt64PackedSlice = pointerCoderFuncs{
+ size: sizeInt64PackedSlice,
+ marshal: appendInt64PackedSlice,
+ unmarshal: consumeInt64Slice,
+ merge: mergeInt64Slice,
+}
+
+// sizeInt64Value returns the size of wire encoding a int64 value as a Int64.
+func sizeInt64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int {
+ return tagsize + protowire.SizeVarint(uint64(v.Int()))
+}
+
+// appendInt64Value encodes a int64 value as a Int64.
+func appendInt64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, uint64(v.Int()))
+ return b, nil
+}
+
+// consumeInt64Value decodes a int64 value as a Int64.
+func consumeInt64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ out.n = n
+ return protoreflect.ValueOfInt64(int64(v)), out, nil
+}
+
+var coderInt64Value = valueCoderFuncs{
+ size: sizeInt64Value,
+ marshal: appendInt64Value,
+ unmarshal: consumeInt64Value,
+ merge: mergeScalarValue,
+}
+
+// sizeInt64SliceValue returns the size of wire encoding a []int64 value as a repeated Int64.
+func sizeInt64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ size += tagsize + protowire.SizeVarint(uint64(v.Int()))
+ }
+ return size
+}
+
+// appendInt64SliceValue encodes a []int64 value as a repeated Int64.
+func appendInt64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, uint64(v.Int()))
+ }
+ return b, nil
+}
+
+// consumeInt64SliceValue wire decodes a []int64 value as a repeated Int64.
+func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ list.Append(protoreflect.ValueOfInt64(int64(v)))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ list.Append(protoreflect.ValueOfInt64(int64(v)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderInt64SliceValue = valueCoderFuncs{
+ size: sizeInt64SliceValue,
+ marshal: appendInt64SliceValue,
+ unmarshal: consumeInt64SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeInt64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Int64.
+func sizeInt64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := 0
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(uint64(v.Int()))
+ }
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendInt64PackedSliceValue encodes a []int64 value as a packed repeated Int64.
+func appendInt64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := 0
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(uint64(v.Int()))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, uint64(v.Int()))
+ }
+ return b, nil
+}
+
+var coderInt64PackedSliceValue = valueCoderFuncs{
+ size: sizeInt64PackedSliceValue,
+ marshal: appendInt64PackedSliceValue,
+ unmarshal: consumeInt64SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeSint64 returns the size of wire encoding an int64 pointer as a Sint64.
+func sizeSint64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ v := *p.Int64()
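+ // ZigZag interleaves negative and positive values (0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ...)
+ // so that small negative numbers still encode as short varints.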
+ return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v))
+}
+
+// appendSint64 wire encodes an int64 pointer as a Sint64.
+func appendSint64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.Int64()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(v))
+ return b, nil
+}
+
+// consumeSint64 wire decodes an int64 pointer as a Sint64.
+func consumeSint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
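+ // A field with an unexpected wire type is reported as errUnknown so that the
+ // caller can retain its bytes as an unknown field rather than fail decoding.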
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, errDecode
+ }
+ *p.Int64() = protowire.DecodeZigZag(v)
+ out.n = n
+ return out, nil
+}
+
+var coderSint64 = pointerCoderFuncs{
+ size: sizeSint64,
+ marshal: appendSint64,
+ unmarshal: consumeSint64,
+ merge: mergeInt64,
+}
+
+// sizeSint64NoZero returns the size of wire encoding an int64 pointer as a Sint64.
+// The zero value is not encoded.
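+// (This matches proto3 scalars without explicit presence, which are omitted
+// from the wire when they hold the zero value.)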
+func sizeSint64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ v := *p.Int64()
+ if v == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v))
+}
+
+// appendSint64NoZero wire encodes an int64 pointer as a Sint64.
+// The zero value is not encoded.
+func appendSint64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.Int64()
+ if v == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(v))
+ return b, nil
+}
+
+var coderSint64NoZero = pointerCoderFuncs{
+ size: sizeSint64NoZero,
+ marshal: appendSint64NoZero,
+ unmarshal: consumeSint64,
+ merge: mergeInt64NoZero,
+}
+
+// sizeSint64Ptr returns the size of wire encoding a *int64 pointer as a Sint64.
+// It panics if the pointer is nil.
+func sizeSint64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ v := **p.Int64Ptr()
+ return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v))
+}
+
+// appendSint64Ptr wire encodes a *int64 pointer as a Sint64.
+// It panics if the pointer is nil.
+func appendSint64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := **p.Int64Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(v))
+ return b, nil
+}
+
+// consumeSint64Ptr wire decodes a *int64 pointer as a Sint64.
+func consumeSint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, errDecode
+ }
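+ // The target is allocated lazily: a nil *int64 means the optional field was
+ // not previously set.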
+ vp := p.Int64Ptr()
+ if *vp == nil {
+ *vp = new(int64)
+ }
+ **vp = protowire.DecodeZigZag(v)
+ out.n = n
+ return out, nil
+}
+
+var coderSint64Ptr = pointerCoderFuncs{
+ size: sizeSint64Ptr,
+ marshal: appendSint64Ptr,
+ unmarshal: consumeSint64Ptr,
+ merge: mergeInt64Ptr,
+}
+
+// sizeSint64Slice returns the size of wire encoding a []int64 pointer as a repeated Sint64.
+func sizeSint64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := *p.Int64Slice()
+ for _, v := range s {
+ size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v))
+ }
+ return size
+}
+
+// appendSint64Slice encodes a []int64 pointer as a repeated Sint64.
+func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := *p.Int64Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(v))
+ }
+ return b, nil
+}
+
+// consumeSint64Slice wire decodes a []int64 pointer as a repeated Sint64.
+func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Int64Slice()
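+ // A packed repeated field arrives as one length-delimited (BytesType) record
+ // containing the concatenated varints; otherwise decode a single unpacked
+ // element of VarintType below.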
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, errDecode
+ }
+ s = append(s, protowire.DecodeZigZag(v))
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, errDecode
+ }
+ *sp = append(*sp, protowire.DecodeZigZag(v))
+ out.n = n
+ return out, nil
+}
+
+var coderSint64Slice = pointerCoderFuncs{
+ size: sizeSint64Slice,
+ marshal: appendSint64Slice,
+ unmarshal: consumeSint64Slice,
+ merge: mergeInt64Slice,
+}
+
+// sizeSint64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sint64.
+func sizeSint64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := *p.Int64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(protowire.EncodeZigZag(v))
+ }
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendSint64PackedSlice encodes a []int64 pointer as a packed repeated Sint64.
+func appendSint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := *p.Int64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(protowire.EncodeZigZag(v))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(v))
+ }
+ return b, nil
+}
+
+var coderSint64PackedSlice = pointerCoderFuncs{
+ size: sizeSint64PackedSlice,
+ marshal: appendSint64PackedSlice,
+ unmarshal: consumeSint64Slice,
+ merge: mergeInt64Slice,
+}
+
+// sizeSint64Value returns the size of wire encoding an int64 value as a Sint64.
+func sizeSint64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int {
+ return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v.Int()))
+}
+
+// appendSint64Value encodes an int64 value as a Sint64.
+func appendSint64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int()))
+ return b, nil
+}
+
+// consumeSint64Value decodes an int64 value as a Sint64.
+func consumeSint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ out.n = n
+ return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), out, nil
+}
+
+var coderSint64Value = valueCoderFuncs{
+ size: sizeSint64Value,
+ marshal: appendSint64Value,
+ unmarshal: consumeSint64Value,
+ merge: mergeScalarValue,
+}
+
+// sizeSint64SliceValue returns the size of wire encoding a []int64 value as a repeated Sint64.
+func sizeSint64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ size += tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v.Int()))
+ }
+ return size
+}
+
+// appendSint64SliceValue encodes a []int64 value as a repeated Sint64.
+func appendSint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int()))
+ }
+ return b, nil
+}
+
+// consumeSint64SliceValue wire decodes a []int64 value as a repeated Sint64.
+func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderSint64SliceValue = valueCoderFuncs{
+ size: sizeSint64SliceValue,
+ marshal: appendSint64SliceValue,
+ unmarshal: consumeSint64SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeSint64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sint64.
+func sizeSint64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := 0
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(protowire.EncodeZigZag(v.Int()))
+ }
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendSint64PackedSliceValue encodes a []int64 value as a packed repeated Sint64.
+func appendSint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := 0
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(protowire.EncodeZigZag(v.Int()))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int()))
+ }
+ return b, nil
+}
+
+var coderSint64PackedSliceValue = valueCoderFuncs{
+ size: sizeSint64PackedSliceValue,
+ marshal: appendSint64PackedSliceValue,
+ unmarshal: consumeSint64SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeUint64 returns the size of wire encoding a uint64 pointer as a Uint64.
+func sizeUint64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ v := *p.Uint64()
+ return f.tagsize + protowire.SizeVarint(v)
+}
+
+// appendUint64 wire encodes a uint64 pointer as a Uint64.
+func appendUint64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.Uint64()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, v)
+ return b, nil
+}
+
+// consumeUint64 wire decodes a uint64 pointer as a Uint64.
+func consumeUint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, errDecode
+ }
+ *p.Uint64() = v
+ out.n = n
+ return out, nil
+}
+
+var coderUint64 = pointerCoderFuncs{
+ size: sizeUint64,
+ marshal: appendUint64,
+ unmarshal: consumeUint64,
+ merge: mergeUint64,
+}
+
+// sizeUint64NoZero returns the size of wire encoding a uint64 pointer as a Uint64.
+// The zero value is not encoded.
+func sizeUint64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ v := *p.Uint64()
+ if v == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeVarint(v)
+}
+
+// appendUint64NoZero wire encodes a uint64 pointer as a Uint64.
+// The zero value is not encoded.
+func appendUint64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.Uint64()
+ if v == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, v)
+ return b, nil
+}
+
+var coderUint64NoZero = pointerCoderFuncs{
+ size: sizeUint64NoZero,
+ marshal: appendUint64NoZero,
+ unmarshal: consumeUint64,
+ merge: mergeUint64NoZero,
+}
+
+// sizeUint64Ptr returns the size of wire encoding a *uint64 pointer as a Uint64.
+// It panics if the pointer is nil.
+func sizeUint64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ v := **p.Uint64Ptr()
+ return f.tagsize + protowire.SizeVarint(v)
+}
+
+// appendUint64Ptr wire encodes a *uint64 pointer as a Uint64.
+// It panics if the pointer is nil.
+func appendUint64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := **p.Uint64Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, v)
+ return b, nil
+}
+
+// consumeUint64Ptr wire decodes a *uint64 pointer as a Uint64.
+func consumeUint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, errDecode
+ }
+ vp := p.Uint64Ptr()
+ if *vp == nil {
+ *vp = new(uint64)
+ }
+ **vp = v
+ out.n = n
+ return out, nil
+}
+
+var coderUint64Ptr = pointerCoderFuncs{
+ size: sizeUint64Ptr,
+ marshal: appendUint64Ptr,
+ unmarshal: consumeUint64Ptr,
+ merge: mergeUint64Ptr,
+}
+
+// sizeUint64Slice returns the size of wire encoding a []uint64 pointer as a repeated Uint64.
+func sizeUint64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := *p.Uint64Slice()
+ for _, v := range s {
+ size += f.tagsize + protowire.SizeVarint(v)
+ }
+ return size
+}
+
+// appendUint64Slice encodes a []uint64 pointer as a repeated Uint64.
+func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := *p.Uint64Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, v)
+ }
+ return b, nil
+}
+
+// consumeUint64Slice wire decodes a []uint64 pointer as a repeated Uint64.
+func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Uint64Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, errDecode
+ }
+ s = append(s, v)
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, errDecode
+ }
+ *sp = append(*sp, v)
+ out.n = n
+ return out, nil
+}
+
+var coderUint64Slice = pointerCoderFuncs{
+ size: sizeUint64Slice,
+ marshal: appendUint64Slice,
+ unmarshal: consumeUint64Slice,
+ merge: mergeUint64Slice,
+}
+
+// sizeUint64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Uint64.
+func sizeUint64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := *p.Uint64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(v)
+ }
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendUint64PackedSlice encodes a []uint64 pointer as a packed repeated Uint64.
+func appendUint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := *p.Uint64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(v)
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendVarint(b, v)
+ }
+ return b, nil
+}
+
+var coderUint64PackedSlice = pointerCoderFuncs{
+ size: sizeUint64PackedSlice,
+ marshal: appendUint64PackedSlice,
+ unmarshal: consumeUint64Slice,
+ merge: mergeUint64Slice,
+}
+
+// sizeUint64Value returns the size of wire encoding a uint64 value as a Uint64.
+func sizeUint64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int {
+ return tagsize + protowire.SizeVarint(v.Uint())
+}
+
+// appendUint64Value encodes a uint64 value as a Uint64.
+func appendUint64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, v.Uint())
+ return b, nil
+}
+
+// consumeUint64Value decodes a uint64 value as a Uint64.
+func consumeUint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ out.n = n
+ return protoreflect.ValueOfUint64(v), out, nil
+}
+
+var coderUint64Value = valueCoderFuncs{
+ size: sizeUint64Value,
+ marshal: appendUint64Value,
+ unmarshal: consumeUint64Value,
+ merge: mergeScalarValue,
+}
+
+// sizeUint64SliceValue returns the size of wire encoding a []uint64 value as a repeated Uint64.
+func sizeUint64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ size += tagsize + protowire.SizeVarint(v.Uint())
+ }
+ return size
+}
+
+// appendUint64SliceValue encodes a []uint64 value as a repeated Uint64.
+func appendUint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, v.Uint())
+ }
+ return b, nil
+}
+
+// consumeUint64SliceValue wire decodes a []uint64 value as a repeated Uint64.
+func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ list.Append(protoreflect.ValueOfUint64(v))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ list.Append(protoreflect.ValueOfUint64(v))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderUint64SliceValue = valueCoderFuncs{
+ size: sizeUint64SliceValue,
+ marshal: appendUint64SliceValue,
+ unmarshal: consumeUint64SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeUint64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Uint64.
+func sizeUint64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := 0
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(v.Uint())
+ }
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendUint64PackedSliceValue encodes a []uint64 value as a packed repeated Uint64.
+func appendUint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := 0
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(v.Uint())
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, v.Uint())
+ }
+ return b, nil
+}
+
+var coderUint64PackedSliceValue = valueCoderFuncs{
+ size: sizeUint64PackedSliceValue,
+ marshal: appendUint64PackedSliceValue,
+ unmarshal: consumeUint64SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeSfixed32 returns the size of wire encoding an int32 pointer as a Sfixed32.
+func sizeSfixed32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
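+ // A fixed32 value always occupies four bytes on the wire, so the stored
+ // value itself does not affect the size.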
+ return f.tagsize + protowire.SizeFixed32()
+}
+
+// appendSfixed32 wire encodes an int32 pointer as a Sfixed32.
+func appendSfixed32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.Int32()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, uint32(v))
+ return b, nil
+}
+
+// consumeSfixed32 wire decodes an int32 pointer as a Sfixed32.
+func consumeSfixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed32Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ *p.Int32() = int32(v)
+ out.n = n
+ return out, nil
+}
+
+var coderSfixed32 = pointerCoderFuncs{
+ size: sizeSfixed32,
+ marshal: appendSfixed32,
+ unmarshal: consumeSfixed32,
+ merge: mergeInt32,
+}
+
+// sizeSfixed32NoZero returns the size of wire encoding an int32 pointer as a Sfixed32.
+// The zero value is not encoded.
+func sizeSfixed32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ v := *p.Int32()
+ if v == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeFixed32()
+}
+
+// appendSfixed32NoZero wire encodes an int32 pointer as a Sfixed32.
+// The zero value is not encoded.
+func appendSfixed32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.Int32()
+ if v == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, uint32(v))
+ return b, nil
+}
+
+var coderSfixed32NoZero = pointerCoderFuncs{
+ size: sizeSfixed32NoZero,
+ marshal: appendSfixed32NoZero,
+ unmarshal: consumeSfixed32,
+ merge: mergeInt32NoZero,
+}
+
+// sizeSfixed32Ptr returns the size of wire encoding a *int32 pointer as a Sfixed32.
+// It panics if the pointer is nil.
+func sizeSfixed32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ return f.tagsize + protowire.SizeFixed32()
+}
+
+// appendSfixed32Ptr wire encodes a *int32 pointer as a Sfixed32.
+// It panics if the pointer is nil.
+func appendSfixed32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := **p.Int32Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, uint32(v))
+ return b, nil
+}
+
+// consumeSfixed32Ptr wire decodes a *int32 pointer as a Sfixed32.
+func consumeSfixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed32Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ vp := p.Int32Ptr()
+ if *vp == nil {
+ *vp = new(int32)
+ }
+ **vp = int32(v)
+ out.n = n
+ return out, nil
+}
+
+var coderSfixed32Ptr = pointerCoderFuncs{
+ size: sizeSfixed32Ptr,
+ marshal: appendSfixed32Ptr,
+ unmarshal: consumeSfixed32Ptr,
+ merge: mergeInt32Ptr,
+}
+
+// sizeSfixed32Slice returns the size of wire encoding a []int32 pointer as a repeated Sfixed32.
+func sizeSfixed32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := *p.Int32Slice()
+ size = len(s) * (f.tagsize + protowire.SizeFixed32())
+ return size
+}
+
+// appendSfixed32Slice encodes a []int32 pointer as a repeated Sfixed32.
+func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := *p.Int32Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, uint32(v))
+ }
+ return b, nil
+}
+
+// consumeSfixed32Slice wire decodes a []int32 pointer as a repeated Sfixed32.
+func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Int32Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ s = append(s, int32(v))
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.Fixed32Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ *sp = append(*sp, int32(v))
+ out.n = n
+ return out, nil
+}
+
+var coderSfixed32Slice = pointerCoderFuncs{
+ size: sizeSfixed32Slice,
+ marshal: appendSfixed32Slice,
+ unmarshal: consumeSfixed32Slice,
+ merge: mergeInt32Slice,
+}
+
+// sizeSfixed32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sfixed32.
+func sizeSfixed32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := *p.Int32Slice()
+ if len(s) == 0 {
+ return 0
+ }
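+ // Fixed-width elements make the packed payload size a simple product.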
+ n := len(s) * protowire.SizeFixed32()
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendSfixed32PackedSlice encodes a []int32 pointer as a packed repeated Sfixed32.
+func appendSfixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := *p.Int32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := len(s) * protowire.SizeFixed32()
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendFixed32(b, uint32(v))
+ }
+ return b, nil
+}
+
+var coderSfixed32PackedSlice = pointerCoderFuncs{
+ size: sizeSfixed32PackedSlice,
+ marshal: appendSfixed32PackedSlice,
+ unmarshal: consumeSfixed32Slice,
+ merge: mergeInt32Slice,
+}
+
+// sizeSfixed32Value returns the size of wire encoding an int32 value as a Sfixed32.
+func sizeSfixed32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int {
+ return tagsize + protowire.SizeFixed32()
+}
+
+// appendSfixed32Value encodes an int32 value as a Sfixed32.
+func appendSfixed32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed32(b, uint32(v.Int()))
+ return b, nil
+}
+
+// consumeSfixed32Value decodes an int32 value as a Sfixed32.
+func consumeSfixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed32Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ out.n = n
+ return protoreflect.ValueOfInt32(int32(v)), out, nil
+}
+
+var coderSfixed32Value = valueCoderFuncs{
+ size: sizeSfixed32Value,
+ marshal: appendSfixed32Value,
+ unmarshal: consumeSfixed32Value,
+ merge: mergeScalarValue,
+}
+
+// sizeSfixed32SliceValue returns the size of wire encoding a []int32 value as a repeated Sfixed32.
+func sizeSfixed32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
+ list := listv.List()
+ size = list.Len() * (tagsize + protowire.SizeFixed32())
+ return size
+}
+
+// appendSfixed32SliceValue encodes a []int32 value as a repeated Sfixed32.
+func appendSfixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed32(b, uint32(v.Int()))
+ }
+ return b, nil
+}
+
+// consumeSfixed32SliceValue wire decodes a []int32 value as a repeated Sfixed32.
+func consumeSfixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ list.Append(protoreflect.ValueOfInt32(int32(v)))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.Fixed32Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ list.Append(protoreflect.ValueOfInt32(int32(v)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderSfixed32SliceValue = valueCoderFuncs{
+ size: sizeSfixed32SliceValue,
+ marshal: appendSfixed32SliceValue,
+ unmarshal: consumeSfixed32SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeSfixed32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sfixed32.
+func sizeSfixed32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := llen * protowire.SizeFixed32()
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendSfixed32PackedSliceValue encodes a []int32 value as a packed repeated Sfixed32.
+func appendSfixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := llen * protowire.SizeFixed32()
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendFixed32(b, uint32(v.Int()))
+ }
+ return b, nil
+}
+
+var coderSfixed32PackedSliceValue = valueCoderFuncs{
+ size: sizeSfixed32PackedSliceValue,
+ marshal: appendSfixed32PackedSliceValue,
+ unmarshal: consumeSfixed32SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeFixed32 returns the size of wire encoding a uint32 pointer as a Fixed32.
+func sizeFixed32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ return f.tagsize + protowire.SizeFixed32()
+}
+
+// appendFixed32 wire encodes a uint32 pointer as a Fixed32.
+func appendFixed32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.Uint32()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, v)
+ return b, nil
+}
+
+// consumeFixed32 wire decodes a uint32 pointer as a Fixed32.
+func consumeFixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed32Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ *p.Uint32() = v
+ out.n = n
+ return out, nil
+}
+
+var coderFixed32 = pointerCoderFuncs{
+ size: sizeFixed32,
+ marshal: appendFixed32,
+ unmarshal: consumeFixed32,
+ merge: mergeUint32,
+}
+
+// sizeFixed32NoZero returns the size of wire encoding a uint32 pointer as a Fixed32.
+// The zero value is not encoded.
+func sizeFixed32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ v := *p.Uint32()
+ if v == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeFixed32()
+}
+
+// appendFixed32NoZero wire encodes a uint32 pointer as a Fixed32.
+// The zero value is not encoded.
+func appendFixed32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.Uint32()
+ if v == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, v)
+ return b, nil
+}
+
+var coderFixed32NoZero = pointerCoderFuncs{
+ size: sizeFixed32NoZero,
+ marshal: appendFixed32NoZero,
+ unmarshal: consumeFixed32,
+ merge: mergeUint32NoZero,
+}
+
+// sizeFixed32Ptr returns the size of wire encoding a *uint32 pointer as a Fixed32.
+// It panics if the pointer is nil.
+func sizeFixed32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ return f.tagsize + protowire.SizeFixed32()
+}
+
+// appendFixed32Ptr wire encodes a *uint32 pointer as a Fixed32.
+// It panics if the pointer is nil.
+func appendFixed32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := **p.Uint32Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, v)
+ return b, nil
+}
+
+// consumeFixed32Ptr wire decodes a *uint32 pointer as a Fixed32.
+func consumeFixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed32Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ vp := p.Uint32Ptr()
+ if *vp == nil {
+ *vp = new(uint32)
+ }
+ **vp = v
+ out.n = n
+ return out, nil
+}
+
+var coderFixed32Ptr = pointerCoderFuncs{
+ size: sizeFixed32Ptr,
+ marshal: appendFixed32Ptr,
+ unmarshal: consumeFixed32Ptr,
+ merge: mergeUint32Ptr,
+}
+
+// sizeFixed32Slice returns the size of wire encoding a []uint32 pointer as a repeated Fixed32.
+func sizeFixed32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := *p.Uint32Slice()
+ size = len(s) * (f.tagsize + protowire.SizeFixed32())
+ return size
+}
+
+// appendFixed32Slice encodes a []uint32 pointer as a repeated Fixed32.
+func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := *p.Uint32Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, v)
+ }
+ return b, nil
+}
+
+// consumeFixed32Slice wire decodes a []uint32 pointer as a repeated Fixed32.
+func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Uint32Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ s = append(s, v)
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.Fixed32Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ *sp = append(*sp, v)
+ out.n = n
+ return out, nil
+}
+
+var coderFixed32Slice = pointerCoderFuncs{
+ size: sizeFixed32Slice,
+ marshal: appendFixed32Slice,
+ unmarshal: consumeFixed32Slice,
+ merge: mergeUint32Slice,
+}
+
+// sizeFixed32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Fixed32.
+func sizeFixed32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := *p.Uint32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := len(s) * protowire.SizeFixed32()
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendFixed32PackedSlice encodes a []uint32 pointer as a packed repeated Fixed32.
+func appendFixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := *p.Uint32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := len(s) * protowire.SizeFixed32()
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendFixed32(b, v)
+ }
+ return b, nil
+}
+
+var coderFixed32PackedSlice = pointerCoderFuncs{
+ size: sizeFixed32PackedSlice,
+ marshal: appendFixed32PackedSlice,
+ unmarshal: consumeFixed32Slice,
+ merge: mergeUint32Slice,
+}
+
+// sizeFixed32Value returns the size of wire encoding a uint32 value as a Fixed32.
+func sizeFixed32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int {
+ return tagsize + protowire.SizeFixed32()
+}
+
+// appendFixed32Value encodes a uint32 value as a Fixed32.
+func appendFixed32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed32(b, uint32(v.Uint()))
+ return b, nil
+}
+
+// consumeFixed32Value decodes a uint32 value as a Fixed32.
+func consumeFixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed32Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ out.n = n
+ return protoreflect.ValueOfUint32(uint32(v)), out, nil
+}
+
+var coderFixed32Value = valueCoderFuncs{
+ size: sizeFixed32Value,
+ marshal: appendFixed32Value,
+ unmarshal: consumeFixed32Value,
+ merge: mergeScalarValue,
+}
+
+// sizeFixed32SliceValue returns the size of wire encoding a []uint32 value as a repeated Fixed32.
+func sizeFixed32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
+ list := listv.List()
+ size = list.Len() * (tagsize + protowire.SizeFixed32())
+ return size
+}
+
+// appendFixed32SliceValue encodes a []uint32 value as a repeated Fixed32.
+func appendFixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed32(b, uint32(v.Uint()))
+ }
+ return b, nil
+}
+
+// consumeFixed32SliceValue wire decodes a []uint32 value as a repeated Fixed32.
+func consumeFixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ list.Append(protoreflect.ValueOfUint32(uint32(v)))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.Fixed32Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ list.Append(protoreflect.ValueOfUint32(uint32(v)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderFixed32SliceValue = valueCoderFuncs{
+ size: sizeFixed32SliceValue,
+ marshal: appendFixed32SliceValue,
+ unmarshal: consumeFixed32SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeFixed32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Fixed32.
+func sizeFixed32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := llen * protowire.SizeFixed32()
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendFixed32PackedSliceValue encodes a []uint32 value as a packed repeated Fixed32.
+func appendFixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := llen * protowire.SizeFixed32()
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendFixed32(b, uint32(v.Uint()))
+ }
+ return b, nil
+}
+
+var coderFixed32PackedSliceValue = valueCoderFuncs{
+ size: sizeFixed32PackedSliceValue,
+ marshal: appendFixed32PackedSliceValue,
+ unmarshal: consumeFixed32SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeFloat returns the size of wire encoding a float32 pointer as a Float.
+func sizeFloat(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ return f.tagsize + protowire.SizeFixed32()
+}
+
+// appendFloat wire encodes a float32 pointer as a Float.
+func appendFloat(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.Float32()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, math.Float32bits(v))
+ return b, nil
+}
+
+// consumeFloat wire decodes a float32 pointer as a Float.
+func consumeFloat(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed32Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ *p.Float32() = math.Float32frombits(v)
+ out.n = n
+ return out, nil
+}
+
+var coderFloat = pointerCoderFuncs{
+ size: sizeFloat,
+ marshal: appendFloat,
+ unmarshal: consumeFloat,
+ merge: mergeFloat32,
+}
+
+// sizeFloatNoZero returns the size of wire encoding a float32 pointer as a Float.
+// The zero value is not encoded.
+func sizeFloatNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ v := *p.Float32()
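+ // Negative zero has its own bit pattern and is therefore still encoded;
+ // only positive zero is treated as the unset value.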
+ if v == 0 && !math.Signbit(float64(v)) {
+ return 0
+ }
+ return f.tagsize + protowire.SizeFixed32()
+}
+
+// appendFloatNoZero wire encodes a float32 pointer as a Float.
+// The zero value is not encoded.
+func appendFloatNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.Float32()
+ if v == 0 && !math.Signbit(float64(v)) {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, math.Float32bits(v))
+ return b, nil
+}
+
+var coderFloatNoZero = pointerCoderFuncs{
+ size: sizeFloatNoZero,
+ marshal: appendFloatNoZero,
+ unmarshal: consumeFloat,
+ merge: mergeFloat32NoZero,
+}
+
+// sizeFloatPtr returns the size of wire encoding a *float32 pointer as a Float.
+// It panics if the pointer is nil.
+func sizeFloatPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ return f.tagsize + protowire.SizeFixed32()
+}
+
+// appendFloatPtr wire encodes a *float32 pointer as a Float.
+// It panics if the pointer is nil.
+func appendFloatPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := **p.Float32Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, math.Float32bits(v))
+ return b, nil
+}
+
+// consumeFloatPtr wire decodes a *float32 pointer as a Float.
+func consumeFloatPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed32Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ vp := p.Float32Ptr()
+ if *vp == nil {
+ *vp = new(float32)
+ }
+ **vp = math.Float32frombits(v)
+ out.n = n
+ return out, nil
+}
+
+var coderFloatPtr = pointerCoderFuncs{
+ size: sizeFloatPtr,
+ marshal: appendFloatPtr,
+ unmarshal: consumeFloatPtr,
+ merge: mergeFloat32Ptr,
+}
+
+// sizeFloatSlice returns the size of wire encoding a []float32 pointer as a repeated Float.
+func sizeFloatSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := *p.Float32Slice()
+ size = len(s) * (f.tagsize + protowire.SizeFixed32())
+ return size
+}
+
+// appendFloatSlice encodes a []float32 pointer as a repeated Float.
+func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := *p.Float32Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, math.Float32bits(v))
+ }
+ return b, nil
+}
+
+// consumeFloatSlice wire decodes a []float32 pointer as a repeated Float.
+func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Float32Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ s = append(s, math.Float32frombits(v))
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.Fixed32Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ *sp = append(*sp, math.Float32frombits(v))
+ out.n = n
+ return out, nil
+}
+
+var coderFloatSlice = pointerCoderFuncs{
+ size: sizeFloatSlice,
+ marshal: appendFloatSlice,
+ unmarshal: consumeFloatSlice,
+ merge: mergeFloat32Slice,
+}
+
+// sizeFloatPackedSlice returns the size of wire encoding a []float32 pointer as a packed repeated Float.
+func sizeFloatPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := *p.Float32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := len(s) * protowire.SizeFixed32()
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendFloatPackedSlice encodes a []float32 pointer as a packed repeated Float.
+func appendFloatPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := *p.Float32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := len(s) * protowire.SizeFixed32()
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendFixed32(b, math.Float32bits(v))
+ }
+ return b, nil
+}
+
+var coderFloatPackedSlice = pointerCoderFuncs{
+ size: sizeFloatPackedSlice,
+ marshal: appendFloatPackedSlice,
+ unmarshal: consumeFloatSlice,
+ merge: mergeFloat32Slice,
+}
+
+// sizeFloatValue returns the size of wire encoding a float32 value as a Float.
+func sizeFloatValue(v protoreflect.Value, tagsize int, opts marshalOptions) int {
+ return tagsize + protowire.SizeFixed32()
+}
+
+// appendFloatValue encodes a float32 value as a Float.
+func appendFloatValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float())))
+ return b, nil
+}
+
+// consumeFloatValue decodes a float32 value as a Float.
+func consumeFloatValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed32Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ out.n = n
+ return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), out, nil
+}
+
+var coderFloatValue = valueCoderFuncs{
+ size: sizeFloatValue,
+ marshal: appendFloatValue,
+ unmarshal: consumeFloatValue,
+ merge: mergeScalarValue,
+}
+
+// sizeFloatSliceValue returns the size of wire encoding a []float32 value as a repeated Float.
+func sizeFloatSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
+ list := listv.List()
+ size = list.Len() * (tagsize + protowire.SizeFixed32())
+ return size
+}
+
+// appendFloatSliceValue encodes a []float32 value as a repeated Float.
+func appendFloatSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float())))
+ }
+ return b, nil
+}
+
+// consumeFloatSliceValue wire decodes a []float32 value as a repeated Float.
+func consumeFloatSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.Fixed32Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderFloatSliceValue = valueCoderFuncs{
+ size: sizeFloatSliceValue,
+ marshal: appendFloatSliceValue,
+ unmarshal: consumeFloatSliceValue,
+ merge: mergeListValue,
+}
+
+// sizeFloatPackedSliceValue returns the size of wire encoding a []float32 value as a packed repeated Float.
+func sizeFloatPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := llen * protowire.SizeFixed32()
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendFloatPackedSliceValue encodes a []float32 value as a packed repeated Float.
+func appendFloatPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := llen * protowire.SizeFixed32()
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float())))
+ }
+ return b, nil
+}
+
+var coderFloatPackedSliceValue = valueCoderFuncs{
+ size: sizeFloatPackedSliceValue,
+ marshal: appendFloatPackedSliceValue,
+ unmarshal: consumeFloatSliceValue,
+ merge: mergeListValue,
+}
+
+// sizeSfixed64 returns the size of wire encoding an int64 pointer as a Sfixed64.
+func sizeSfixed64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ return f.tagsize + protowire.SizeFixed64()
+}
+
+// appendSfixed64 wire encodes an int64 pointer as a Sfixed64.
+func appendSfixed64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.Int64()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, uint64(v))
+ return b, nil
+}
+
+// consumeSfixed64 wire decodes an int64 pointer as a Sfixed64.
+func consumeSfixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed64Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ *p.Int64() = int64(v)
+ out.n = n
+ return out, nil
+}
+
+var coderSfixed64 = pointerCoderFuncs{
+ size: sizeSfixed64,
+ marshal: appendSfixed64,
+ unmarshal: consumeSfixed64,
+ merge: mergeInt64,
+}
+
+// sizeSfixed64NoZero returns the size of wire encoding an int64 pointer as a Sfixed64.
+// The zero value is not encoded.
+func sizeSfixed64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ v := *p.Int64()
+ if v == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeFixed64()
+}
+
+// appendSfixed64NoZero wire encodes an int64 pointer as a Sfixed64.
+// The zero value is not encoded.
+func appendSfixed64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.Int64()
+ if v == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, uint64(v))
+ return b, nil
+}
+
+var coderSfixed64NoZero = pointerCoderFuncs{
+ size: sizeSfixed64NoZero,
+ marshal: appendSfixed64NoZero,
+ unmarshal: consumeSfixed64,
+ merge: mergeInt64NoZero,
+}
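+
+// The *NoZero coder variants omit the field entirely when the Go value is the
+// zero value. They are meant for fields without explicit presence (such as
+// proto3 scalars), where an unset field and a zero field are
+// indistinguishable on the wire.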
+
+// sizeSfixed64Ptr returns the size of wire encoding a *int64 pointer as a Sfixed64.
+// It panics if the pointer is nil.
+func sizeSfixed64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ return f.tagsize + protowire.SizeFixed64()
+}
+
+// appendSfixed64Ptr wire encodes a *int64 pointer as a Sfixed64.
+// It panics if the pointer is nil.
+func appendSfixed64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := **p.Int64Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, uint64(v))
+ return b, nil
+}
+
+// consumeSfixed64Ptr wire decodes a *int64 pointer as a Sfixed64.
+func consumeSfixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed64Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ vp := p.Int64Ptr()
+ if *vp == nil {
+ *vp = new(int64)
+ }
+ **vp = int64(v)
+ out.n = n
+ return out, nil
+}
+
+var coderSfixed64Ptr = pointerCoderFuncs{
+ size: sizeSfixed64Ptr,
+ marshal: appendSfixed64Ptr,
+ unmarshal: consumeSfixed64Ptr,
+ merge: mergeInt64Ptr,
+}
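+
+// The *Ptr coder variants operate on a pointer-to-scalar field and serve
+// fields with explicit presence: a nil pointer means "unset", which must be
+// distinguished from a pointer to an explicit zero value.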
+
+// sizeSfixed64Slice returns the size of wire encoding a []int64 pointer as a repeated Sfixed64.
+func sizeSfixed64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := *p.Int64Slice()
+ size = len(s) * (f.tagsize + protowire.SizeFixed64())
+ return size
+}
+
+// appendSfixed64Slice encodes a []int64 pointer as a repeated Sfixed64.
+func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := *p.Int64Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, uint64(v))
+ }
+ return b, nil
+}
+
+// consumeSfixed64Slice wire decodes a []int64 pointer as a repeated Sfixed64.
+func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Int64Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ s = append(s, int64(v))
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.Fixed64Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ *sp = append(*sp, int64(v))
+ out.n = n
+ return out, nil
+}
+
+var coderSfixed64Slice = pointerCoderFuncs{
+ size: sizeSfixed64Slice,
+ marshal: appendSfixed64Slice,
+ unmarshal: consumeSfixed64Slice,
+ merge: mergeInt64Slice,
+}
+
+// sizeSfixed64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sfixed64.
+func sizeSfixed64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := *p.Int64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := len(s) * protowire.SizeFixed64()
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendSfixed64PackedSlice encodes a []int64 pointer as a packed repeated Sfixed64.
+func appendSfixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := *p.Int64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := len(s) * protowire.SizeFixed64()
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendFixed64(b, uint64(v))
+ }
+ return b, nil
+}
+
+var coderSfixed64PackedSlice = pointerCoderFuncs{
+ size: sizeSfixed64PackedSlice,
+ marshal: appendSfixed64PackedSlice,
+ unmarshal: consumeSfixed64Slice,
+ merge: mergeInt64Slice,
+}
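+
+// A packed repeated fixed-width field is one length-delimited record: a
+// BytesType tag, a varint payload length, then the raw little-endian values
+// back to back. For example, three sfixed64 elements in field 1 take
+// 1 (tag) + 1 (length varint for 24) + 3*8 (payload) = 26 bytes, which is
+// exactly what sizeSfixed64PackedSlice computes above.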
+
+// sizeSfixed64Value returns the size of wire encoding a int64 value as a Sfixed64.
+func sizeSfixed64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int {
+ return tagsize + protowire.SizeFixed64()
+}
+
+// appendSfixed64Value encodes a int64 value as a Sfixed64.
+func appendSfixed64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed64(b, uint64(v.Int()))
+ return b, nil
+}
+
+// consumeSfixed64Value decodes a int64 value as a Sfixed64.
+func consumeSfixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed64Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ out.n = n
+ return protoreflect.ValueOfInt64(int64(v)), out, nil
+}
+
+var coderSfixed64Value = valueCoderFuncs{
+ size: sizeSfixed64Value,
+ marshal: appendSfixed64Value,
+ unmarshal: consumeSfixed64Value,
+ merge: mergeScalarValue,
+}
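+
+// The *Value coders mirror the pointer-based coders but work on
+// protoreflect.Value rather than a raw struct pointer; they are used where a
+// field is reached reflectively, for example extension fields and other
+// values accessed through the protoreflect API.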
+
+// sizeSfixed64SliceValue returns the size of wire encoding a []int64 value as a repeated Sfixed64.
+func sizeSfixed64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
+ list := listv.List()
+ size = list.Len() * (tagsize + protowire.SizeFixed64())
+ return size
+}
+
+// appendSfixed64SliceValue encodes a []int64 value as a repeated Sfixed64.
+func appendSfixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed64(b, uint64(v.Int()))
+ }
+ return b, nil
+}
+
+// consumeSfixed64SliceValue wire decodes a []int64 value as a repeated Sfixed64.
+func consumeSfixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ list.Append(protoreflect.ValueOfInt64(int64(v)))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.Fixed64Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ list.Append(protoreflect.ValueOfInt64(int64(v)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderSfixed64SliceValue = valueCoderFuncs{
+ size: sizeSfixed64SliceValue,
+ marshal: appendSfixed64SliceValue,
+ unmarshal: consumeSfixed64SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeSfixed64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sfixed64.
+func sizeSfixed64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := llen * protowire.SizeFixed64()
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendSfixed64PackedSliceValue encodes a []int64 value as a packed repeated Sfixed64.
+func appendSfixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := llen * protowire.SizeFixed64()
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendFixed64(b, uint64(v.Int()))
+ }
+ return b, nil
+}
+
+var coderSfixed64PackedSliceValue = valueCoderFuncs{
+ size: sizeSfixed64PackedSliceValue,
+ marshal: appendSfixed64PackedSliceValue,
+ unmarshal: consumeSfixed64SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeFixed64 returns the size of wire encoding a uint64 pointer as a Fixed64.
+func sizeFixed64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+
+ return f.tagsize + protowire.SizeFixed64()
+}
+
+// appendFixed64 wire encodes a uint64 pointer as a Fixed64.
+func appendFixed64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.Uint64()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, v)
+ return b, nil
+}
+
+// consumeFixed64 wire decodes a uint64 pointer as a Fixed64.
+func consumeFixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed64Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ *p.Uint64() = v
+ out.n = n
+ return out, nil
+}
+
+var coderFixed64 = pointerCoderFuncs{
+ size: sizeFixed64,
+ marshal: appendFixed64,
+ unmarshal: consumeFixed64,
+ merge: mergeUint64,
+}
+
+// sizeFixed64NoZero returns the size of wire encoding a uint64 pointer as a Fixed64.
+// The zero value is not encoded.
+func sizeFixed64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ v := *p.Uint64()
+ if v == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeFixed64()
+}
+
+// appendFixed64NoZero wire encodes a uint64 pointer as a Fixed64.
+// The zero value is not encoded.
+func appendFixed64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.Uint64()
+ if v == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, v)
+ return b, nil
+}
+
+var coderFixed64NoZero = pointerCoderFuncs{
+ size: sizeFixed64NoZero,
+ marshal: appendFixed64NoZero,
+ unmarshal: consumeFixed64,
+ merge: mergeUint64NoZero,
+}
+
+// sizeFixed64Ptr returns the size of wire encoding a *uint64 pointer as a Fixed64.
+// It panics if the pointer is nil.
+func sizeFixed64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ return f.tagsize + protowire.SizeFixed64()
+}
+
+// appendFixed64Ptr wire encodes a *uint64 pointer as a Fixed64.
+// It panics if the pointer is nil.
+func appendFixed64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := **p.Uint64Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, v)
+ return b, nil
+}
+
+// consumeFixed64Ptr wire decodes a *uint64 pointer as a Fixed64.
+func consumeFixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed64Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ vp := p.Uint64Ptr()
+ if *vp == nil {
+ *vp = new(uint64)
+ }
+ **vp = v
+ out.n = n
+ return out, nil
+}
+
+var coderFixed64Ptr = pointerCoderFuncs{
+ size: sizeFixed64Ptr,
+ marshal: appendFixed64Ptr,
+ unmarshal: consumeFixed64Ptr,
+ merge: mergeUint64Ptr,
+}
+
+// sizeFixed64Slice returns the size of wire encoding a []uint64 pointer as a repeated Fixed64.
+func sizeFixed64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := *p.Uint64Slice()
+ size = len(s) * (f.tagsize + protowire.SizeFixed64())
+ return size
+}
+
+// appendFixed64Slice encodes a []uint64 pointer as a repeated Fixed64.
+func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := *p.Uint64Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, v)
+ }
+ return b, nil
+}
+
+// consumeFixed64Slice wire decodes a []uint64 pointer as a repeated Fixed64.
+func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Uint64Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ s = append(s, v)
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.Fixed64Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ *sp = append(*sp, v)
+ out.n = n
+ return out, nil
+}
+
+var coderFixed64Slice = pointerCoderFuncs{
+ size: sizeFixed64Slice,
+ marshal: appendFixed64Slice,
+ unmarshal: consumeFixed64Slice,
+ merge: mergeUint64Slice,
+}
+
+// sizeFixed64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Fixed64.
+func sizeFixed64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := *p.Uint64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := len(s) * protowire.SizeFixed64()
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendFixed64PackedSlice encodes a []uint64 pointer as a packed repeated Fixed64.
+func appendFixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := *p.Uint64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := len(s) * protowire.SizeFixed64()
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendFixed64(b, v)
+ }
+ return b, nil
+}
+
+var coderFixed64PackedSlice = pointerCoderFuncs{
+ size: sizeFixed64PackedSlice,
+ marshal: appendFixed64PackedSlice,
+ unmarshal: consumeFixed64Slice,
+ merge: mergeUint64Slice,
+}
+
+// sizeFixed64Value returns the size of wire encoding a uint64 value as a Fixed64.
+func sizeFixed64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int {
+ return tagsize + protowire.SizeFixed64()
+}
+
+// appendFixed64Value encodes a uint64 value as a Fixed64.
+func appendFixed64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed64(b, v.Uint())
+ return b, nil
+}
+
+// consumeFixed64Value decodes a uint64 value as a Fixed64.
+func consumeFixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed64Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ out.n = n
+ return protoreflect.ValueOfUint64(v), out, nil
+}
+
+var coderFixed64Value = valueCoderFuncs{
+ size: sizeFixed64Value,
+ marshal: appendFixed64Value,
+ unmarshal: consumeFixed64Value,
+ merge: mergeScalarValue,
+}
+
+// sizeFixed64SliceValue returns the size of wire encoding a []uint64 value as a repeated Fixed64.
+func sizeFixed64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
+ list := listv.List()
+ size = list.Len() * (tagsize + protowire.SizeFixed64())
+ return size
+}
+
+// appendFixed64SliceValue encodes a []uint64 value as a repeated Fixed64.
+func appendFixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed64(b, v.Uint())
+ }
+ return b, nil
+}
+
+// consumeFixed64SliceValue wire decodes a []uint64 value as a repeated Fixed64.
+func consumeFixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ list.Append(protoreflect.ValueOfUint64(v))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.Fixed64Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ list.Append(protoreflect.ValueOfUint64(v))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderFixed64SliceValue = valueCoderFuncs{
+ size: sizeFixed64SliceValue,
+ marshal: appendFixed64SliceValue,
+ unmarshal: consumeFixed64SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeFixed64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Fixed64.
+func sizeFixed64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := llen * protowire.SizeFixed64()
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendFixed64PackedSliceValue encodes a []uint64 value as a packed repeated Fixed64.
+func appendFixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := llen * protowire.SizeFixed64()
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendFixed64(b, v.Uint())
+ }
+ return b, nil
+}
+
+var coderFixed64PackedSliceValue = valueCoderFuncs{
+ size: sizeFixed64PackedSliceValue,
+ marshal: appendFixed64PackedSliceValue,
+ unmarshal: consumeFixed64SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeDouble returns the size of wire encoding a float64 pointer as a Double.
+func sizeDouble(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+
+ return f.tagsize + protowire.SizeFixed64()
+}
+
+// appendDouble wire encodes a float64 pointer as a Double.
+func appendDouble(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.Float64()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, math.Float64bits(v))
+ return b, nil
+}
+
+// consumeDouble wire decodes a float64 pointer as a Double.
+func consumeDouble(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed64Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ *p.Float64() = math.Float64frombits(v)
+ out.n = n
+ return out, nil
+}
+
+var coderDouble = pointerCoderFuncs{
+ size: sizeDouble,
+ marshal: appendDouble,
+ unmarshal: consumeDouble,
+ merge: mergeFloat64,
+}
+
+// sizeDoubleNoZero returns the size of wire encoding a float64 pointer as a Double.
+// The zero value is not encoded.
+func sizeDoubleNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ v := *p.Float64()
+ if v == 0 && !math.Signbit(float64(v)) {
+ return 0
+ }
+ return f.tagsize + protowire.SizeFixed64()
+}
+
+// appendDoubleNoZero wire encodes a float64 pointer as a Double.
+// The zero value is not encoded.
+func appendDoubleNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.Float64()
+ if v == 0 && !math.Signbit(float64(v)) {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, math.Float64bits(v))
+ return b, nil
+}
+
+var coderDoubleNoZero = pointerCoderFuncs{
+ size: sizeDoubleNoZero,
+ marshal: appendDoubleNoZero,
+ unmarshal: consumeDouble,
+ merge: mergeFloat64NoZero,
+}
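+
+// Note the math.Signbit check above: negative zero compares equal to 0 but is
+// still encoded, so -0 survives a marshal/unmarshal round trip instead of
+// being silently dropped.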
+
+// sizeDoublePtr returns the size of wire encoding a *float64 pointer as a Double.
+// It panics if the pointer is nil.
+func sizeDoublePtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ return f.tagsize + protowire.SizeFixed64()
+}
+
+// appendDoublePtr wire encodes a *float64 pointer as a Double.
+// It panics if the pointer is nil.
+func appendDoublePtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := **p.Float64Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, math.Float64bits(v))
+ return b, nil
+}
+
+// consumeDoublePtr wire decodes a *float64 pointer as a Double.
+func consumeDoublePtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed64Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ vp := p.Float64Ptr()
+ if *vp == nil {
+ *vp = new(float64)
+ }
+ **vp = math.Float64frombits(v)
+ out.n = n
+ return out, nil
+}
+
+var coderDoublePtr = pointerCoderFuncs{
+ size: sizeDoublePtr,
+ marshal: appendDoublePtr,
+ unmarshal: consumeDoublePtr,
+ merge: mergeFloat64Ptr,
+}
+
+// sizeDoubleSlice returns the size of wire encoding a []float64 pointer as a repeated Double.
+func sizeDoubleSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := *p.Float64Slice()
+ size = len(s) * (f.tagsize + protowire.SizeFixed64())
+ return size
+}
+
+// appendDoubleSlice encodes a []float64 pointer as a repeated Double.
+func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := *p.Float64Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, math.Float64bits(v))
+ }
+ return b, nil
+}
+
+// consumeDoubleSlice wire decodes a []float64 pointer as a repeated Double.
+func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Float64Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ s = append(s, math.Float64frombits(v))
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.Fixed64Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ *sp = append(*sp, math.Float64frombits(v))
+ out.n = n
+ return out, nil
+}
+
+var coderDoubleSlice = pointerCoderFuncs{
+ size: sizeDoubleSlice,
+ marshal: appendDoubleSlice,
+ unmarshal: consumeDoubleSlice,
+ merge: mergeFloat64Slice,
+}
+
+// sizeDoublePackedSlice returns the size of wire encoding a []float64 pointer as a packed repeated Double.
+func sizeDoublePackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := *p.Float64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := len(s) * protowire.SizeFixed64()
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendDoublePackedSlice encodes a []float64 pointer as a packed repeated Double.
+func appendDoublePackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := *p.Float64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := len(s) * protowire.SizeFixed64()
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendFixed64(b, math.Float64bits(v))
+ }
+ return b, nil
+}
+
+var coderDoublePackedSlice = pointerCoderFuncs{
+ size: sizeDoublePackedSlice,
+ marshal: appendDoublePackedSlice,
+ unmarshal: consumeDoubleSlice,
+ merge: mergeFloat64Slice,
+}
+
+// sizeDoubleValue returns the size of wire encoding a float64 value as a Double.
+func sizeDoubleValue(v protoreflect.Value, tagsize int, opts marshalOptions) int {
+ return tagsize + protowire.SizeFixed64()
+}
+
+// appendDoubleValue encodes a float64 value as a Double.
+func appendDoubleValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed64(b, math.Float64bits(v.Float()))
+ return b, nil
+}
+
+// consumeDoubleValue decodes a float64 value as a Double.
+func consumeDoubleValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed64Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ out.n = n
+ return protoreflect.ValueOfFloat64(math.Float64frombits(v)), out, nil
+}
+
+var coderDoubleValue = valueCoderFuncs{
+ size: sizeDoubleValue,
+ marshal: appendDoubleValue,
+ unmarshal: consumeDoubleValue,
+ merge: mergeScalarValue,
+}
+
+// sizeDoubleSliceValue returns the size of wire encoding a []float64 value as a repeated Double.
+func sizeDoubleSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
+ list := listv.List()
+ size = list.Len() * (tagsize + protowire.SizeFixed64())
+ return size
+}
+
+// appendDoubleSliceValue encodes a []float64 value as a repeated Double.
+func appendDoubleSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed64(b, math.Float64bits(v.Float()))
+ }
+ return b, nil
+}
+
+// consumeDoubleSliceValue wire decodes a []float64 value as a repeated Double.
+func consumeDoubleSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v)))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.Fixed64Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderDoubleSliceValue = valueCoderFuncs{
+ size: sizeDoubleSliceValue,
+ marshal: appendDoubleSliceValue,
+ unmarshal: consumeDoubleSliceValue,
+ merge: mergeListValue,
+}
+
+// sizeDoublePackedSliceValue returns the size of wire encoding a []float64 value as a packed repeated Double.
+func sizeDoublePackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := llen * protowire.SizeFixed64()
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendDoublePackedSliceValue encodes a []float64 value as a packed repeated Double.
+func appendDoublePackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := llen * protowire.SizeFixed64()
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendFixed64(b, math.Float64bits(v.Float()))
+ }
+ return b, nil
+}
+
+var coderDoublePackedSliceValue = valueCoderFuncs{
+ size: sizeDoublePackedSliceValue,
+ marshal: appendDoublePackedSliceValue,
+ unmarshal: consumeDoubleSliceValue,
+ merge: mergeListValue,
+}
+
+// sizeString returns the size of wire encoding a string pointer as a String.
+func sizeString(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ v := *p.String()
+ return f.tagsize + protowire.SizeBytes(len(v))
+}
+
+// appendString wire encodes a string pointer as a String.
+func appendString(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.String()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendString(b, v)
+ return b, nil
+}
+
+// consumeString wire decodes a string pointer as a String.
+func consumeString(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ *p.String() = string(v)
+ out.n = n
+ return out, nil
+}
+
+var coderString = pointerCoderFuncs{
+ size: sizeString,
+ marshal: appendString,
+ unmarshal: consumeString,
+ merge: mergeString,
+}
+
+// appendStringValidateUTF8 wire encodes a string pointer as a String.
+func appendStringValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.String()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendString(b, v)
+ if !utf8.ValidString(v) {
+ return b, errInvalidUTF8{}
+ }
+ return b, nil
+}
+
+// consumeStringValidateUTF8 wire decodes a string pointer as a String.
+func consumeStringValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ if !utf8.Valid(v) {
+ return out, errInvalidUTF8{}
+ }
+ *p.String() = string(v)
+ out.n = n
+ return out, nil
+}
+
+var coderStringValidateUTF8 = pointerCoderFuncs{
+ size: sizeString,
+ marshal: appendStringValidateUTF8,
+ unmarshal: consumeStringValidateUTF8,
+ merge: mergeString,
+}
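+
+// The ValidateUTF8 coder variants enforce the proto3 requirement that string
+// fields hold valid UTF-8; the check runs on both marshal and unmarshal and
+// reports errInvalidUTF8 on failure.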
+
+// sizeStringNoZero returns the size of wire encoding a string pointer as a String.
+// The zero value is not encoded.
+func sizeStringNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ v := *p.String()
+ if len(v) == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeBytes(len(v))
+}
+
+// appendStringNoZero wire encodes a string pointer as a String.
+// The zero value is not encoded.
+func appendStringNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.String()
+ if len(v) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendString(b, v)
+ return b, nil
+}
+
+var coderStringNoZero = pointerCoderFuncs{
+ size: sizeStringNoZero,
+ marshal: appendStringNoZero,
+ unmarshal: consumeString,
+ merge: mergeStringNoZero,
+}
+
+// appendStringNoZeroValidateUTF8 wire encodes a string pointer as a String.
+// The zero value is not encoded.
+func appendStringNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.String()
+ if len(v) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendString(b, v)
+ if !utf8.ValidString(v) {
+ return b, errInvalidUTF8{}
+ }
+ return b, nil
+}
+
+var coderStringNoZeroValidateUTF8 = pointerCoderFuncs{
+ size: sizeStringNoZero,
+ marshal: appendStringNoZeroValidateUTF8,
+ unmarshal: consumeStringValidateUTF8,
+ merge: mergeStringNoZero,
+}
+
+// sizeStringPtr returns the size of wire encoding a *string pointer as a String.
+// It panics if the pointer is nil.
+func sizeStringPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ v := **p.StringPtr()
+ return f.tagsize + protowire.SizeBytes(len(v))
+}
+
+// appendStringPtr wire encodes a *string pointer as a String.
+// It panics if the pointer is nil.
+func appendStringPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := **p.StringPtr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendString(b, v)
+ return b, nil
+}
+
+// consumeStringPtr wire decodes a *string pointer as a String.
+func consumeStringPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ vp := p.StringPtr()
+ if *vp == nil {
+ *vp = new(string)
+ }
+ **vp = string(v)
+ out.n = n
+ return out, nil
+}
+
+var coderStringPtr = pointerCoderFuncs{
+ size: sizeStringPtr,
+ marshal: appendStringPtr,
+ unmarshal: consumeStringPtr,
+ merge: mergeStringPtr,
+}
+
+// appendStringPtrValidateUTF8 wire encodes a *string pointer as a String.
+// It panics if the pointer is nil.
+func appendStringPtrValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := **p.StringPtr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendString(b, v)
+ if !utf8.ValidString(v) {
+ return b, errInvalidUTF8{}
+ }
+ return b, nil
+}
+
+// consumeStringPtrValidateUTF8 wire decodes a *string pointer as a String.
+func consumeStringPtrValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ if !utf8.Valid(v) {
+ return out, errInvalidUTF8{}
+ }
+ vp := p.StringPtr()
+ if *vp == nil {
+ *vp = new(string)
+ }
+ **vp = string(v)
+ out.n = n
+ return out, nil
+}
+
+var coderStringPtrValidateUTF8 = pointerCoderFuncs{
+ size: sizeStringPtr,
+ marshal: appendStringPtrValidateUTF8,
+ unmarshal: consumeStringPtrValidateUTF8,
+ merge: mergeStringPtr,
+}
+
+// sizeStringSlice returns the size of wire encoding a []string pointer as a repeated String.
+func sizeStringSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := *p.StringSlice()
+ for _, v := range s {
+ size += f.tagsize + protowire.SizeBytes(len(v))
+ }
+ return size
+}
+
+// appendStringSlice encodes a []string pointer as a repeated String.
+func appendStringSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := *p.StringSlice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendString(b, v)
+ }
+ return b, nil
+}
+
+// consumeStringSlice wire decodes a []string pointer as a repeated String.
+func consumeStringSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.StringSlice()
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ *sp = append(*sp, string(v))
+ out.n = n
+ return out, nil
+}
+
+var coderStringSlice = pointerCoderFuncs{
+ size: sizeStringSlice,
+ marshal: appendStringSlice,
+ unmarshal: consumeStringSlice,
+ merge: mergeStringSlice,
+}
+
+// appendStringSliceValidateUTF8 encodes a []string pointer as a repeated String.
+func appendStringSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := *p.StringSlice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendString(b, v)
+ if !utf8.ValidString(v) {
+ return b, errInvalidUTF8{}
+ }
+ }
+ return b, nil
+}
+
+// consumeStringSliceValidateUTF8 wire decodes a []string pointer as a repeated String.
+func consumeStringSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ if !utf8.Valid(v) {
+ return out, errInvalidUTF8{}
+ }
+ sp := p.StringSlice()
+ *sp = append(*sp, string(v))
+ out.n = n
+ return out, nil
+}
+
+var coderStringSliceValidateUTF8 = pointerCoderFuncs{
+ size: sizeStringSlice,
+ marshal: appendStringSliceValidateUTF8,
+ unmarshal: consumeStringSliceValidateUTF8,
+ merge: mergeStringSlice,
+}
+
+// sizeStringValue returns the size of wire encoding a string value as a String.
+func sizeStringValue(v protoreflect.Value, tagsize int, opts marshalOptions) int {
+ return tagsize + protowire.SizeBytes(len(v.String()))
+}
+
+// appendStringValue encodes a string value as a String.
+func appendStringValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendString(b, v.String())
+ return b, nil
+}
+
+// consumeStringValue decodes a string value as a String.
+func consumeStringValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ out.n = n
+ return protoreflect.ValueOfString(string(v)), out, nil
+}
+
+var coderStringValue = valueCoderFuncs{
+ size: sizeStringValue,
+ marshal: appendStringValue,
+ unmarshal: consumeStringValue,
+ merge: mergeScalarValue,
+}
+
+// appendStringValueValidateUTF8 encodes a string value as a String.
+func appendStringValueValidateUTF8(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendString(b, v.String())
+ if !utf8.ValidString(v.String()) {
+ return b, errInvalidUTF8{}
+ }
+ return b, nil
+}
+
+// consumeStringValueValidateUTF8 decodes a string value as a String.
+func consumeStringValueValidateUTF8(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ if !utf8.Valid(v) {
+ return protoreflect.Value{}, out, errInvalidUTF8{}
+ }
+ out.n = n
+ return protoreflect.ValueOfString(string(v)), out, nil
+}
+
+var coderStringValueValidateUTF8 = valueCoderFuncs{
+ size: sizeStringValue,
+ marshal: appendStringValueValidateUTF8,
+ unmarshal: consumeStringValueValidateUTF8,
+ merge: mergeScalarValue,
+}
+
+// sizeStringSliceValue returns the size of wire encoding a []string value as a repeated String.
+func sizeStringSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ size += tagsize + protowire.SizeBytes(len(v.String()))
+ }
+ return size
+}
+
+// appendStringSliceValue encodes a []string value as a repeated String.
+func appendStringSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendString(b, v.String())
+ }
+ return b, nil
+}
+
+// consumeStringSliceValue wire decodes a []string value as a repeated String.
+func consumeStringSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp != protowire.BytesType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ list.Append(protoreflect.ValueOfString(string(v)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderStringSliceValue = valueCoderFuncs{
+ size: sizeStringSliceValue,
+ marshal: appendStringSliceValue,
+ unmarshal: consumeStringSliceValue,
+ merge: mergeListValue,
+}
+
+// sizeBytes returns the size of wire encoding a []byte pointer as a Bytes.
+func sizeBytes(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ v := *p.Bytes()
+ return f.tagsize + protowire.SizeBytes(len(v))
+}
+
+// appendBytes wire encodes a []byte pointer as a Bytes.
+func appendBytes(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.Bytes()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendBytes(b, v)
+ return b, nil
+}
+
+// consumeBytes wire decodes a []byte pointer as a Bytes.
+func consumeBytes(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ *p.Bytes() = append(emptyBuf[:], v...)
+ out.n = n
+ return out, nil
+}
+
+var coderBytes = pointerCoderFuncs{
+ size: sizeBytes,
+ marshal: appendBytes,
+ unmarshal: consumeBytes,
+ merge: mergeBytes,
+}
+
+// appendBytesValidateUTF8 wire encodes a []byte pointer as a Bytes.
+func appendBytesValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.Bytes()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendBytes(b, v)
+ if !utf8.Valid(v) {
+ return b, errInvalidUTF8{}
+ }
+ return b, nil
+}
+
+// consumeBytesValidateUTF8 wire decodes a []byte pointer as a Bytes.
+func consumeBytesValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ if !utf8.Valid(v) {
+ return out, errInvalidUTF8{}
+ }
+ *p.Bytes() = append(emptyBuf[:], v...)
+ out.n = n
+ return out, nil
+}
+
+var coderBytesValidateUTF8 = pointerCoderFuncs{
+ size: sizeBytes,
+ marshal: appendBytesValidateUTF8,
+ unmarshal: consumeBytesValidateUTF8,
+ merge: mergeBytes,
+}
+
+// sizeBytesNoZero returns the size of wire encoding a []byte pointer as a Bytes.
+// The zero value is not encoded.
+func sizeBytesNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ v := *p.Bytes()
+ if len(v) == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeBytes(len(v))
+}
+
+// appendBytesNoZero wire encodes a []byte pointer as a Bytes.
+// The zero value is not encoded.
+func appendBytesNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.Bytes()
+ if len(v) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendBytes(b, v)
+ return b, nil
+}
+
+// consumeBytesNoZero wire decodes a []byte pointer as a Bytes.
+// The zero value is not decoded.
+func consumeBytesNoZero(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ *p.Bytes() = append(([]byte)(nil), v...)
+ out.n = n
+ return out, nil
+}
+
+var coderBytesNoZero = pointerCoderFuncs{
+ size: sizeBytesNoZero,
+ marshal: appendBytesNoZero,
+ unmarshal: consumeBytesNoZero,
+ merge: mergeBytesNoZero,
+}
+
+// appendBytesNoZeroValidateUTF8 wire encodes a []byte pointer as a Bytes.
+// The zero value is not encoded.
+func appendBytesNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := *p.Bytes()
+ if len(v) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendBytes(b, v)
+ if !utf8.Valid(v) {
+ return b, errInvalidUTF8{}
+ }
+ return b, nil
+}
+
+// consumeBytesNoZeroValidateUTF8 wire decodes a []byte pointer as a Bytes.
+func consumeBytesNoZeroValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ if !utf8.Valid(v) {
+ return out, errInvalidUTF8{}
+ }
+ *p.Bytes() = append(([]byte)(nil), v...)
+ out.n = n
+ return out, nil
+}
+
+var coderBytesNoZeroValidateUTF8 = pointerCoderFuncs{
+ size: sizeBytesNoZero,
+ marshal: appendBytesNoZeroValidateUTF8,
+ unmarshal: consumeBytesNoZeroValidateUTF8,
+ merge: mergeBytesNoZero,
+}
+
+// sizeBytesSlice returns the size of wire encoding a [][]byte pointer as a repeated Bytes.
+func sizeBytesSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := *p.BytesSlice()
+ for _, v := range s {
+ size += f.tagsize + protowire.SizeBytes(len(v))
+ }
+ return size
+}
+
+// appendBytesSlice encodes a [][]byte pointer as a repeated Bytes.
+func appendBytesSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := *p.BytesSlice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendBytes(b, v)
+ }
+ return b, nil
+}
+
+// consumeBytesSlice wire decodes a [][]byte pointer as a repeated Bytes.
+func consumeBytesSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.BytesSlice()
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ *sp = append(*sp, append(emptyBuf[:], v...))
+ out.n = n
+ return out, nil
+}
+
+var coderBytesSlice = pointerCoderFuncs{
+ size: sizeBytesSlice,
+ marshal: appendBytesSlice,
+ unmarshal: consumeBytesSlice,
+ merge: mergeBytesSlice,
+}
+
+// appendBytesSliceValidateUTF8 encodes a [][]byte pointer as a repeated Bytes.
+func appendBytesSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := *p.BytesSlice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendBytes(b, v)
+ if !utf8.Valid(v) {
+ return b, errInvalidUTF8{}
+ }
+ }
+ return b, nil
+}
+
+// consumeBytesSliceValidateUTF8 wire decodes a [][]byte pointer as a repeated Bytes.
+func consumeBytesSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ if !utf8.Valid(v) {
+ return out, errInvalidUTF8{}
+ }
+ sp := p.BytesSlice()
+ *sp = append(*sp, append(emptyBuf[:], v...))
+ out.n = n
+ return out, nil
+}
+
+var coderBytesSliceValidateUTF8 = pointerCoderFuncs{
+ size: sizeBytesSlice,
+ marshal: appendBytesSliceValidateUTF8,
+ unmarshal: consumeBytesSliceValidateUTF8,
+ merge: mergeBytesSlice,
+}
+
+// sizeBytesValue returns the size of wire encoding a []byte value as a Bytes.
+func sizeBytesValue(v protoreflect.Value, tagsize int, opts marshalOptions) int {
+ return tagsize + protowire.SizeBytes(len(v.Bytes()))
+}
+
+// appendBytesValue encodes a []byte value as a Bytes.
+func appendBytesValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendBytes(b, v.Bytes())
+ return b, nil
+}
+
+// consumeBytesValue decodes a []byte value as a Bytes.
+func consumeBytesValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ out.n = n
+ return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), out, nil
+}
+
+var coderBytesValue = valueCoderFuncs{
+ size: sizeBytesValue,
+ marshal: appendBytesValue,
+ unmarshal: consumeBytesValue,
+ merge: mergeBytesValue,
+}
+
+// sizeBytesSliceValue returns the size of wire encoding a [][]byte value as a repeated Bytes.
+func sizeBytesSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ size += tagsize + protowire.SizeBytes(len(v.Bytes()))
+ }
+ return size
+}
+
+// appendBytesSliceValue encodes a [][]byte value as a repeated Bytes.
+func appendBytesSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendBytes(b, v.Bytes())
+ }
+ return b, nil
+}
+
+// consumeBytesSliceValue wire decodes a [][]byte value as a repeated Bytes.
+func consumeBytesSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp != protowire.BytesType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, errDecode
+ }
+ list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderBytesSliceValue = valueCoderFuncs{
+ size: sizeBytesSliceValue,
+ marshal: appendBytesSliceValue,
+ unmarshal: consumeBytesSliceValue,
+ merge: mergeBytesListValue,
+}
+
+// We append to an empty array rather than a nil []byte to get non-nil zero-length byte slices.
+var emptyBuf [0]byte
+
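+// wireTypes maps each scalar protoreflect.Kind to the protowire.Type used to
+// encode it. The wire type occupies the low three bits of a field's tag
+// (tag = fieldNumber<<3 | wireType).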
+var wireTypes = map[protoreflect.Kind]protowire.Type{
+ protoreflect.BoolKind: protowire.VarintType,
+ protoreflect.EnumKind: protowire.VarintType,
+ protoreflect.Int32Kind: protowire.VarintType,
+ protoreflect.Sint32Kind: protowire.VarintType,
+ protoreflect.Uint32Kind: protowire.VarintType,
+ protoreflect.Int64Kind: protowire.VarintType,
+ protoreflect.Sint64Kind: protowire.VarintType,
+ protoreflect.Uint64Kind: protowire.VarintType,
+ protoreflect.Sfixed32Kind: protowire.Fixed32Type,
+ protoreflect.Fixed32Kind: protowire.Fixed32Type,
+ protoreflect.FloatKind: protowire.Fixed32Type,
+ protoreflect.Sfixed64Kind: protowire.Fixed64Type,
+ protoreflect.Fixed64Kind: protowire.Fixed64Type,
+ protoreflect.DoubleKind: protowire.Fixed64Type,
+ protoreflect.StringKind: protowire.BytesType,
+ protoreflect.BytesKind: protowire.BytesType,
+ protoreflect.MessageKind: protowire.BytesType,
+ protoreflect.GroupKind: protowire.StartGroupType,
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
new file mode 100644
index 0000000..c1245fe
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
@@ -0,0 +1,388 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "reflect"
+ "sort"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/genid"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type mapInfo struct {
+ goType reflect.Type
+ keyWiretag uint64
+ valWiretag uint64
+ keyFuncs valueCoderFuncs
+ valFuncs valueCoderFuncs
+ keyZero pref.Value
+ keyKind pref.Kind
+ conv *mapConverter
+}
+
+func encoderFuncsForMap(fd pref.FieldDescriptor, ft reflect.Type) (valueMessage *MessageInfo, funcs pointerCoderFuncs) {
+ // TODO: Consider generating specialized map coders.
+ keyField := fd.MapKey()
+ valField := fd.MapValue()
+ keyWiretag := protowire.EncodeTag(1, wireTypes[keyField.Kind()])
+ valWiretag := protowire.EncodeTag(2, wireTypes[valField.Kind()])
+ keyFuncs := encoderFuncsForValue(keyField)
+ valFuncs := encoderFuncsForValue(valField)
+ conv := newMapConverter(ft, fd)
+
+ mapi := &mapInfo{
+ goType: ft,
+ keyWiretag: keyWiretag,
+ valWiretag: valWiretag,
+ keyFuncs: keyFuncs,
+ valFuncs: valFuncs,
+ keyZero: keyField.Default(),
+ keyKind: keyField.Kind(),
+ conv: conv,
+ }
+ if valField.Kind() == pref.MessageKind {
+ valueMessage = getMessageInfo(ft.Elem())
+ }
+
+ funcs = pointerCoderFuncs{
+ size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+ return sizeMap(p.AsValueOf(ft).Elem(), mapi, f, opts)
+ },
+ marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ return appendMap(b, p.AsValueOf(ft).Elem(), mapi, f, opts)
+ },
+ unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
+ mp := p.AsValueOf(ft)
+ if mp.Elem().IsNil() {
+ mp.Elem().Set(reflect.MakeMap(mapi.goType))
+ }
+ if f.mi == nil {
+ return consumeMap(b, mp.Elem(), wtyp, mapi, f, opts)
+ } else {
+ return consumeMapOfMessage(b, mp.Elem(), wtyp, mapi, f, opts)
+ }
+ },
+ }
+ switch valField.Kind() {
+ case pref.MessageKind:
+ funcs.merge = mergeMapOfMessage
+ case pref.BytesKind:
+ funcs.merge = mergeMapOfBytes
+ default:
+ funcs.merge = mergeMap
+ }
+ if valFuncs.isInit != nil {
+ funcs.isInit = func(p pointer, f *coderFieldInfo) error {
+ return isInitMap(p.AsValueOf(ft).Elem(), mapi, f)
+ }
+ }
+ return valueMessage, funcs
+}
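+
+// The unmarshal closure above dispatches on f.mi: when the map value is a
+// message with an associated MessageInfo, consumeMapOfMessage decodes it
+// directly into a freshly allocated struct; otherwise consumeMap falls back
+// to the generic value coders for both key and value.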
+
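+// A map field is encoded as a repeated message, one entry per key/value pair,
+// with the key in field 1 and the value in field 2. Both field numbers are
+// small enough that their tags always fit in a single byte, which is why the
+// tag sizes below are constants.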
+const (
+ mapKeyTagSize = 1 // field 1, tag size 1.
+ mapValTagSize = 1 // field 2, tag size 1.
+)
+
+func sizeMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) int {
+ if mapv.Len() == 0 {
+ return 0
+ }
+ n := 0
+ iter := mapRange(mapv)
+ for iter.Next() {
+ key := mapi.conv.keyConv.PBValueOf(iter.Key()).MapKey()
+ keySize := mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts)
+ var valSize int
+ value := mapi.conv.valConv.PBValueOf(iter.Value())
+ if f.mi == nil {
+ valSize = mapi.valFuncs.size(value, mapValTagSize, opts)
+ } else {
+ p := pointerOfValue(iter.Value())
+ valSize += mapValTagSize
+ valSize += protowire.SizeBytes(f.mi.sizePointer(p, opts))
+ }
+ n += f.tagsize + protowire.SizeBytes(keySize+valSize)
+ }
+ return n
+}
+
+func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ var (
+ key = mapi.keyZero
+ val = mapi.conv.valConv.New()
+ )
+ for len(b) > 0 {
+ num, wtyp, n := protowire.ConsumeTag(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ if num > protowire.MaxValidNumber {
+ return out, errDecode
+ }
+ b = b[n:]
+ err := errUnknown
+ switch num {
+ case genid.MapEntry_Key_field_number:
+ var v pref.Value
+ var o unmarshalOutput
+ v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts)
+ if err != nil {
+ break
+ }
+ key = v
+ n = o.n
+ case genid.MapEntry_Value_field_number:
+ var v pref.Value
+ var o unmarshalOutput
+ v, o, err = mapi.valFuncs.unmarshal(b, val, num, wtyp, opts)
+ if err != nil {
+ break
+ }
+ val = v
+ n = o.n
+ }
+ if err == errUnknown {
+ n = protowire.ConsumeFieldValue(num, wtyp, b)
+ if n < 0 {
+ return out, errDecode
+ }
+ } else if err != nil {
+ return out, err
+ }
+ b = b[n:]
+ }
+ mapv.SetMapIndex(mapi.conv.keyConv.GoValueOf(key), mapi.conv.valConv.GoValueOf(val))
+ out.n = n
+ return out, nil
+}
+
+func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ var (
+ key = mapi.keyZero
+ val = reflect.New(f.mi.GoReflectType.Elem())
+ )
+ for len(b) > 0 {
+ num, wtyp, n := protowire.ConsumeTag(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ if num > protowire.MaxValidNumber {
+ return out, errDecode
+ }
+ b = b[n:]
+ err := errUnknown
+ switch num {
+ case 1:
+ var v pref.Value
+ var o unmarshalOutput
+ v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts)
+ if err != nil {
+ break
+ }
+ key = v
+ n = o.n
+ case 2:
+ if wtyp != protowire.BytesType {
+ break
+ }
+ var v []byte
+ v, n = protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ var o unmarshalOutput
+ o, err = f.mi.unmarshalPointer(v, pointerOfValue(val), 0, opts)
+ if o.initialized {
+ // Consider this map item initialized so long as we see
+ // an initialized value.
+ out.initialized = true
+ }
+ }
+ if err == errUnknown {
+ n = protowire.ConsumeFieldValue(num, wtyp, b)
+ if n < 0 {
+ return out, errDecode
+ }
+ } else if err != nil {
+ return out, err
+ }
+ b = b[n:]
+ }
+ mapv.SetMapIndex(mapi.conv.keyConv.GoValueOf(key), val)
+ out.n = n
+ return out, nil
+}
+
+func appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ if f.mi == nil {
+ key := mapi.conv.keyConv.PBValueOf(keyrv).MapKey()
+ val := mapi.conv.valConv.PBValueOf(valrv)
+ size := 0
+ size += mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts)
+ size += mapi.valFuncs.size(val, mapValTagSize, opts)
+ b = protowire.AppendVarint(b, uint64(size))
+ b, err := mapi.keyFuncs.marshal(b, key.Value(), mapi.keyWiretag, opts)
+ if err != nil {
+ return nil, err
+ }
+ return mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts)
+ } else {
+ key := mapi.conv.keyConv.PBValueOf(keyrv).MapKey()
+ val := pointerOfValue(valrv)
+ valSize := f.mi.sizePointer(val, opts)
+ size := 0
+ size += mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts)
+ size += mapValTagSize + protowire.SizeBytes(valSize)
+ b = protowire.AppendVarint(b, uint64(size))
+ b, err := mapi.keyFuncs.marshal(b, key.Value(), mapi.keyWiretag, opts)
+ if err != nil {
+ return nil, err
+ }
+ b = protowire.AppendVarint(b, mapi.valWiretag)
+ b = protowire.AppendVarint(b, uint64(valSize))
+ return f.mi.marshalAppendPointer(b, val, opts)
+ }
+}
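
For illustration, the entry layout produced by appendMapItem can be reproduced by hand with the protowire package, which also shows why both per-entry tags are a single byte (fields 1 and 2 of the synthetic map-entry message). A minimal sketch; the outer field number 3 and the sample key/value are arbitrary choices, not part of this code.

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	const mapFieldNum = 3 // arbitrary field number for the map field itself
	key, val := "id", uint64(5)

	// Size of the entry payload: key (field 1, string) + value (field 2, varint).
	entrySize := protowire.SizeTag(1) + protowire.SizeBytes(len(key)) +
		protowire.SizeTag(2) + protowire.SizeVarint(val)

	var b []byte
	b = protowire.AppendTag(b, mapFieldNum, protowire.BytesType) // outer map-entry tag
	b = protowire.AppendVarint(b, uint64(entrySize))             // entry length prefix
	b = protowire.AppendTag(b, 1, protowire.BytesType)           // key field
	b = protowire.AppendString(b, key)
	b = protowire.AppendTag(b, 2, protowire.VarintType) // value field
	b = protowire.AppendVarint(b, val)

	fmt.Printf("entry: %x (tag sizes: key=%d, val=%d)\n",
		b, protowire.SizeTag(1), protowire.SizeTag(2))
}
```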
+
+func appendMap(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ if mapv.Len() == 0 {
+ return b, nil
+ }
+ if opts.Deterministic() {
+ return appendMapDeterministic(b, mapv, mapi, f, opts)
+ }
+ iter := mapRange(mapv)
+ for iter.Next() {
+ var err error
+ b = protowire.AppendVarint(b, f.wiretag)
+ b, err = appendMapItem(b, iter.Key(), iter.Value(), mapi, f, opts)
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+}
+
+func appendMapDeterministic(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ keys := mapv.MapKeys()
+ sort.Slice(keys, func(i, j int) bool {
+ switch keys[i].Kind() {
+ case reflect.Bool:
+ return !keys[i].Bool() && keys[j].Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return keys[i].Int() < keys[j].Int()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return keys[i].Uint() < keys[j].Uint()
+ case reflect.Float32, reflect.Float64:
+ return keys[i].Float() < keys[j].Float()
+ case reflect.String:
+ return keys[i].String() < keys[j].String()
+ default:
+ panic("invalid kind: " + keys[i].Kind().String())
+ }
+ })
+ for _, key := range keys {
+ var err error
+ b = protowire.AppendVarint(b, f.wiretag)
+ b, err = appendMapItem(b, key, mapv.MapIndex(key), mapi, f, opts)
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+}
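
Callers reach this deterministic path by setting the Deterministic marshal option. A small sketch using structpb.Struct (whose fields live in a string-keyed map), showing that repeated marshals then yield byte-identical output; the sample keys and values are arbitrary.

```go
package main

import (
	"bytes"
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	s, err := structpb.NewStruct(map[string]interface{}{"b": 2.0, "a": 1.0, "c": 3.0})
	if err != nil {
		panic(err)
	}
	opts := proto.MarshalOptions{Deterministic: true}
	first, err := opts.Marshal(s)
	if err != nil {
		panic(err)
	}
	for i := 0; i < 100; i++ {
		next, _ := opts.Marshal(s)
		if !bytes.Equal(first, next) {
			panic("unexpected non-deterministic output")
		}
	}
	fmt.Printf("stable encoding: %x\n", first)
}
```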
+
+func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error {
+ if mi := f.mi; mi != nil {
+ mi.init()
+ if !mi.needsInitCheck {
+ return nil
+ }
+ iter := mapRange(mapv)
+ for iter.Next() {
+ val := pointerOfValue(iter.Value())
+ if err := mi.checkInitializedPointer(val); err != nil {
+ return err
+ }
+ }
+ } else {
+ iter := mapRange(mapv)
+ for iter.Next() {
+ val := mapi.conv.valConv.PBValueOf(iter.Value())
+ if err := mapi.valFuncs.isInit(val); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func mergeMap(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
+ dstm := dst.AsValueOf(f.ft).Elem()
+ srcm := src.AsValueOf(f.ft).Elem()
+ if srcm.Len() == 0 {
+ return
+ }
+ if dstm.IsNil() {
+ dstm.Set(reflect.MakeMap(f.ft))
+ }
+ iter := mapRange(srcm)
+ for iter.Next() {
+ dstm.SetMapIndex(iter.Key(), iter.Value())
+ }
+}
+
+func mergeMapOfBytes(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
+ dstm := dst.AsValueOf(f.ft).Elem()
+ srcm := src.AsValueOf(f.ft).Elem()
+ if srcm.Len() == 0 {
+ return
+ }
+ if dstm.IsNil() {
+ dstm.Set(reflect.MakeMap(f.ft))
+ }
+ iter := mapRange(srcm)
+ for iter.Next() {
+ dstm.SetMapIndex(iter.Key(), reflect.ValueOf(append(emptyBuf[:], iter.Value().Bytes()...)))
+ }
+}
+
+func mergeMapOfMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
+ dstm := dst.AsValueOf(f.ft).Elem()
+ srcm := src.AsValueOf(f.ft).Elem()
+ if srcm.Len() == 0 {
+ return
+ }
+ if dstm.IsNil() {
+ dstm.Set(reflect.MakeMap(f.ft))
+ }
+ iter := mapRange(srcm)
+ for iter.Next() {
+ val := reflect.New(f.ft.Elem().Elem())
+ if f.mi != nil {
+ f.mi.mergePointer(pointerOfValue(val), pointerOfValue(iter.Value()), opts)
+ } else {
+ opts.Merge(asMessage(val), asMessage(iter.Value()))
+ }
+ dstm.SetMapIndex(iter.Key(), val)
+ }
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go
new file mode 100644
index 0000000..2706bb6
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go
@@ -0,0 +1,37 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.12
+
+package impl
+
+import "reflect"
+
+type mapIter struct {
+ v reflect.Value
+ keys []reflect.Value
+}
+
+// mapRange provides a less-efficient equivalent to
+// the Go 1.12 reflect.Value.MapRange method.
+func mapRange(v reflect.Value) *mapIter {
+ return &mapIter{v: v}
+}
+
+func (i *mapIter) Next() bool {
+ if i.keys == nil {
+ i.keys = i.v.MapKeys()
+ } else {
+ i.keys = i.keys[1:]
+ }
+ return len(i.keys) > 0
+}
+
+func (i *mapIter) Key() reflect.Value {
+ return i.keys[0]
+}
+
+func (i *mapIter) Value() reflect.Value {
+ return i.v.MapIndex(i.keys[0])
+}
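
This fallback emulates what the reflect package provides natively from Go 1.12 onward (see codec_map_go112.go below). A short sketch contrasting the two iteration styles over an ordinary Go map:

```go
package main

import (
	"fmt"
	"reflect"
)

func main() {
	v := reflect.ValueOf(map[string]int{"x": 1, "y": 2})

	// Go 1.12+: the iterator that codec_map_go112.go forwards to.
	for it := v.MapRange(); it.Next(); {
		fmt.Println(it.Key(), it.Value())
	}

	// Pre-1.12 equivalent, as emulated above: materialize all keys up front.
	for _, k := range v.MapKeys() {
		fmt.Println(k, v.MapIndex(k))
	}
}
```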
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go
new file mode 100644
index 0000000..1533ef6
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go
@@ -0,0 +1,11 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.12
+
+package impl
+
+import "reflect"
+
+func mapRange(v reflect.Value) *reflect.MapIter { return v.MapRange() }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
new file mode 100644
index 0000000..cd40527
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
@@ -0,0 +1,217 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+ "sort"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/internal/order"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// coderMessageInfo contains per-message information used by the fast-path functions.
+// This is a different type from MessageInfo to keep MessageInfo as general-purpose as
+// possible.
+type coderMessageInfo struct {
+ methods piface.Methods
+
+ orderedCoderFields []*coderFieldInfo
+ denseCoderFields []*coderFieldInfo
+ coderFields map[protowire.Number]*coderFieldInfo
+ sizecacheOffset offset
+ unknownOffset offset
+ unknownPtrKind bool
+ extensionOffset offset
+ needsInitCheck bool
+ isMessageSet bool
+ numRequiredFields uint8
+}
+
+type coderFieldInfo struct {
+ funcs pointerCoderFuncs // fast-path per-field functions
+ mi *MessageInfo // field's message
+ ft reflect.Type
+ validation validationInfo // information used by message validation
+ num pref.FieldNumber // field number
+ offset offset // struct field offset
+ wiretag uint64 // field tag (number + wire type)
+ tagsize int // size of the varint-encoded tag
+ isPointer bool // true if IsNil may be called on the struct field
+ isRequired bool // true if field is required
+}
+
+func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
+ mi.sizecacheOffset = invalidOffset
+ mi.unknownOffset = invalidOffset
+ mi.extensionOffset = invalidOffset
+
+ if si.sizecacheOffset.IsValid() && si.sizecacheType == sizecacheType {
+ mi.sizecacheOffset = si.sizecacheOffset
+ }
+ if si.unknownOffset.IsValid() && (si.unknownType == unknownFieldsAType || si.unknownType == unknownFieldsBType) {
+ mi.unknownOffset = si.unknownOffset
+ mi.unknownPtrKind = si.unknownType.Kind() == reflect.Ptr
+ }
+ if si.extensionOffset.IsValid() && si.extensionType == extensionFieldsType {
+ mi.extensionOffset = si.extensionOffset
+ }
+
+ mi.coderFields = make(map[protowire.Number]*coderFieldInfo)
+ fields := mi.Desc.Fields()
+ preallocFields := make([]coderFieldInfo, fields.Len())
+ for i := 0; i < fields.Len(); i++ {
+ fd := fields.Get(i)
+
+ fs := si.fieldsByNumber[fd.Number()]
+ isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic()
+ if isOneof {
+ fs = si.oneofsByName[fd.ContainingOneof().Name()]
+ }
+ ft := fs.Type
+ var wiretag uint64
+ if !fd.IsPacked() {
+ wiretag = protowire.EncodeTag(fd.Number(), wireTypes[fd.Kind()])
+ } else {
+ wiretag = protowire.EncodeTag(fd.Number(), protowire.BytesType)
+ }
+ var fieldOffset offset
+ var funcs pointerCoderFuncs
+ var childMessage *MessageInfo
+ switch {
+ case ft == nil:
+ // This never occurs for generated message types.
+ // It implies that a hand-crafted type has missing Go fields
+ // for specific protobuf message fields.
+ funcs = pointerCoderFuncs{
+ size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+ return 0
+ },
+ marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ return nil, nil
+ },
+ unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
+ panic("missing Go struct field for " + string(fd.FullName()))
+ },
+ isInit: func(p pointer, f *coderFieldInfo) error {
+ panic("missing Go struct field for " + string(fd.FullName()))
+ },
+ merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
+ panic("missing Go struct field for " + string(fd.FullName()))
+ },
+ }
+ case isOneof:
+ fieldOffset = offsetOf(fs, mi.Exporter)
+ case fd.IsWeak():
+ fieldOffset = si.weakOffset
+ funcs = makeWeakMessageFieldCoder(fd)
+ default:
+ fieldOffset = offsetOf(fs, mi.Exporter)
+ childMessage, funcs = fieldCoder(fd, ft)
+ }
+ cf := &preallocFields[i]
+ *cf = coderFieldInfo{
+ num: fd.Number(),
+ offset: fieldOffset,
+ wiretag: wiretag,
+ ft: ft,
+ tagsize: protowire.SizeVarint(wiretag),
+ funcs: funcs,
+ mi: childMessage,
+ validation: newFieldValidationInfo(mi, si, fd, ft),
+ isPointer: fd.Cardinality() == pref.Repeated || fd.HasPresence(),
+ isRequired: fd.Cardinality() == pref.Required,
+ }
+ mi.orderedCoderFields = append(mi.orderedCoderFields, cf)
+ mi.coderFields[cf.num] = cf
+ }
+ for i, oneofs := 0, mi.Desc.Oneofs(); i < oneofs.Len(); i++ {
+ if od := oneofs.Get(i); !od.IsSynthetic() {
+ mi.initOneofFieldCoders(od, si)
+ }
+ }
+ if messageset.IsMessageSet(mi.Desc) {
+ if !mi.extensionOffset.IsValid() {
+ panic(fmt.Sprintf("%v: MessageSet with no extensions field", mi.Desc.FullName()))
+ }
+ if !mi.unknownOffset.IsValid() {
+ panic(fmt.Sprintf("%v: MessageSet with no unknown field", mi.Desc.FullName()))
+ }
+ mi.isMessageSet = true
+ }
+ sort.Slice(mi.orderedCoderFields, func(i, j int) bool {
+ return mi.orderedCoderFields[i].num < mi.orderedCoderFields[j].num
+ })
+
+ var maxDense pref.FieldNumber
+ for _, cf := range mi.orderedCoderFields {
+ if cf.num >= 16 && cf.num >= 2*maxDense {
+ break
+ }
+ maxDense = cf.num
+ }
+ mi.denseCoderFields = make([]*coderFieldInfo, maxDense+1)
+ for _, cf := range mi.orderedCoderFields {
+ if int(cf.num) >= len(mi.denseCoderFields) {
+ break
+ }
+ mi.denseCoderFields[cf.num] = cf
+ }
+
+ // To preserve compatibility with historic wire output, marshal oneofs last.
+ if mi.Desc.Oneofs().Len() > 0 {
+ sort.Slice(mi.orderedCoderFields, func(i, j int) bool {
+ fi := fields.ByNumber(mi.orderedCoderFields[i].num)
+ fj := fields.ByNumber(mi.orderedCoderFields[j].num)
+ return order.LegacyFieldOrder(fi, fj)
+ })
+ }
+
+ mi.needsInitCheck = needsInitCheck(mi.Desc)
+ if mi.methods.Marshal == nil && mi.methods.Size == nil {
+ mi.methods.Flags |= piface.SupportMarshalDeterministic
+ mi.methods.Marshal = mi.marshal
+ mi.methods.Size = mi.size
+ }
+ if mi.methods.Unmarshal == nil {
+ mi.methods.Flags |= piface.SupportUnmarshalDiscardUnknown
+ mi.methods.Unmarshal = mi.unmarshal
+ }
+ if mi.methods.CheckInitialized == nil {
+ mi.methods.CheckInitialized = mi.checkInitialized
+ }
+ if mi.methods.Merge == nil {
+ mi.methods.Merge = mi.merge
+ }
+}
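
The dense/sparse split built above keeps field lookup cheap: small, contiguous field numbers resolve with an O(1) slice index, and the table stops growing once a field number is at least 16 and at least double the previous maximum, so sparse or very large numbers fall back to the map. A standalone sketch of that heuristic using plain ints and strings rather than the library's internal types:

```go
package main

import (
	"fmt"
	"sort"
)

// fieldIndex sketches the two-tier lookup: a dense slice for small,
// contiguous field numbers plus a map fallback for sparse or large ones.
type fieldIndex struct {
	dense []string
	byNum map[int]string
}

func buildIndex(fields map[int]string) fieldIndex {
	nums := make([]int, 0, len(fields))
	for n := range fields {
		nums = append(nums, n)
	}
	sort.Ints(nums)

	// Same heuristic as above: stop extending the dense slice once a
	// field number is >= 16 and at least double the previous maximum.
	maxDense := 0
	for _, n := range nums {
		if n >= 16 && n >= 2*maxDense {
			break
		}
		maxDense = n
	}

	idx := fieldIndex{dense: make([]string, maxDense+1), byNum: fields}
	for n, name := range fields {
		if n <= maxDense {
			idx.dense[n] = name
		}
	}
	return idx
}

func (idx fieldIndex) get(n int) string {
	if n < len(idx.dense) && idx.dense[n] != "" {
		return idx.dense[n] // O(1) hit
	}
	return idx.byNum[n] // sparse fallback
}

func main() {
	idx := buildIndex(map[int]string{1: "id", 2: "name", 3: "email", 1000: "extension"})
	fmt.Println(idx.get(2), idx.get(1000))
}
```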
+
+// getUnknownBytes returns a *[]byte for the unknown fields.
+// It is the caller's responsibility to check whether the pointer is nil.
+// This function is specially designed to be inlineable.
+func (mi *MessageInfo) getUnknownBytes(p pointer) *[]byte {
+ if mi.unknownPtrKind {
+ return *p.Apply(mi.unknownOffset).BytesPtr()
+ } else {
+ return p.Apply(mi.unknownOffset).Bytes()
+ }
+}
+
+// mutableUnknownBytes returns a *[]byte for the unknown fields.
+// The returned pointer is guaranteed to not be nil.
+func (mi *MessageInfo) mutableUnknownBytes(p pointer) *[]byte {
+ if mi.unknownPtrKind {
+ bp := p.Apply(mi.unknownOffset).BytesPtr()
+ if *bp == nil {
+ *bp = new([]byte)
+ }
+ return *bp
+ } else {
+ return p.Apply(mi.unknownOffset).Bytes()
+ }
+}
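
The buffer returned by getUnknownBytes/mutableUnknownBytes is what lets messages round-trip fields they do not define. A small end-to-end sketch using emptypb.Empty (which declares no fields, so everything lands in the unknown buffer); the field number 1000 and value 7 are arbitrary:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	// Hand-encode a varint field (number 1000) that Empty does not define.
	var in []byte
	in = protowire.AppendTag(in, 1000, protowire.VarintType)
	in = protowire.AppendVarint(in, 7)

	m := &emptypb.Empty{}
	if err := proto.Unmarshal(in, m); err != nil {
		panic(err)
	}

	// The raw bytes are retained in the unknown-fields buffer ...
	fmt.Printf("unknown fields: %x\n", m.ProtoReflect().GetUnknown())

	// ... and emitted again on marshal.
	out, err := proto.Marshal(m)
	if err != nil {
		panic(err)
	}
	fmt.Printf("round-trip:     %x\n", out)
}
```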
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go
new file mode 100644
index 0000000..b7a23fa
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go
@@ -0,0 +1,123 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "sort"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/flags"
+)
+
+func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int) {
+ if !flags.ProtoLegacy {
+ return 0
+ }
+
+ ext := *p.Apply(mi.extensionOffset).Extensions()
+ for _, x := range ext {
+ xi := getExtensionFieldInfo(x.Type())
+ if xi.funcs.size == nil {
+ continue
+ }
+ num, _ := protowire.DecodeTag(xi.wiretag)
+ size += messageset.SizeField(num)
+ size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts)
+ }
+
+ if u := mi.getUnknownBytes(p); u != nil {
+ size += messageset.SizeUnknown(*u)
+ }
+
+ return size
+}
+
+func marshalMessageSet(mi *MessageInfo, b []byte, p pointer, opts marshalOptions) ([]byte, error) {
+ if !flags.ProtoLegacy {
+ return b, errors.New("no support for message_set_wire_format")
+ }
+
+ ext := *p.Apply(mi.extensionOffset).Extensions()
+ switch len(ext) {
+ case 0:
+ case 1:
+ // Fast-path for one extension: Don't bother sorting the keys.
+ for _, x := range ext {
+ var err error
+ b, err = marshalMessageSetField(mi, b, x, opts)
+ if err != nil {
+ return b, err
+ }
+ }
+ default:
+ // Sort the keys to provide a deterministic encoding.
+ // Not sure this is required, but the old code does it.
+ keys := make([]int, 0, len(ext))
+ for k := range ext {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+ for _, k := range keys {
+ var err error
+ b, err = marshalMessageSetField(mi, b, ext[int32(k)], opts)
+ if err != nil {
+ return b, err
+ }
+ }
+ }
+
+ if u := mi.getUnknownBytes(p); u != nil {
+ var err error
+ b, err = messageset.AppendUnknown(b, *u)
+ if err != nil {
+ return b, err
+ }
+ }
+
+ return b, nil
+}
+
+func marshalMessageSetField(mi *MessageInfo, b []byte, x ExtensionField, opts marshalOptions) ([]byte, error) {
+ xi := getExtensionFieldInfo(x.Type())
+ num, _ := protowire.DecodeTag(xi.wiretag)
+ b = messageset.AppendFieldStart(b, num)
+ b, err := xi.funcs.marshal(b, x.Value(), protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType), opts)
+ if err != nil {
+ return b, err
+ }
+ b = messageset.AppendFieldEnd(b)
+ return b, nil
+}
+
+func unmarshalMessageSet(mi *MessageInfo, b []byte, p pointer, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if !flags.ProtoLegacy {
+ return out, errors.New("no support for message_set_wire_format")
+ }
+
+ ep := p.Apply(mi.extensionOffset).Extensions()
+ if *ep == nil {
+ *ep = make(map[int32]ExtensionField)
+ }
+ ext := *ep
+ initialized := true
+ err = messageset.Unmarshal(b, true, func(num protowire.Number, v []byte) error {
+ o, err := mi.unmarshalExtension(v, num, protowire.BytesType, ext, opts)
+ if err == errUnknown {
+ u := mi.mutableUnknownBytes(p)
+ *u = protowire.AppendTag(*u, num, protowire.BytesType)
+ *u = append(*u, v...)
+ return nil
+ }
+ if !o.initialized {
+ initialized = false
+ }
+ return err
+ })
+ out.n = len(b)
+ out.initialized = initialized
+ return out, err
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
new file mode 100644
index 0000000..90705e3
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
@@ -0,0 +1,209 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build purego appengine
+
+package impl
+
+import (
+ "reflect"
+
+ "google.golang.org/protobuf/encoding/protowire"
+)
+
+func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := p.v.Elem().Int()
+ return f.tagsize + protowire.SizeVarint(uint64(v))
+}
+
+func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := p.v.Elem().Int()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ return b, nil
+}
+
+func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ p.v.Elem().SetInt(int64(v))
+ out.n = n
+ return out, nil
+}
+
+func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ dst.v.Elem().Set(src.v.Elem())
+}
+
+var coderEnum = pointerCoderFuncs{
+ size: sizeEnum,
+ marshal: appendEnum,
+ unmarshal: consumeEnum,
+ merge: mergeEnum,
+}
+
+func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ if p.v.Elem().Int() == 0 {
+ return 0
+ }
+ return sizeEnum(p, f, opts)
+}
+
+func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ if p.v.Elem().Int() == 0 {
+ return b, nil
+ }
+ return appendEnum(b, p, f, opts)
+}
+
+func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ if src.v.Elem().Int() != 0 {
+ dst.v.Elem().Set(src.v.Elem())
+ }
+}
+
+var coderEnumNoZero = pointerCoderFuncs{
+ size: sizeEnumNoZero,
+ marshal: appendEnumNoZero,
+ unmarshal: consumeEnum,
+ merge: mergeEnumNoZero,
+}
+
+func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ return sizeEnum(pointer{p.v.Elem()}, f, opts)
+}
+
+func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ return appendEnum(b, pointer{p.v.Elem()}, f, opts)
+}
+
+func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ if p.v.Elem().IsNil() {
+ p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem()))
+ }
+ return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts)
+}
+
+func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ if !src.v.Elem().IsNil() {
+ v := reflect.New(dst.v.Type().Elem().Elem())
+ v.Elem().Set(src.v.Elem().Elem())
+ dst.v.Elem().Set(v)
+ }
+}
+
+var coderEnumPtr = pointerCoderFuncs{
+ size: sizeEnumPtr,
+ marshal: appendEnumPtr,
+ unmarshal: consumeEnumPtr,
+ merge: mergeEnumPtr,
+}
+
+func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := p.v.Elem()
+ for i, llen := 0, s.Len(); i < llen; i++ {
+ size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize
+ }
+ return size
+}
+
+func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := p.v.Elem()
+ for i, llen := 0, s.Len(); i < llen; i++ {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(s.Index(i).Int()))
+ }
+ return b, nil
+}
+
+func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ s := p.v.Elem()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ rv := reflect.New(s.Type().Elem()).Elem()
+ rv.SetInt(int64(v))
+ s.Set(reflect.Append(s, rv))
+ b = b[n:]
+ }
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ rv := reflect.New(s.Type().Elem()).Elem()
+ rv.SetInt(int64(v))
+ s.Set(reflect.Append(s, rv))
+ out.n = n
+ return out, nil
+}
+
+func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem()))
+}
+
+var coderEnumSlice = pointerCoderFuncs{
+ size: sizeEnumSlice,
+ marshal: appendEnumSlice,
+ unmarshal: consumeEnumSlice,
+ merge: mergeEnumSlice,
+}
+
+func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := p.v.Elem()
+ llen := s.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := 0
+ for i := 0; i < llen; i++ {
+ n += protowire.SizeVarint(uint64(s.Index(i).Int()))
+ }
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := p.v.Elem()
+ llen := s.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := 0
+ for i := 0; i < llen; i++ {
+ n += protowire.SizeVarint(uint64(s.Index(i).Int()))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ b = protowire.AppendVarint(b, uint64(s.Index(i).Int()))
+ }
+ return b, nil
+}
+
+var coderEnumPackedSlice = pointerCoderFuncs{
+ size: sizeEnumPackedSlice,
+ marshal: appendEnumPackedSlice,
+ unmarshal: consumeEnumSlice,
+ merge: mergeEnumSlice,
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go
new file mode 100644
index 0000000..e899712
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go
@@ -0,0 +1,557 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/strs"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// pointerCoderFuncs is a set of pointer encoding functions.
+type pointerCoderFuncs struct {
+ mi *MessageInfo
+ size func(p pointer, f *coderFieldInfo, opts marshalOptions) int
+ marshal func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error)
+ unmarshal func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error)
+ isInit func(p pointer, f *coderFieldInfo) error
+ merge func(dst, src pointer, f *coderFieldInfo, opts mergeOptions)
+}
+
+// valueCoderFuncs is a set of protoreflect.Value encoding functions.
+type valueCoderFuncs struct {
+ size func(v pref.Value, tagsize int, opts marshalOptions) int
+ marshal func(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error)
+ unmarshal func(b []byte, v pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error)
+ isInit func(v pref.Value) error
+ merge func(dst, src pref.Value, opts mergeOptions) pref.Value
+}
+
+// fieldCoder returns pointer functions for a field, used for operating on
+// struct fields.
+func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) {
+ switch {
+ case fd.IsMap():
+ return encoderFuncsForMap(fd, ft)
+ case fd.Cardinality() == pref.Repeated && !fd.IsPacked():
+ // Repeated fields (not packed).
+ if ft.Kind() != reflect.Slice {
+ break
+ }
+ ft := ft.Elem()
+ switch fd.Kind() {
+ case pref.BoolKind:
+ if ft.Kind() == reflect.Bool {
+ return nil, coderBoolSlice
+ }
+ case pref.EnumKind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderEnumSlice
+ }
+ case pref.Int32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderInt32Slice
+ }
+ case pref.Sint32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderSint32Slice
+ }
+ case pref.Uint32Kind:
+ if ft.Kind() == reflect.Uint32 {
+ return nil, coderUint32Slice
+ }
+ case pref.Int64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderInt64Slice
+ }
+ case pref.Sint64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderSint64Slice
+ }
+ case pref.Uint64Kind:
+ if ft.Kind() == reflect.Uint64 {
+ return nil, coderUint64Slice
+ }
+ case pref.Sfixed32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderSfixed32Slice
+ }
+ case pref.Fixed32Kind:
+ if ft.Kind() == reflect.Uint32 {
+ return nil, coderFixed32Slice
+ }
+ case pref.FloatKind:
+ if ft.Kind() == reflect.Float32 {
+ return nil, coderFloatSlice
+ }
+ case pref.Sfixed64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderSfixed64Slice
+ }
+ case pref.Fixed64Kind:
+ if ft.Kind() == reflect.Uint64 {
+ return nil, coderFixed64Slice
+ }
+ case pref.DoubleKind:
+ if ft.Kind() == reflect.Float64 {
+ return nil, coderDoubleSlice
+ }
+ case pref.StringKind:
+ if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) {
+ return nil, coderStringSliceValidateUTF8
+ }
+ if ft.Kind() == reflect.String {
+ return nil, coderStringSlice
+ }
+ if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 && strs.EnforceUTF8(fd) {
+ return nil, coderBytesSliceValidateUTF8
+ }
+ if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 {
+ return nil, coderBytesSlice
+ }
+ case pref.BytesKind:
+ if ft.Kind() == reflect.String {
+ return nil, coderStringSlice
+ }
+ if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 {
+ return nil, coderBytesSlice
+ }
+ case pref.MessageKind:
+ return getMessageInfo(ft), makeMessageSliceFieldCoder(fd, ft)
+ case pref.GroupKind:
+ return getMessageInfo(ft), makeGroupSliceFieldCoder(fd, ft)
+ }
+ case fd.Cardinality() == pref.Repeated && fd.IsPacked():
+ // Packed repeated fields.
+ //
+ // Only repeated fields of primitive numeric types
+ // (Varint, Fixed32, or Fixed64 wire type) can be packed.
+ if ft.Kind() != reflect.Slice {
+ break
+ }
+ ft := ft.Elem()
+ switch fd.Kind() {
+ case pref.BoolKind:
+ if ft.Kind() == reflect.Bool {
+ return nil, coderBoolPackedSlice
+ }
+ case pref.EnumKind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderEnumPackedSlice
+ }
+ case pref.Int32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderInt32PackedSlice
+ }
+ case pref.Sint32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderSint32PackedSlice
+ }
+ case pref.Uint32Kind:
+ if ft.Kind() == reflect.Uint32 {
+ return nil, coderUint32PackedSlice
+ }
+ case pref.Int64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderInt64PackedSlice
+ }
+ case pref.Sint64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderSint64PackedSlice
+ }
+ case pref.Uint64Kind:
+ if ft.Kind() == reflect.Uint64 {
+ return nil, coderUint64PackedSlice
+ }
+ case pref.Sfixed32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderSfixed32PackedSlice
+ }
+ case pref.Fixed32Kind:
+ if ft.Kind() == reflect.Uint32 {
+ return nil, coderFixed32PackedSlice
+ }
+ case pref.FloatKind:
+ if ft.Kind() == reflect.Float32 {
+ return nil, coderFloatPackedSlice
+ }
+ case pref.Sfixed64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderSfixed64PackedSlice
+ }
+ case pref.Fixed64Kind:
+ if ft.Kind() == reflect.Uint64 {
+ return nil, coderFixed64PackedSlice
+ }
+ case pref.DoubleKind:
+ if ft.Kind() == reflect.Float64 {
+ return nil, coderDoublePackedSlice
+ }
+ }
+ case fd.Kind() == pref.MessageKind:
+ return getMessageInfo(ft), makeMessageFieldCoder(fd, ft)
+ case fd.Kind() == pref.GroupKind:
+ return getMessageInfo(ft), makeGroupFieldCoder(fd, ft)
+ case fd.Syntax() == pref.Proto3 && fd.ContainingOneof() == nil:
+ // Populated oneof fields always encode even if set to the zero value,
+ // which normally are not encoded in proto3.
+ switch fd.Kind() {
+ case pref.BoolKind:
+ if ft.Kind() == reflect.Bool {
+ return nil, coderBoolNoZero
+ }
+ case pref.EnumKind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderEnumNoZero
+ }
+ case pref.Int32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderInt32NoZero
+ }
+ case pref.Sint32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderSint32NoZero
+ }
+ case pref.Uint32Kind:
+ if ft.Kind() == reflect.Uint32 {
+ return nil, coderUint32NoZero
+ }
+ case pref.Int64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderInt64NoZero
+ }
+ case pref.Sint64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderSint64NoZero
+ }
+ case pref.Uint64Kind:
+ if ft.Kind() == reflect.Uint64 {
+ return nil, coderUint64NoZero
+ }
+ case pref.Sfixed32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderSfixed32NoZero
+ }
+ case pref.Fixed32Kind:
+ if ft.Kind() == reflect.Uint32 {
+ return nil, coderFixed32NoZero
+ }
+ case pref.FloatKind:
+ if ft.Kind() == reflect.Float32 {
+ return nil, coderFloatNoZero
+ }
+ case pref.Sfixed64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderSfixed64NoZero
+ }
+ case pref.Fixed64Kind:
+ if ft.Kind() == reflect.Uint64 {
+ return nil, coderFixed64NoZero
+ }
+ case pref.DoubleKind:
+ if ft.Kind() == reflect.Float64 {
+ return nil, coderDoubleNoZero
+ }
+ case pref.StringKind:
+ if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) {
+ return nil, coderStringNoZeroValidateUTF8
+ }
+ if ft.Kind() == reflect.String {
+ return nil, coderStringNoZero
+ }
+ if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 && strs.EnforceUTF8(fd) {
+ return nil, coderBytesNoZeroValidateUTF8
+ }
+ if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 {
+ return nil, coderBytesNoZero
+ }
+ case pref.BytesKind:
+ if ft.Kind() == reflect.String {
+ return nil, coderStringNoZero
+ }
+ if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 {
+ return nil, coderBytesNoZero
+ }
+ }
+ case ft.Kind() == reflect.Ptr:
+ ft := ft.Elem()
+ switch fd.Kind() {
+ case pref.BoolKind:
+ if ft.Kind() == reflect.Bool {
+ return nil, coderBoolPtr
+ }
+ case pref.EnumKind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderEnumPtr
+ }
+ case pref.Int32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderInt32Ptr
+ }
+ case pref.Sint32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderSint32Ptr
+ }
+ case pref.Uint32Kind:
+ if ft.Kind() == reflect.Uint32 {
+ return nil, coderUint32Ptr
+ }
+ case pref.Int64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderInt64Ptr
+ }
+ case pref.Sint64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderSint64Ptr
+ }
+ case pref.Uint64Kind:
+ if ft.Kind() == reflect.Uint64 {
+ return nil, coderUint64Ptr
+ }
+ case pref.Sfixed32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderSfixed32Ptr
+ }
+ case pref.Fixed32Kind:
+ if ft.Kind() == reflect.Uint32 {
+ return nil, coderFixed32Ptr
+ }
+ case pref.FloatKind:
+ if ft.Kind() == reflect.Float32 {
+ return nil, coderFloatPtr
+ }
+ case pref.Sfixed64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderSfixed64Ptr
+ }
+ case pref.Fixed64Kind:
+ if ft.Kind() == reflect.Uint64 {
+ return nil, coderFixed64Ptr
+ }
+ case pref.DoubleKind:
+ if ft.Kind() == reflect.Float64 {
+ return nil, coderDoublePtr
+ }
+ case pref.StringKind:
+ if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) {
+ return nil, coderStringPtrValidateUTF8
+ }
+ if ft.Kind() == reflect.String {
+ return nil, coderStringPtr
+ }
+ case pref.BytesKind:
+ if ft.Kind() == reflect.String {
+ return nil, coderStringPtr
+ }
+ }
+ default:
+ switch fd.Kind() {
+ case pref.BoolKind:
+ if ft.Kind() == reflect.Bool {
+ return nil, coderBool
+ }
+ case pref.EnumKind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderEnum
+ }
+ case pref.Int32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderInt32
+ }
+ case pref.Sint32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderSint32
+ }
+ case pref.Uint32Kind:
+ if ft.Kind() == reflect.Uint32 {
+ return nil, coderUint32
+ }
+ case pref.Int64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderInt64
+ }
+ case pref.Sint64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderSint64
+ }
+ case pref.Uint64Kind:
+ if ft.Kind() == reflect.Uint64 {
+ return nil, coderUint64
+ }
+ case pref.Sfixed32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderSfixed32
+ }
+ case pref.Fixed32Kind:
+ if ft.Kind() == reflect.Uint32 {
+ return nil, coderFixed32
+ }
+ case pref.FloatKind:
+ if ft.Kind() == reflect.Float32 {
+ return nil, coderFloat
+ }
+ case pref.Sfixed64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderSfixed64
+ }
+ case pref.Fixed64Kind:
+ if ft.Kind() == reflect.Uint64 {
+ return nil, coderFixed64
+ }
+ case pref.DoubleKind:
+ if ft.Kind() == reflect.Float64 {
+ return nil, coderDouble
+ }
+ case pref.StringKind:
+ if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) {
+ return nil, coderStringValidateUTF8
+ }
+ if ft.Kind() == reflect.String {
+ return nil, coderString
+ }
+ if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 && strs.EnforceUTF8(fd) {
+ return nil, coderBytesValidateUTF8
+ }
+ if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 {
+ return nil, coderBytes
+ }
+ case pref.BytesKind:
+ if ft.Kind() == reflect.String {
+ return nil, coderString
+ }
+ if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 {
+ return nil, coderBytes
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid type: no encoder for %v %v %v/%v", fd.FullName(), fd.Cardinality(), fd.Kind(), ft))
+}
+
+// encoderFuncsForValue returns value functions for a field, used for
+// extension values and map encoding.
+func encoderFuncsForValue(fd pref.FieldDescriptor) valueCoderFuncs {
+ switch {
+ case fd.Cardinality() == pref.Repeated && !fd.IsPacked():
+ switch fd.Kind() {
+ case pref.BoolKind:
+ return coderBoolSliceValue
+ case pref.EnumKind:
+ return coderEnumSliceValue
+ case pref.Int32Kind:
+ return coderInt32SliceValue
+ case pref.Sint32Kind:
+ return coderSint32SliceValue
+ case pref.Uint32Kind:
+ return coderUint32SliceValue
+ case pref.Int64Kind:
+ return coderInt64SliceValue
+ case pref.Sint64Kind:
+ return coderSint64SliceValue
+ case pref.Uint64Kind:
+ return coderUint64SliceValue
+ case pref.Sfixed32Kind:
+ return coderSfixed32SliceValue
+ case pref.Fixed32Kind:
+ return coderFixed32SliceValue
+ case pref.FloatKind:
+ return coderFloatSliceValue
+ case pref.Sfixed64Kind:
+ return coderSfixed64SliceValue
+ case pref.Fixed64Kind:
+ return coderFixed64SliceValue
+ case pref.DoubleKind:
+ return coderDoubleSliceValue
+ case pref.StringKind:
+ // We don't have a UTF-8 validating coder for repeated string fields.
+ // Value coders are used for extensions and maps.
+ // Extensions are never proto3, and maps never contain lists.
+ return coderStringSliceValue
+ case pref.BytesKind:
+ return coderBytesSliceValue
+ case pref.MessageKind:
+ return coderMessageSliceValue
+ case pref.GroupKind:
+ return coderGroupSliceValue
+ }
+ case fd.Cardinality() == pref.Repeated && fd.IsPacked():
+ switch fd.Kind() {
+ case pref.BoolKind:
+ return coderBoolPackedSliceValue
+ case pref.EnumKind:
+ return coderEnumPackedSliceValue
+ case pref.Int32Kind:
+ return coderInt32PackedSliceValue
+ case pref.Sint32Kind:
+ return coderSint32PackedSliceValue
+ case pref.Uint32Kind:
+ return coderUint32PackedSliceValue
+ case pref.Int64Kind:
+ return coderInt64PackedSliceValue
+ case pref.Sint64Kind:
+ return coderSint64PackedSliceValue
+ case pref.Uint64Kind:
+ return coderUint64PackedSliceValue
+ case pref.Sfixed32Kind:
+ return coderSfixed32PackedSliceValue
+ case pref.Fixed32Kind:
+ return coderFixed32PackedSliceValue
+ case pref.FloatKind:
+ return coderFloatPackedSliceValue
+ case pref.Sfixed64Kind:
+ return coderSfixed64PackedSliceValue
+ case pref.Fixed64Kind:
+ return coderFixed64PackedSliceValue
+ case pref.DoubleKind:
+ return coderDoublePackedSliceValue
+ }
+ default:
+ switch fd.Kind() {
+ default:
+ case pref.BoolKind:
+ return coderBoolValue
+ case pref.EnumKind:
+ return coderEnumValue
+ case pref.Int32Kind:
+ return coderInt32Value
+ case pref.Sint32Kind:
+ return coderSint32Value
+ case pref.Uint32Kind:
+ return coderUint32Value
+ case pref.Int64Kind:
+ return coderInt64Value
+ case pref.Sint64Kind:
+ return coderSint64Value
+ case pref.Uint64Kind:
+ return coderUint64Value
+ case pref.Sfixed32Kind:
+ return coderSfixed32Value
+ case pref.Fixed32Kind:
+ return coderFixed32Value
+ case pref.FloatKind:
+ return coderFloatValue
+ case pref.Sfixed64Kind:
+ return coderSfixed64Value
+ case pref.Fixed64Kind:
+ return coderFixed64Value
+ case pref.DoubleKind:
+ return coderDoubleValue
+ case pref.StringKind:
+ if strs.EnforceUTF8(fd) {
+ return coderStringValueValidateUTF8
+ }
+ return coderStringValue
+ case pref.BytesKind:
+ return coderBytesValue
+ case pref.MessageKind:
+ return coderMessageValue
+ case pref.GroupKind:
+ return coderGroupValue
+ }
+ }
+ panic(fmt.Sprintf("invalid field: no encoder for %v %v %v", fd.FullName(), fd.Cardinality(), fd.Kind()))
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
new file mode 100644
index 0000000..e118af1
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
@@ -0,0 +1,17 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !purego,!appengine
+
+package impl
+
+// When using unsafe pointers, we can just treat enum values as int32s.
+
+var (
+ coderEnumNoZero = coderInt32NoZero
+ coderEnum = coderInt32
+ coderEnumPtr = coderInt32Ptr
+ coderEnumSlice = coderInt32Slice
+ coderEnumPackedSlice = coderInt32PackedSlice
+)
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go
new file mode 100644
index 0000000..acd61bb
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go
@@ -0,0 +1,496 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// unwrapper unwraps the value to the underlying value.
+// This is implemented by List and Map.
+type unwrapper interface {
+ protoUnwrap() interface{}
+}
+
+// A Converter converts to/from Go reflect.Value types and protobuf protoreflect.Value types.
+type Converter interface {
+ // PBValueOf converts a reflect.Value to a protoreflect.Value.
+ PBValueOf(reflect.Value) pref.Value
+
+ // GoValueOf converts a protoreflect.Value to a reflect.Value.
+ GoValueOf(pref.Value) reflect.Value
+
+ // IsValidPB returns whether a protoreflect.Value is compatible with this type.
+ IsValidPB(pref.Value) bool
+
+ // IsValidGo returns whether a reflect.Value is compatible with this type.
+ IsValidGo(reflect.Value) bool
+
+ // New returns a new field value.
+ // For scalars, it returns the default value of the field.
+ // For composite types, it returns a new mutable value.
+ New() pref.Value
+
+ // Zero returns a new field value.
+ // For scalars, it returns the default value of the field.
+ // For composite types, it returns an immutable, empty value.
+ Zero() pref.Value
+}
+
+// NewConverter matches a Go type with a protobuf field and returns a Converter
+// that converts between the two. Enums must be a named int32 kind that
+// implements protoreflect.Enum, and messages must be pointer to a named
+// struct type that implements protoreflect.ProtoMessage.
+//
+// This matcher deliberately supports a wider range of Go types than what
+// protoc-gen-go historically generated to be able to automatically wrap some
+// v1 messages generated by other forks of protoc-gen-go.
+func NewConverter(t reflect.Type, fd pref.FieldDescriptor) Converter {
+ switch {
+ case fd.IsList():
+ return newListConverter(t, fd)
+ case fd.IsMap():
+ return newMapConverter(t, fd)
+ default:
+ return newSingularConverter(t, fd)
+ }
+ panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName()))
+}
+
+var (
+ boolType = reflect.TypeOf(bool(false))
+ int32Type = reflect.TypeOf(int32(0))
+ int64Type = reflect.TypeOf(int64(0))
+ uint32Type = reflect.TypeOf(uint32(0))
+ uint64Type = reflect.TypeOf(uint64(0))
+ float32Type = reflect.TypeOf(float32(0))
+ float64Type = reflect.TypeOf(float64(0))
+ stringType = reflect.TypeOf(string(""))
+ bytesType = reflect.TypeOf([]byte(nil))
+ byteType = reflect.TypeOf(byte(0))
+)
+
+var (
+ boolZero = pref.ValueOfBool(false)
+ int32Zero = pref.ValueOfInt32(0)
+ int64Zero = pref.ValueOfInt64(0)
+ uint32Zero = pref.ValueOfUint32(0)
+ uint64Zero = pref.ValueOfUint64(0)
+ float32Zero = pref.ValueOfFloat32(0)
+ float64Zero = pref.ValueOfFloat64(0)
+ stringZero = pref.ValueOfString("")
+ bytesZero = pref.ValueOfBytes(nil)
+)
+
+func newSingularConverter(t reflect.Type, fd pref.FieldDescriptor) Converter {
+ defVal := func(fd pref.FieldDescriptor, zero pref.Value) pref.Value {
+ if fd.Cardinality() == pref.Repeated {
+ // Default isn't defined for repeated fields.
+ return zero
+ }
+ return fd.Default()
+ }
+ switch fd.Kind() {
+ case pref.BoolKind:
+ if t.Kind() == reflect.Bool {
+ return &boolConverter{t, defVal(fd, boolZero)}
+ }
+ case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind:
+ if t.Kind() == reflect.Int32 {
+ return &int32Converter{t, defVal(fd, int32Zero)}
+ }
+ case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
+ if t.Kind() == reflect.Int64 {
+ return &int64Converter{t, defVal(fd, int64Zero)}
+ }
+ case pref.Uint32Kind, pref.Fixed32Kind:
+ if t.Kind() == reflect.Uint32 {
+ return &uint32Converter{t, defVal(fd, uint32Zero)}
+ }
+ case pref.Uint64Kind, pref.Fixed64Kind:
+ if t.Kind() == reflect.Uint64 {
+ return &uint64Converter{t, defVal(fd, uint64Zero)}
+ }
+ case pref.FloatKind:
+ if t.Kind() == reflect.Float32 {
+ return &float32Converter{t, defVal(fd, float32Zero)}
+ }
+ case pref.DoubleKind:
+ if t.Kind() == reflect.Float64 {
+ return &float64Converter{t, defVal(fd, float64Zero)}
+ }
+ case pref.StringKind:
+ if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) {
+ return &stringConverter{t, defVal(fd, stringZero)}
+ }
+ case pref.BytesKind:
+ if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) {
+ return &bytesConverter{t, defVal(fd, bytesZero)}
+ }
+ case pref.EnumKind:
+ // Handle enums, which must be a named int32 type.
+ if t.Kind() == reflect.Int32 {
+ return newEnumConverter(t, fd)
+ }
+ case pref.MessageKind, pref.GroupKind:
+ return newMessageConverter(t)
+ }
+ panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName()))
+}
+
+type boolConverter struct {
+ goType reflect.Type
+ def pref.Value
+}
+
+func (c *boolConverter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ return pref.ValueOfBool(v.Bool())
+}
+func (c *boolConverter) GoValueOf(v pref.Value) reflect.Value {
+ return reflect.ValueOf(v.Bool()).Convert(c.goType)
+}
+func (c *boolConverter) IsValidPB(v pref.Value) bool {
+ _, ok := v.Interface().(bool)
+ return ok
+}
+func (c *boolConverter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+func (c *boolConverter) New() pref.Value { return c.def }
+func (c *boolConverter) Zero() pref.Value { return c.def }
+
+type int32Converter struct {
+ goType reflect.Type
+ def pref.Value
+}
+
+func (c *int32Converter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ return pref.ValueOfInt32(int32(v.Int()))
+}
+func (c *int32Converter) GoValueOf(v pref.Value) reflect.Value {
+ return reflect.ValueOf(int32(v.Int())).Convert(c.goType)
+}
+func (c *int32Converter) IsValidPB(v pref.Value) bool {
+ _, ok := v.Interface().(int32)
+ return ok
+}
+func (c *int32Converter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+func (c *int32Converter) New() pref.Value { return c.def }
+func (c *int32Converter) Zero() pref.Value { return c.def }
+
+type int64Converter struct {
+ goType reflect.Type
+ def pref.Value
+}
+
+func (c *int64Converter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ return pref.ValueOfInt64(int64(v.Int()))
+}
+func (c *int64Converter) GoValueOf(v pref.Value) reflect.Value {
+ return reflect.ValueOf(int64(v.Int())).Convert(c.goType)
+}
+func (c *int64Converter) IsValidPB(v pref.Value) bool {
+ _, ok := v.Interface().(int64)
+ return ok
+}
+func (c *int64Converter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+func (c *int64Converter) New() pref.Value { return c.def }
+func (c *int64Converter) Zero() pref.Value { return c.def }
+
+type uint32Converter struct {
+ goType reflect.Type
+ def pref.Value
+}
+
+func (c *uint32Converter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ return pref.ValueOfUint32(uint32(v.Uint()))
+}
+func (c *uint32Converter) GoValueOf(v pref.Value) reflect.Value {
+ return reflect.ValueOf(uint32(v.Uint())).Convert(c.goType)
+}
+func (c *uint32Converter) IsValidPB(v pref.Value) bool {
+ _, ok := v.Interface().(uint32)
+ return ok
+}
+func (c *uint32Converter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+func (c *uint32Converter) New() pref.Value { return c.def }
+func (c *uint32Converter) Zero() pref.Value { return c.def }
+
+type uint64Converter struct {
+ goType reflect.Type
+ def pref.Value
+}
+
+func (c *uint64Converter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ return pref.ValueOfUint64(uint64(v.Uint()))
+}
+func (c *uint64Converter) GoValueOf(v pref.Value) reflect.Value {
+ return reflect.ValueOf(uint64(v.Uint())).Convert(c.goType)
+}
+func (c *uint64Converter) IsValidPB(v pref.Value) bool {
+ _, ok := v.Interface().(uint64)
+ return ok
+}
+func (c *uint64Converter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+func (c *uint64Converter) New() pref.Value { return c.def }
+func (c *uint64Converter) Zero() pref.Value { return c.def }
+
+type float32Converter struct {
+ goType reflect.Type
+ def pref.Value
+}
+
+func (c *float32Converter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ return pref.ValueOfFloat32(float32(v.Float()))
+}
+func (c *float32Converter) GoValueOf(v pref.Value) reflect.Value {
+ return reflect.ValueOf(float32(v.Float())).Convert(c.goType)
+}
+func (c *float32Converter) IsValidPB(v pref.Value) bool {
+ _, ok := v.Interface().(float32)
+ return ok
+}
+func (c *float32Converter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+func (c *float32Converter) New() pref.Value { return c.def }
+func (c *float32Converter) Zero() pref.Value { return c.def }
+
+type float64Converter struct {
+ goType reflect.Type
+ def pref.Value
+}
+
+func (c *float64Converter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ return pref.ValueOfFloat64(float64(v.Float()))
+}
+func (c *float64Converter) GoValueOf(v pref.Value) reflect.Value {
+ return reflect.ValueOf(float64(v.Float())).Convert(c.goType)
+}
+func (c *float64Converter) IsValidPB(v pref.Value) bool {
+ _, ok := v.Interface().(float64)
+ return ok
+}
+func (c *float64Converter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+func (c *float64Converter) New() pref.Value { return c.def }
+func (c *float64Converter) Zero() pref.Value { return c.def }
+
+type stringConverter struct {
+ goType reflect.Type
+ def pref.Value
+}
+
+func (c *stringConverter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ return pref.ValueOfString(v.Convert(stringType).String())
+}
+func (c *stringConverter) GoValueOf(v pref.Value) reflect.Value {
+ // pref.Value.String never panics, so we go through an interface
+ // conversion here to check the type.
+ s := v.Interface().(string)
+ if c.goType.Kind() == reflect.Slice && s == "" {
+ return reflect.Zero(c.goType) // ensure empty string is []byte(nil)
+ }
+ return reflect.ValueOf(s).Convert(c.goType)
+}
+func (c *stringConverter) IsValidPB(v pref.Value) bool {
+ _, ok := v.Interface().(string)
+ return ok
+}
+func (c *stringConverter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+func (c *stringConverter) New() pref.Value { return c.def }
+func (c *stringConverter) Zero() pref.Value { return c.def }
+
+type bytesConverter struct {
+ goType reflect.Type
+ def pref.Value
+}
+
+func (c *bytesConverter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ if c.goType.Kind() == reflect.String && v.Len() == 0 {
+ return pref.ValueOfBytes(nil) // ensure empty string is []byte(nil)
+ }
+ return pref.ValueOfBytes(v.Convert(bytesType).Bytes())
+}
+func (c *bytesConverter) GoValueOf(v pref.Value) reflect.Value {
+ return reflect.ValueOf(v.Bytes()).Convert(c.goType)
+}
+func (c *bytesConverter) IsValidPB(v pref.Value) bool {
+ _, ok := v.Interface().([]byte)
+ return ok
+}
+func (c *bytesConverter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+func (c *bytesConverter) New() pref.Value { return c.def }
+func (c *bytesConverter) Zero() pref.Value { return c.def }
+
+type enumConverter struct {
+ goType reflect.Type
+ def pref.Value
+}
+
+func newEnumConverter(goType reflect.Type, fd pref.FieldDescriptor) Converter {
+ var def pref.Value
+ if fd.Cardinality() == pref.Repeated {
+ def = pref.ValueOfEnum(fd.Enum().Values().Get(0).Number())
+ } else {
+ def = fd.Default()
+ }
+ return &enumConverter{goType, def}
+}
+
+func (c *enumConverter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ return pref.ValueOfEnum(pref.EnumNumber(v.Int()))
+}
+
+func (c *enumConverter) GoValueOf(v pref.Value) reflect.Value {
+ return reflect.ValueOf(v.Enum()).Convert(c.goType)
+}
+
+func (c *enumConverter) IsValidPB(v pref.Value) bool {
+ _, ok := v.Interface().(pref.EnumNumber)
+ return ok
+}
+
+func (c *enumConverter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+
+func (c *enumConverter) New() pref.Value {
+ return c.def
+}
+
+func (c *enumConverter) Zero() pref.Value {
+ return c.def
+}
+
+type messageConverter struct {
+ goType reflect.Type
+}
+
+func newMessageConverter(goType reflect.Type) Converter {
+ return &messageConverter{goType}
+}
+
+func (c *messageConverter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ if c.isNonPointer() {
+ if v.CanAddr() {
+ v = v.Addr() // T => *T
+ } else {
+ v = reflect.Zero(reflect.PtrTo(v.Type()))
+ }
+ }
+ if m, ok := v.Interface().(pref.ProtoMessage); ok {
+ return pref.ValueOfMessage(m.ProtoReflect())
+ }
+ return pref.ValueOfMessage(legacyWrapMessage(v))
+}
+
+func (c *messageConverter) GoValueOf(v pref.Value) reflect.Value {
+ m := v.Message()
+ var rv reflect.Value
+ if u, ok := m.(unwrapper); ok {
+ rv = reflect.ValueOf(u.protoUnwrap())
+ } else {
+ rv = reflect.ValueOf(m.Interface())
+ }
+ if c.isNonPointer() {
+ if rv.Type() != reflect.PtrTo(c.goType) {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", rv.Type(), reflect.PtrTo(c.goType)))
+ }
+ if !rv.IsNil() {
+ rv = rv.Elem() // *T => T
+ } else {
+ rv = reflect.Zero(rv.Type().Elem())
+ }
+ }
+ if rv.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", rv.Type(), c.goType))
+ }
+ return rv
+}
+
+func (c *messageConverter) IsValidPB(v pref.Value) bool {
+ m := v.Message()
+ var rv reflect.Value
+ if u, ok := m.(unwrapper); ok {
+ rv = reflect.ValueOf(u.protoUnwrap())
+ } else {
+ rv = reflect.ValueOf(m.Interface())
+ }
+ if c.isNonPointer() {
+ return rv.Type() == reflect.PtrTo(c.goType)
+ }
+ return rv.Type() == c.goType
+}
+
+func (c *messageConverter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+
+func (c *messageConverter) New() pref.Value {
+ if c.isNonPointer() {
+ return c.PBValueOf(reflect.New(c.goType).Elem())
+ }
+ return c.PBValueOf(reflect.New(c.goType.Elem()))
+}
+
+func (c *messageConverter) Zero() pref.Value {
+ return c.PBValueOf(reflect.Zero(c.goType))
+}
+
+// isNonPointer reports whether the type is a non-pointer type.
+// This never occurs for generated message types.
+func (c *messageConverter) isNonPointer() bool {
+ return c.goType.Kind() != reflect.Ptr
+}
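
These converters sit underneath the reflective field accessors: getting or setting a field through protoreflect converts between the protoreflect.Value and the concrete Go representation. A small sketch against wrapperspb.Int32Value, which exercises the int32 conversion path:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	m := &wrapperspb.Int32Value{}
	mr := m.ProtoReflect()
	fd := mr.Descriptor().Fields().ByName("value")

	// The protoreflect.Value carries an int32; conversion turns it into
	// the concrete Go struct field on Set (and back again on Get).
	mr.Set(fd, protoreflect.ValueOfInt32(42))
	fmt.Println(m.Value, mr.Get(fd).Int()) // 42 42
}
```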
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go
new file mode 100644
index 0000000..6fccab5
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go
@@ -0,0 +1,141 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+func newListConverter(t reflect.Type, fd pref.FieldDescriptor) Converter {
+ switch {
+ case t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Slice:
+ return &listPtrConverter{t, newSingularConverter(t.Elem().Elem(), fd)}
+ case t.Kind() == reflect.Slice:
+ return &listConverter{t, newSingularConverter(t.Elem(), fd)}
+ }
+ panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName()))
+}
+
+type listConverter struct {
+ goType reflect.Type // []T
+ c Converter
+}
+
+func (c *listConverter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ pv := reflect.New(c.goType)
+ pv.Elem().Set(v)
+ return pref.ValueOfList(&listReflect{pv, c.c})
+}
+
+func (c *listConverter) GoValueOf(v pref.Value) reflect.Value {
+ rv := v.List().(*listReflect).v
+ if rv.IsNil() {
+ return reflect.Zero(c.goType)
+ }
+ return rv.Elem()
+}
+
+func (c *listConverter) IsValidPB(v pref.Value) bool {
+ list, ok := v.Interface().(*listReflect)
+ if !ok {
+ return false
+ }
+ return list.v.Type().Elem() == c.goType
+}
+
+func (c *listConverter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+
+func (c *listConverter) New() pref.Value {
+ return pref.ValueOfList(&listReflect{reflect.New(c.goType), c.c})
+}
+
+func (c *listConverter) Zero() pref.Value {
+ return pref.ValueOfList(&listReflect{reflect.Zero(reflect.PtrTo(c.goType)), c.c})
+}
+
+type listPtrConverter struct {
+ goType reflect.Type // *[]T
+ c Converter
+}
+
+func (c *listPtrConverter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ return pref.ValueOfList(&listReflect{v, c.c})
+}
+
+func (c *listPtrConverter) GoValueOf(v pref.Value) reflect.Value {
+ return v.List().(*listReflect).v
+}
+
+func (c *listPtrConverter) IsValidPB(v pref.Value) bool {
+ list, ok := v.Interface().(*listReflect)
+ if !ok {
+ return false
+ }
+ return list.v.Type() == c.goType
+}
+
+func (c *listPtrConverter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+
+func (c *listPtrConverter) New() pref.Value {
+ return c.PBValueOf(reflect.New(c.goType.Elem()))
+}
+
+func (c *listPtrConverter) Zero() pref.Value {
+ return c.PBValueOf(reflect.Zero(c.goType))
+}
+
+type listReflect struct {
+ v reflect.Value // *[]T
+ conv Converter
+}
+
+func (ls *listReflect) Len() int {
+ if ls.v.IsNil() {
+ return 0
+ }
+ return ls.v.Elem().Len()
+}
+func (ls *listReflect) Get(i int) pref.Value {
+ return ls.conv.PBValueOf(ls.v.Elem().Index(i))
+}
+func (ls *listReflect) Set(i int, v pref.Value) {
+ ls.v.Elem().Index(i).Set(ls.conv.GoValueOf(v))
+}
+func (ls *listReflect) Append(v pref.Value) {
+ ls.v.Elem().Set(reflect.Append(ls.v.Elem(), ls.conv.GoValueOf(v)))
+}
+func (ls *listReflect) AppendMutable() pref.Value {
+ if _, ok := ls.conv.(*messageConverter); !ok {
+ panic("invalid AppendMutable on list with non-message type")
+ }
+ v := ls.NewElement()
+ ls.Append(v)
+ return v
+}
+func (ls *listReflect) Truncate(i int) {
+ ls.v.Elem().Set(ls.v.Elem().Slice(0, i))
+}
+func (ls *listReflect) NewElement() pref.Value {
+ return ls.conv.New()
+}
+func (ls *listReflect) IsValid() bool {
+ return !ls.v.IsNil()
+}
+func (ls *listReflect) protoUnwrap() interface{} {
+ return ls.v.Interface()
+}
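
The list converters above keep a pointer to the slice (*[]T) so that changes made through the protoreflect.List view are visible to the owning message field. The following standalone sketch (hypothetical values, not part of the vendored package) shows the same reflect-based append that listReflect.Append performs:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	s := []int32{1, 2}
	pv := reflect.ValueOf(&s) // *[]int32, the shape listReflect stores

	// Equivalent of listReflect.Append: grow the slice behind the pointer
	// so the change is visible through the original variable.
	pv.Elem().Set(reflect.Append(pv.Elem(), reflect.ValueOf(int32(3))))

	fmt.Println(s) // [1 2 3]
}
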
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
new file mode 100644
index 0000000..de06b25
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
@@ -0,0 +1,121 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type mapConverter struct {
+ goType reflect.Type // map[K]V
+ keyConv, valConv Converter
+}
+
+func newMapConverter(t reflect.Type, fd pref.FieldDescriptor) *mapConverter {
+ if t.Kind() != reflect.Map {
+ panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName()))
+ }
+ return &mapConverter{
+ goType: t,
+ keyConv: newSingularConverter(t.Key(), fd.MapKey()),
+ valConv: newSingularConverter(t.Elem(), fd.MapValue()),
+ }
+}
+
+func (c *mapConverter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ return pref.ValueOfMap(&mapReflect{v, c.keyConv, c.valConv})
+}
+
+func (c *mapConverter) GoValueOf(v pref.Value) reflect.Value {
+ return v.Map().(*mapReflect).v
+}
+
+func (c *mapConverter) IsValidPB(v pref.Value) bool {
+ mapv, ok := v.Interface().(*mapReflect)
+ if !ok {
+ return false
+ }
+ return mapv.v.Type() == c.goType
+}
+
+func (c *mapConverter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+
+func (c *mapConverter) New() pref.Value {
+ return c.PBValueOf(reflect.MakeMap(c.goType))
+}
+
+func (c *mapConverter) Zero() pref.Value {
+ return c.PBValueOf(reflect.Zero(c.goType))
+}
+
+type mapReflect struct {
+ v reflect.Value // map[K]V
+ keyConv Converter
+ valConv Converter
+}
+
+func (ms *mapReflect) Len() int {
+ return ms.v.Len()
+}
+func (ms *mapReflect) Has(k pref.MapKey) bool {
+ rk := ms.keyConv.GoValueOf(k.Value())
+ rv := ms.v.MapIndex(rk)
+ return rv.IsValid()
+}
+func (ms *mapReflect) Get(k pref.MapKey) pref.Value {
+ rk := ms.keyConv.GoValueOf(k.Value())
+ rv := ms.v.MapIndex(rk)
+ if !rv.IsValid() {
+ return pref.Value{}
+ }
+ return ms.valConv.PBValueOf(rv)
+}
+func (ms *mapReflect) Set(k pref.MapKey, v pref.Value) {
+ rk := ms.keyConv.GoValueOf(k.Value())
+ rv := ms.valConv.GoValueOf(v)
+ ms.v.SetMapIndex(rk, rv)
+}
+func (ms *mapReflect) Clear(k pref.MapKey) {
+ rk := ms.keyConv.GoValueOf(k.Value())
+ ms.v.SetMapIndex(rk, reflect.Value{})
+}
+func (ms *mapReflect) Mutable(k pref.MapKey) pref.Value {
+ if _, ok := ms.valConv.(*messageConverter); !ok {
+ panic("invalid Mutable on map with non-message value type")
+ }
+ v := ms.Get(k)
+ if !v.IsValid() {
+ v = ms.NewValue()
+ ms.Set(k, v)
+ }
+ return v
+}
+func (ms *mapReflect) Range(f func(pref.MapKey, pref.Value) bool) {
+ iter := mapRange(ms.v)
+ for iter.Next() {
+ k := ms.keyConv.PBValueOf(iter.Key()).MapKey()
+ v := ms.valConv.PBValueOf(iter.Value())
+ if !f(k, v) {
+ return
+ }
+ }
+}
+func (ms *mapReflect) NewValue() pref.Value {
+ return ms.valConv.New()
+}
+func (ms *mapReflect) IsValid() bool {
+ return !ms.v.IsNil()
+}
+func (ms *mapReflect) protoUnwrap() interface{} {
+ return ms.v.Interface()
+}
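
mapReflect stores the Go map value directly; because maps are reference types, writes through the reflect.Value are visible to the original field, and passing a zero reflect.Value to SetMapIndex deletes a key, which is how Clear works. A minimal standalone sketch of both operations (the map contents are made up):

package main

import (
	"fmt"
	"reflect"
)

func main() {
	m := map[string]int32{"a": 1}
	rv := reflect.ValueOf(m) // the shape mapReflect stores

	// Equivalent of mapReflect.Set: insert or update an entry.
	rv.SetMapIndex(reflect.ValueOf("b"), reflect.ValueOf(int32(2)))
	// Equivalent of mapReflect.Clear: a zero Value deletes the entry.
	rv.SetMapIndex(reflect.ValueOf("a"), reflect.Value{})

	fmt.Println(m) // map[b:2]
}
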
diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go
new file mode 100644
index 0000000..949dc49
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go
@@ -0,0 +1,276 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "math/bits"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/flags"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ preg "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/runtime/protoiface"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+var errDecode = errors.New("cannot parse invalid wire-format data")
+
+type unmarshalOptions struct {
+ flags protoiface.UnmarshalInputFlags
+ resolver interface {
+ FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error)
+ FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error)
+ }
+}
+
+func (o unmarshalOptions) Options() proto.UnmarshalOptions {
+ return proto.UnmarshalOptions{
+ Merge: true,
+ AllowPartial: true,
+ DiscardUnknown: o.DiscardUnknown(),
+ Resolver: o.resolver,
+ }
+}
+
+func (o unmarshalOptions) DiscardUnknown() bool { return o.flags&piface.UnmarshalDiscardUnknown != 0 }
+
+func (o unmarshalOptions) IsDefault() bool {
+ return o.flags == 0 && o.resolver == preg.GlobalTypes
+}
+
+var lazyUnmarshalOptions = unmarshalOptions{
+ resolver: preg.GlobalTypes,
+}
+
+type unmarshalOutput struct {
+ n int // number of bytes consumed
+ initialized bool
+}
+
+// unmarshal is protoreflect.Methods.Unmarshal.
+func (mi *MessageInfo) unmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) {
+ var p pointer
+ if ms, ok := in.Message.(*messageState); ok {
+ p = ms.pointer()
+ } else {
+ p = in.Message.(*messageReflectWrapper).pointer()
+ }
+ out, err := mi.unmarshalPointer(in.Buf, p, 0, unmarshalOptions{
+ flags: in.Flags,
+ resolver: in.Resolver,
+ })
+ var flags piface.UnmarshalOutputFlags
+ if out.initialized {
+ flags |= piface.UnmarshalInitialized
+ }
+ return piface.UnmarshalOutput{
+ Flags: flags,
+ }, err
+}
+
+// errUnknown is returned during unmarshaling to indicate a parse error that
+// should result in a field being placed in the unknown fields section (for example,
+// when the wire type doesn't match) as opposed to the entire unmarshal operation
+// failing (for example, when a field extends past the available input).
+//
+// This is a sentinel error which should never be visible to the user.
+var errUnknown = errors.New("unknown")
+
+func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ mi.init()
+ if flags.ProtoLegacy && mi.isMessageSet {
+ return unmarshalMessageSet(mi, b, p, opts)
+ }
+ initialized := true
+ var requiredMask uint64
+ var exts *map[int32]ExtensionField
+ start := len(b)
+ for len(b) > 0 {
+ // Parse the tag (field number and wire type).
+ var tag uint64
+ if b[0] < 0x80 {
+ tag = uint64(b[0])
+ b = b[1:]
+ } else if len(b) >= 2 && b[1] < 128 {
+ tag = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ b = b[2:]
+ } else {
+ var n int
+ tag, n = protowire.ConsumeVarint(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ b = b[n:]
+ }
+ var num protowire.Number
+ if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) {
+ return out, errDecode
+ } else {
+ num = protowire.Number(n)
+ }
+ wtyp := protowire.Type(tag & 7)
+
+ if wtyp == protowire.EndGroupType {
+ if num != groupTag {
+ return out, errDecode
+ }
+ groupTag = 0
+ break
+ }
+
+ var f *coderFieldInfo
+ if int(num) < len(mi.denseCoderFields) {
+ f = mi.denseCoderFields[num]
+ } else {
+ f = mi.coderFields[num]
+ }
+ var n int
+ err := errUnknown
+ switch {
+ case f != nil:
+ if f.funcs.unmarshal == nil {
+ break
+ }
+ var o unmarshalOutput
+ o, err = f.funcs.unmarshal(b, p.Apply(f.offset), wtyp, f, opts)
+ n = o.n
+ if err != nil {
+ break
+ }
+ requiredMask |= f.validation.requiredBit
+ if f.funcs.isInit != nil && !o.initialized {
+ initialized = false
+ }
+ default:
+ // Possible extension.
+ if exts == nil && mi.extensionOffset.IsValid() {
+ exts = p.Apply(mi.extensionOffset).Extensions()
+ if *exts == nil {
+ *exts = make(map[int32]ExtensionField)
+ }
+ }
+ if exts == nil {
+ break
+ }
+ var o unmarshalOutput
+ o, err = mi.unmarshalExtension(b, num, wtyp, *exts, opts)
+ if err != nil {
+ break
+ }
+ n = o.n
+ if !o.initialized {
+ initialized = false
+ }
+ }
+ if err != nil {
+ if err != errUnknown {
+ return out, err
+ }
+ n = protowire.ConsumeFieldValue(num, wtyp, b)
+ if n < 0 {
+ return out, errDecode
+ }
+ if !opts.DiscardUnknown() && mi.unknownOffset.IsValid() {
+ u := mi.mutableUnknownBytes(p)
+ *u = protowire.AppendTag(*u, num, wtyp)
+ *u = append(*u, b[:n]...)
+ }
+ }
+ b = b[n:]
+ }
+ if groupTag != 0 {
+ return out, errDecode
+ }
+ if mi.numRequiredFields > 0 && bits.OnesCount64(requiredMask) != int(mi.numRequiredFields) {
+ initialized = false
+ }
+ if initialized {
+ out.initialized = true
+ }
+ out.n = start - len(b)
+ return out, nil
+}
+
+func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp protowire.Type, exts map[int32]ExtensionField, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ x := exts[int32(num)]
+ xt := x.Type()
+ if xt == nil {
+ var err error
+ xt, err = opts.resolver.FindExtensionByNumber(mi.Desc.FullName(), num)
+ if err != nil {
+ if err == preg.NotFound {
+ return out, errUnknown
+ }
+ return out, errors.New("%v: unable to resolve extension %v: %v", mi.Desc.FullName(), num, err)
+ }
+ }
+ xi := getExtensionFieldInfo(xt)
+ if xi.funcs.unmarshal == nil {
+ return out, errUnknown
+ }
+ if flags.LazyUnmarshalExtensions {
+ if opts.IsDefault() && x.canLazy(xt) {
+ out, valid := skipExtension(b, xi, num, wtyp, opts)
+ switch valid {
+ case ValidationValid:
+ if out.initialized {
+ x.appendLazyBytes(xt, xi, num, wtyp, b[:out.n])
+ exts[int32(num)] = x
+ return out, nil
+ }
+ case ValidationInvalid:
+ return out, errDecode
+ case ValidationUnknown:
+ }
+ }
+ }
+ ival := x.Value()
+ if !ival.IsValid() && xi.unmarshalNeedsValue {
+ // Create a new message, list, or map value to fill in.
+ // For enums, create a prototype value to let the unmarshal func know the
+ // concrete type.
+ ival = xt.New()
+ }
+ v, out, err := xi.funcs.unmarshal(b, ival, num, wtyp, opts)
+ if err != nil {
+ return out, err
+ }
+ if xi.funcs.isInit == nil {
+ out.initialized = true
+ }
+ x.Set(xt, v)
+ exts[int32(num)] = x
+ return out, nil
+}
+
+func skipExtension(b []byte, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, _ ValidationStatus) {
+ if xi.validation.mi == nil {
+ return out, ValidationUnknown
+ }
+ xi.validation.mi.init()
+ switch xi.validation.typ {
+ case validationTypeMessage:
+ if wtyp != protowire.BytesType {
+ return out, ValidationUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, ValidationUnknown
+ }
+ out, st := xi.validation.mi.validate(v, 0, opts)
+ out.n = n
+ return out, st
+ case validationTypeGroup:
+ if wtyp != protowire.StartGroupType {
+ return out, ValidationUnknown
+ }
+ out, st := xi.validation.mi.validate(b, num, opts)
+ return out, st
+ default:
+ return out, ValidationUnknown
+ }
+}
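
The tag-parsing fast path in unmarshalPointer hand-decodes one- and two-byte varints before falling back to protowire.ConsumeVarint; in every case the decoded tag splits into a field number (tag >> 3) and a wire type (tag & 7). A small sketch of that split using the public protowire package (the example bytes are made up):

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Wire tag for field number 1 with wire type 2 (length-delimited): (1<<3)|2.
	b := []byte{0x0a}

	tag, n := protowire.ConsumeVarint(b)
	if n < 0 {
		panic("invalid varint")
	}
	num := protowire.Number(tag >> 3)
	wtyp := protowire.Type(tag & 7)
	fmt.Println(num, wtyp == protowire.BytesType) // 1 true
}
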
diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go
new file mode 100644
index 0000000..845c67d
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go
@@ -0,0 +1,201 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "math"
+ "sort"
+ "sync/atomic"
+
+ "google.golang.org/protobuf/internal/flags"
+ proto "google.golang.org/protobuf/proto"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+type marshalOptions struct {
+ flags piface.MarshalInputFlags
+}
+
+func (o marshalOptions) Options() proto.MarshalOptions {
+ return proto.MarshalOptions{
+ AllowPartial: true,
+ Deterministic: o.Deterministic(),
+ UseCachedSize: o.UseCachedSize(),
+ }
+}
+
+func (o marshalOptions) Deterministic() bool { return o.flags&piface.MarshalDeterministic != 0 }
+func (o marshalOptions) UseCachedSize() bool { return o.flags&piface.MarshalUseCachedSize != 0 }
+
+// size is protoreflect.Methods.Size.
+func (mi *MessageInfo) size(in piface.SizeInput) piface.SizeOutput {
+ var p pointer
+ if ms, ok := in.Message.(*messageState); ok {
+ p = ms.pointer()
+ } else {
+ p = in.Message.(*messageReflectWrapper).pointer()
+ }
+ size := mi.sizePointer(p, marshalOptions{
+ flags: in.Flags,
+ })
+ return piface.SizeOutput{Size: size}
+}
+
+func (mi *MessageInfo) sizePointer(p pointer, opts marshalOptions) (size int) {
+ mi.init()
+ if p.IsNil() {
+ return 0
+ }
+ if opts.UseCachedSize() && mi.sizecacheOffset.IsValid() {
+ if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size >= 0 {
+ return int(size)
+ }
+ }
+ return mi.sizePointerSlow(p, opts)
+}
+
+func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int) {
+ if flags.ProtoLegacy && mi.isMessageSet {
+ size = sizeMessageSet(mi, p, opts)
+ if mi.sizecacheOffset.IsValid() {
+ atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size))
+ }
+ return size
+ }
+ if mi.extensionOffset.IsValid() {
+ e := p.Apply(mi.extensionOffset).Extensions()
+ size += mi.sizeExtensions(e, opts)
+ }
+ for _, f := range mi.orderedCoderFields {
+ if f.funcs.size == nil {
+ continue
+ }
+ fptr := p.Apply(f.offset)
+ if f.isPointer && fptr.Elem().IsNil() {
+ continue
+ }
+ size += f.funcs.size(fptr, f, opts)
+ }
+ if mi.unknownOffset.IsValid() {
+ if u := mi.getUnknownBytes(p); u != nil {
+ size += len(*u)
+ }
+ }
+ if mi.sizecacheOffset.IsValid() {
+ if size > math.MaxInt32 {
+ // The size is too large for the int32 sizecache field.
+ // We will need to recompute the size when encoding;
+ // unfortunately expensive, but better than invalid output.
+ atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), -1)
+ } else {
+ atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size))
+ }
+ }
+ return size
+}
+
+// marshal is protoreflect.Methods.Marshal.
+func (mi *MessageInfo) marshal(in piface.MarshalInput) (out piface.MarshalOutput, err error) {
+ var p pointer
+ if ms, ok := in.Message.(*messageState); ok {
+ p = ms.pointer()
+ } else {
+ p = in.Message.(*messageReflectWrapper).pointer()
+ }
+ b, err := mi.marshalAppendPointer(in.Buf, p, marshalOptions{
+ flags: in.Flags,
+ })
+ return piface.MarshalOutput{Buf: b}, err
+}
+
+func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOptions) ([]byte, error) {
+ mi.init()
+ if p.IsNil() {
+ return b, nil
+ }
+ if flags.ProtoLegacy && mi.isMessageSet {
+ return marshalMessageSet(mi, b, p, opts)
+ }
+ var err error
+	// The old marshaler encodes extensions at the beginning.
+ if mi.extensionOffset.IsValid() {
+ e := p.Apply(mi.extensionOffset).Extensions()
+ // TODO: Special handling for MessageSet?
+ b, err = mi.appendExtensions(b, e, opts)
+ if err != nil {
+ return b, err
+ }
+ }
+ for _, f := range mi.orderedCoderFields {
+ if f.funcs.marshal == nil {
+ continue
+ }
+ fptr := p.Apply(f.offset)
+ if f.isPointer && fptr.Elem().IsNil() {
+ continue
+ }
+ b, err = f.funcs.marshal(b, fptr, f, opts)
+ if err != nil {
+ return b, err
+ }
+ }
+ if mi.unknownOffset.IsValid() && !mi.isMessageSet {
+ if u := mi.getUnknownBytes(p); u != nil {
+ b = append(b, (*u)...)
+ }
+ }
+ return b, nil
+}
+
+func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) {
+ if ext == nil {
+ return 0
+ }
+ for _, x := range *ext {
+ xi := getExtensionFieldInfo(x.Type())
+ if xi.funcs.size == nil {
+ continue
+ }
+ n += xi.funcs.size(x.Value(), xi.tagsize, opts)
+ }
+ return n
+}
+
+func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, opts marshalOptions) ([]byte, error) {
+ if ext == nil {
+ return b, nil
+ }
+
+ switch len(*ext) {
+ case 0:
+ return b, nil
+ case 1:
+ // Fast-path for one extension: Don't bother sorting the keys.
+ var err error
+ for _, x := range *ext {
+ xi := getExtensionFieldInfo(x.Type())
+ b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts)
+ }
+ return b, err
+ default:
+ // Sort the keys to provide a deterministic encoding.
+ // Not sure this is required, but the old code does it.
+ keys := make([]int, 0, len(*ext))
+ for k := range *ext {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+ var err error
+ for _, k := range keys {
+ x := (*ext)[int32(k)]
+ xi := getExtensionFieldInfo(x.Type())
+ b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts)
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+ }
+}
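
appendExtensions sorts the extension field numbers before encoding so that Go's randomized map iteration order never leaks into the wire output. A minimal illustration of that technique, detached from the extension machinery (the field numbers are made up):

package main

import (
	"fmt"
	"sort"
)

func main() {
	ext := map[int32]string{30003: "c", 30001: "a", 30002: "b"}

	// Iterating the map directly would yield a random order; collecting and
	// sorting the keys makes the output deterministic, as appendExtensions does.
	keys := make([]int, 0, len(ext))
	for k := range ext {
		keys = append(keys, int(k))
	}
	sort.Ints(keys)
	for _, k := range keys {
		fmt.Println(k, ext[int32(k)])
	}
}
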
diff --git a/vendor/google.golang.org/protobuf/internal/impl/enum.go b/vendor/google.golang.org/protobuf/internal/impl/enum.go
new file mode 100644
index 0000000..8c1eab4
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/enum.go
@@ -0,0 +1,21 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "reflect"
+
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type EnumInfo struct {
+ GoReflectType reflect.Type // int32 kind
+ Desc pref.EnumDescriptor
+}
+
+func (t *EnumInfo) New(n pref.EnumNumber) pref.Enum {
+ return reflect.ValueOf(n).Convert(t.GoReflectType).Interface().(pref.Enum)
+}
+func (t *EnumInfo) Descriptor() pref.EnumDescriptor { return t.Desc }
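
EnumInfo.New builds a concrete enum value purely through reflection: the numeric value is converted to the generated Go type, which always has int32 kind. A standalone sketch, with MyEnum standing in for a generated enum type (it is illustrative only):

package main

import (
	"fmt"
	"reflect"
)

// MyEnum stands in for a generated enum type, which always has int32 kind.
type MyEnum int32

func main() {
	goType := reflect.TypeOf(MyEnum(0))

	// The same conversion EnumInfo.New performs: a plain int32 number is
	// converted to the concrete enum type via reflection.
	e := reflect.ValueOf(int32(3)).Convert(goType).Interface().(MyEnum)
	fmt.Printf("%T(%d)\n", e, e) // main.MyEnum(3)
}
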
diff --git a/vendor/google.golang.org/protobuf/internal/impl/extension.go b/vendor/google.golang.org/protobuf/internal/impl/extension.go
new file mode 100644
index 0000000..e904fd9
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/extension.go
@@ -0,0 +1,156 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "reflect"
+ "sync"
+ "sync/atomic"
+
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// ExtensionInfo implements ExtensionType.
+//
+// This type contains a number of exported fields for legacy compatibility.
+// The only non-deprecated use of this type is through the methods of the
+// ExtensionType interface.
+type ExtensionInfo struct {
+ // An ExtensionInfo may exist in several stages of initialization.
+ //
+ // extensionInfoUninitialized: Some or all of the legacy exported
+ // fields may be set, but none of the unexported fields have been
+ // initialized. This is the starting state for an ExtensionInfo
+ // in legacy generated code.
+ //
+ // extensionInfoDescInit: The desc field is set, but other unexported fields
+ // may not be initialized. Legacy exported fields may or may not be set.
+ // This is the starting state for an ExtensionInfo in newly generated code.
+ //
+ // extensionInfoFullInit: The ExtensionInfo is fully initialized.
+ // This state is only entered after lazy initialization is complete.
+ init uint32
+ mu sync.Mutex
+
+ goType reflect.Type
+ desc extensionTypeDescriptor
+ conv Converter
+ info *extensionFieldInfo // for fast-path method implementations
+
+ // ExtendedType is a typed nil-pointer to the parent message type that
+ // is being extended. It is possible for this to be unpopulated in v2
+ // since the message may no longer implement the MessageV1 interface.
+ //
+ // Deprecated: Use the ExtendedType method instead.
+ ExtendedType piface.MessageV1
+
+ // ExtensionType is the zero value of the extension type.
+ //
+ // For historical reasons, reflect.TypeOf(ExtensionType) and the
+ // type returned by InterfaceOf may not be identical.
+ //
+ // Deprecated: Use InterfaceOf(xt.Zero()) instead.
+ ExtensionType interface{}
+
+ // Field is the field number of the extension.
+ //
+ // Deprecated: Use the Descriptor().Number method instead.
+ Field int32
+
+ // Name is the fully qualified name of extension.
+ //
+ // Deprecated: Use the Descriptor().FullName method instead.
+ Name string
+
+ // Tag is the protobuf struct tag used in the v1 API.
+ //
+ // Deprecated: Do not use.
+ Tag string
+
+ // Filename is the proto filename in which the extension is defined.
+ //
+ // Deprecated: Use Descriptor().ParentFile().Path() instead.
+ Filename string
+}
+
+// Stages of initialization: See the ExtensionInfo.init field.
+const (
+ extensionInfoUninitialized = 0
+ extensionInfoDescInit = 1
+ extensionInfoFullInit = 2
+)
+
+func InitExtensionInfo(xi *ExtensionInfo, xd pref.ExtensionDescriptor, goType reflect.Type) {
+ xi.goType = goType
+ xi.desc = extensionTypeDescriptor{xd, xi}
+ xi.init = extensionInfoDescInit
+}
+
+func (xi *ExtensionInfo) New() pref.Value {
+ return xi.lazyInit().New()
+}
+func (xi *ExtensionInfo) Zero() pref.Value {
+ return xi.lazyInit().Zero()
+}
+func (xi *ExtensionInfo) ValueOf(v interface{}) pref.Value {
+ return xi.lazyInit().PBValueOf(reflect.ValueOf(v))
+}
+func (xi *ExtensionInfo) InterfaceOf(v pref.Value) interface{} {
+ return xi.lazyInit().GoValueOf(v).Interface()
+}
+func (xi *ExtensionInfo) IsValidValue(v pref.Value) bool {
+ return xi.lazyInit().IsValidPB(v)
+}
+func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool {
+ return xi.lazyInit().IsValidGo(reflect.ValueOf(v))
+}
+func (xi *ExtensionInfo) TypeDescriptor() pref.ExtensionTypeDescriptor {
+ if atomic.LoadUint32(&xi.init) < extensionInfoDescInit {
+ xi.lazyInitSlow()
+ }
+ return &xi.desc
+}
+
+func (xi *ExtensionInfo) lazyInit() Converter {
+ if atomic.LoadUint32(&xi.init) < extensionInfoFullInit {
+ xi.lazyInitSlow()
+ }
+ return xi.conv
+}
+
+func (xi *ExtensionInfo) lazyInitSlow() {
+ xi.mu.Lock()
+ defer xi.mu.Unlock()
+
+ if xi.init == extensionInfoFullInit {
+ return
+ }
+ defer atomic.StoreUint32(&xi.init, extensionInfoFullInit)
+
+ if xi.desc.ExtensionDescriptor == nil {
+ xi.initFromLegacy()
+ }
+ if !xi.desc.ExtensionDescriptor.IsPlaceholder() {
+ if xi.ExtensionType == nil {
+ xi.initToLegacy()
+ }
+ xi.conv = NewConverter(xi.goType, xi.desc.ExtensionDescriptor)
+ xi.info = makeExtensionFieldInfo(xi.desc.ExtensionDescriptor)
+ xi.info.validation = newValidationInfo(xi.desc.ExtensionDescriptor, xi.goType)
+ }
+}
+
+type extensionTypeDescriptor struct {
+ pref.ExtensionDescriptor
+ xi *ExtensionInfo
+}
+
+func (xtd *extensionTypeDescriptor) Type() pref.ExtensionType {
+ return xtd.xi
+}
+func (xtd *extensionTypeDescriptor) Descriptor() pref.ExtensionDescriptor {
+ return xtd.ExtensionDescriptor
+}
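
ExtensionInfo relies on a double-checked lazy-initialization pattern: an atomic load on the fast path, then a mutex, a re-check, and a deferred atomic store on the slow path. A generic sketch of the same pattern, with the lazy type and its field invented for illustration:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type lazy struct {
	done uint32
	mu   sync.Mutex
	val  string
}

func (l *lazy) get() string {
	// Fast path: a single atomic load once initialization has completed.
	if atomic.LoadUint32(&l.done) == 0 {
		l.initSlow()
	}
	return l.val
}

func (l *lazy) initSlow() {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.done == 1 {
		return // another goroutine finished first
	}
	defer atomic.StoreUint32(&l.done, 1)
	l.val = "initialized"
}

func main() {
	var l lazy
	fmt.Println(l.get())
}
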
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go
new file mode 100644
index 0000000..f7d7ffb
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go
@@ -0,0 +1,219 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+
+ "google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/internal/strs"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// legacyEnumName returns the name of enums used in legacy code.
+// It is neither the protobuf full name nor the qualified Go name,
+// but rather an odd hybrid of both.
+func legacyEnumName(ed pref.EnumDescriptor) string {
+ var protoPkg string
+ enumName := string(ed.FullName())
+ if fd := ed.ParentFile(); fd != nil {
+ protoPkg = string(fd.Package())
+ enumName = strings.TrimPrefix(enumName, protoPkg+".")
+ }
+ if protoPkg == "" {
+ return strs.GoCamelCase(enumName)
+ }
+ return protoPkg + "." + strs.GoCamelCase(enumName)
+}
+
+// legacyWrapEnum wraps v as a protoreflect.Enum,
+// where v must be a int32 kind and not implement the v2 API already.
+func legacyWrapEnum(v reflect.Value) pref.Enum {
+ et := legacyLoadEnumType(v.Type())
+ return et.New(pref.EnumNumber(v.Int()))
+}
+
+var legacyEnumTypeCache sync.Map // map[reflect.Type]protoreflect.EnumType
+
+// legacyLoadEnumType dynamically loads a protoreflect.EnumType for t,
+// where t must be an int32 kind and not implement the v2 API already.
+func legacyLoadEnumType(t reflect.Type) pref.EnumType {
+	// Fast-path: check if an EnumType is cached for this concrete type.
+ if et, ok := legacyEnumTypeCache.Load(t); ok {
+ return et.(pref.EnumType)
+ }
+
+ // Slow-path: derive enum descriptor and initialize EnumType.
+ var et pref.EnumType
+ ed := LegacyLoadEnumDesc(t)
+ et = &legacyEnumType{
+ desc: ed,
+ goType: t,
+ }
+ if et, ok := legacyEnumTypeCache.LoadOrStore(t, et); ok {
+ return et.(pref.EnumType)
+ }
+ return et
+}
+
+type legacyEnumType struct {
+ desc pref.EnumDescriptor
+ goType reflect.Type
+ m sync.Map // map[protoreflect.EnumNumber]proto.Enum
+}
+
+func (t *legacyEnumType) New(n pref.EnumNumber) pref.Enum {
+ if e, ok := t.m.Load(n); ok {
+ return e.(pref.Enum)
+ }
+ e := &legacyEnumWrapper{num: n, pbTyp: t, goTyp: t.goType}
+ t.m.Store(n, e)
+ return e
+}
+func (t *legacyEnumType) Descriptor() pref.EnumDescriptor {
+ return t.desc
+}
+
+type legacyEnumWrapper struct {
+ num pref.EnumNumber
+ pbTyp pref.EnumType
+ goTyp reflect.Type
+}
+
+func (e *legacyEnumWrapper) Descriptor() pref.EnumDescriptor {
+ return e.pbTyp.Descriptor()
+}
+func (e *legacyEnumWrapper) Type() pref.EnumType {
+ return e.pbTyp
+}
+func (e *legacyEnumWrapper) Number() pref.EnumNumber {
+ return e.num
+}
+func (e *legacyEnumWrapper) ProtoReflect() pref.Enum {
+ return e
+}
+func (e *legacyEnumWrapper) protoUnwrap() interface{} {
+ v := reflect.New(e.goTyp).Elem()
+ v.SetInt(int64(e.num))
+ return v.Interface()
+}
+
+var (
+ _ pref.Enum = (*legacyEnumWrapper)(nil)
+ _ unwrapper = (*legacyEnumWrapper)(nil)
+)
+
+var legacyEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor
+
+// LegacyLoadEnumDesc returns an EnumDescriptor derived from the Go type,
+// which must be an int32 kind and not implement the v2 API already.
+//
+// This is exported for testing purposes.
+func LegacyLoadEnumDesc(t reflect.Type) pref.EnumDescriptor {
+ // Fast-path: check if an EnumDescriptor is cached for this concrete type.
+ if ed, ok := legacyEnumDescCache.Load(t); ok {
+ return ed.(pref.EnumDescriptor)
+ }
+
+ // Slow-path: initialize EnumDescriptor from the raw descriptor.
+ ev := reflect.Zero(t).Interface()
+ if _, ok := ev.(pref.Enum); ok {
+ panic(fmt.Sprintf("%v already implements proto.Enum", t))
+ }
+ edV1, ok := ev.(enumV1)
+ if !ok {
+ return aberrantLoadEnumDesc(t)
+ }
+ b, idxs := edV1.EnumDescriptor()
+
+ var ed pref.EnumDescriptor
+ if len(idxs) == 1 {
+ ed = legacyLoadFileDesc(b).Enums().Get(idxs[0])
+ } else {
+ md := legacyLoadFileDesc(b).Messages().Get(idxs[0])
+ for _, i := range idxs[1 : len(idxs)-1] {
+ md = md.Messages().Get(i)
+ }
+ ed = md.Enums().Get(idxs[len(idxs)-1])
+ }
+ if ed, ok := legacyEnumDescCache.LoadOrStore(t, ed); ok {
+ return ed.(protoreflect.EnumDescriptor)
+ }
+ return ed
+}
+
+var aberrantEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor
+
+// aberrantLoadEnumDesc returns an EnumDescriptor derived from the Go type,
+// which must not implement protoreflect.Enum or enumV1.
+//
+// If the type does not implement enumV1, then there is no reliable
+// way to derive the original protobuf type information.
+// We are unable to use the global enum registry since it is
+// unfortunately keyed by the protobuf full name, which we also do not know.
+// Thus, this produces some bogus enum descriptor based on the Go type name.
+func aberrantLoadEnumDesc(t reflect.Type) pref.EnumDescriptor {
+ // Fast-path: check if an EnumDescriptor is cached for this concrete type.
+ if ed, ok := aberrantEnumDescCache.Load(t); ok {
+ return ed.(pref.EnumDescriptor)
+ }
+
+ // Slow-path: construct a bogus, but unique EnumDescriptor.
+ ed := &filedesc.Enum{L2: new(filedesc.EnumL2)}
+ ed.L0.FullName = AberrantDeriveFullName(t) // e.g., github_com.user.repo.MyEnum
+ ed.L0.ParentFile = filedesc.SurrogateProto3
+ ed.L2.Values.List = append(ed.L2.Values.List, filedesc.EnumValue{})
+
+ // TODO: Use the presence of a UnmarshalJSON method to determine proto2?
+
+ vd := &ed.L2.Values.List[0]
+ vd.L0.FullName = ed.L0.FullName + "_UNKNOWN" // e.g., github_com.user.repo.MyEnum_UNKNOWN
+ vd.L0.ParentFile = ed.L0.ParentFile
+ vd.L0.Parent = ed
+
+ // TODO: We could use the String method to obtain some enum value names by
+	// starting at 0 and printing the enum until it produces invalid identifiers.
+ // An exhaustive query is clearly impractical, but can be best-effort.
+
+ if ed, ok := aberrantEnumDescCache.LoadOrStore(t, ed); ok {
+ return ed.(pref.EnumDescriptor)
+ }
+ return ed
+}
+
+// AberrantDeriveFullName derives a fully qualified protobuf name for the given Go type.
+// The provided name is not guaranteed to be stable or universally unique.
+// It should be sufficiently unique within a program.
+//
+// This is exported for testing purposes.
+func AberrantDeriveFullName(t reflect.Type) pref.FullName {
+ sanitize := func(r rune) rune {
+ switch {
+ case r == '/':
+ return '.'
+ case 'a' <= r && r <= 'z', 'A' <= r && r <= 'Z', '0' <= r && r <= '9':
+ return r
+ default:
+ return '_'
+ }
+ }
+ prefix := strings.Map(sanitize, t.PkgPath())
+ suffix := strings.Map(sanitize, t.Name())
+ if suffix == "" {
+ suffix = fmt.Sprintf("UnknownX%X", reflect.ValueOf(t).Pointer())
+ }
+
+ ss := append(strings.Split(prefix, "."), suffix)
+ for i, s := range ss {
+ if s == "" || ('0' <= s[0] && s[0] <= '9') {
+ ss[i] = "x" + s
+ }
+ }
+ return pref.FullName(strings.Join(ss, "."))
+}
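
AberrantDeriveFullName maps the package path and type name onto an identifier-safe dotted name, e.g. github.com/user/repo becomes github_com.user.repo. Below is a standalone copy of that sanitizing logic so the resulting names can be inspected without importing the internal package; deriveFullName is a local helper written for illustration, not part of the package:

package main

import (
	"fmt"
	"strings"
)

// deriveFullName mirrors the sanitizing above: '/' becomes '.', and any rune
// that is not an ASCII letter or digit becomes '_'.
func deriveFullName(pkgPath, typeName string) string {
	sanitize := func(r rune) rune {
		switch {
		case r == '/':
			return '.'
		case 'a' <= r && r <= 'z', 'A' <= r && r <= 'Z', '0' <= r && r <= '9':
			return r
		default:
			return '_'
		}
	}
	ss := append(strings.Split(strings.Map(sanitize, pkgPath), "."), strings.Map(sanitize, typeName))
	for i, s := range ss {
		if s == "" || ('0' <= s[0] && s[0] <= '9') {
			ss[i] = "x" + s
		}
	}
	return strings.Join(ss, ".")
}

func main() {
	fmt.Println(deriveFullName("github.com/user/repo", "MyEnum"))
	// Output: github_com.user.repo.MyEnum
}
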
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go
new file mode 100644
index 0000000..e3fb0b5
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go
@@ -0,0 +1,92 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "encoding/binary"
+ "encoding/json"
+ "hash/crc32"
+ "math"
+ "reflect"
+
+ "google.golang.org/protobuf/internal/errors"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// These functions exist to support exported APIs in generated protobufs.
+// While these are deprecated, they cannot be removed for compatibility reasons.
+
+// LegacyEnumName returns the name of enums used in legacy code.
+func (Export) LegacyEnumName(ed pref.EnumDescriptor) string {
+ return legacyEnumName(ed)
+}
+
+// LegacyMessageTypeOf returns the protoreflect.MessageType for m,
+// with name used as the message name if necessary.
+func (Export) LegacyMessageTypeOf(m piface.MessageV1, name pref.FullName) pref.MessageType {
+ if mv := (Export{}).protoMessageV2Of(m); mv != nil {
+ return mv.ProtoReflect().Type()
+ }
+ return legacyLoadMessageType(reflect.TypeOf(m), name)
+}
+
+// UnmarshalJSONEnum unmarshals an enum from a JSON-encoded input.
+// The input can either be a string representing the enum value by name,
+// or a number representing the enum number itself.
+func (Export) UnmarshalJSONEnum(ed pref.EnumDescriptor, b []byte) (pref.EnumNumber, error) {
+ if b[0] == '"' {
+ var name pref.Name
+ if err := json.Unmarshal(b, &name); err != nil {
+ return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b)
+ }
+ ev := ed.Values().ByName(name)
+ if ev == nil {
+ return 0, errors.New("invalid value for enum %v: %s", ed.FullName(), name)
+ }
+ return ev.Number(), nil
+ } else {
+ var num pref.EnumNumber
+ if err := json.Unmarshal(b, &num); err != nil {
+ return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b)
+ }
+ return num, nil
+ }
+}
+
+// CompressGZIP compresses the input as a GZIP-encoded file.
+// The current implementation does no compression.
+func (Export) CompressGZIP(in []byte) (out []byte) {
+ // RFC 1952, section 2.3.1.
+ var gzipHeader = [10]byte{0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff}
+
+ // RFC 1951, section 3.2.4.
+ var blockHeader [5]byte
+ const maxBlockSize = math.MaxUint16
+ numBlocks := 1 + len(in)/maxBlockSize
+
+ // RFC 1952, section 2.3.1.
+ var gzipFooter [8]byte
+ binary.LittleEndian.PutUint32(gzipFooter[0:4], crc32.ChecksumIEEE(in))
+ binary.LittleEndian.PutUint32(gzipFooter[4:8], uint32(len(in)))
+
+ // Encode the input without compression using raw DEFLATE blocks.
+ out = make([]byte, 0, len(gzipHeader)+len(blockHeader)*numBlocks+len(in)+len(gzipFooter))
+ out = append(out, gzipHeader[:]...)
+ for blockHeader[0] == 0 {
+ blockSize := maxBlockSize
+ if blockSize > len(in) {
+ blockHeader[0] = 0x01 // final bit per RFC 1951, section 3.2.3.
+ blockSize = len(in)
+ }
+ binary.LittleEndian.PutUint16(blockHeader[1:3], uint16(blockSize)^0x0000)
+ binary.LittleEndian.PutUint16(blockHeader[3:5], uint16(blockSize)^0xffff)
+ out = append(out, blockHeader[:]...)
+ out = append(out, in[:blockSize]...)
+ in = in[blockSize:]
+ }
+ out = append(out, gzipFooter[:]...)
+ return out
+}
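
CompressGZIP hand-writes a gzip stream whose DEFLATE payload consists only of stored (uncompressed) blocks, so the output is a valid .gz file even though nothing is actually compressed. The same framing can be observed with the standard library by requesting gzip.NoCompression; a small round-trip sketch (the input bytes are placeholders):

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
)

func main() {
	in := []byte("raw FileDescriptorProto bytes would go here")

	// gzip.NoCompression also emits stored DEFLATE blocks, which is the same
	// framing CompressGZIP writes out by hand.
	var buf bytes.Buffer
	zw, _ := gzip.NewWriterLevel(&buf, gzip.NoCompression)
	zw.Write(in)
	zw.Close()

	// The result is a valid gzip stream that round-trips back to the input.
	zr, err := gzip.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	out, _ := ioutil.ReadAll(zr)
	fmt.Println(bytes.Equal(in, out)) // true
}
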
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
new file mode 100644
index 0000000..49e7231
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
@@ -0,0 +1,176 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "reflect"
+
+ "google.golang.org/protobuf/internal/descopts"
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ ptag "google.golang.org/protobuf/internal/encoding/tag"
+ "google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/internal/pragma"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ preg "google.golang.org/protobuf/reflect/protoregistry"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+func (xi *ExtensionInfo) initToLegacy() {
+ xd := xi.desc
+ var parent piface.MessageV1
+ messageName := xd.ContainingMessage().FullName()
+ if mt, _ := preg.GlobalTypes.FindMessageByName(messageName); mt != nil {
+ // Create a new parent message and unwrap it if possible.
+ mv := mt.New().Interface()
+ t := reflect.TypeOf(mv)
+ if mv, ok := mv.(unwrapper); ok {
+ t = reflect.TypeOf(mv.protoUnwrap())
+ }
+
+ // Check whether the message implements the legacy v1 Message interface.
+ mz := reflect.Zero(t).Interface()
+ if mz, ok := mz.(piface.MessageV1); ok {
+ parent = mz
+ }
+ }
+
+ // Determine the v1 extension type, which is unfortunately not the same as
+ // the v2 ExtensionType.GoType.
+ extType := xi.goType
+ switch extType.Kind() {
+ case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+ extType = reflect.PtrTo(extType) // T -> *T for singular scalar fields
+ }
+
+ // Reconstruct the legacy enum full name.
+ var enumName string
+ if xd.Kind() == pref.EnumKind {
+ enumName = legacyEnumName(xd.Enum())
+ }
+
+ // Derive the proto file that the extension was declared within.
+ var filename string
+ if fd := xd.ParentFile(); fd != nil {
+ filename = fd.Path()
+ }
+
+ // For MessageSet extensions, the name used is the parent message.
+ name := xd.FullName()
+ if messageset.IsMessageSetExtension(xd) {
+ name = name.Parent()
+ }
+
+ xi.ExtendedType = parent
+ xi.ExtensionType = reflect.Zero(extType).Interface()
+ xi.Field = int32(xd.Number())
+ xi.Name = string(name)
+ xi.Tag = ptag.Marshal(xd, enumName)
+ xi.Filename = filename
+}
+
+// initFromLegacy initializes an ExtensionInfo from
+// the contents of the deprecated exported fields of the type.
+func (xi *ExtensionInfo) initFromLegacy() {
+ // The v1 API returns "type incomplete" descriptors where only the
+ // field number is specified. In such a case, use a placeholder.
+ if xi.ExtendedType == nil || xi.ExtensionType == nil {
+ xd := placeholderExtension{
+ name: pref.FullName(xi.Name),
+ number: pref.FieldNumber(xi.Field),
+ }
+ xi.desc = extensionTypeDescriptor{xd, xi}
+ return
+ }
+
+ // Resolve enum or message dependencies.
+ var ed pref.EnumDescriptor
+ var md pref.MessageDescriptor
+ t := reflect.TypeOf(xi.ExtensionType)
+ isOptional := t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct
+ isRepeated := t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+ if isOptional || isRepeated {
+ t = t.Elem()
+ }
+ switch v := reflect.Zero(t).Interface().(type) {
+ case pref.Enum:
+ ed = v.Descriptor()
+ case enumV1:
+ ed = LegacyLoadEnumDesc(t)
+ case pref.ProtoMessage:
+ md = v.ProtoReflect().Descriptor()
+ case messageV1:
+ md = LegacyLoadMessageDesc(t)
+ }
+
+ // Derive basic field information from the struct tag.
+ var evs pref.EnumValueDescriptors
+ if ed != nil {
+ evs = ed.Values()
+ }
+ fd := ptag.Unmarshal(xi.Tag, t, evs).(*filedesc.Field)
+
+ // Construct a v2 ExtensionType.
+ xd := &filedesc.Extension{L2: new(filedesc.ExtensionL2)}
+ xd.L0.ParentFile = filedesc.SurrogateProto2
+ xd.L0.FullName = pref.FullName(xi.Name)
+ xd.L1.Number = pref.FieldNumber(xi.Field)
+ xd.L1.Cardinality = fd.L1.Cardinality
+ xd.L1.Kind = fd.L1.Kind
+ xd.L2.IsPacked = fd.L1.IsPacked
+ xd.L2.Default = fd.L1.Default
+ xd.L1.Extendee = Export{}.MessageDescriptorOf(xi.ExtendedType)
+ xd.L2.Enum = ed
+ xd.L2.Message = md
+
+ // Derive real extension field name for MessageSets.
+ if messageset.IsMessageSet(xd.L1.Extendee) && md.FullName() == xd.L0.FullName {
+ xd.L0.FullName = xd.L0.FullName.Append(messageset.ExtensionName)
+ }
+
+ tt := reflect.TypeOf(xi.ExtensionType)
+ if isOptional {
+ tt = tt.Elem()
+ }
+ xi.goType = tt
+ xi.desc = extensionTypeDescriptor{xd, xi}
+}
+
+type placeholderExtension struct {
+ name pref.FullName
+ number pref.FieldNumber
+}
+
+func (x placeholderExtension) ParentFile() pref.FileDescriptor { return nil }
+func (x placeholderExtension) Parent() pref.Descriptor { return nil }
+func (x placeholderExtension) Index() int { return 0 }
+func (x placeholderExtension) Syntax() pref.Syntax { return 0 }
+func (x placeholderExtension) Name() pref.Name { return x.name.Name() }
+func (x placeholderExtension) FullName() pref.FullName { return x.name }
+func (x placeholderExtension) IsPlaceholder() bool { return true }
+func (x placeholderExtension) Options() pref.ProtoMessage { return descopts.Field }
+func (x placeholderExtension) Number() pref.FieldNumber { return x.number }
+func (x placeholderExtension) Cardinality() pref.Cardinality { return 0 }
+func (x placeholderExtension) Kind() pref.Kind { return 0 }
+func (x placeholderExtension) HasJSONName() bool { return false }
+func (x placeholderExtension) JSONName() string { return "[" + string(x.name) + "]" }
+func (x placeholderExtension) TextName() string { return "[" + string(x.name) + "]" }
+func (x placeholderExtension) HasPresence() bool { return false }
+func (x placeholderExtension) HasOptionalKeyword() bool { return false }
+func (x placeholderExtension) IsExtension() bool { return true }
+func (x placeholderExtension) IsWeak() bool { return false }
+func (x placeholderExtension) IsPacked() bool { return false }
+func (x placeholderExtension) IsList() bool { return false }
+func (x placeholderExtension) IsMap() bool { return false }
+func (x placeholderExtension) MapKey() pref.FieldDescriptor { return nil }
+func (x placeholderExtension) MapValue() pref.FieldDescriptor { return nil }
+func (x placeholderExtension) HasDefault() bool { return false }
+func (x placeholderExtension) Default() pref.Value { return pref.Value{} }
+func (x placeholderExtension) DefaultEnumValue() pref.EnumValueDescriptor { return nil }
+func (x placeholderExtension) ContainingOneof() pref.OneofDescriptor { return nil }
+func (x placeholderExtension) ContainingMessage() pref.MessageDescriptor { return nil }
+func (x placeholderExtension) Enum() pref.EnumDescriptor { return nil }
+func (x placeholderExtension) Message() pref.MessageDescriptor { return nil }
+func (x placeholderExtension) ProtoType(pref.FieldDescriptor) { return }
+func (x placeholderExtension) ProtoInternal(pragma.DoNotImplement) { return }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go
new file mode 100644
index 0000000..9ab0910
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go
@@ -0,0 +1,81 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "bytes"
+ "compress/gzip"
+ "io/ioutil"
+ "sync"
+
+ "google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+// Every enum and message type generated by protoc-gen-go since commit 2fc053c5
+// on February 25th, 2016 has had a method to get the raw descriptor.
+// Types that were not generated by protoc-gen-go or were generated prior
+// to that version are not supported.
+//
+// The []byte returned is the encoded form of a FileDescriptorProto message
+// compressed using GZIP. The []int is the path from the top-level file
+// to the specific message or enum declaration.
+type (
+ enumV1 interface {
+ EnumDescriptor() ([]byte, []int)
+ }
+ messageV1 interface {
+ Descriptor() ([]byte, []int)
+ }
+)
+
+var legacyFileDescCache sync.Map // map[*byte]protoreflect.FileDescriptor
+
+// legacyLoadFileDesc unmarshals b as a compressed FileDescriptorProto message.
+//
+// This assumes that b is immutable and that b does not refer to part of a
+// concatenated series of GZIP files (which would require shenanigans that
+// rely on the concatenation properties of both protobufs and GZIP).
+// File descriptors generated by protoc-gen-go do not rely on that property.
+func legacyLoadFileDesc(b []byte) protoreflect.FileDescriptor {
+ // Fast-path: check whether we already have a cached file descriptor.
+ if fd, ok := legacyFileDescCache.Load(&b[0]); ok {
+ return fd.(protoreflect.FileDescriptor)
+ }
+
+ // Slow-path: decompress and unmarshal the file descriptor proto.
+ zr, err := gzip.NewReader(bytes.NewReader(b))
+ if err != nil {
+ panic(err)
+ }
+ b2, err := ioutil.ReadAll(zr)
+ if err != nil {
+ panic(err)
+ }
+
+ fd := filedesc.Builder{
+ RawDescriptor: b2,
+ FileRegistry: resolverOnly{protoregistry.GlobalFiles}, // do not register back to global registry
+ }.Build().File
+ if fd, ok := legacyFileDescCache.LoadOrStore(&b[0], fd); ok {
+ return fd.(protoreflect.FileDescriptor)
+ }
+ return fd
+}
+
+type resolverOnly struct {
+ reg *protoregistry.Files
+}
+
+func (r resolverOnly) FindFileByPath(path string) (protoreflect.FileDescriptor, error) {
+ return r.reg.FindFileByPath(path)
+}
+func (r resolverOnly) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) {
+ return r.reg.FindDescriptorByName(name)
+}
+func (resolverOnly) RegisterFile(protoreflect.FileDescriptor) error {
+ return nil
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
new file mode 100644
index 0000000..3759b01
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
@@ -0,0 +1,558 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+
+ "google.golang.org/protobuf/internal/descopts"
+ ptag "google.golang.org/protobuf/internal/encoding/tag"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/internal/strs"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// legacyWrapMessage wraps v as a protoreflect.Message,
+// where v must be a *struct kind and not implement the v2 API already.
+func legacyWrapMessage(v reflect.Value) pref.Message {
+ t := v.Type()
+ if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct {
+ return aberrantMessage{v: v}
+ }
+ mt := legacyLoadMessageInfo(t, "")
+ return mt.MessageOf(v.Interface())
+}
+
+// legacyLoadMessageType dynamically loads a protoreflect.Type for t,
+// where t must not implement the v2 API already.
+// The provided name is used if it cannot be determined from the message.
+func legacyLoadMessageType(t reflect.Type, name pref.FullName) protoreflect.MessageType {
+ if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct {
+ return aberrantMessageType{t}
+ }
+ return legacyLoadMessageInfo(t, name)
+}
+
+var legacyMessageTypeCache sync.Map // map[reflect.Type]*MessageInfo
+
+// legacyLoadMessageInfo dynamically loads a *MessageInfo for t,
+// where t must be a *struct kind and not implement the v2 API already.
+// The provided name is used if it cannot be determined from the message.
+func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo {
+ // Fast-path: check if a MessageInfo is cached for this concrete type.
+ if mt, ok := legacyMessageTypeCache.Load(t); ok {
+ return mt.(*MessageInfo)
+ }
+
+ // Slow-path: derive message descriptor and initialize MessageInfo.
+ mi := &MessageInfo{
+ Desc: legacyLoadMessageDesc(t, name),
+ GoReflectType: t,
+ }
+
+ var hasMarshal, hasUnmarshal bool
+ v := reflect.Zero(t).Interface()
+ if _, hasMarshal = v.(legacyMarshaler); hasMarshal {
+ mi.methods.Marshal = legacyMarshal
+
+ // We have no way to tell whether the type's Marshal method
+ // supports deterministic serialization or not, but this
+ // preserves the v1 implementation's behavior of always
+ // calling Marshal methods when present.
+ mi.methods.Flags |= piface.SupportMarshalDeterministic
+ }
+ if _, hasUnmarshal = v.(legacyUnmarshaler); hasUnmarshal {
+ mi.methods.Unmarshal = legacyUnmarshal
+ }
+ if _, hasMerge := v.(legacyMerger); hasMerge || (hasMarshal && hasUnmarshal) {
+ mi.methods.Merge = legacyMerge
+ }
+
+ if mi, ok := legacyMessageTypeCache.LoadOrStore(t, mi); ok {
+ return mi.(*MessageInfo)
+ }
+ return mi
+}
+
+var legacyMessageDescCache sync.Map // map[reflect.Type]protoreflect.MessageDescriptor
+
+// LegacyLoadMessageDesc returns a MessageDescriptor derived from the Go type,
+// which should be a *struct kind and must not implement the v2 API already.
+//
+// This is exported for testing purposes.
+func LegacyLoadMessageDesc(t reflect.Type) pref.MessageDescriptor {
+ return legacyLoadMessageDesc(t, "")
+}
+func legacyLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescriptor {
+ // Fast-path: check if a MessageDescriptor is cached for this concrete type.
+ if mi, ok := legacyMessageDescCache.Load(t); ok {
+ return mi.(pref.MessageDescriptor)
+ }
+
+ // Slow-path: initialize MessageDescriptor from the raw descriptor.
+ mv := reflect.Zero(t).Interface()
+ if _, ok := mv.(pref.ProtoMessage); ok {
+ panic(fmt.Sprintf("%v already implements proto.Message", t))
+ }
+ mdV1, ok := mv.(messageV1)
+ if !ok {
+ return aberrantLoadMessageDesc(t, name)
+ }
+
+ // If this is a dynamic message type where there isn't a 1-1 mapping between
+ // Go and protobuf types, calling the Descriptor method on the zero value of
+ // the message type isn't likely to work. If it panics, swallow the panic and
+ // continue as if the Descriptor method wasn't present.
+ b, idxs := func() ([]byte, []int) {
+ defer func() {
+ recover()
+ }()
+ return mdV1.Descriptor()
+ }()
+ if b == nil {
+ return aberrantLoadMessageDesc(t, name)
+ }
+
+ // If the Go type has no fields, then this might be a proto3 empty message
+ // from before the size cache was added. If there are any fields, check to
+ // see that at least one of them looks like something we generated.
+ if t.Elem().Kind() == reflect.Struct {
+ if nfield := t.Elem().NumField(); nfield > 0 {
+ hasProtoField := false
+ for i := 0; i < nfield; i++ {
+ f := t.Elem().Field(i)
+ if f.Tag.Get("protobuf") != "" || f.Tag.Get("protobuf_oneof") != "" || strings.HasPrefix(f.Name, "XXX_") {
+ hasProtoField = true
+ break
+ }
+ }
+ if !hasProtoField {
+ return aberrantLoadMessageDesc(t, name)
+ }
+ }
+ }
+
+ md := legacyLoadFileDesc(b).Messages().Get(idxs[0])
+ for _, i := range idxs[1:] {
+ md = md.Messages().Get(i)
+ }
+ if name != "" && md.FullName() != name {
+ panic(fmt.Sprintf("mismatching message name: got %v, want %v", md.FullName(), name))
+ }
+ if md, ok := legacyMessageDescCache.LoadOrStore(t, md); ok {
+ return md.(protoreflect.MessageDescriptor)
+ }
+ return md
+}
+
+var (
+ aberrantMessageDescLock sync.Mutex
+ aberrantMessageDescCache map[reflect.Type]protoreflect.MessageDescriptor
+)
+
+// aberrantLoadMessageDesc returns a MessageDescriptor derived from the Go type,
+// which must not implement protoreflect.ProtoMessage or messageV1.
+//
+// This is a best-effort derivation of the message descriptor using the protobuf
+// tags on the struct fields.
+func aberrantLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescriptor {
+ aberrantMessageDescLock.Lock()
+ defer aberrantMessageDescLock.Unlock()
+ if aberrantMessageDescCache == nil {
+ aberrantMessageDescCache = make(map[reflect.Type]protoreflect.MessageDescriptor)
+ }
+ return aberrantLoadMessageDescReentrant(t, name)
+}
+func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.MessageDescriptor {
+	// Fast-path: check if a MessageDescriptor is cached for this concrete type.
+ if md, ok := aberrantMessageDescCache[t]; ok {
+ return md
+ }
+
+ // Slow-path: construct a descriptor from the Go struct type (best-effort).
+ // Cache the MessageDescriptor early on so that we can resolve internal
+ // cyclic references.
+ md := &filedesc.Message{L2: new(filedesc.MessageL2)}
+ md.L0.FullName = aberrantDeriveMessageName(t, name)
+ md.L0.ParentFile = filedesc.SurrogateProto2
+ aberrantMessageDescCache[t] = md
+
+ if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct {
+ return md
+ }
+
+ // Try to determine if the message is using proto3 by checking scalars.
+ for i := 0; i < t.Elem().NumField(); i++ {
+ f := t.Elem().Field(i)
+ if tag := f.Tag.Get("protobuf"); tag != "" {
+ switch f.Type.Kind() {
+ case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+ md.L0.ParentFile = filedesc.SurrogateProto3
+ }
+ for _, s := range strings.Split(tag, ",") {
+ if s == "proto3" {
+ md.L0.ParentFile = filedesc.SurrogateProto3
+ }
+ }
+ }
+ }
+
+ // Obtain a list of oneof wrapper types.
+ var oneofWrappers []reflect.Type
+ for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} {
+ if fn, ok := t.MethodByName(method); ok {
+ for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) {
+ if vs, ok := v.Interface().([]interface{}); ok {
+ for _, v := range vs {
+ oneofWrappers = append(oneofWrappers, reflect.TypeOf(v))
+ }
+ }
+ }
+ }
+ }
+
+ // Obtain a list of the extension ranges.
+ if fn, ok := t.MethodByName("ExtensionRangeArray"); ok {
+ vs := fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0]
+ for i := 0; i < vs.Len(); i++ {
+ v := vs.Index(i)
+ md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, [2]pref.FieldNumber{
+ pref.FieldNumber(v.FieldByName("Start").Int()),
+ pref.FieldNumber(v.FieldByName("End").Int() + 1),
+ })
+ md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, nil)
+ }
+ }
+
+ // Derive the message fields by inspecting the struct fields.
+ for i := 0; i < t.Elem().NumField(); i++ {
+ f := t.Elem().Field(i)
+ if tag := f.Tag.Get("protobuf"); tag != "" {
+ tagKey := f.Tag.Get("protobuf_key")
+ tagVal := f.Tag.Get("protobuf_val")
+ aberrantAppendField(md, f.Type, tag, tagKey, tagVal)
+ }
+ if tag := f.Tag.Get("protobuf_oneof"); tag != "" {
+ n := len(md.L2.Oneofs.List)
+ md.L2.Oneofs.List = append(md.L2.Oneofs.List, filedesc.Oneof{})
+ od := &md.L2.Oneofs.List[n]
+ od.L0.FullName = md.FullName().Append(pref.Name(tag))
+ od.L0.ParentFile = md.L0.ParentFile
+ od.L0.Parent = md
+ od.L0.Index = n
+
+ for _, t := range oneofWrappers {
+ if t.Implements(f.Type) {
+ f := t.Elem().Field(0)
+ if tag := f.Tag.Get("protobuf"); tag != "" {
+ aberrantAppendField(md, f.Type, tag, "", "")
+ fd := &md.L2.Fields.List[len(md.L2.Fields.List)-1]
+ fd.L1.ContainingOneof = od
+ od.L1.Fields.List = append(od.L1.Fields.List, fd)
+ }
+ }
+ }
+ }
+ }
+
+ return md
+}
+
+func aberrantDeriveMessageName(t reflect.Type, name pref.FullName) pref.FullName {
+ if name.IsValid() {
+ return name
+ }
+ func() {
+ defer func() { recover() }() // swallow possible nil panics
+ if m, ok := reflect.Zero(t).Interface().(interface{ XXX_MessageName() string }); ok {
+ name = pref.FullName(m.XXX_MessageName())
+ }
+ }()
+ if name.IsValid() {
+ return name
+ }
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ return AberrantDeriveFullName(t)
+}
+
+func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, tagVal string) {
+ t := goType
+ isOptional := t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct
+ isRepeated := t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+ if isOptional || isRepeated {
+ t = t.Elem()
+ }
+ fd := ptag.Unmarshal(tag, t, placeholderEnumValues{}).(*filedesc.Field)
+
+ // Append field descriptor to the message.
+ n := len(md.L2.Fields.List)
+ md.L2.Fields.List = append(md.L2.Fields.List, *fd)
+ fd = &md.L2.Fields.List[n]
+ fd.L0.FullName = md.FullName().Append(fd.Name())
+ fd.L0.ParentFile = md.L0.ParentFile
+ fd.L0.Parent = md
+ fd.L0.Index = n
+
+ if fd.L1.IsWeak || fd.L1.HasPacked {
+ fd.L1.Options = func() pref.ProtoMessage {
+ opts := descopts.Field.ProtoReflect().New()
+ if fd.L1.IsWeak {
+ opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true))
+ }
+ if fd.L1.HasPacked {
+ opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.IsPacked))
+ }
+ return opts.Interface()
+ }
+ }
+
+ // Populate Enum and Message.
+ if fd.Enum() == nil && fd.Kind() == pref.EnumKind {
+ switch v := reflect.Zero(t).Interface().(type) {
+ case pref.Enum:
+ fd.L1.Enum = v.Descriptor()
+ default:
+ fd.L1.Enum = LegacyLoadEnumDesc(t)
+ }
+ }
+ if fd.Message() == nil && (fd.Kind() == pref.MessageKind || fd.Kind() == pref.GroupKind) {
+ switch v := reflect.Zero(t).Interface().(type) {
+ case pref.ProtoMessage:
+ fd.L1.Message = v.ProtoReflect().Descriptor()
+ case messageV1:
+ fd.L1.Message = LegacyLoadMessageDesc(t)
+ default:
+ if t.Kind() == reflect.Map {
+ n := len(md.L1.Messages.List)
+ md.L1.Messages.List = append(md.L1.Messages.List, filedesc.Message{L2: new(filedesc.MessageL2)})
+ md2 := &md.L1.Messages.List[n]
+ md2.L0.FullName = md.FullName().Append(pref.Name(strs.MapEntryName(string(fd.Name()))))
+ md2.L0.ParentFile = md.L0.ParentFile
+ md2.L0.Parent = md
+ md2.L0.Index = n
+
+ md2.L1.IsMapEntry = true
+ md2.L2.Options = func() pref.ProtoMessage {
+ opts := descopts.Message.ProtoReflect().New()
+ opts.Set(opts.Descriptor().Fields().ByName("map_entry"), protoreflect.ValueOfBool(true))
+ return opts.Interface()
+ }
+
+ aberrantAppendField(md2, t.Key(), tagKey, "", "")
+ aberrantAppendField(md2, t.Elem(), tagVal, "", "")
+
+ fd.L1.Message = md2
+ break
+ }
+ fd.L1.Message = aberrantLoadMessageDescReentrant(t, "")
+ }
+ }
+}
+
+type placeholderEnumValues struct {
+ protoreflect.EnumValueDescriptors
+}
+
+func (placeholderEnumValues) ByNumber(n pref.EnumNumber) pref.EnumValueDescriptor {
+ return filedesc.PlaceholderEnumValue(pref.FullName(fmt.Sprintf("UNKNOWN_%d", n)))
+}
+
+// legacyMarshaler is the proto.Marshaler interface superseded by protoiface.Methoder.
+type legacyMarshaler interface {
+ Marshal() ([]byte, error)
+}
+
+// legacyUnmarshaler is the proto.Unmarshaler interface superseded by protoiface.Methoder.
+type legacyUnmarshaler interface {
+ Unmarshal([]byte) error
+}
+
+// legacyMerger is the proto.Merger interface superseded by protoiface.Methoder.
+type legacyMerger interface {
+ Merge(protoiface.MessageV1)
+}
+
+var aberrantProtoMethods = &piface.Methods{
+ Marshal: legacyMarshal,
+ Unmarshal: legacyUnmarshal,
+ Merge: legacyMerge,
+
+ // We have no way to tell whether the type's Marshal method
+ // supports deterministic serialization or not, but this
+ // preserves the v1 implementation's behavior of always
+ // calling Marshal methods when present.
+ Flags: piface.SupportMarshalDeterministic,
+}
+
+func legacyMarshal(in piface.MarshalInput) (piface.MarshalOutput, error) {
+ v := in.Message.(unwrapper).protoUnwrap()
+ marshaler, ok := v.(legacyMarshaler)
+ if !ok {
+ return piface.MarshalOutput{}, errors.New("%T does not implement Marshal", v)
+ }
+ out, err := marshaler.Marshal()
+ if in.Buf != nil {
+ out = append(in.Buf, out...)
+ }
+ return piface.MarshalOutput{
+ Buf: out,
+ }, err
+}
+
+func legacyUnmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) {
+ v := in.Message.(unwrapper).protoUnwrap()
+ unmarshaler, ok := v.(legacyUnmarshaler)
+ if !ok {
+ return piface.UnmarshalOutput{}, errors.New("%T does not implement Unmarshal", v)
+ }
+ return piface.UnmarshalOutput{}, unmarshaler.Unmarshal(in.Buf)
+}
+
+func legacyMerge(in piface.MergeInput) piface.MergeOutput {
+ // Check whether this supports the legacy merger.
+ dstv := in.Destination.(unwrapper).protoUnwrap()
+ merger, ok := dstv.(legacyMerger)
+ if ok {
+ merger.Merge(Export{}.ProtoMessageV1Of(in.Source))
+ return piface.MergeOutput{Flags: piface.MergeComplete}
+ }
+
+ // If legacy merger is unavailable, implement merge in terms of
+ // a marshal and unmarshal operation.
+ srcv := in.Source.(unwrapper).protoUnwrap()
+ marshaler, ok := srcv.(legacyMarshaler)
+ if !ok {
+ return piface.MergeOutput{}
+ }
+ dstv = in.Destination.(unwrapper).protoUnwrap()
+ unmarshaler, ok := dstv.(legacyUnmarshaler)
+ if !ok {
+ return piface.MergeOutput{}
+ }
+ b, err := marshaler.Marshal()
+ if err != nil {
+ return piface.MergeOutput{}
+ }
+ err = unmarshaler.Unmarshal(b)
+ if err != nil {
+ return piface.MergeOutput{}
+ }
+ return piface.MergeOutput{Flags: piface.MergeComplete}
+}
+
+// aberrantMessageType implements MessageType for all types other than pointer-to-struct.
+type aberrantMessageType struct {
+ t reflect.Type
+}
+
+func (mt aberrantMessageType) New() pref.Message {
+ if mt.t.Kind() == reflect.Ptr {
+ return aberrantMessage{reflect.New(mt.t.Elem())}
+ }
+ return aberrantMessage{reflect.Zero(mt.t)}
+}
+func (mt aberrantMessageType) Zero() pref.Message {
+ return aberrantMessage{reflect.Zero(mt.t)}
+}
+func (mt aberrantMessageType) GoType() reflect.Type {
+ return mt.t
+}
+func (mt aberrantMessageType) Descriptor() pref.MessageDescriptor {
+ return LegacyLoadMessageDesc(mt.t)
+}
+
+// aberrantMessage implements Message for all types other than pointer-to-struct.
+//
+// When the underlying type implements legacyMarshaler or legacyUnmarshaler,
+// the aberrant Message can be marshaled or unmarshaled. Otherwise, there is
+// not much that can be done with values of this type.
+type aberrantMessage struct {
+ v reflect.Value
+}
+
+// Reset implements the v1 proto.Message.Reset method.
+func (m aberrantMessage) Reset() {
+ if mr, ok := m.v.Interface().(interface{ Reset() }); ok {
+ mr.Reset()
+ return
+ }
+ if m.v.Kind() == reflect.Ptr && !m.v.IsNil() {
+ m.v.Elem().Set(reflect.Zero(m.v.Type().Elem()))
+ }
+}
+
+func (m aberrantMessage) ProtoReflect() pref.Message {
+ return m
+}
+
+func (m aberrantMessage) Descriptor() pref.MessageDescriptor {
+ return LegacyLoadMessageDesc(m.v.Type())
+}
+func (m aberrantMessage) Type() pref.MessageType {
+ return aberrantMessageType{m.v.Type()}
+}
+func (m aberrantMessage) New() pref.Message {
+ if m.v.Type().Kind() == reflect.Ptr {
+ return aberrantMessage{reflect.New(m.v.Type().Elem())}
+ }
+ return aberrantMessage{reflect.Zero(m.v.Type())}
+}
+func (m aberrantMessage) Interface() pref.ProtoMessage {
+ return m
+}
+func (m aberrantMessage) Range(f func(pref.FieldDescriptor, pref.Value) bool) {
+ return
+}
+func (m aberrantMessage) Has(pref.FieldDescriptor) bool {
+ return false
+}
+func (m aberrantMessage) Clear(pref.FieldDescriptor) {
+ panic("invalid Message.Clear on " + string(m.Descriptor().FullName()))
+}
+func (m aberrantMessage) Get(fd pref.FieldDescriptor) pref.Value {
+ if fd.Default().IsValid() {
+ return fd.Default()
+ }
+ panic("invalid Message.Get on " + string(m.Descriptor().FullName()))
+}
+func (m aberrantMessage) Set(pref.FieldDescriptor, pref.Value) {
+ panic("invalid Message.Set on " + string(m.Descriptor().FullName()))
+}
+func (m aberrantMessage) Mutable(pref.FieldDescriptor) pref.Value {
+ panic("invalid Message.Mutable on " + string(m.Descriptor().FullName()))
+}
+func (m aberrantMessage) NewField(pref.FieldDescriptor) pref.Value {
+ panic("invalid Message.NewField on " + string(m.Descriptor().FullName()))
+}
+func (m aberrantMessage) WhichOneof(pref.OneofDescriptor) pref.FieldDescriptor {
+ panic("invalid Message.WhichOneof descriptor on " + string(m.Descriptor().FullName()))
+}
+func (m aberrantMessage) GetUnknown() pref.RawFields {
+ return nil
+}
+func (m aberrantMessage) SetUnknown(pref.RawFields) {
+ // SetUnknown discards its input on messages which don't support unknown field storage.
+}
+func (m aberrantMessage) IsValid() bool {
+ if m.v.Kind() == reflect.Ptr {
+ return !m.v.IsNil()
+ }
+ return false
+}
+func (m aberrantMessage) ProtoMethods() *piface.Methods {
+ return aberrantProtoMethods
+}
+func (m aberrantMessage) protoUnwrap() interface{} {
+ return m.v.Interface()
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go
new file mode 100644
index 0000000..c65bbc0
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/merge.go
@@ -0,0 +1,176 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+
+ "google.golang.org/protobuf/proto"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+type mergeOptions struct{}
+
+func (o mergeOptions) Merge(dst, src proto.Message) {
+ proto.Merge(dst, src)
+}
+
+// merge is protoreflect.Methods.Merge.
+func (mi *MessageInfo) merge(in piface.MergeInput) piface.MergeOutput {
+ dp, ok := mi.getPointer(in.Destination)
+ if !ok {
+ return piface.MergeOutput{}
+ }
+ sp, ok := mi.getPointer(in.Source)
+ if !ok {
+ return piface.MergeOutput{}
+ }
+ mi.mergePointer(dp, sp, mergeOptions{})
+ return piface.MergeOutput{Flags: piface.MergeComplete}
+}
+
+func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) {
+ mi.init()
+ if dst.IsNil() {
+ panic(fmt.Sprintf("invalid value: merging into nil message"))
+ }
+ if src.IsNil() {
+ return
+ }
+ for _, f := range mi.orderedCoderFields {
+ if f.funcs.merge == nil {
+ continue
+ }
+ sfptr := src.Apply(f.offset)
+ if f.isPointer && sfptr.Elem().IsNil() {
+ continue
+ }
+ f.funcs.merge(dst.Apply(f.offset), sfptr, f, opts)
+ }
+ if mi.extensionOffset.IsValid() {
+ sext := src.Apply(mi.extensionOffset).Extensions()
+ dext := dst.Apply(mi.extensionOffset).Extensions()
+ if *dext == nil {
+ *dext = make(map[int32]ExtensionField)
+ }
+ for num, sx := range *sext {
+ xt := sx.Type()
+ xi := getExtensionFieldInfo(xt)
+ if xi.funcs.merge == nil {
+ continue
+ }
+ dx := (*dext)[num]
+ var dv pref.Value
+ if dx.Type() == sx.Type() {
+ dv = dx.Value()
+ }
+ if !dv.IsValid() && xi.unmarshalNeedsValue {
+ dv = xt.New()
+ }
+ dv = xi.funcs.merge(dv, sx.Value(), opts)
+ dx.Set(sx.Type(), dv)
+ (*dext)[num] = dx
+ }
+ }
+ if mi.unknownOffset.IsValid() {
+ su := mi.getUnknownBytes(src)
+ if su != nil && len(*su) > 0 {
+ du := mi.mutableUnknownBytes(dst)
+ *du = append(*du, *su...)
+ }
+ }
+}
+
+func mergeScalarValue(dst, src pref.Value, opts mergeOptions) pref.Value {
+ return src
+}
+
+func mergeBytesValue(dst, src pref.Value, opts mergeOptions) pref.Value {
+ return pref.ValueOfBytes(append(emptyBuf[:], src.Bytes()...))
+}
+
+func mergeListValue(dst, src pref.Value, opts mergeOptions) pref.Value {
+ dstl := dst.List()
+ srcl := src.List()
+ for i, llen := 0, srcl.Len(); i < llen; i++ {
+ dstl.Append(srcl.Get(i))
+ }
+ return dst
+}
+
+func mergeBytesListValue(dst, src pref.Value, opts mergeOptions) pref.Value {
+ dstl := dst.List()
+ srcl := src.List()
+ for i, llen := 0, srcl.Len(); i < llen; i++ {
+ sb := srcl.Get(i).Bytes()
+ db := append(emptyBuf[:], sb...)
+ dstl.Append(pref.ValueOfBytes(db))
+ }
+ return dst
+}
+
+func mergeMessageListValue(dst, src pref.Value, opts mergeOptions) pref.Value {
+ dstl := dst.List()
+ srcl := src.List()
+ for i, llen := 0, srcl.Len(); i < llen; i++ {
+ sm := srcl.Get(i).Message()
+ dm := proto.Clone(sm.Interface()).ProtoReflect()
+ dstl.Append(pref.ValueOfMessage(dm))
+ }
+ return dst
+}
+
+func mergeMessageValue(dst, src pref.Value, opts mergeOptions) pref.Value {
+ opts.Merge(dst.Message().Interface(), src.Message().Interface())
+ return dst
+}
+
+func mergeMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
+ if f.mi != nil {
+ if dst.Elem().IsNil() {
+ dst.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
+ }
+ f.mi.mergePointer(dst.Elem(), src.Elem(), opts)
+ } else {
+ dm := dst.AsValueOf(f.ft).Elem()
+ sm := src.AsValueOf(f.ft).Elem()
+ if dm.IsNil() {
+ dm.Set(reflect.New(f.ft.Elem()))
+ }
+ opts.Merge(asMessage(dm), asMessage(sm))
+ }
+}
+
+func mergeMessageSlice(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
+ for _, sp := range src.PointerSlice() {
+ dm := reflect.New(f.ft.Elem().Elem())
+ if f.mi != nil {
+ f.mi.mergePointer(pointerOfValue(dm), sp, opts)
+ } else {
+ opts.Merge(asMessage(dm), asMessage(sp.AsValueOf(f.ft.Elem().Elem())))
+ }
+ dst.AppendPointerSlice(pointerOfValue(dm))
+ }
+}
+
+func mergeBytes(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ *dst.Bytes() = append(emptyBuf[:], *src.Bytes()...)
+}
+
+func mergeBytesNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ v := *src.Bytes()
+ if len(v) > 0 {
+ *dst.Bytes() = append(emptyBuf[:], v...)
+ }
+}
+
+func mergeBytesSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ ds := dst.BytesSlice()
+ for _, v := range *src.BytesSlice() {
+ *ds = append(*ds, append(emptyBuf[:], v...))
+ }
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge_gen.go b/vendor/google.golang.org/protobuf/internal/impl/merge_gen.go
new file mode 100644
index 0000000..8816c27
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/merge_gen.go
@@ -0,0 +1,209 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-types. DO NOT EDIT.
+
+package impl
+
+import ()
+
+func mergeBool(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ *dst.Bool() = *src.Bool()
+}
+
+func mergeBoolNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ v := *src.Bool()
+ if v != false {
+ *dst.Bool() = v
+ }
+}
+
+func mergeBoolPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ p := *src.BoolPtr()
+ if p != nil {
+ v := *p
+ *dst.BoolPtr() = &v
+ }
+}
+
+func mergeBoolSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ ds := dst.BoolSlice()
+ ss := src.BoolSlice()
+ *ds = append(*ds, *ss...)
+}
+
+func mergeInt32(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ *dst.Int32() = *src.Int32()
+}
+
+func mergeInt32NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ v := *src.Int32()
+ if v != 0 {
+ *dst.Int32() = v
+ }
+}
+
+func mergeInt32Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ p := *src.Int32Ptr()
+ if p != nil {
+ v := *p
+ *dst.Int32Ptr() = &v
+ }
+}
+
+func mergeInt32Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ ds := dst.Int32Slice()
+ ss := src.Int32Slice()
+ *ds = append(*ds, *ss...)
+}
+
+func mergeUint32(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ *dst.Uint32() = *src.Uint32()
+}
+
+func mergeUint32NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ v := *src.Uint32()
+ if v != 0 {
+ *dst.Uint32() = v
+ }
+}
+
+func mergeUint32Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ p := *src.Uint32Ptr()
+ if p != nil {
+ v := *p
+ *dst.Uint32Ptr() = &v
+ }
+}
+
+func mergeUint32Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ ds := dst.Uint32Slice()
+ ss := src.Uint32Slice()
+ *ds = append(*ds, *ss...)
+}
+
+func mergeInt64(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ *dst.Int64() = *src.Int64()
+}
+
+func mergeInt64NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ v := *src.Int64()
+ if v != 0 {
+ *dst.Int64() = v
+ }
+}
+
+func mergeInt64Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ p := *src.Int64Ptr()
+ if p != nil {
+ v := *p
+ *dst.Int64Ptr() = &v
+ }
+}
+
+func mergeInt64Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ ds := dst.Int64Slice()
+ ss := src.Int64Slice()
+ *ds = append(*ds, *ss...)
+}
+
+func mergeUint64(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ *dst.Uint64() = *src.Uint64()
+}
+
+func mergeUint64NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ v := *src.Uint64()
+ if v != 0 {
+ *dst.Uint64() = v
+ }
+}
+
+func mergeUint64Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ p := *src.Uint64Ptr()
+ if p != nil {
+ v := *p
+ *dst.Uint64Ptr() = &v
+ }
+}
+
+func mergeUint64Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ ds := dst.Uint64Slice()
+ ss := src.Uint64Slice()
+ *ds = append(*ds, *ss...)
+}
+
+func mergeFloat32(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ *dst.Float32() = *src.Float32()
+}
+
+func mergeFloat32NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ v := *src.Float32()
+ if v != 0 {
+ *dst.Float32() = v
+ }
+}
+
+func mergeFloat32Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ p := *src.Float32Ptr()
+ if p != nil {
+ v := *p
+ *dst.Float32Ptr() = &v
+ }
+}
+
+func mergeFloat32Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ ds := dst.Float32Slice()
+ ss := src.Float32Slice()
+ *ds = append(*ds, *ss...)
+}
+
+func mergeFloat64(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ *dst.Float64() = *src.Float64()
+}
+
+func mergeFloat64NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ v := *src.Float64()
+ if v != 0 {
+ *dst.Float64() = v
+ }
+}
+
+func mergeFloat64Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ p := *src.Float64Ptr()
+ if p != nil {
+ v := *p
+ *dst.Float64Ptr() = &v
+ }
+}
+
+func mergeFloat64Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ ds := dst.Float64Slice()
+ ss := src.Float64Slice()
+ *ds = append(*ds, *ss...)
+}
+
+func mergeString(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ *dst.String() = *src.String()
+}
+
+func mergeStringNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ v := *src.String()
+ if v != "" {
+ *dst.String() = v
+ }
+}
+
+func mergeStringPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ p := *src.StringPtr()
+ if p != nil {
+ v := *p
+ *dst.StringPtr() = &v
+ }
+}
+
+func mergeStringSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ ds := dst.StringSlice()
+ ss := src.StringSlice()
+ *ds = append(*ds, *ss...)
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go
new file mode 100644
index 0000000..a104e28
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/message.go
@@ -0,0 +1,276 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ "google.golang.org/protobuf/internal/genid"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ preg "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+// MessageInfo provides protobuf related functionality for a given Go type
+// that represents a message. A given instance of MessageInfo is tied to
+// exactly one Go type, which must be a pointer to a struct type.
+//
+// The exported fields must be populated before any methods are called
+// and cannot be mutated after set.
+type MessageInfo struct {
+ // GoReflectType is the underlying message Go type and must be populated.
+ GoReflectType reflect.Type // pointer to struct
+
+ // Desc is the underlying message descriptor type and must be populated.
+ Desc pref.MessageDescriptor
+
+ // Exporter must be provided in a purego environment in order to provide
+ // access to unexported fields.
+ Exporter exporter
+
+ // OneofWrappers is list of pointers to oneof wrapper struct types.
+ OneofWrappers []interface{}
+
+ initMu sync.Mutex // protects all unexported fields
+ initDone uint32
+
+ reflectMessageInfo // for reflection implementation
+ coderMessageInfo // for fast-path method implementations
+}
+
+// exporter is a function that returns a reference to the ith field of v,
+// where v is a pointer to a struct. It returns nil if it does not support
+// exporting the requested field (e.g., already exported).
+type exporter func(v interface{}, i int) interface{}
+
+// getMessageInfo returns the MessageInfo for any message type that
+// is generated by our implementation of protoc-gen-go (for v2 and on).
+// If it is unable to obtain a MessageInfo, it returns nil.
+func getMessageInfo(mt reflect.Type) *MessageInfo {
+ m, ok := reflect.Zero(mt).Interface().(pref.ProtoMessage)
+ if !ok {
+ return nil
+ }
+ mr, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *MessageInfo })
+ if !ok {
+ return nil
+ }
+ return mr.ProtoMessageInfo()
+}
+
+func (mi *MessageInfo) init() {
+ // This function is called in the hot path. Inline the sync.Once logic,
+ // since allocating a closure for Once.Do is expensive.
+ // Keep init small to ensure that it can be inlined.
+ if atomic.LoadUint32(&mi.initDone) == 0 {
+ mi.initOnce()
+ }
+}
+
+func (mi *MessageInfo) initOnce() {
+ mi.initMu.Lock()
+ defer mi.initMu.Unlock()
+ if mi.initDone == 1 {
+ return
+ }
+
+ t := mi.GoReflectType
+ if t.Kind() != reflect.Ptr && t.Elem().Kind() != reflect.Struct {
+ panic(fmt.Sprintf("got %v, want *struct kind", t))
+ }
+ t = t.Elem()
+
+ si := mi.makeStructInfo(t)
+ mi.makeReflectFuncs(t, si)
+ mi.makeCoderMethods(t, si)
+
+ atomic.StoreUint32(&mi.initDone, 1)
+}
+
+// getPointer returns the pointer for a message, which should be of
+// the type of the MessageInfo. If the message is of a different type,
+// it returns ok==false.
+func (mi *MessageInfo) getPointer(m pref.Message) (p pointer, ok bool) {
+ switch m := m.(type) {
+ case *messageState:
+ return m.pointer(), m.messageInfo() == mi
+ case *messageReflectWrapper:
+ return m.pointer(), m.messageInfo() == mi
+ }
+ return pointer{}, false
+}
+
+type (
+ SizeCache = int32
+ WeakFields = map[int32]protoreflect.ProtoMessage
+ UnknownFields = unknownFieldsA // TODO: switch to unknownFieldsB
+ unknownFieldsA = []byte
+ unknownFieldsB = *[]byte
+ ExtensionFields = map[int32]ExtensionField
+)
+
+var (
+ sizecacheType = reflect.TypeOf(SizeCache(0))
+ weakFieldsType = reflect.TypeOf(WeakFields(nil))
+ unknownFieldsAType = reflect.TypeOf(unknownFieldsA(nil))
+ unknownFieldsBType = reflect.TypeOf(unknownFieldsB(nil))
+ extensionFieldsType = reflect.TypeOf(ExtensionFields(nil))
+)
+
+type structInfo struct {
+ sizecacheOffset offset
+ sizecacheType reflect.Type
+ weakOffset offset
+ weakType reflect.Type
+ unknownOffset offset
+ unknownType reflect.Type
+ extensionOffset offset
+ extensionType reflect.Type
+
+ fieldsByNumber map[pref.FieldNumber]reflect.StructField
+ oneofsByName map[pref.Name]reflect.StructField
+ oneofWrappersByType map[reflect.Type]pref.FieldNumber
+ oneofWrappersByNumber map[pref.FieldNumber]reflect.Type
+}
+
+func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo {
+ si := structInfo{
+ sizecacheOffset: invalidOffset,
+ weakOffset: invalidOffset,
+ unknownOffset: invalidOffset,
+ extensionOffset: invalidOffset,
+
+ fieldsByNumber: map[pref.FieldNumber]reflect.StructField{},
+ oneofsByName: map[pref.Name]reflect.StructField{},
+ oneofWrappersByType: map[reflect.Type]pref.FieldNumber{},
+ oneofWrappersByNumber: map[pref.FieldNumber]reflect.Type{},
+ }
+
+fieldLoop:
+ for i := 0; i < t.NumField(); i++ {
+ switch f := t.Field(i); f.Name {
+ case genid.SizeCache_goname, genid.SizeCacheA_goname:
+ if f.Type == sizecacheType {
+ si.sizecacheOffset = offsetOf(f, mi.Exporter)
+ si.sizecacheType = f.Type
+ }
+ case genid.WeakFields_goname, genid.WeakFieldsA_goname:
+ if f.Type == weakFieldsType {
+ si.weakOffset = offsetOf(f, mi.Exporter)
+ si.weakType = f.Type
+ }
+ case genid.UnknownFields_goname, genid.UnknownFieldsA_goname:
+ if f.Type == unknownFieldsAType || f.Type == unknownFieldsBType {
+ si.unknownOffset = offsetOf(f, mi.Exporter)
+ si.unknownType = f.Type
+ }
+ case genid.ExtensionFields_goname, genid.ExtensionFieldsA_goname, genid.ExtensionFieldsB_goname:
+ if f.Type == extensionFieldsType {
+ si.extensionOffset = offsetOf(f, mi.Exporter)
+ si.extensionType = f.Type
+ }
+ default:
+ for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") {
+ if len(s) > 0 && strings.Trim(s, "0123456789") == "" {
+ n, _ := strconv.ParseUint(s, 10, 64)
+ si.fieldsByNumber[pref.FieldNumber(n)] = f
+ continue fieldLoop
+ }
+ }
+ if s := f.Tag.Get("protobuf_oneof"); len(s) > 0 {
+ si.oneofsByName[pref.Name(s)] = f
+ continue fieldLoop
+ }
+ }
+ }
+
+ // Derive a mapping of oneof wrappers to fields.
+ oneofWrappers := mi.OneofWrappers
+ for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} {
+ if fn, ok := reflect.PtrTo(t).MethodByName(method); ok {
+ for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) {
+ if vs, ok := v.Interface().([]interface{}); ok {
+ oneofWrappers = vs
+ }
+ }
+ }
+ }
+ for _, v := range oneofWrappers {
+ tf := reflect.TypeOf(v).Elem()
+ f := tf.Field(0)
+ for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") {
+ if len(s) > 0 && strings.Trim(s, "0123456789") == "" {
+ n, _ := strconv.ParseUint(s, 10, 64)
+ si.oneofWrappersByType[tf] = pref.FieldNumber(n)
+ si.oneofWrappersByNumber[pref.FieldNumber(n)] = tf
+ break
+ }
+ }
+ }
+
+ return si
+}
+
+func (mi *MessageInfo) New() protoreflect.Message {
+ return mi.MessageOf(reflect.New(mi.GoReflectType.Elem()).Interface())
+}
+func (mi *MessageInfo) Zero() protoreflect.Message {
+ return mi.MessageOf(reflect.Zero(mi.GoReflectType).Interface())
+}
+func (mi *MessageInfo) Descriptor() protoreflect.MessageDescriptor {
+ return mi.Desc
+}
+func (mi *MessageInfo) Enum(i int) protoreflect.EnumType {
+ mi.init()
+ fd := mi.Desc.Fields().Get(i)
+ return Export{}.EnumTypeOf(mi.fieldTypes[fd.Number()])
+}
+func (mi *MessageInfo) Message(i int) protoreflect.MessageType {
+ mi.init()
+ fd := mi.Desc.Fields().Get(i)
+ switch {
+ case fd.IsWeak():
+ mt, _ := preg.GlobalTypes.FindMessageByName(fd.Message().FullName())
+ return mt
+ case fd.IsMap():
+ return mapEntryType{fd.Message(), mi.fieldTypes[fd.Number()]}
+ default:
+ return Export{}.MessageTypeOf(mi.fieldTypes[fd.Number()])
+ }
+}
+
+type mapEntryType struct {
+ desc protoreflect.MessageDescriptor
+ valType interface{} // zero value of enum or message type
+}
+
+func (mt mapEntryType) New() protoreflect.Message {
+ return nil
+}
+func (mt mapEntryType) Zero() protoreflect.Message {
+ return nil
+}
+func (mt mapEntryType) Descriptor() protoreflect.MessageDescriptor {
+ return mt.desc
+}
+func (mt mapEntryType) Enum(i int) protoreflect.EnumType {
+ fd := mt.desc.Fields().Get(i)
+ if fd.Enum() == nil {
+ return nil
+ }
+ return Export{}.EnumTypeOf(mt.valType)
+}
+func (mt mapEntryType) Message(i int) protoreflect.MessageType {
+ fd := mt.desc.Fields().Get(i)
+ if fd.Message() == nil {
+ return nil
+ }
+ return Export{}.MessageTypeOf(mt.valType)
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
new file mode 100644
index 0000000..9488b72
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
@@ -0,0 +1,465 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+
+ "google.golang.org/protobuf/internal/detrand"
+ "google.golang.org/protobuf/internal/pragma"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type reflectMessageInfo struct {
+ fields map[pref.FieldNumber]*fieldInfo
+ oneofs map[pref.Name]*oneofInfo
+
+ // fieldTypes contains the zero value of an enum or message field.
+ // For lists, it contains the element type.
+ // For maps, it contains the entry value type.
+ fieldTypes map[pref.FieldNumber]interface{}
+
+ // denseFields is a subset of fields where:
+ // 0 < fieldDesc.Number() < len(denseFields)
+ // It provides faster access to the fieldInfo, but may be incomplete.
+ denseFields []*fieldInfo
+
+ // rangeInfos is a list of all fields (not belonging to a oneof) and oneofs.
+ rangeInfos []interface{} // either *fieldInfo or *oneofInfo
+
+ getUnknown func(pointer) pref.RawFields
+ setUnknown func(pointer, pref.RawFields)
+ extensionMap func(pointer) *extensionMap
+
+ nilMessage atomicNilMessage
+}
+
+// makeReflectFuncs generates the set of functions to support reflection.
+func (mi *MessageInfo) makeReflectFuncs(t reflect.Type, si structInfo) {
+ mi.makeKnownFieldsFunc(si)
+ mi.makeUnknownFieldsFunc(t, si)
+ mi.makeExtensionFieldsFunc(t, si)
+ mi.makeFieldTypes(si)
+}
+
+// makeKnownFieldsFunc generates functions for operations that can be performed
+// on each protobuf message field. It takes in a reflect.Type representing the
+// Go struct and matches message fields with struct fields.
+//
+// This code assumes that the struct is well-formed and panics if there are
+// any discrepancies.
+func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) {
+ mi.fields = map[pref.FieldNumber]*fieldInfo{}
+ md := mi.Desc
+ fds := md.Fields()
+ for i := 0; i < fds.Len(); i++ {
+ fd := fds.Get(i)
+ fs := si.fieldsByNumber[fd.Number()]
+ isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic()
+ if isOneof {
+ fs = si.oneofsByName[fd.ContainingOneof().Name()]
+ }
+ var fi fieldInfo
+ switch {
+ case fs.Type == nil:
+ fi = fieldInfoForMissing(fd) // never occurs for officially generated message types
+ case isOneof:
+ fi = fieldInfoForOneof(fd, fs, mi.Exporter, si.oneofWrappersByNumber[fd.Number()])
+ case fd.IsMap():
+ fi = fieldInfoForMap(fd, fs, mi.Exporter)
+ case fd.IsList():
+ fi = fieldInfoForList(fd, fs, mi.Exporter)
+ case fd.IsWeak():
+ fi = fieldInfoForWeakMessage(fd, si.weakOffset)
+ case fd.Message() != nil:
+ fi = fieldInfoForMessage(fd, fs, mi.Exporter)
+ default:
+ fi = fieldInfoForScalar(fd, fs, mi.Exporter)
+ }
+ mi.fields[fd.Number()] = &fi
+ }
+
+ mi.oneofs = map[pref.Name]*oneofInfo{}
+ for i := 0; i < md.Oneofs().Len(); i++ {
+ od := md.Oneofs().Get(i)
+ mi.oneofs[od.Name()] = makeOneofInfo(od, si, mi.Exporter)
+ }
+
+ mi.denseFields = make([]*fieldInfo, fds.Len()*2)
+ for i := 0; i < fds.Len(); i++ {
+ if fd := fds.Get(i); int(fd.Number()) < len(mi.denseFields) {
+ mi.denseFields[fd.Number()] = mi.fields[fd.Number()]
+ }
+ }
+
+ for i := 0; i < fds.Len(); {
+ fd := fds.Get(i)
+ if od := fd.ContainingOneof(); od != nil && !od.IsSynthetic() {
+ mi.rangeInfos = append(mi.rangeInfos, mi.oneofs[od.Name()])
+ i += od.Fields().Len()
+ } else {
+ mi.rangeInfos = append(mi.rangeInfos, mi.fields[fd.Number()])
+ i++
+ }
+ }
+
+ // Introduce instability to iteration order, but keep it deterministic.
+ if len(mi.rangeInfos) > 1 && detrand.Bool() {
+ i := detrand.Intn(len(mi.rangeInfos) - 1)
+ mi.rangeInfos[i], mi.rangeInfos[i+1] = mi.rangeInfos[i+1], mi.rangeInfos[i]
+ }
+}
+
+func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) {
+ switch {
+ case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsAType:
+ // Handle as []byte.
+ mi.getUnknown = func(p pointer) pref.RawFields {
+ if p.IsNil() {
+ return nil
+ }
+ return *p.Apply(mi.unknownOffset).Bytes()
+ }
+ mi.setUnknown = func(p pointer, b pref.RawFields) {
+ if p.IsNil() {
+ panic("invalid SetUnknown on nil Message")
+ }
+ *p.Apply(mi.unknownOffset).Bytes() = b
+ }
+ case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsBType:
+ // Handle as *[]byte.
+ mi.getUnknown = func(p pointer) pref.RawFields {
+ if p.IsNil() {
+ return nil
+ }
+ bp := p.Apply(mi.unknownOffset).BytesPtr()
+ if *bp == nil {
+ return nil
+ }
+ return **bp
+ }
+ mi.setUnknown = func(p pointer, b pref.RawFields) {
+ if p.IsNil() {
+ panic("invalid SetUnknown on nil Message")
+ }
+ bp := p.Apply(mi.unknownOffset).BytesPtr()
+ if *bp == nil {
+ *bp = new([]byte)
+ }
+ **bp = b
+ }
+ default:
+ mi.getUnknown = func(pointer) pref.RawFields {
+ return nil
+ }
+ mi.setUnknown = func(p pointer, _ pref.RawFields) {
+ if p.IsNil() {
+ panic("invalid SetUnknown on nil Message")
+ }
+ }
+ }
+}
+
+func (mi *MessageInfo) makeExtensionFieldsFunc(t reflect.Type, si structInfo) {
+ if si.extensionOffset.IsValid() {
+ mi.extensionMap = func(p pointer) *extensionMap {
+ if p.IsNil() {
+ return (*extensionMap)(nil)
+ }
+ v := p.Apply(si.extensionOffset).AsValueOf(extensionFieldsType)
+ return (*extensionMap)(v.Interface().(*map[int32]ExtensionField))
+ }
+ } else {
+ mi.extensionMap = func(pointer) *extensionMap {
+ return (*extensionMap)(nil)
+ }
+ }
+}
+func (mi *MessageInfo) makeFieldTypes(si structInfo) {
+ md := mi.Desc
+ fds := md.Fields()
+ for i := 0; i < fds.Len(); i++ {
+ var ft reflect.Type
+ fd := fds.Get(i)
+ fs := si.fieldsByNumber[fd.Number()]
+ isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic()
+ if isOneof {
+ fs = si.oneofsByName[fd.ContainingOneof().Name()]
+ }
+ var isMessage bool
+ switch {
+ case fs.Type == nil:
+ continue // never occurs for officially generated message types
+ case isOneof:
+ if fd.Enum() != nil || fd.Message() != nil {
+ ft = si.oneofWrappersByNumber[fd.Number()].Field(0).Type
+ }
+ case fd.IsMap():
+ if fd.MapValue().Enum() != nil || fd.MapValue().Message() != nil {
+ ft = fs.Type.Elem()
+ }
+ isMessage = fd.MapValue().Message() != nil
+ case fd.IsList():
+ if fd.Enum() != nil || fd.Message() != nil {
+ ft = fs.Type.Elem()
+ }
+ isMessage = fd.Message() != nil
+ case fd.Enum() != nil:
+ ft = fs.Type
+ if fd.HasPresence() && ft.Kind() == reflect.Ptr {
+ ft = ft.Elem()
+ }
+ case fd.Message() != nil:
+ ft = fs.Type
+ if fd.IsWeak() {
+ ft = nil
+ }
+ isMessage = true
+ }
+ if isMessage && ft != nil && ft.Kind() != reflect.Ptr {
+ ft = reflect.PtrTo(ft) // never occurs for officially generated message types
+ }
+ if ft != nil {
+ if mi.fieldTypes == nil {
+ mi.fieldTypes = make(map[pref.FieldNumber]interface{})
+ }
+ mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface()
+ }
+ }
+}
+
+type extensionMap map[int32]ExtensionField
+
+func (m *extensionMap) Range(f func(pref.FieldDescriptor, pref.Value) bool) {
+ if m != nil {
+ for _, x := range *m {
+ xd := x.Type().TypeDescriptor()
+ v := x.Value()
+ if xd.IsList() && v.List().Len() == 0 {
+ continue
+ }
+ if !f(xd, v) {
+ return
+ }
+ }
+ }
+}
+func (m *extensionMap) Has(xt pref.ExtensionType) (ok bool) {
+ if m == nil {
+ return false
+ }
+ xd := xt.TypeDescriptor()
+ x, ok := (*m)[int32(xd.Number())]
+ if !ok {
+ return false
+ }
+ switch {
+ case xd.IsList():
+ return x.Value().List().Len() > 0
+ case xd.IsMap():
+ return x.Value().Map().Len() > 0
+ case xd.Message() != nil:
+ return x.Value().Message().IsValid()
+ }
+ return true
+}
+func (m *extensionMap) Clear(xt pref.ExtensionType) {
+ delete(*m, int32(xt.TypeDescriptor().Number()))
+}
+func (m *extensionMap) Get(xt pref.ExtensionType) pref.Value {
+ xd := xt.TypeDescriptor()
+ if m != nil {
+ if x, ok := (*m)[int32(xd.Number())]; ok {
+ return x.Value()
+ }
+ }
+ return xt.Zero()
+}
+func (m *extensionMap) Set(xt pref.ExtensionType, v pref.Value) {
+ xd := xt.TypeDescriptor()
+ isValid := true
+ switch {
+ case !xt.IsValidValue(v):
+ isValid = false
+ case xd.IsList():
+ isValid = v.List().IsValid()
+ case xd.IsMap():
+ isValid = v.Map().IsValid()
+ case xd.Message() != nil:
+ isValid = v.Message().IsValid()
+ }
+ if !isValid {
+ panic(fmt.Sprintf("%v: assigning invalid value", xt.TypeDescriptor().FullName()))
+ }
+
+ if *m == nil {
+ *m = make(map[int32]ExtensionField)
+ }
+ var x ExtensionField
+ x.Set(xt, v)
+ (*m)[int32(xd.Number())] = x
+}
+func (m *extensionMap) Mutable(xt pref.ExtensionType) pref.Value {
+ xd := xt.TypeDescriptor()
+ if xd.Kind() != pref.MessageKind && xd.Kind() != pref.GroupKind && !xd.IsList() && !xd.IsMap() {
+ panic("invalid Mutable on field with non-composite type")
+ }
+ if x, ok := (*m)[int32(xd.Number())]; ok {
+ return x.Value()
+ }
+ v := xt.New()
+ m.Set(xt, v)
+ return v
+}
+
+// MessageState is a data structure that is nested as the first field in a
+// concrete message. It provides a way to implement the ProtoReflect method
+// in an allocation-free way without needing to have a shadow Go type generated
+// for every message type. This technique only works using unsafe.
+//
+//
+// Example generated code:
+//
+// type M struct {
+// state protoimpl.MessageState
+//
+// Field1 int32
+// Field2 string
+// Field3 *BarMessage
+// ...
+// }
+//
+// func (m *M) ProtoReflect() protoreflect.Message {
+// mi := &file_fizz_buzz_proto_msgInfos[5]
+// if protoimpl.UnsafeEnabled && m != nil {
+// ms := protoimpl.X.MessageStateOf(Pointer(m))
+// if ms.LoadMessageInfo() == nil {
+// ms.StoreMessageInfo(mi)
+// }
+// return ms
+// }
+// return mi.MessageOf(m)
+// }
+//
+// The MessageState type holds a *MessageInfo, which must be atomically set to
+// the message info associated with a given message instance.
+// By unsafely converting a *M into a *MessageState, the MessageState object
+// has access to all the information needed to implement protobuf reflection.
+// It has access to the message info as its first field, and a pointer to the
+// MessageState is identical to a pointer to the concrete message value.
+//
+//
+// Requirements:
+// • The type M must implement protoreflect.ProtoMessage.
+// • The address of m must not be nil.
+// • The address of m and the address of m.state must be equal,
+// even though they are different Go types.
+type MessageState struct {
+ pragma.NoUnkeyedLiterals
+ pragma.DoNotCompare
+ pragma.DoNotCopy
+
+ atomicMessageInfo *MessageInfo
+}
+
+type messageState MessageState
+
+var (
+ _ pref.Message = (*messageState)(nil)
+ _ unwrapper = (*messageState)(nil)
+)
+
+// messageDataType is a tuple of a pointer to the message data and
+// a pointer to the message type. It is a generalized way of providing a
+// reflective view over a message instance. The disadvantage of this approach
+// is the need to allocate this tuple of 16B.
+type messageDataType struct {
+ p pointer
+ mi *MessageInfo
+}
+
+type (
+ messageReflectWrapper messageDataType
+ messageIfaceWrapper messageDataType
+)
+
+var (
+ _ pref.Message = (*messageReflectWrapper)(nil)
+ _ unwrapper = (*messageReflectWrapper)(nil)
+ _ pref.ProtoMessage = (*messageIfaceWrapper)(nil)
+ _ unwrapper = (*messageIfaceWrapper)(nil)
+)
+
+// MessageOf returns a reflective view over a message. The input must be a
+// pointer to a named Go struct. If the provided type has a ProtoReflect method,
+// it must be implemented by calling this method.
+func (mi *MessageInfo) MessageOf(m interface{}) pref.Message {
+ if reflect.TypeOf(m) != mi.GoReflectType {
+ panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType))
+ }
+ p := pointerOfIface(m)
+ if p.IsNil() {
+ return mi.nilMessage.Init(mi)
+ }
+ return &messageReflectWrapper{p, mi}
+}
+
+func (m *messageReflectWrapper) pointer() pointer { return m.p }
+func (m *messageReflectWrapper) messageInfo() *MessageInfo { return m.mi }
+
+// Reset implements the v1 proto.Message.Reset method.
+func (m *messageIfaceWrapper) Reset() {
+ if mr, ok := m.protoUnwrap().(interface{ Reset() }); ok {
+ mr.Reset()
+ return
+ }
+ rv := reflect.ValueOf(m.protoUnwrap())
+ if rv.Kind() == reflect.Ptr && !rv.IsNil() {
+ rv.Elem().Set(reflect.Zero(rv.Type().Elem()))
+ }
+}
+func (m *messageIfaceWrapper) ProtoReflect() pref.Message {
+ return (*messageReflectWrapper)(m)
+}
+func (m *messageIfaceWrapper) protoUnwrap() interface{} {
+ return m.p.AsIfaceOf(m.mi.GoReflectType.Elem())
+}
+
+// checkField verifies that the provided field descriptor is valid.
+// Exactly one of the returned values is populated.
+func (mi *MessageInfo) checkField(fd pref.FieldDescriptor) (*fieldInfo, pref.ExtensionType) {
+ var fi *fieldInfo
+ if n := fd.Number(); 0 < n && int(n) < len(mi.denseFields) {
+ fi = mi.denseFields[n]
+ } else {
+ fi = mi.fields[n]
+ }
+ if fi != nil {
+ if fi.fieldDesc != fd {
+ if got, want := fd.FullName(), fi.fieldDesc.FullName(); got != want {
+ panic(fmt.Sprintf("mismatching field: got %v, want %v", got, want))
+ }
+ panic(fmt.Sprintf("mismatching field: %v", fd.FullName()))
+ }
+ return fi, nil
+ }
+
+ if fd.IsExtension() {
+ if got, want := fd.ContainingMessage().FullName(), mi.Desc.FullName(); got != want {
+ // TODO: Should this be exact containing message descriptor match?
+ panic(fmt.Sprintf("extension %v has mismatching containing message: got %v, want %v", fd.FullName(), got, want))
+ }
+ if !mi.Desc.ExtensionRanges().Has(fd.Number()) {
+ panic(fmt.Sprintf("extension %v extends %v outside the extension range", fd.FullName(), mi.Desc.FullName()))
+ }
+ xtd, ok := fd.(pref.ExtensionTypeDescriptor)
+ if !ok {
+ panic(fmt.Sprintf("extension %v does not implement protoreflect.ExtensionTypeDescriptor", fd.FullName()))
+ }
+ return nil, xtd.Type()
+ }
+ panic(fmt.Sprintf("field %v is invalid", fd.FullName()))
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
new file mode 100644
index 0000000..343cf87
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
@@ -0,0 +1,543 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "sync"
+
+ "google.golang.org/protobuf/internal/flags"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ preg "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+type fieldInfo struct {
+ fieldDesc pref.FieldDescriptor
+
+ // These fields are used for protobuf reflection support.
+ has func(pointer) bool
+ clear func(pointer)
+ get func(pointer) pref.Value
+ set func(pointer, pref.Value)
+ mutable func(pointer) pref.Value
+ newMessage func() pref.Message
+ newField func() pref.Value
+}
+
+func fieldInfoForMissing(fd pref.FieldDescriptor) fieldInfo {
+ // This never occurs for generated message types.
+ // It implies that a hand-crafted type has missing Go fields
+ // for specific protobuf message fields.
+ return fieldInfo{
+ fieldDesc: fd,
+ has: func(p pointer) bool {
+ return false
+ },
+ clear: func(p pointer) {
+ panic("missing Go struct field for " + string(fd.FullName()))
+ },
+ get: func(p pointer) pref.Value {
+ return fd.Default()
+ },
+ set: func(p pointer, v pref.Value) {
+ panic("missing Go struct field for " + string(fd.FullName()))
+ },
+ mutable: func(p pointer) pref.Value {
+ panic("missing Go struct field for " + string(fd.FullName()))
+ },
+ newMessage: func() pref.Message {
+ panic("missing Go struct field for " + string(fd.FullName()))
+ },
+ newField: func() pref.Value {
+ if v := fd.Default(); v.IsValid() {
+ return v
+ }
+ panic("missing Go struct field for " + string(fd.FullName()))
+ },
+ }
+}
+
+func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x exporter, ot reflect.Type) fieldInfo {
+ ft := fs.Type
+ if ft.Kind() != reflect.Interface {
+ panic(fmt.Sprintf("field %v has invalid type: got %v, want interface kind", fd.FullName(), ft))
+ }
+ if ot.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("field %v has invalid type: got %v, want struct kind", fd.FullName(), ot))
+ }
+ if !reflect.PtrTo(ot).Implements(ft) {
+ panic(fmt.Sprintf("field %v has invalid type: %v does not implement %v", fd.FullName(), ot, ft))
+ }
+ conv := NewConverter(ot.Field(0).Type, fd)
+ isMessage := fd.Message() != nil
+
+ // TODO: Implement unsafe fast path?
+ fieldOffset := offsetOf(fs, x)
+ return fieldInfo{
+ // NOTE: The logic below intentionally assumes that oneof fields are
+ // well-formatted. That is, the oneof interface never contains a
+ // typed nil pointer to one of the wrapper structs.
+
+ fieldDesc: fd,
+ has: func(p pointer) bool {
+ if p.IsNil() {
+ return false
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() {
+ return false
+ }
+ return true
+ },
+ clear: func(p pointer) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() || rv.Elem().Type().Elem() != ot {
+ // NOTE: We intentionally don't check for rv.Elem().IsNil()
+ // so that (*OneofWrapperType)(nil) gets cleared to nil.
+ return
+ }
+ rv.Set(reflect.Zero(rv.Type()))
+ },
+ get: func(p pointer) pref.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() {
+ return conv.Zero()
+ }
+ rv = rv.Elem().Elem().Field(0)
+ return conv.PBValueOf(rv)
+ },
+ set: func(p pointer, v pref.Value) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() {
+ rv.Set(reflect.New(ot))
+ }
+ rv = rv.Elem().Elem().Field(0)
+ rv.Set(conv.GoValueOf(v))
+ },
+ mutable: func(p pointer) pref.Value {
+ if !isMessage {
+ panic(fmt.Sprintf("field %v with invalid Mutable call on field with non-composite type", fd.FullName()))
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() {
+ rv.Set(reflect.New(ot))
+ }
+ rv = rv.Elem().Elem().Field(0)
+ if rv.Kind() == reflect.Ptr && rv.IsNil() {
+ rv.Set(conv.GoValueOf(pref.ValueOfMessage(conv.New().Message())))
+ }
+ return conv.PBValueOf(rv)
+ },
+ newMessage: func() pref.Message {
+ return conv.New().Message()
+ },
+ newField: func() pref.Value {
+ return conv.New()
+ },
+ }
+}
+
+func fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo {
+ ft := fs.Type
+ if ft.Kind() != reflect.Map {
+ panic(fmt.Sprintf("field %v has invalid type: got %v, want map kind", fd.FullName(), ft))
+ }
+ conv := NewConverter(ft, fd)
+
+ // TODO: Implement unsafe fast path?
+ fieldOffset := offsetOf(fs, x)
+ return fieldInfo{
+ fieldDesc: fd,
+ has: func(p pointer) bool {
+ if p.IsNil() {
+ return false
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ return rv.Len() > 0
+ },
+ clear: func(p pointer) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ rv.Set(reflect.Zero(rv.Type()))
+ },
+ get: func(p pointer) pref.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.Len() == 0 {
+ return conv.Zero()
+ }
+ return conv.PBValueOf(rv)
+ },
+ set: func(p pointer, v pref.Value) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ pv := conv.GoValueOf(v)
+ if pv.IsNil() {
+ panic(fmt.Sprintf("map field %v cannot be set with read-only value", fd.FullName()))
+ }
+ rv.Set(pv)
+ },
+ mutable: func(p pointer) pref.Value {
+ v := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if v.IsNil() {
+ v.Set(reflect.MakeMap(fs.Type))
+ }
+ return conv.PBValueOf(v)
+ },
+ newField: func() pref.Value {
+ return conv.New()
+ },
+ }
+}
+
+func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo {
+ ft := fs.Type
+ if ft.Kind() != reflect.Slice {
+ panic(fmt.Sprintf("field %v has invalid type: got %v, want slice kind", fd.FullName(), ft))
+ }
+ conv := NewConverter(reflect.PtrTo(ft), fd)
+
+ // TODO: Implement unsafe fast path?
+ fieldOffset := offsetOf(fs, x)
+ return fieldInfo{
+ fieldDesc: fd,
+ has: func(p pointer) bool {
+ if p.IsNil() {
+ return false
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ return rv.Len() > 0
+ },
+ clear: func(p pointer) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ rv.Set(reflect.Zero(rv.Type()))
+ },
+ get: func(p pointer) pref.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type)
+ if rv.Elem().Len() == 0 {
+ return conv.Zero()
+ }
+ return conv.PBValueOf(rv)
+ },
+ set: func(p pointer, v pref.Value) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ pv := conv.GoValueOf(v)
+ if pv.IsNil() {
+ panic(fmt.Sprintf("list field %v cannot be set with read-only value", fd.FullName()))
+ }
+ rv.Set(pv.Elem())
+ },
+ mutable: func(p pointer) pref.Value {
+ v := p.Apply(fieldOffset).AsValueOf(fs.Type)
+ return conv.PBValueOf(v)
+ },
+ newField: func() pref.Value {
+ return conv.New()
+ },
+ }
+}
+
+var (
+ nilBytes = reflect.ValueOf([]byte(nil))
+ emptyBytes = reflect.ValueOf([]byte{})
+)
+
+func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo {
+ ft := fs.Type
+ nullable := fd.HasPresence()
+ isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8
+ if nullable {
+ if ft.Kind() != reflect.Ptr && ft.Kind() != reflect.Slice {
+ // This never occurs for generated message types.
+ // Despite the protobuf type system specifying presence,
+ // the Go field type cannot represent it.
+ nullable = false
+ }
+ if ft.Kind() == reflect.Ptr {
+ ft = ft.Elem()
+ }
+ }
+ conv := NewConverter(ft, fd)
+
+ // TODO: Implement unsafe fast path?
+ fieldOffset := offsetOf(fs, x)
+ return fieldInfo{
+ fieldDesc: fd,
+ has: func(p pointer) bool {
+ if p.IsNil() {
+ return false
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if nullable {
+ return !rv.IsNil()
+ }
+ switch rv.Kind() {
+ case reflect.Bool:
+ return rv.Bool()
+ case reflect.Int32, reflect.Int64:
+ return rv.Int() != 0
+ case reflect.Uint32, reflect.Uint64:
+ return rv.Uint() != 0
+ case reflect.Float32, reflect.Float64:
+ return rv.Float() != 0 || math.Signbit(rv.Float())
+ case reflect.String, reflect.Slice:
+ return rv.Len() > 0
+ default:
+ panic(fmt.Sprintf("field %v has invalid type: %v", fd.FullName(), rv.Type())) // should never happen
+ }
+ },
+ clear: func(p pointer) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ rv.Set(reflect.Zero(rv.Type()))
+ },
+ get: func(p pointer) pref.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if nullable {
+ if rv.IsNil() {
+ return conv.Zero()
+ }
+ if rv.Kind() == reflect.Ptr {
+ rv = rv.Elem()
+ }
+ }
+ return conv.PBValueOf(rv)
+ },
+ set: func(p pointer, v pref.Value) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if nullable && rv.Kind() == reflect.Ptr {
+ if rv.IsNil() {
+ rv.Set(reflect.New(ft))
+ }
+ rv = rv.Elem()
+ }
+ rv.Set(conv.GoValueOf(v))
+ if isBytes && rv.Len() == 0 {
+ if nullable {
+ rv.Set(emptyBytes) // preserve presence
+ } else {
+ rv.Set(nilBytes) // do not preserve presence
+ }
+ }
+ },
+ newField: func() pref.Value {
+ return conv.New()
+ },
+ }
+}
+
+func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldInfo {
+ if !flags.ProtoLegacy {
+ panic("no support for proto1 weak fields")
+ }
+
+ var once sync.Once
+ var messageType pref.MessageType
+ lazyInit := func() {
+ once.Do(func() {
+ messageName := fd.Message().FullName()
+ messageType, _ = preg.GlobalTypes.FindMessageByName(messageName)
+ if messageType == nil {
+ panic(fmt.Sprintf("weak message %v for field %v is not linked in", messageName, fd.FullName()))
+ }
+ })
+ }
+
+ num := fd.Number()
+ return fieldInfo{
+ fieldDesc: fd,
+ has: func(p pointer) bool {
+ if p.IsNil() {
+ return false
+ }
+ _, ok := p.Apply(weakOffset).WeakFields().get(num)
+ return ok
+ },
+ clear: func(p pointer) {
+ p.Apply(weakOffset).WeakFields().clear(num)
+ },
+ get: func(p pointer) pref.Value {
+ lazyInit()
+ if p.IsNil() {
+ return pref.ValueOfMessage(messageType.Zero())
+ }
+ m, ok := p.Apply(weakOffset).WeakFields().get(num)
+ if !ok {
+ return pref.ValueOfMessage(messageType.Zero())
+ }
+ return pref.ValueOfMessage(m.ProtoReflect())
+ },
+ set: func(p pointer, v pref.Value) {
+ lazyInit()
+ m := v.Message()
+ if m.Descriptor() != messageType.Descriptor() {
+ if got, want := m.Descriptor().FullName(), messageType.Descriptor().FullName(); got != want {
+ panic(fmt.Sprintf("field %v has mismatching message descriptor: got %v, want %v", fd.FullName(), got, want))
+ }
+ panic(fmt.Sprintf("field %v has mismatching message descriptor: %v", fd.FullName(), m.Descriptor().FullName()))
+ }
+ p.Apply(weakOffset).WeakFields().set(num, m.Interface())
+ },
+ mutable: func(p pointer) pref.Value {
+ lazyInit()
+ fs := p.Apply(weakOffset).WeakFields()
+ m, ok := fs.get(num)
+ if !ok {
+ m = messageType.New().Interface()
+ fs.set(num, m)
+ }
+ return pref.ValueOfMessage(m.ProtoReflect())
+ },
+ newMessage: func() pref.Message {
+ lazyInit()
+ return messageType.New()
+ },
+ newField: func() pref.Value {
+ lazyInit()
+ return pref.ValueOfMessage(messageType.New())
+ },
+ }
+}
+
+func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo {
+ ft := fs.Type
+ conv := NewConverter(ft, fd)
+
+ // TODO: Implement unsafe fast path?
+ fieldOffset := offsetOf(fs, x)
+ return fieldInfo{
+ fieldDesc: fd,
+ has: func(p pointer) bool {
+ if p.IsNil() {
+ return false
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if fs.Type.Kind() != reflect.Ptr {
+ return !isZero(rv)
+ }
+ return !rv.IsNil()
+ },
+ clear: func(p pointer) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ rv.Set(reflect.Zero(rv.Type()))
+ },
+ get: func(p pointer) pref.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ return conv.PBValueOf(rv)
+ },
+ set: func(p pointer, v pref.Value) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ rv.Set(conv.GoValueOf(v))
+ if fs.Type.Kind() == reflect.Ptr && rv.IsNil() {
+ panic(fmt.Sprintf("field %v has invalid nil pointer", fd.FullName()))
+ }
+ },
+ mutable: func(p pointer) pref.Value {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if fs.Type.Kind() == reflect.Ptr && rv.IsNil() {
+ rv.Set(conv.GoValueOf(conv.New()))
+ }
+ return conv.PBValueOf(rv)
+ },
+ newMessage: func() pref.Message {
+ return conv.New().Message()
+ },
+ newField: func() pref.Value {
+ return conv.New()
+ },
+ }
+}
+
+type oneofInfo struct {
+ oneofDesc pref.OneofDescriptor
+ which func(pointer) pref.FieldNumber
+}
+
+func makeOneofInfo(od pref.OneofDescriptor, si structInfo, x exporter) *oneofInfo {
+ oi := &oneofInfo{oneofDesc: od}
+ if od.IsSynthetic() {
+ fs := si.fieldsByNumber[od.Fields().Get(0).Number()]
+ fieldOffset := offsetOf(fs, x)
+ oi.which = func(p pointer) pref.FieldNumber {
+ if p.IsNil() {
+ return 0
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() { // valid on either *T or []byte
+ return 0
+ }
+ return od.Fields().Get(0).Number()
+ }
+ } else {
+ fs := si.oneofsByName[od.Name()]
+ fieldOffset := offsetOf(fs, x)
+ oi.which = func(p pointer) pref.FieldNumber {
+ if p.IsNil() {
+ return 0
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() {
+ return 0
+ }
+ rv = rv.Elem()
+ if rv.IsNil() {
+ return 0
+ }
+ return si.oneofWrappersByType[rv.Type().Elem()]
+ }
+ }
+ return oi
+}
+
+// isZero is identical to reflect.Value.IsZero.
+// TODO: Remove this when Go1.13 is the minimally supported Go version.
+func isZero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return math.Float64bits(v.Float()) == 0
+ case reflect.Complex64, reflect.Complex128:
+ c := v.Complex()
+ return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0
+ case reflect.Array:
+ for i := 0; i < v.Len(); i++ {
+ if !isZero(v.Index(i)) {
+ return false
+ }
+ }
+ return true
+ case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
+ return v.IsNil()
+ case reflect.String:
+ return v.Len() == 0
+ case reflect.Struct:
+ for i := 0; i < v.NumField(); i++ {
+ if !isZero(v.Field(i)) {
+ return false
+ }
+ }
+ return true
+ default:
+ panic(&reflect.ValueError{"reflect.Value.IsZero", v.Kind()})
+ }
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go
new file mode 100644
index 0000000..741d6e5
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go
@@ -0,0 +1,249 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-types. DO NOT EDIT.
+
+package impl
+
+import (
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+func (m *messageState) Descriptor() protoreflect.MessageDescriptor {
+ return m.messageInfo().Desc
+}
+func (m *messageState) Type() protoreflect.MessageType {
+ return m.messageInfo()
+}
+func (m *messageState) New() protoreflect.Message {
+ return m.messageInfo().New()
+}
+func (m *messageState) Interface() protoreflect.ProtoMessage {
+ return m.protoUnwrap().(protoreflect.ProtoMessage)
+}
+func (m *messageState) protoUnwrap() interface{} {
+ return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem())
+}
+func (m *messageState) ProtoMethods() *protoiface.Methods {
+ m.messageInfo().init()
+ return &m.messageInfo().methods
+}
+
+// ProtoMessageInfo is a pseudo-internal API for allowing the v1 code
+// to be able to retrieve a v2 MessageInfo struct.
+//
+// WARNING: This method is exempt from the compatibility promise and
+// may be removed in the future without warning.
+func (m *messageState) ProtoMessageInfo() *MessageInfo {
+ return m.messageInfo()
+}
+
+func (m *messageState) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
+ m.messageInfo().init()
+ for _, ri := range m.messageInfo().rangeInfos {
+ switch ri := ri.(type) {
+ case *fieldInfo:
+ if ri.has(m.pointer()) {
+ if !f(ri.fieldDesc, ri.get(m.pointer())) {
+ return
+ }
+ }
+ case *oneofInfo:
+ if n := ri.which(m.pointer()); n > 0 {
+ fi := m.messageInfo().fields[n]
+ if !f(fi.fieldDesc, fi.get(m.pointer())) {
+ return
+ }
+ }
+ }
+ }
+ m.messageInfo().extensionMap(m.pointer()).Range(f)
+}
+func (m *messageState) Has(fd protoreflect.FieldDescriptor) bool {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ return fi.has(m.pointer())
+ } else {
+ return m.messageInfo().extensionMap(m.pointer()).Has(xt)
+ }
+}
+func (m *messageState) Clear(fd protoreflect.FieldDescriptor) {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ fi.clear(m.pointer())
+ } else {
+ m.messageInfo().extensionMap(m.pointer()).Clear(xt)
+ }
+}
+func (m *messageState) Get(fd protoreflect.FieldDescriptor) protoreflect.Value {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ return fi.get(m.pointer())
+ } else {
+ return m.messageInfo().extensionMap(m.pointer()).Get(xt)
+ }
+}
+func (m *messageState) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ fi.set(m.pointer(), v)
+ } else {
+ m.messageInfo().extensionMap(m.pointer()).Set(xt, v)
+ }
+}
+func (m *messageState) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ return fi.mutable(m.pointer())
+ } else {
+ return m.messageInfo().extensionMap(m.pointer()).Mutable(xt)
+ }
+}
+func (m *messageState) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ return fi.newField()
+ } else {
+ return xt.New()
+ }
+}
+func (m *messageState) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
+ m.messageInfo().init()
+ if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od {
+ return od.Fields().ByNumber(oi.which(m.pointer()))
+ }
+ panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName()))
+}
+func (m *messageState) GetUnknown() protoreflect.RawFields {
+ m.messageInfo().init()
+ return m.messageInfo().getUnknown(m.pointer())
+}
+func (m *messageState) SetUnknown(b protoreflect.RawFields) {
+ m.messageInfo().init()
+ m.messageInfo().setUnknown(m.pointer(), b)
+}
+func (m *messageState) IsValid() bool {
+ return !m.pointer().IsNil()
+}
+
+func (m *messageReflectWrapper) Descriptor() protoreflect.MessageDescriptor {
+ return m.messageInfo().Desc
+}
+func (m *messageReflectWrapper) Type() protoreflect.MessageType {
+ return m.messageInfo()
+}
+func (m *messageReflectWrapper) New() protoreflect.Message {
+ return m.messageInfo().New()
+}
+func (m *messageReflectWrapper) Interface() protoreflect.ProtoMessage {
+ if m, ok := m.protoUnwrap().(protoreflect.ProtoMessage); ok {
+ return m
+ }
+ return (*messageIfaceWrapper)(m)
+}
+func (m *messageReflectWrapper) protoUnwrap() interface{} {
+ return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem())
+}
+func (m *messageReflectWrapper) ProtoMethods() *protoiface.Methods {
+ m.messageInfo().init()
+ return &m.messageInfo().methods
+}
+
+// ProtoMessageInfo is a pseudo-internal API that allows the v1 code
+// to retrieve a v2 MessageInfo struct.
+//
+// WARNING: This method is exempt from the compatibility promise and
+// may be removed in the future without warning.
+func (m *messageReflectWrapper) ProtoMessageInfo() *MessageInfo {
+ return m.messageInfo()
+}
+
+func (m *messageReflectWrapper) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
+ m.messageInfo().init()
+ for _, ri := range m.messageInfo().rangeInfos {
+ switch ri := ri.(type) {
+ case *fieldInfo:
+ if ri.has(m.pointer()) {
+ if !f(ri.fieldDesc, ri.get(m.pointer())) {
+ return
+ }
+ }
+ case *oneofInfo:
+ if n := ri.which(m.pointer()); n > 0 {
+ fi := m.messageInfo().fields[n]
+ if !f(fi.fieldDesc, fi.get(m.pointer())) {
+ return
+ }
+ }
+ }
+ }
+ m.messageInfo().extensionMap(m.pointer()).Range(f)
+}
+func (m *messageReflectWrapper) Has(fd protoreflect.FieldDescriptor) bool {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ return fi.has(m.pointer())
+ } else {
+ return m.messageInfo().extensionMap(m.pointer()).Has(xt)
+ }
+}
+func (m *messageReflectWrapper) Clear(fd protoreflect.FieldDescriptor) {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ fi.clear(m.pointer())
+ } else {
+ m.messageInfo().extensionMap(m.pointer()).Clear(xt)
+ }
+}
+func (m *messageReflectWrapper) Get(fd protoreflect.FieldDescriptor) protoreflect.Value {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ return fi.get(m.pointer())
+ } else {
+ return m.messageInfo().extensionMap(m.pointer()).Get(xt)
+ }
+}
+func (m *messageReflectWrapper) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ fi.set(m.pointer(), v)
+ } else {
+ m.messageInfo().extensionMap(m.pointer()).Set(xt, v)
+ }
+}
+func (m *messageReflectWrapper) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ return fi.mutable(m.pointer())
+ } else {
+ return m.messageInfo().extensionMap(m.pointer()).Mutable(xt)
+ }
+}
+func (m *messageReflectWrapper) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ return fi.newField()
+ } else {
+ return xt.New()
+ }
+}
+func (m *messageReflectWrapper) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
+ m.messageInfo().init()
+ if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od {
+ return od.Fields().ByNumber(oi.which(m.pointer()))
+ }
+ panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName()))
+}
+func (m *messageReflectWrapper) GetUnknown() protoreflect.RawFields {
+ m.messageInfo().init()
+ return m.messageInfo().getUnknown(m.pointer())
+}
+func (m *messageReflectWrapper) SetUnknown(b protoreflect.RawFields) {
+ m.messageInfo().init()
+ m.messageInfo().setUnknown(m.pointer(), b)
+}
+func (m *messageReflectWrapper) IsValid() bool {
+ return !m.pointer().IsNil()
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
new file mode 100644
index 0000000..9e3ed82
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
@@ -0,0 +1,178 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build purego appengine
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+ "sync"
+)
+
+const UnsafeEnabled = false
+
+// Pointer is an opaque pointer type.
+type Pointer interface{}
+
+// offset represents the offset to a struct field, accessible from a pointer.
+// The offset is the field index into a struct.
+type offset struct {
+ index int
+ export exporter
+}
+
+// offsetOf returns a field offset for the struct field.
+func offsetOf(f reflect.StructField, x exporter) offset {
+ if len(f.Index) != 1 {
+ panic("embedded structs are not supported")
+ }
+ if f.PkgPath == "" {
+ return offset{index: f.Index[0]} // field is already exported
+ }
+ if x == nil {
+ panic("exporter must be provided for unexported field")
+ }
+ return offset{index: f.Index[0], export: x}
+}
+
+// IsValid reports whether the offset is valid.
+func (f offset) IsValid() bool { return f.index >= 0 }
+
+// invalidOffset is an invalid field offset.
+var invalidOffset = offset{index: -1}
+
+// zeroOffset is a noop when calling pointer.Apply.
+var zeroOffset = offset{index: 0}
+
+// pointer is an abstract representation of a pointer to a struct or field.
+type pointer struct{ v reflect.Value }
+
+// pointerOf returns p as a pointer.
+func pointerOf(p Pointer) pointer {
+ return pointerOfIface(p)
+}
+
+// pointerOfValue returns v as a pointer.
+func pointerOfValue(v reflect.Value) pointer {
+ return pointer{v: v}
+}
+
+// pointerOfIface returns the pointer portion of an interface.
+func pointerOfIface(v interface{}) pointer {
+ return pointer{v: reflect.ValueOf(v)}
+}
+
+// IsNil reports whether the pointer is nil.
+func (p pointer) IsNil() bool {
+ return p.v.IsNil()
+}
+
+// Apply adds an offset to the pointer to derive a new pointer
+// to a specified field. The current pointer must be pointing at a struct.
+func (p pointer) Apply(f offset) pointer {
+ if f.export != nil {
+ if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() {
+ return pointer{v: v}
+ }
+ }
+ return pointer{v: p.v.Elem().Field(f.index).Addr()}
+}
+
+// AsValueOf treats p as a pointer to an object of type t and returns the value.
+// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t))
+func (p pointer) AsValueOf(t reflect.Type) reflect.Value {
+ if got := p.v.Type().Elem(); got != t {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", got, t))
+ }
+ return p.v
+}
+
+// AsIfaceOf treats p as a pointer to an object of type t and returns the value.
+// It is equivalent to p.AsValueOf(t).Interface()
+func (p pointer) AsIfaceOf(t reflect.Type) interface{} {
+ return p.AsValueOf(t).Interface()
+}
+
+func (p pointer) Bool() *bool { return p.v.Interface().(*bool) }
+func (p pointer) BoolPtr() **bool { return p.v.Interface().(**bool) }
+func (p pointer) BoolSlice() *[]bool { return p.v.Interface().(*[]bool) }
+func (p pointer) Int32() *int32 { return p.v.Interface().(*int32) }
+func (p pointer) Int32Ptr() **int32 { return p.v.Interface().(**int32) }
+func (p pointer) Int32Slice() *[]int32 { return p.v.Interface().(*[]int32) }
+func (p pointer) Int64() *int64 { return p.v.Interface().(*int64) }
+func (p pointer) Int64Ptr() **int64 { return p.v.Interface().(**int64) }
+func (p pointer) Int64Slice() *[]int64 { return p.v.Interface().(*[]int64) }
+func (p pointer) Uint32() *uint32 { return p.v.Interface().(*uint32) }
+func (p pointer) Uint32Ptr() **uint32 { return p.v.Interface().(**uint32) }
+func (p pointer) Uint32Slice() *[]uint32 { return p.v.Interface().(*[]uint32) }
+func (p pointer) Uint64() *uint64 { return p.v.Interface().(*uint64) }
+func (p pointer) Uint64Ptr() **uint64 { return p.v.Interface().(**uint64) }
+func (p pointer) Uint64Slice() *[]uint64 { return p.v.Interface().(*[]uint64) }
+func (p pointer) Float32() *float32 { return p.v.Interface().(*float32) }
+func (p pointer) Float32Ptr() **float32 { return p.v.Interface().(**float32) }
+func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) }
+func (p pointer) Float64() *float64 { return p.v.Interface().(*float64) }
+func (p pointer) Float64Ptr() **float64 { return p.v.Interface().(**float64) }
+func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) }
+func (p pointer) String() *string { return p.v.Interface().(*string) }
+func (p pointer) StringPtr() **string { return p.v.Interface().(**string) }
+func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) }
+func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) }
+func (p pointer) BytesPtr() **[]byte { return p.v.Interface().(**[]byte) }
+func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) }
+func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) }
+func (p pointer) Extensions() *map[int32]ExtensionField {
+ return p.v.Interface().(*map[int32]ExtensionField)
+}
+
+func (p pointer) Elem() pointer {
+ return pointer{v: p.v.Elem()}
+}
+
+// PointerSlice copies []*T from p as a new []pointer.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) PointerSlice() []pointer {
+ // TODO: reconsider this
+ if p.v.IsNil() {
+ return nil
+ }
+ n := p.v.Elem().Len()
+ s := make([]pointer, n)
+ for i := 0; i < n; i++ {
+ s[i] = pointer{v: p.v.Elem().Index(i)}
+ }
+ return s
+}
+
+// AppendPointerSlice appends v to p, which must be a []*T.
+func (p pointer) AppendPointerSlice(v pointer) {
+ sp := p.v.Elem()
+ sp.Set(reflect.Append(sp, v.v))
+}
+
+// SetPointer sets *p to v.
+func (p pointer) SetPointer(v pointer) {
+ p.v.Elem().Set(v.v)
+}
+
+func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") }
+func (ms *messageState) pointer() pointer { panic("not supported") }
+func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") }
+func (ms *messageState) LoadMessageInfo() *MessageInfo { panic("not supported") }
+func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") }
+
+type atomicNilMessage struct {
+ once sync.Once
+ m messageReflectWrapper
+}
+
+func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper {
+ m.once.Do(func() {
+ m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface())
+ m.m.mi = mi
+ })
+ return &m.m
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
new file mode 100644
index 0000000..9ecf23a
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
@@ -0,0 +1,174 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !purego,!appengine
+
+package impl
+
+import (
+ "reflect"
+ "sync/atomic"
+ "unsafe"
+)
+
+const UnsafeEnabled = true
+
+// Pointer is an opaque pointer type.
+type Pointer unsafe.Pointer
+
+// offset represents the offset to a struct field, accessible from a pointer.
+// The offset is the byte offset to the field from the start of the struct.
+type offset uintptr
+
+// offsetOf returns a field offset for the struct field.
+func offsetOf(f reflect.StructField, x exporter) offset {
+ return offset(f.Offset)
+}
+
+// IsValid reports whether the offset is valid.
+func (f offset) IsValid() bool { return f != invalidOffset }
+
+// invalidOffset is an invalid field offset.
+var invalidOffset = ^offset(0)
+
+// zeroOffset is a noop when calling pointer.Apply.
+var zeroOffset = offset(0)
+
+// pointer is a pointer to a message struct or field.
+type pointer struct{ p unsafe.Pointer }
+
+// pointerOf returns p as a pointer.
+func pointerOf(p Pointer) pointer {
+ return pointer{p: unsafe.Pointer(p)}
+}
+
+// pointerOfValue returns v as a pointer.
+func pointerOfValue(v reflect.Value) pointer {
+ return pointer{p: unsafe.Pointer(v.Pointer())}
+}
+
+// pointerOfIface returns the pointer portion of an interface.
+func pointerOfIface(v interface{}) pointer {
+ type ifaceHeader struct {
+ Type unsafe.Pointer
+ Data unsafe.Pointer
+ }
+ return pointer{p: (*ifaceHeader)(unsafe.Pointer(&v)).Data}
+}
+
+// IsNil reports whether the pointer is nil.
+func (p pointer) IsNil() bool {
+ return p.p == nil
+}
+
+// Apply adds an offset to the pointer to derive a new pointer
+// to a specified field. The pointer must be valid and pointing at a struct.
+func (p pointer) Apply(f offset) pointer {
+ if p.IsNil() {
+ panic("invalid nil pointer")
+ }
+ return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
+}
+
+// AsValueOf treats p as a pointer to an object of type t and returns the value.
+// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t))
+func (p pointer) AsValueOf(t reflect.Type) reflect.Value {
+ return reflect.NewAt(t, p.p)
+}
+
+// AsIfaceOf treats p as a pointer to an object of type t and returns the value.
+// It is equivalent to p.AsValueOf(t).Interface()
+func (p pointer) AsIfaceOf(t reflect.Type) interface{} {
+ // TODO: Use tricky unsafe magic to directly create ifaceHeader.
+ return p.AsValueOf(t).Interface()
+}
+
+func (p pointer) Bool() *bool { return (*bool)(p.p) }
+func (p pointer) BoolPtr() **bool { return (**bool)(p.p) }
+func (p pointer) BoolSlice() *[]bool { return (*[]bool)(p.p) }
+func (p pointer) Int32() *int32 { return (*int32)(p.p) }
+func (p pointer) Int32Ptr() **int32 { return (**int32)(p.p) }
+func (p pointer) Int32Slice() *[]int32 { return (*[]int32)(p.p) }
+func (p pointer) Int64() *int64 { return (*int64)(p.p) }
+func (p pointer) Int64Ptr() **int64 { return (**int64)(p.p) }
+func (p pointer) Int64Slice() *[]int64 { return (*[]int64)(p.p) }
+func (p pointer) Uint32() *uint32 { return (*uint32)(p.p) }
+func (p pointer) Uint32Ptr() **uint32 { return (**uint32)(p.p) }
+func (p pointer) Uint32Slice() *[]uint32 { return (*[]uint32)(p.p) }
+func (p pointer) Uint64() *uint64 { return (*uint64)(p.p) }
+func (p pointer) Uint64Ptr() **uint64 { return (**uint64)(p.p) }
+func (p pointer) Uint64Slice() *[]uint64 { return (*[]uint64)(p.p) }
+func (p pointer) Float32() *float32 { return (*float32)(p.p) }
+func (p pointer) Float32Ptr() **float32 { return (**float32)(p.p) }
+func (p pointer) Float32Slice() *[]float32 { return (*[]float32)(p.p) }
+func (p pointer) Float64() *float64 { return (*float64)(p.p) }
+func (p pointer) Float64Ptr() **float64 { return (**float64)(p.p) }
+func (p pointer) Float64Slice() *[]float64 { return (*[]float64)(p.p) }
+func (p pointer) String() *string { return (*string)(p.p) }
+func (p pointer) StringPtr() **string { return (**string)(p.p) }
+func (p pointer) StringSlice() *[]string { return (*[]string)(p.p) }
+func (p pointer) Bytes() *[]byte { return (*[]byte)(p.p) }
+func (p pointer) BytesPtr() **[]byte { return (**[]byte)(p.p) }
+func (p pointer) BytesSlice() *[][]byte { return (*[][]byte)(p.p) }
+func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.p) }
+func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) }
+
+func (p pointer) Elem() pointer {
+ return pointer{p: *(*unsafe.Pointer)(p.p)}
+}
+
+// PointerSlice loads []*T from p as a []pointer.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) PointerSlice() []pointer {
+ // Super-tricky - p should point to a []*T where T is a
+ // message type. We load it as []pointer.
+ return *(*[]pointer)(p.p)
+}
+
+// AppendPointerSlice appends v to p, which must be a []*T.
+func (p pointer) AppendPointerSlice(v pointer) {
+ *(*[]pointer)(p.p) = append(*(*[]pointer)(p.p), v)
+}
+
+// SetPointer sets *p to v.
+func (p pointer) SetPointer(v pointer) {
+ *(*unsafe.Pointer)(p.p) = (unsafe.Pointer)(v.p)
+}
+
+// Static check that MessageState does not exceed the size of a pointer.
+const _ = uint(unsafe.Sizeof(unsafe.Pointer(nil)) - unsafe.Sizeof(MessageState{}))
+
+func (Export) MessageStateOf(p Pointer) *messageState {
+ // Super-tricky - see documentation on MessageState.
+ return (*messageState)(unsafe.Pointer(p))
+}
+func (ms *messageState) pointer() pointer {
+ // Super-tricky - see documentation on MessageState.
+ return pointer{p: unsafe.Pointer(ms)}
+}
+func (ms *messageState) messageInfo() *MessageInfo {
+ mi := ms.LoadMessageInfo()
+ if mi == nil {
+ panic("invalid nil message info; this suggests memory corruption due to a race or shallow copy on the message struct")
+ }
+ return mi
+}
+func (ms *messageState) LoadMessageInfo() *MessageInfo {
+ return (*MessageInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&ms.atomicMessageInfo))))
+}
+func (ms *messageState) StoreMessageInfo(mi *MessageInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&ms.atomicMessageInfo)), unsafe.Pointer(mi))
+}
+
+type atomicNilMessage struct{ p unsafe.Pointer } // p is a *messageReflectWrapper
+
+func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper {
+ if p := atomic.LoadPointer(&m.p); p != nil {
+ return (*messageReflectWrapper)(p)
+ }
+ w := &messageReflectWrapper{mi: mi}
+ atomic.CompareAndSwapPointer(&m.p, nil, (unsafe.Pointer)(w))
+ return (*messageReflectWrapper)(atomic.LoadPointer(&m.p))
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go
new file mode 100644
index 0000000..08cfb60
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go
@@ -0,0 +1,576 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "math"
+ "math/bits"
+ "reflect"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/internal/flags"
+ "google.golang.org/protobuf/internal/genid"
+ "google.golang.org/protobuf/internal/strs"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ preg "google.golang.org/protobuf/reflect/protoregistry"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// ValidationStatus is the result of validating the wire-format encoding of a message.
+type ValidationStatus int
+
+const (
+ // ValidationUnknown indicates that unmarshaling the message might succeed or fail.
+ // The validator was unable to render a judgement.
+ //
+ // The only causes of this status are an aberrant message type appearing somewhere
+ // in the message or a failure in the extension resolver.
+ ValidationUnknown ValidationStatus = iota + 1
+
+ // ValidationInvalid indicates that unmarshaling the message will fail.
+ ValidationInvalid
+
+ // ValidationValid indicates that unmarshaling the message will succeed.
+ ValidationValid
+)
+
+func (v ValidationStatus) String() string {
+ switch v {
+ case ValidationUnknown:
+ return "ValidationUnknown"
+ case ValidationInvalid:
+ return "ValidationInvalid"
+ case ValidationValid:
+ return "ValidationValid"
+ default:
+ return fmt.Sprintf("ValidationStatus(%d)", int(v))
+ }
+}
+
+// Validate determines whether the contents of the buffer are a valid wire encoding
+// of the message type.
+//
+// This function is exposed for testing.
+func Validate(mt pref.MessageType, in piface.UnmarshalInput) (out piface.UnmarshalOutput, _ ValidationStatus) {
+ mi, ok := mt.(*MessageInfo)
+ if !ok {
+ return out, ValidationUnknown
+ }
+ if in.Resolver == nil {
+ in.Resolver = preg.GlobalTypes
+ }
+ o, st := mi.validate(in.Buf, 0, unmarshalOptions{
+ flags: in.Flags,
+ resolver: in.Resolver,
+ })
+ if o.initialized {
+ out.Flags |= piface.UnmarshalInitialized
+ }
+ return out, st
+}
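+
+// As an illustrative sketch (mt and b are assumed to be a registered message
+// type and its candidate wire-format bytes), a test might call:
+//
+//	_, status := impl.Validate(mt, protoiface.UnmarshalInput{Buf: b})
+//	if status == impl.ValidationInvalid {
+//		// unmarshaling b into a message of type mt is guaranteed to fail
+//	}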
+
+type validationInfo struct {
+ mi *MessageInfo
+ typ validationType
+ keyType, valType validationType
+
+ // For non-required fields, requiredBit is 0.
+ //
+ // For required fields, requiredBit's nth bit is set, where n is a
+ // unique index in the range [0, MessageInfo.numRequiredFields).
+ //
+ // If there are more than 64 required fields, requiredBit is 0.
+ requiredBit uint64
+}
+
+type validationType uint8
+
+const (
+ validationTypeOther validationType = iota
+ validationTypeMessage
+ validationTypeGroup
+ validationTypeMap
+ validationTypeRepeatedVarint
+ validationTypeRepeatedFixed32
+ validationTypeRepeatedFixed64
+ validationTypeVarint
+ validationTypeFixed32
+ validationTypeFixed64
+ validationTypeBytes
+ validationTypeUTF8String
+ validationTypeMessageSetItem
+)
+
+func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd pref.FieldDescriptor, ft reflect.Type) validationInfo {
+ var vi validationInfo
+ switch {
+ case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
+ switch fd.Kind() {
+ case pref.MessageKind:
+ vi.typ = validationTypeMessage
+ if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok {
+ vi.mi = getMessageInfo(ot.Field(0).Type)
+ }
+ case pref.GroupKind:
+ vi.typ = validationTypeGroup
+ if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok {
+ vi.mi = getMessageInfo(ot.Field(0).Type)
+ }
+ case pref.StringKind:
+ if strs.EnforceUTF8(fd) {
+ vi.typ = validationTypeUTF8String
+ }
+ }
+ default:
+ vi = newValidationInfo(fd, ft)
+ }
+ if fd.Cardinality() == pref.Required {
+ // Avoid overflow. The required field check is done with a 64-bit mask, with
+ // any message containing more than 64 required fields always reported as
+ // potentially uninitialized, so it is not important to get a precise count
+ // of the required fields past 64.
+ if mi.numRequiredFields < math.MaxUint8 {
+ mi.numRequiredFields++
+ vi.requiredBit = 1 << (mi.numRequiredFields - 1)
+ }
+ }
+ return vi
+}
+
+func newValidationInfo(fd pref.FieldDescriptor, ft reflect.Type) validationInfo {
+ var vi validationInfo
+ switch {
+ case fd.IsList():
+ switch fd.Kind() {
+ case pref.MessageKind:
+ vi.typ = validationTypeMessage
+ if ft.Kind() == reflect.Slice {
+ vi.mi = getMessageInfo(ft.Elem())
+ }
+ case pref.GroupKind:
+ vi.typ = validationTypeGroup
+ if ft.Kind() == reflect.Slice {
+ vi.mi = getMessageInfo(ft.Elem())
+ }
+ case pref.StringKind:
+ vi.typ = validationTypeBytes
+ if strs.EnforceUTF8(fd) {
+ vi.typ = validationTypeUTF8String
+ }
+ default:
+ switch wireTypes[fd.Kind()] {
+ case protowire.VarintType:
+ vi.typ = validationTypeRepeatedVarint
+ case protowire.Fixed32Type:
+ vi.typ = validationTypeRepeatedFixed32
+ case protowire.Fixed64Type:
+ vi.typ = validationTypeRepeatedFixed64
+ }
+ }
+ case fd.IsMap():
+ vi.typ = validationTypeMap
+ switch fd.MapKey().Kind() {
+ case pref.StringKind:
+ if strs.EnforceUTF8(fd) {
+ vi.keyType = validationTypeUTF8String
+ }
+ }
+ switch fd.MapValue().Kind() {
+ case pref.MessageKind:
+ vi.valType = validationTypeMessage
+ if ft.Kind() == reflect.Map {
+ vi.mi = getMessageInfo(ft.Elem())
+ }
+ case pref.StringKind:
+ if strs.EnforceUTF8(fd) {
+ vi.valType = validationTypeUTF8String
+ }
+ }
+ default:
+ switch fd.Kind() {
+ case pref.MessageKind:
+ vi.typ = validationTypeMessage
+ if !fd.IsWeak() {
+ vi.mi = getMessageInfo(ft)
+ }
+ case pref.GroupKind:
+ vi.typ = validationTypeGroup
+ vi.mi = getMessageInfo(ft)
+ case pref.StringKind:
+ vi.typ = validationTypeBytes
+ if strs.EnforceUTF8(fd) {
+ vi.typ = validationTypeUTF8String
+ }
+ default:
+ switch wireTypes[fd.Kind()] {
+ case protowire.VarintType:
+ vi.typ = validationTypeVarint
+ case protowire.Fixed32Type:
+ vi.typ = validationTypeFixed32
+ case protowire.Fixed64Type:
+ vi.typ = validationTypeFixed64
+ case protowire.BytesType:
+ vi.typ = validationTypeBytes
+ }
+ }
+ }
+ return vi
+}
+
+func (mi *MessageInfo) validate(b []byte, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, result ValidationStatus) {
+ mi.init()
+ type validationState struct {
+ typ validationType
+ keyType, valType validationType
+ endGroup protowire.Number
+ mi *MessageInfo
+ tail []byte
+ requiredMask uint64
+ }
+
+ // Pre-allocate some slots to avoid repeated slice reallocation.
+ states := make([]validationState, 0, 16)
+ states = append(states, validationState{
+ typ: validationTypeMessage,
+ mi: mi,
+ })
+ if groupTag > 0 {
+ states[0].typ = validationTypeGroup
+ states[0].endGroup = groupTag
+ }
+ initialized := true
+ start := len(b)
+State:
+ for len(states) > 0 {
+ st := &states[len(states)-1]
+ for len(b) > 0 {
+ // Parse the tag (field number and wire type).
+ var tag uint64
+ if b[0] < 0x80 {
+ tag = uint64(b[0])
+ b = b[1:]
+ } else if len(b) >= 2 && b[1] < 128 {
+ tag = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ b = b[2:]
+ } else {
+ var n int
+ tag, n = protowire.ConsumeVarint(b)
+ if n < 0 {
+ return out, ValidationInvalid
+ }
+ b = b[n:]
+ }
+ var num protowire.Number
+ if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) {
+ return out, ValidationInvalid
+ } else {
+ num = protowire.Number(n)
+ }
+ wtyp := protowire.Type(tag & 7)
+
+ if wtyp == protowire.EndGroupType {
+ if st.endGroup == num {
+ goto PopState
+ }
+ return out, ValidationInvalid
+ }
+ var vi validationInfo
+ switch {
+ case st.typ == validationTypeMap:
+ switch num {
+ case genid.MapEntry_Key_field_number:
+ vi.typ = st.keyType
+ case genid.MapEntry_Value_field_number:
+ vi.typ = st.valType
+ vi.mi = st.mi
+ vi.requiredBit = 1
+ }
+ case flags.ProtoLegacy && st.mi.isMessageSet:
+ switch num {
+ case messageset.FieldItem:
+ vi.typ = validationTypeMessageSetItem
+ }
+ default:
+ var f *coderFieldInfo
+ if int(num) < len(st.mi.denseCoderFields) {
+ f = st.mi.denseCoderFields[num]
+ } else {
+ f = st.mi.coderFields[num]
+ }
+ if f != nil {
+ vi = f.validation
+ if vi.typ == validationTypeMessage && vi.mi == nil {
+ // Probable weak field.
+ //
+ // TODO: Consider storing the results of this lookup somewhere
+ // rather than recomputing it on every validation.
+ fd := st.mi.Desc.Fields().ByNumber(num)
+ if fd == nil || !fd.IsWeak() {
+ break
+ }
+ messageName := fd.Message().FullName()
+ messageType, err := preg.GlobalTypes.FindMessageByName(messageName)
+ switch err {
+ case nil:
+ vi.mi, _ = messageType.(*MessageInfo)
+ case preg.NotFound:
+ vi.typ = validationTypeBytes
+ default:
+ return out, ValidationUnknown
+ }
+ }
+ break
+ }
+ // Possible extension field.
+ //
+ // TODO: We should return ValidationUnknown when:
+ // 1. The resolver is not frozen. (More extensions may be added to it.)
+ // 2. The resolver returns preg.NotFound.
+ // In this case, a type added to the resolver in the future could cause
+ // unmarshaling to begin failing. Supporting this requires some way to
+ // determine if the resolver is frozen.
+ xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), num)
+ if err != nil && err != preg.NotFound {
+ return out, ValidationUnknown
+ }
+ if err == nil {
+ vi = getExtensionFieldInfo(xt).validation
+ }
+ }
+ if vi.requiredBit != 0 {
+ // Check that the field has a compatible wire type.
+ // We only need to consider non-repeated field types,
+ // since repeated fields (and maps) can never be required.
+ ok := false
+ switch vi.typ {
+ case validationTypeVarint:
+ ok = wtyp == protowire.VarintType
+ case validationTypeFixed32:
+ ok = wtyp == protowire.Fixed32Type
+ case validationTypeFixed64:
+ ok = wtyp == protowire.Fixed64Type
+ case validationTypeBytes, validationTypeUTF8String, validationTypeMessage:
+ ok = wtyp == protowire.BytesType
+ case validationTypeGroup:
+ ok = wtyp == protowire.StartGroupType
+ }
+ if ok {
+ st.requiredMask |= vi.requiredBit
+ }
+ }
+
+ switch wtyp {
+ case protowire.VarintType:
+ if len(b) >= 10 {
+ switch {
+ case b[0] < 0x80:
+ b = b[1:]
+ case b[1] < 0x80:
+ b = b[2:]
+ case b[2] < 0x80:
+ b = b[3:]
+ case b[3] < 0x80:
+ b = b[4:]
+ case b[4] < 0x80:
+ b = b[5:]
+ case b[5] < 0x80:
+ b = b[6:]
+ case b[6] < 0x80:
+ b = b[7:]
+ case b[7] < 0x80:
+ b = b[8:]
+ case b[8] < 0x80:
+ b = b[9:]
+ case b[9] < 0x80 && b[9] < 2:
+ b = b[10:]
+ default:
+ return out, ValidationInvalid
+ }
+ } else {
+ switch {
+ case len(b) > 0 && b[0] < 0x80:
+ b = b[1:]
+ case len(b) > 1 && b[1] < 0x80:
+ b = b[2:]
+ case len(b) > 2 && b[2] < 0x80:
+ b = b[3:]
+ case len(b) > 3 && b[3] < 0x80:
+ b = b[4:]
+ case len(b) > 4 && b[4] < 0x80:
+ b = b[5:]
+ case len(b) > 5 && b[5] < 0x80:
+ b = b[6:]
+ case len(b) > 6 && b[6] < 0x80:
+ b = b[7:]
+ case len(b) > 7 && b[7] < 0x80:
+ b = b[8:]
+ case len(b) > 8 && b[8] < 0x80:
+ b = b[9:]
+ case len(b) > 9 && b[9] < 2:
+ b = b[10:]
+ default:
+ return out, ValidationInvalid
+ }
+ }
+ continue State
+ case protowire.BytesType:
+ var size uint64
+ if len(b) >= 1 && b[0] < 0x80 {
+ size = uint64(b[0])
+ b = b[1:]
+ } else if len(b) >= 2 && b[1] < 128 {
+ size = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ b = b[2:]
+ } else {
+ var n int
+ size, n = protowire.ConsumeVarint(b)
+ if n < 0 {
+ return out, ValidationInvalid
+ }
+ b = b[n:]
+ }
+ if size > uint64(len(b)) {
+ return out, ValidationInvalid
+ }
+ v := b[:size]
+ b = b[size:]
+ switch vi.typ {
+ case validationTypeMessage:
+ if vi.mi == nil {
+ return out, ValidationUnknown
+ }
+ vi.mi.init()
+ fallthrough
+ case validationTypeMap:
+ if vi.mi != nil {
+ vi.mi.init()
+ }
+ states = append(states, validationState{
+ typ: vi.typ,
+ keyType: vi.keyType,
+ valType: vi.valType,
+ mi: vi.mi,
+ tail: b,
+ })
+ b = v
+ continue State
+ case validationTypeRepeatedVarint:
+ // Packed field.
+ for len(v) > 0 {
+ _, n := protowire.ConsumeVarint(v)
+ if n < 0 {
+ return out, ValidationInvalid
+ }
+ v = v[n:]
+ }
+ case validationTypeRepeatedFixed32:
+ // Packed field.
+ if len(v)%4 != 0 {
+ return out, ValidationInvalid
+ }
+ case validationTypeRepeatedFixed64:
+ // Packed field.
+ if len(v)%8 != 0 {
+ return out, ValidationInvalid
+ }
+ case validationTypeUTF8String:
+ if !utf8.Valid(v) {
+ return out, ValidationInvalid
+ }
+ }
+ case protowire.Fixed32Type:
+ if len(b) < 4 {
+ return out, ValidationInvalid
+ }
+ b = b[4:]
+ case protowire.Fixed64Type:
+ if len(b) < 8 {
+ return out, ValidationInvalid
+ }
+ b = b[8:]
+ case protowire.StartGroupType:
+ switch {
+ case vi.typ == validationTypeGroup:
+ if vi.mi == nil {
+ return out, ValidationUnknown
+ }
+ vi.mi.init()
+ states = append(states, validationState{
+ typ: validationTypeGroup,
+ mi: vi.mi,
+ endGroup: num,
+ })
+ continue State
+ case flags.ProtoLegacy && vi.typ == validationTypeMessageSetItem:
+ typeid, v, n, err := messageset.ConsumeFieldValue(b, false)
+ if err != nil {
+ return out, ValidationInvalid
+ }
+ xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), typeid)
+ switch {
+ case err == preg.NotFound:
+ b = b[n:]
+ case err != nil:
+ return out, ValidationUnknown
+ default:
+ xvi := getExtensionFieldInfo(xt).validation
+ if xvi.mi != nil {
+ xvi.mi.init()
+ }
+ states = append(states, validationState{
+ typ: xvi.typ,
+ mi: xvi.mi,
+ tail: b[n:],
+ })
+ b = v
+ continue State
+ }
+ default:
+ n := protowire.ConsumeFieldValue(num, wtyp, b)
+ if n < 0 {
+ return out, ValidationInvalid
+ }
+ b = b[n:]
+ }
+ default:
+ return out, ValidationInvalid
+ }
+ }
+ if st.endGroup != 0 {
+ return out, ValidationInvalid
+ }
+ if len(b) != 0 {
+ return out, ValidationInvalid
+ }
+ b = st.tail
+ PopState:
+ numRequiredFields := 0
+ switch st.typ {
+ case validationTypeMessage, validationTypeGroup:
+ numRequiredFields = int(st.mi.numRequiredFields)
+ case validationTypeMap:
+ // If this is a map field with a message value that contains
+ // required fields, require that the value be present.
+ if st.mi != nil && st.mi.numRequiredFields > 0 {
+ numRequiredFields = 1
+ }
+ }
+ // If there are more than 64 required fields, this check will
+ // always fail and we will report that the message is potentially
+ // uninitialized.
+ if numRequiredFields > 0 && bits.OnesCount64(st.requiredMask) != numRequiredFields {
+ initialized = false
+ }
+ states = states[:len(states)-1]
+ }
+ out.n = start - len(b)
+ if initialized {
+ out.initialized = true
+ }
+ return out, ValidationValid
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/weak.go b/vendor/google.golang.org/protobuf/internal/impl/weak.go
new file mode 100644
index 0000000..009cbef
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/weak.go
@@ -0,0 +1,74 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+// weakFields adds methods to the exported WeakFields type for internal use.
+//
+// The exported type is an alias to an unnamed type, so methods can't be
+// defined directly on it.
+type weakFields WeakFields
+
+func (w weakFields) get(num pref.FieldNumber) (pref.ProtoMessage, bool) {
+ m, ok := w[int32(num)]
+ return m, ok
+}
+
+func (w *weakFields) set(num pref.FieldNumber, m pref.ProtoMessage) {
+ if *w == nil {
+ *w = make(weakFields)
+ }
+ (*w)[int32(num)] = m
+}
+
+func (w *weakFields) clear(num pref.FieldNumber) {
+ delete(*w, int32(num))
+}
+
+func (Export) HasWeak(w WeakFields, num pref.FieldNumber) bool {
+ _, ok := w[int32(num)]
+ return ok
+}
+
+func (Export) ClearWeak(w *WeakFields, num pref.FieldNumber) {
+ delete(*w, int32(num))
+}
+
+func (Export) GetWeak(w WeakFields, num pref.FieldNumber, name pref.FullName) pref.ProtoMessage {
+ if m, ok := w[int32(num)]; ok {
+ return m
+ }
+ mt, _ := protoregistry.GlobalTypes.FindMessageByName(name)
+ if mt == nil {
+ panic(fmt.Sprintf("message %v for weak field is not linked in", name))
+ }
+ return mt.Zero().Interface()
+}
+
+func (Export) SetWeak(w *WeakFields, num pref.FieldNumber, name pref.FullName, m pref.ProtoMessage) {
+ if m != nil {
+ mt, _ := protoregistry.GlobalTypes.FindMessageByName(name)
+ if mt == nil {
+ panic(fmt.Sprintf("message %v for weak field is not linked in", name))
+ }
+ if mt != m.ProtoReflect().Type() {
+ panic(fmt.Sprintf("invalid message type for weak field: got %T, want %T", m, mt.Zero().Interface()))
+ }
+ }
+ if m == nil || !m.ProtoReflect().IsValid() {
+ delete(*w, int32(num))
+ return
+ }
+ if *w == nil {
+ *w = make(weakFields)
+ }
+ (*w)[int32(num)] = m
+}
diff --git a/vendor/google.golang.org/protobuf/internal/order/order.go b/vendor/google.golang.org/protobuf/internal/order/order.go
new file mode 100644
index 0000000..2a24953
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/order/order.go
@@ -0,0 +1,89 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package order
+
+import (
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// FieldOrder specifies the ordering to visit message fields.
+// It is a function that reports whether x is ordered before y.
+type FieldOrder func(x, y pref.FieldDescriptor) bool
+
+var (
+ // AnyFieldOrder specifies no specific field ordering.
+ AnyFieldOrder FieldOrder = nil
+
+ // LegacyFieldOrder sorts fields in the same ordering as emitted by
+ // wire serialization in the github.com/golang/protobuf implementation.
+ LegacyFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool {
+ ox, oy := x.ContainingOneof(), y.ContainingOneof()
+ inOneof := func(od pref.OneofDescriptor) bool {
+ return od != nil && !od.IsSynthetic()
+ }
+
+ // Extension fields sort before non-extension fields.
+ if x.IsExtension() != y.IsExtension() {
+ return x.IsExtension() && !y.IsExtension()
+ }
+ // Fields not within a oneof sort before those within a oneof.
+ if inOneof(ox) != inOneof(oy) {
+ return !inOneof(ox) && inOneof(oy)
+ }
+ // Fields in disjoint oneof sets are sorted by declaration index.
+ if ox != nil && oy != nil && ox != oy {
+ return ox.Index() < oy.Index()
+ }
+ // Fields sorted by field number.
+ return x.Number() < y.Number()
+ }
+
+ // NumberFieldOrder sorts fields by their field number.
+ NumberFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool {
+ return x.Number() < y.Number()
+ }
+
+ // IndexNameFieldOrder sorts non-extension fields before extension fields.
+ // Non-extensions are sorted according to their declaration index.
+ // Extensions are sorted according to their full name.
+ IndexNameFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool {
+ // Non-extension fields sort before extension fields.
+ if x.IsExtension() != y.IsExtension() {
+ return !x.IsExtension() && y.IsExtension()
+ }
+ // Extensions sorted by fullname.
+ if x.IsExtension() && y.IsExtension() {
+ return x.FullName() < y.FullName()
+ }
+ // Non-extensions sorted by declaration index.
+ return x.Index() < y.Index()
+ }
+)
+
+// KeyOrder specifies the ordering to visit map entries.
+// It is a function that reports whether x is ordered before y.
+type KeyOrder func(x, y pref.MapKey) bool
+
+var (
+ // AnyKeyOrder specifies no specific key ordering.
+ AnyKeyOrder KeyOrder = nil
+
+ // GenericKeyOrder sorts false before true, numeric keys in ascending order,
+ // and strings in lexicographical ordering according to UTF-8 codepoints.
+ GenericKeyOrder KeyOrder = func(x, y pref.MapKey) bool {
+ switch x.Interface().(type) {
+ case bool:
+ return !x.Bool() && y.Bool()
+ case int32, int64:
+ return x.Int() < y.Int()
+ case uint32, uint64:
+ return x.Uint() < y.Uint()
+ case string:
+ return x.String() < y.String()
+ default:
+ panic("invalid map key type")
+ }
+ }
+)
diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go
new file mode 100644
index 0000000..c8090e0
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/order/range.go
@@ -0,0 +1,115 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package order provides ordered access to messages and maps.
+package order
+
+import (
+ "sort"
+ "sync"
+
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type messageField struct {
+ fd pref.FieldDescriptor
+ v pref.Value
+}
+
+var messageFieldPool = sync.Pool{
+ New: func() interface{} { return new([]messageField) },
+}
+
+type (
+ // FieldRanger is an interface for visiting all fields in a message.
+ // The protoreflect.Message type implements this interface.
+ FieldRanger interface{ Range(VisitField) }
+ // VisitField is called every time a message field is visited.
+ VisitField = func(pref.FieldDescriptor, pref.Value) bool
+)
+
+// RangeFields iterates over the fields of fs according to the specified order.
+func RangeFields(fs FieldRanger, less FieldOrder, fn VisitField) {
+ if less == nil {
+ fs.Range(fn)
+ return
+ }
+
+ // Obtain a pre-allocated scratch buffer.
+ p := messageFieldPool.Get().(*[]messageField)
+ fields := (*p)[:0]
+ defer func() {
+ if cap(fields) < 1024 {
+ *p = fields
+ messageFieldPool.Put(p)
+ }
+ }()
+
+ // Collect all fields in the message and sort them.
+ fs.Range(func(fd pref.FieldDescriptor, v pref.Value) bool {
+ fields = append(fields, messageField{fd, v})
+ return true
+ })
+ sort.Slice(fields, func(i, j int) bool {
+ return less(fields[i].fd, fields[j].fd)
+ })
+
+ // Visit the fields in the specified ordering.
+ for _, f := range fields {
+ if !fn(f.fd, f.v) {
+ return
+ }
+ }
+}
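+
+// An illustrative sketch of how a caller might visit a message in field
+// number order (m is assumed to be any protoreflect.Message):
+//
+//	order.RangeFields(m, order.NumberFieldOrder, func(fd pref.FieldDescriptor, v pref.Value) bool {
+//		// visit fd and v here
+//		return true // continue iterating
+//	})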
+
+type mapEntry struct {
+ k pref.MapKey
+ v pref.Value
+}
+
+var mapEntryPool = sync.Pool{
+ New: func() interface{} { return new([]mapEntry) },
+}
+
+type (
+ // EntryRanger is an interface for visiting all entries in a map.
+ // The protoreflect.Map type implements this interface.
+ EntryRanger interface{ Range(VisitEntry) }
+ // VisitEntry is called every time a map entry is visited.
+ VisitEntry = func(pref.MapKey, pref.Value) bool
+)
+
+// RangeEntries iterates over the entries of es according to the specified order.
+func RangeEntries(es EntryRanger, less KeyOrder, fn VisitEntry) {
+ if less == nil {
+ es.Range(fn)
+ return
+ }
+
+ // Obtain a pre-allocated scratch buffer.
+ p := mapEntryPool.Get().(*[]mapEntry)
+ entries := (*p)[:0]
+ defer func() {
+ if cap(entries) < 1024 {
+ *p = entries
+ mapEntryPool.Put(p)
+ }
+ }()
+
+ // Collect all entries in the map and sort them.
+ es.Range(func(k pref.MapKey, v pref.Value) bool {
+ entries = append(entries, mapEntry{k, v})
+ return true
+ })
+ sort.Slice(entries, func(i, j int) bool {
+ return less(entries[i].k, entries[j].k)
+ })
+
+ // Visit the entries in the specified ordering.
+ for _, e := range entries {
+ if !fn(e.k, e.v) {
+ return
+ }
+ }
+}
diff --git a/vendor/google.golang.org/protobuf/internal/pragma/pragma.go b/vendor/google.golang.org/protobuf/internal/pragma/pragma.go
new file mode 100644
index 0000000..49dc4fc
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/pragma/pragma.go
@@ -0,0 +1,29 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pragma provides types that can be embedded into a struct to
+// statically enforce or prevent certain language properties.
+package pragma
+
+import "sync"
+
+// NoUnkeyedLiterals can be embedded in a struct to prevent unkeyed literals.
+type NoUnkeyedLiterals struct{}
+
+// DoNotImplement can be embedded in an interface to prevent trivial
+// implementations of the interface.
+//
+// This is useful to prevent unauthorized implementations of an interface
+// so that it can be extended in the future for any protobuf language changes.
+type DoNotImplement interface{ ProtoInternal(DoNotImplement) }
+
+// DoNotCompare can be embedded in a struct to prevent comparability.
+type DoNotCompare [0]func()
+
+// DoNotCopy can be embedded in a struct to help prevent shallow copies.
+// This does not rely on a Go language feature, but rather a special case
+// within the vet checker.
+//
+// See https://golang.org/issues/8005.
+type DoNotCopy [0]sync.Mutex
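+
+// As an illustrative sketch (the struct below is hypothetical, not part of
+// this package), embedding these types looks like:
+//
+//	type handle struct {
+//		pragma.DoNotCompare // handle values cannot be compared with ==
+//		pragma.DoNotCopy    // `go vet` flags shallow copies of handle
+//		id int
+//	}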
diff --git a/vendor/google.golang.org/protobuf/internal/set/ints.go b/vendor/google.golang.org/protobuf/internal/set/ints.go
new file mode 100644
index 0000000..d3d7f89
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/set/ints.go
@@ -0,0 +1,58 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package set provides simple set data structures for uint64s.
+package set
+
+import "math/bits"
+
+// int64s represents a set of integers within the range of 0..63.
+type int64s uint64
+
+func (bs *int64s) Len() int {
+ return bits.OnesCount64(uint64(*bs))
+}
+func (bs *int64s) Has(n uint64) bool {
+ return uint64(*bs)&(uint64(1)<<n) > 0
+}
+func (bs *int64s) Set(n uint64) {
+ *(*uint64)(bs) |= uint64(1) << n
+}
+func (bs *int64s) Clear(n uint64) {
+ *(*uint64)(bs) &^= uint64(1) << n
+}
+
+// Ints represents a set of integers within the range of 0..math.MaxUint64.
+type Ints struct {
+ lo int64s
+ hi map[uint64]struct{}
+}
+
+func (bs *Ints) Len() int {
+ return bs.lo.Len() + len(bs.hi)
+}
+func (bs *Ints) Has(n uint64) bool {
+ if n < 64 {
+ return bs.lo.Has(n)
+ }
+ _, ok := bs.hi[n]
+ return ok
+}
+func (bs *Ints) Set(n uint64) {
+ if n < 64 {
+ bs.lo.Set(n)
+ return
+ }
+ if bs.hi == nil {
+ bs.hi = make(map[uint64]struct{})
+ }
+ bs.hi[n] = struct{}{}
+}
+func (bs *Ints) Clear(n uint64) {
+ if n < 64 {
+ bs.lo.Clear(n)
+ return
+ }
+ delete(bs.hi, n)
+}
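+
+// An illustrative sketch of typical usage (the values are arbitrary):
+//
+//	var s Ints
+//	s.Set(3)       // stored in the 64-bit bitset
+//	s.Set(1 << 20) // stored in the overflow map
+//	_ = s.Has(3)   // true
+//	s.Clear(3)
+//	_ = s.Len()    // 1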
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings.go b/vendor/google.golang.org/protobuf/internal/strs/strings.go
new file mode 100644
index 0000000..0b74e76
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings.go
@@ -0,0 +1,196 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package strs provides string manipulation functionality specific to protobuf.
+package strs
+
+import (
+ "go/token"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/internal/flags"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// EnforceUTF8 reports whether to enforce strict UTF-8 validation.
+func EnforceUTF8(fd protoreflect.FieldDescriptor) bool {
+ if flags.ProtoLegacy {
+ if fd, ok := fd.(interface{ EnforceUTF8() bool }); ok {
+ return fd.EnforceUTF8()
+ }
+ }
+ return fd.Syntax() == protoreflect.Proto3
+}
+
+// GoCamelCase camel-cases a protobuf name for use as a Go identifier.
+//
+// If there is an interior underscore followed by a lower case letter,
+// drop the underscore and convert the letter to upper case.
+func GoCamelCase(s string) string {
+ // Invariant: if the next letter is lower case, it must be converted
+ // to upper case.
+ // That is, we process a word at a time, where words are marked by _ or
+ // upper case letter. Digits are treated as words.
+ var b []byte
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ switch {
+ case c == '.' && i+1 < len(s) && isASCIILower(s[i+1]):
+ // Skip over '.' in ".{{lowercase}}".
+ case c == '.':
+ b = append(b, '_') // convert '.' to '_'
+ case c == '_' && (i == 0 || s[i-1] == '.'):
+ // Convert initial '_' to ensure we start with a capital letter.
+ // Do the same for '_' after '.' to match historic behavior.
+ b = append(b, 'X') // convert '_' to 'X'
+ case c == '_' && i+1 < len(s) && isASCIILower(s[i+1]):
+ // Skip over '_' in "_{{lowercase}}".
+ case isASCIIDigit(c):
+ b = append(b, c)
+ default:
+ // Assume we have a letter now - if not, it's a bogus identifier.
+ // The next word is a sequence of characters that must start upper case.
+ if isASCIILower(c) {
+ c -= 'a' - 'A' // convert lowercase to uppercase
+ }
+ b = append(b, c)
+
+ // Accept lower case sequence that follows.
+ for ; i+1 < len(s) && isASCIILower(s[i+1]); i++ {
+ b = append(b, s[i+1])
+ }
+ }
+ }
+ return string(b)
+}
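+
+// For example (illustrative, following the rules above):
+//
+//	GoCamelCase("foo_bar")   // "FooBar"
+//	GoCamelCase("_my_field") // "XMyField"
+//	GoCamelCase("json_name") // "JsonName"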
+
+// GoSanitized converts a string to a valid Go identifier.
+func GoSanitized(s string) string {
+ // Sanitize the input to the set of valid characters,
+ // which must be '_' or be in the Unicode L or N categories.
+ s = strings.Map(func(r rune) rune {
+ if unicode.IsLetter(r) || unicode.IsDigit(r) {
+ return r
+ }
+ return '_'
+ }, s)
+
+ // Prepend '_' in the event of a Go keyword conflict or if
+ // the identifier is invalid (does not start in the Unicode L category).
+ r, _ := utf8.DecodeRuneInString(s)
+ if token.Lookup(s).IsKeyword() || !unicode.IsLetter(r) {
+ return "_" + s
+ }
+ return s
+}
+
+// JSONCamelCase converts a snake_case identifier to a camelCase identifier,
+// according to the protobuf JSON specification.
+func JSONCamelCase(s string) string {
+ var b []byte
+ var wasUnderscore bool
+ for i := 0; i < len(s); i++ { // proto identifiers are always ASCII
+ c := s[i]
+ if c != '_' {
+ if wasUnderscore && isASCIILower(c) {
+ c -= 'a' - 'A' // convert to uppercase
+ }
+ b = append(b, c)
+ }
+ wasUnderscore = c == '_'
+ }
+ return string(b)
+}
+
+// JSONSnakeCase converts a camelCase identifier to a snake_case identifier,
+// according to the protobuf JSON specification.
+func JSONSnakeCase(s string) string {
+ var b []byte
+ for i := 0; i < len(s); i++ { // proto identifiers are always ASCII
+ c := s[i]
+ if isASCIIUpper(c) {
+ b = append(b, '_')
+ c += 'a' - 'A' // convert to lowercase
+ }
+ b = append(b, c)
+ }
+ return string(b)
+}
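+
+// For example (illustrative, following the rules above):
+//
+//	JSONCamelCase("foo_bar_id") // "fooBarId"
+//	JSONSnakeCase("fooBarId")   // "foo_bar_id"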
+
+// MapEntryName derives the name of the map entry message given the field name.
+// See protoc v3.8.0: src/google/protobuf/descriptor.cc:254-276,6057
+func MapEntryName(s string) string {
+ var b []byte
+ upperNext := true
+ for _, c := range s {
+ switch {
+ case c == '_':
+ upperNext = true
+ case upperNext:
+ b = append(b, byte(unicode.ToUpper(c)))
+ upperNext = false
+ default:
+ b = append(b, byte(c))
+ }
+ }
+ b = append(b, "Entry"...)
+ return string(b)
+}
+
+// EnumValueName derives the camel-cased enum value name.
+// See protoc v3.8.0: src/google/protobuf/descriptor.cc:297-313
+func EnumValueName(s string) string {
+ var b []byte
+ upperNext := true
+ for _, c := range s {
+ switch {
+ case c == '_':
+ upperNext = true
+ case upperNext:
+ b = append(b, byte(unicode.ToUpper(c)))
+ upperNext = false
+ default:
+ b = append(b, byte(unicode.ToLower(c)))
+ upperNext = false
+ }
+ }
+ return string(b)
+}
+
+// TrimEnumPrefix trims the enum name prefix from an enum value name,
+// where the prefix is all lowercase without underscores.
+// See protoc v3.8.0: src/google/protobuf/descriptor.cc:330-375
+func TrimEnumPrefix(s, prefix string) string {
+ s0 := s // original input
+ for len(s) > 0 && len(prefix) > 0 {
+ if s[0] == '_' {
+ s = s[1:]
+ continue
+ }
+ if unicode.ToLower(rune(s[0])) != rune(prefix[0]) {
+ return s0 // no prefix match
+ }
+ s, prefix = s[1:], prefix[1:]
+ }
+ if len(prefix) > 0 {
+ return s0 // no prefix match
+ }
+ s = strings.TrimLeft(s, "_")
+ if len(s) == 0 {
+ return s0 // avoid returning empty string
+ }
+ return s
+}
+
+func isASCIILower(c byte) bool {
+ return 'a' <= c && c <= 'z'
+}
+func isASCIIUpper(c byte) bool {
+ return 'A' <= c && c <= 'Z'
+}
+func isASCIIDigit(c byte) bool {
+ return '0' <= c && c <= '9'
+}
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go
new file mode 100644
index 0000000..85e074c
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go
@@ -0,0 +1,27 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build purego appengine
+
+package strs
+
+import pref "google.golang.org/protobuf/reflect/protoreflect"
+
+func UnsafeString(b []byte) string {
+ return string(b)
+}
+
+func UnsafeBytes(s string) []byte {
+ return []byte(s)
+}
+
+type Builder struct{}
+
+func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName {
+ return prefix.Append(name)
+}
+
+func (*Builder) MakeString(b []byte) string {
+ return string(b)
+}
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
new file mode 100644
index 0000000..2160c70
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
@@ -0,0 +1,94 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !purego,!appengine
+
+package strs
+
+import (
+ "unsafe"
+
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type (
+ stringHeader struct {
+ Data unsafe.Pointer
+ Len int
+ }
+ sliceHeader struct {
+ Data unsafe.Pointer
+ Len int
+ Cap int
+ }
+)
+
+// UnsafeString returns an unsafe string reference of b.
+// The caller must treat the input slice as immutable.
+//
+// WARNING: Use carefully. The returned result must not leak to the end user
+// unless the input slice is provably immutable.
+func UnsafeString(b []byte) (s string) {
+ src := (*sliceHeader)(unsafe.Pointer(&b))
+ dst := (*stringHeader)(unsafe.Pointer(&s))
+ dst.Data = src.Data
+ dst.Len = src.Len
+ return s
+}
+
+// UnsafeBytes returns an unsafe bytes slice reference of s.
+// The caller must treat returned slice as immutable.
+//
+// WARNING: Use carefully. The returned result must not leak to the end user.
+func UnsafeBytes(s string) (b []byte) {
+ src := (*stringHeader)(unsafe.Pointer(&s))
+ dst := (*sliceHeader)(unsafe.Pointer(&b))
+ dst.Data = src.Data
+ dst.Len = src.Len
+ dst.Cap = src.Len
+ return b
+}
+
+// Builder builds a set of strings with shared lifetime.
+// This differs from strings.Builder, which is for building a single string.
+type Builder struct {
+ buf []byte
+}
+
+// AppendFullName is equivalent to protoreflect.FullName.Append,
+// but optimized for large batches where each name has a shared lifetime.
+func (sb *Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName {
+ n := len(prefix) + len(".") + len(name)
+ if len(prefix) == 0 {
+ n -= len(".")
+ }
+ sb.grow(n)
+ sb.buf = append(sb.buf, prefix...)
+ sb.buf = append(sb.buf, '.')
+ sb.buf = append(sb.buf, name...)
+ return pref.FullName(sb.last(n))
+}
+
+// MakeString is equivalent to string(b), but optimized for large batches
+// with a shared lifetime.
+func (sb *Builder) MakeString(b []byte) string {
+ sb.grow(len(b))
+ sb.buf = append(sb.buf, b...)
+ return sb.last(len(b))
+}
+
+func (sb *Builder) grow(n int) {
+ if cap(sb.buf)-len(sb.buf) >= n {
+ return
+ }
+
+ // Unlike strings.Builder, we do not need to copy over the contents
+ // of the old buffer since our builder provides no API for
+ // retrieving previously created strings.
+ sb.buf = make([]byte, 2*(cap(sb.buf)+n))
+}
+
+func (sb *Builder) last(n int) string {
+ return UnsafeString(sb.buf[len(sb.buf)-n:])
+}
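+
+// An illustrative sketch of Builder usage (the names are hypothetical):
+//
+//	var sb Builder
+//	full := sb.AppendFullName("example.pkg", "MyMessage") // "example.pkg.MyMessage"
+//	s := sb.MakeString([]byte("abc"))                     // "abc", backed by sb's shared buffer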
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
new file mode 100644
index 0000000..5879131
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -0,0 +1,79 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package version records versioning information about this module.
+package version
+
+import (
+ "fmt"
+ "strings"
+)
+
+// These constants determine the current version of this module.
+//
+//
+// For our release process, we enforce the following rules:
+// * Tagged releases use a tag that is identical to String.
+// * Tagged releases never reference a commit where the String
+// contains "devel".
+// * The set of all commits in this repository where String
+// does not contain "devel" must have a unique String.
+//
+//
+// Steps for tagging a new release:
+// 1. Create a new CL.
+//
+// 2. Update Minor, Patch, and/or PreRelease as necessary.
+// PreRelease must not contain the string "devel".
+//
+// 3. Since the last released minor version, have there been any changes to
+// the generator that relies on new functionality in the runtime?
+// If yes, then increment RequiredGenerated.
+//
+// 4. Since the last released minor version, have there been any changes to
+// the runtime that removes support for old .pb.go source code?
+// If yes, then increment SupportMinimum.
+//
+// 5. Send out the CL for review and submit it.
+// Note that the next CL in step 8 must be submitted after this CL
+// without any other CLs in-between.
+//
+// 6. Tag a new version, where the tag is the current String.
+//
+// 7. Write release notes for all notable changes
+// between this release and the last release.
+//
+// 8. Create a new CL.
+//
+// 9. Update PreRelease to include the string "devel".
+// For example: "" -> "devel" or "rc.1" -> "rc.1.devel"
+//
+// 10. Send out the CL for review and submit it.
+const (
+ Major = 1
+ Minor = 26
+ Patch = 0
+ PreRelease = ""
+)
+
+// String formats the version string for this module in semver format.
+//
+// Examples:
+// v1.20.1
+// v1.21.0-rc.1
+func String() string {
+ v := fmt.Sprintf("v%d.%d.%d", Major, Minor, Patch)
+ if PreRelease != "" {
+ v += "-" + PreRelease
+
+ // TODO: Add metadata about the commit or build hash.
+ // See https://golang.org/issue/29814
+ // See https://golang.org/issue/33533
+ var metadata string
+ if strings.Contains(PreRelease, "devel") && metadata != "" {
+ v += "+" + metadata
+ }
+ }
+ return v
+}
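Because this package is internal and cannot be imported from outside the module, the following standalone sketch simply mirrors the semver formatting performed by String, for illustration only.

package main

import "fmt"

// versionString mirrors the formatting logic of version.String.
func versionString(major, minor, patch int, preRelease string) string {
	v := fmt.Sprintf("v%d.%d.%d", major, minor, patch)
	if preRelease != "" {
		v += "-" + preRelease
	}
	return v
}

func main() {
	fmt.Println(versionString(1, 26, 0, ""))     // v1.26.0
	fmt.Println(versionString(1, 21, 0, "rc.1")) // v1.21.0-rc.1
}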
diff --git a/vendor/google.golang.org/protobuf/proto/checkinit.go b/vendor/google.golang.org/protobuf/proto/checkinit.go
new file mode 100644
index 0000000..3e9a6a2
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/checkinit.go
@@ -0,0 +1,71 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// CheckInitialized returns an error if any required fields in m are not set.
+func CheckInitialized(m Message) error {
+ // Treat a nil message interface as an "untyped" empty message,
+ // which we assume to have no required fields.
+ if m == nil {
+ return nil
+ }
+
+ return checkInitialized(m.ProtoReflect())
+}
+
+// checkInitialized returns an error if any required fields in m are not set.
+func checkInitialized(m protoreflect.Message) error {
+ if methods := protoMethods(m); methods != nil && methods.CheckInitialized != nil {
+ _, err := methods.CheckInitialized(protoiface.CheckInitializedInput{
+ Message: m,
+ })
+ return err
+ }
+ return checkInitializedSlow(m)
+}
+
+func checkInitializedSlow(m protoreflect.Message) error {
+ md := m.Descriptor()
+ fds := md.Fields()
+ for i, nums := 0, md.RequiredNumbers(); i < nums.Len(); i++ {
+ fd := fds.ByNumber(nums.Get(i))
+ if !m.Has(fd) {
+ return errors.RequiredNotSet(string(fd.FullName()))
+ }
+ }
+ var err error
+ m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ switch {
+ case fd.IsList():
+ if fd.Message() == nil {
+ return true
+ }
+ for i, list := 0, v.List(); i < list.Len() && err == nil; i++ {
+ err = checkInitialized(list.Get(i).Message())
+ }
+ case fd.IsMap():
+ if fd.MapValue().Message() == nil {
+ return true
+ }
+ v.Map().Range(func(key protoreflect.MapKey, v protoreflect.Value) bool {
+ err = checkInitialized(v.Message())
+ return err == nil
+ })
+ default:
+ if fd.Message() == nil {
+ return true
+ }
+ err = checkInitialized(v.Message())
+ }
+ return err == nil
+ })
+ return err
+}
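A minimal usage sketch, assuming the well-known Duration type: proto3 messages cannot declare required fields, so CheckInitialized trivially reports nil for them; only proto2 messages with required fields can fail this check.

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	m := durationpb.New(3 * time.Second)
	// Duration is a proto3 message with no required fields,
	// so CheckInitialized reports nil.
	if err := proto.CheckInitialized(m); err != nil {
		fmt.Println("missing required fields:", err)
		return
	}
	fmt.Println("message is initialized")
}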
diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go
new file mode 100644
index 0000000..49f9b8c
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/decode.go
@@ -0,0 +1,278 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/flags"
+ "google.golang.org/protobuf/internal/genid"
+ "google.golang.org/protobuf/internal/pragma"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// UnmarshalOptions configures the unmarshaler.
+//
+// Example usage:
+// err := UnmarshalOptions{DiscardUnknown: true}.Unmarshal(b, m)
+type UnmarshalOptions struct {
+ pragma.NoUnkeyedLiterals
+
+ // Merge merges the input into the destination message.
+ // The default behavior is to always reset the message before unmarshaling,
+ // unless Merge is specified.
+ Merge bool
+
+ // AllowPartial accepts input for messages that will result in missing
+ // required fields. If AllowPartial is false (the default), Unmarshal will
+ // return an error if there are any missing required fields.
+ AllowPartial bool
+
+ // If DiscardUnknown is set, unknown fields are ignored.
+ DiscardUnknown bool
+
+ // Resolver is used for looking up types when unmarshaling extension fields.
+ // If nil, this defaults to using protoregistry.GlobalTypes.
+ Resolver interface {
+ FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error)
+ FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error)
+ }
+}
+
+// Unmarshal parses the wire-format message in b and places the result in m.
+// The provided message must be mutable (e.g., a non-nil pointer to a message).
+func Unmarshal(b []byte, m Message) error {
+ _, err := UnmarshalOptions{}.unmarshal(b, m.ProtoReflect())
+ return err
+}
+
+// Unmarshal parses the wire-format message in b and places the result in m.
+// The provided message must be mutable (e.g., a non-nil pointer to a message).
+func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error {
+ _, err := o.unmarshal(b, m.ProtoReflect())
+ return err
+}
+
+// UnmarshalState parses a wire-format message and places the result in m.
+//
+// This method permits fine-grained control over the unmarshaler.
+// Most users should use Unmarshal instead.
+func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) {
+ return o.unmarshal(in.Buf, in.Message)
+}
+
+// unmarshal is a centralized function that all unmarshal operations go through.
+// For profiling purposes, avoid changing the name of this function or
+// introducing other code paths for unmarshal that do not go through this.
+func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out protoiface.UnmarshalOutput, err error) {
+ if o.Resolver == nil {
+ o.Resolver = protoregistry.GlobalTypes
+ }
+ if !o.Merge {
+ Reset(m.Interface())
+ }
+ allowPartial := o.AllowPartial
+ o.Merge = true
+ o.AllowPartial = true
+ methods := protoMethods(m)
+ if methods != nil && methods.Unmarshal != nil &&
+ !(o.DiscardUnknown && methods.Flags&protoiface.SupportUnmarshalDiscardUnknown == 0) {
+ in := protoiface.UnmarshalInput{
+ Message: m,
+ Buf: b,
+ Resolver: o.Resolver,
+ }
+ if o.DiscardUnknown {
+ in.Flags |= protoiface.UnmarshalDiscardUnknown
+ }
+ out, err = methods.Unmarshal(in)
+ } else {
+ err = o.unmarshalMessageSlow(b, m)
+ }
+ if err != nil {
+ return out, err
+ }
+ if allowPartial || (out.Flags&protoiface.UnmarshalInitialized != 0) {
+ return out, nil
+ }
+ return out, checkInitialized(m)
+}
+
+func (o UnmarshalOptions) unmarshalMessage(b []byte, m protoreflect.Message) error {
+ _, err := o.unmarshal(b, m)
+ return err
+}
+
+func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) error {
+ md := m.Descriptor()
+ if messageset.IsMessageSet(md) {
+ return o.unmarshalMessageSet(b, m)
+ }
+ fields := md.Fields()
+ for len(b) > 0 {
+ // Parse the tag (field number and wire type).
+ num, wtyp, tagLen := protowire.ConsumeTag(b)
+ if tagLen < 0 {
+ return errDecode
+ }
+ if num > protowire.MaxValidNumber {
+ return errDecode
+ }
+
+ // Find the field descriptor for this field number.
+ fd := fields.ByNumber(num)
+ if fd == nil && md.ExtensionRanges().Has(num) {
+ extType, err := o.Resolver.FindExtensionByNumber(md.FullName(), num)
+ if err != nil && err != protoregistry.NotFound {
+ return errors.New("%v: unable to resolve extension %v: %v", md.FullName(), num, err)
+ }
+ if extType != nil {
+ fd = extType.TypeDescriptor()
+ }
+ }
+ var err error
+ if fd == nil {
+ err = errUnknown
+ } else if flags.ProtoLegacy {
+ if fd.IsWeak() && fd.Message().IsPlaceholder() {
+ err = errUnknown // weak referent is not linked in
+ }
+ }
+
+ // Parse the field value.
+ var valLen int
+ switch {
+ case err != nil:
+ case fd.IsList():
+ valLen, err = o.unmarshalList(b[tagLen:], wtyp, m.Mutable(fd).List(), fd)
+ case fd.IsMap():
+ valLen, err = o.unmarshalMap(b[tagLen:], wtyp, m.Mutable(fd).Map(), fd)
+ default:
+ valLen, err = o.unmarshalSingular(b[tagLen:], wtyp, m, fd)
+ }
+ if err != nil {
+ if err != errUnknown {
+ return err
+ }
+ valLen = protowire.ConsumeFieldValue(num, wtyp, b[tagLen:])
+ if valLen < 0 {
+ return errDecode
+ }
+ if !o.DiscardUnknown {
+ m.SetUnknown(append(m.GetUnknown(), b[:tagLen+valLen]...))
+ }
+ }
+ b = b[tagLen+valLen:]
+ }
+ return nil
+}
+
+func (o UnmarshalOptions) unmarshalSingular(b []byte, wtyp protowire.Type, m protoreflect.Message, fd protoreflect.FieldDescriptor) (n int, err error) {
+ v, n, err := o.unmarshalScalar(b, wtyp, fd)
+ if err != nil {
+ return 0, err
+ }
+ switch fd.Kind() {
+ case protoreflect.GroupKind, protoreflect.MessageKind:
+ m2 := m.Mutable(fd).Message()
+ if err := o.unmarshalMessage(v.Bytes(), m2); err != nil {
+ return n, err
+ }
+ default:
+ // Non-message scalars replace the previous value.
+ m.Set(fd, v)
+ }
+ return n, nil
+}
+
+func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv protoreflect.Map, fd protoreflect.FieldDescriptor) (n int, err error) {
+ if wtyp != protowire.BytesType {
+ return 0, errUnknown
+ }
+ b, n = protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ var (
+ keyField = fd.MapKey()
+ valField = fd.MapValue()
+ key protoreflect.Value
+ val protoreflect.Value
+ haveKey bool
+ haveVal bool
+ )
+ switch valField.Kind() {
+ case protoreflect.GroupKind, protoreflect.MessageKind:
+ val = mapv.NewValue()
+ }
+ // Map entries are represented as a two-element message with fields
+ // containing the key and value.
+ for len(b) > 0 {
+ num, wtyp, n := protowire.ConsumeTag(b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ if num > protowire.MaxValidNumber {
+ return 0, errDecode
+ }
+ b = b[n:]
+ err = errUnknown
+ switch num {
+ case genid.MapEntry_Key_field_number:
+ key, n, err = o.unmarshalScalar(b, wtyp, keyField)
+ if err != nil {
+ break
+ }
+ haveKey = true
+ case genid.MapEntry_Value_field_number:
+ var v protoreflect.Value
+ v, n, err = o.unmarshalScalar(b, wtyp, valField)
+ if err != nil {
+ break
+ }
+ switch valField.Kind() {
+ case protoreflect.GroupKind, protoreflect.MessageKind:
+ if err := o.unmarshalMessage(v.Bytes(), val.Message()); err != nil {
+ return 0, err
+ }
+ default:
+ val = v
+ }
+ haveVal = true
+ }
+ if err == errUnknown {
+ n = protowire.ConsumeFieldValue(num, wtyp, b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ } else if err != nil {
+ return 0, err
+ }
+ b = b[n:]
+ }
+ // Every map entry should have entries for key and value, but this is not strictly required.
+ if !haveKey {
+ key = keyField.Default()
+ }
+ if !haveVal {
+ switch valField.Kind() {
+ case protoreflect.GroupKind, protoreflect.MessageKind:
+ default:
+ val = valField.Default()
+ }
+ }
+ mapv.Set(key.MapKey(), val)
+ return n, nil
+}
+
+// errUnknown is used internally to indicate fields which should be added
+// to the unknown field set of a message. It is never returned from an exported
+// function.
+var errUnknown = errors.New("BUG: internal error (unknown)")
+
+var errDecode = errors.New("cannot parse invalid wire-format data")
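A minimal sketch of the DiscardUnknown option, assuming the well-known Empty type: a field number that the target message does not define is either retained in the unknown field set or dropped, depending on the option.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	// Hand-craft a buffer containing one varint field (number 999)
	// that google.protobuf.Empty does not define.
	b := protowire.AppendTag(nil, 999, protowire.VarintType)
	b = protowire.AppendVarint(b, 42)

	kept := &emptypb.Empty{}
	_ = proto.Unmarshal(b, kept)
	fmt.Println(len(kept.ProtoReflect().GetUnknown()) > 0) // true: retained as unknown

	dropped := &emptypb.Empty{}
	_ = proto.UnmarshalOptions{DiscardUnknown: true}.Unmarshal(b, dropped)
	fmt.Println(len(dropped.ProtoReflect().GetUnknown())) // 0: discarded
}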
diff --git a/vendor/google.golang.org/protobuf/proto/decode_gen.go b/vendor/google.golang.org/protobuf/proto/decode_gen.go
new file mode 100644
index 0000000..301eeb2
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/decode_gen.go
@@ -0,0 +1,603 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-types. DO NOT EDIT.
+
+package proto
+
+import (
+ "math"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/strs"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// unmarshalScalar decodes a value of the given kind.
+//
+// Message values are decoded into a []byte which aliases the input data.
+func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd protoreflect.FieldDescriptor) (val protoreflect.Value, n int, err error) {
+ switch fd.Kind() {
+ case protoreflect.BoolKind:
+ if wtyp != protowire.VarintType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return val, 0, errDecode
+ }
+ return protoreflect.ValueOfBool(protowire.DecodeBool(v)), n, nil
+ case protoreflect.EnumKind:
+ if wtyp != protowire.VarintType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return val, 0, errDecode
+ }
+ return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), n, nil
+ case protoreflect.Int32Kind:
+ if wtyp != protowire.VarintType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return val, 0, errDecode
+ }
+ return protoreflect.ValueOfInt32(int32(v)), n, nil
+ case protoreflect.Sint32Kind:
+ if wtyp != protowire.VarintType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return val, 0, errDecode
+ }
+ return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), n, nil
+ case protoreflect.Uint32Kind:
+ if wtyp != protowire.VarintType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return val, 0, errDecode
+ }
+ return protoreflect.ValueOfUint32(uint32(v)), n, nil
+ case protoreflect.Int64Kind:
+ if wtyp != protowire.VarintType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return val, 0, errDecode
+ }
+ return protoreflect.ValueOfInt64(int64(v)), n, nil
+ case protoreflect.Sint64Kind:
+ if wtyp != protowire.VarintType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return val, 0, errDecode
+ }
+ return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), n, nil
+ case protoreflect.Uint64Kind:
+ if wtyp != protowire.VarintType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return val, 0, errDecode
+ }
+ return protoreflect.ValueOfUint64(v), n, nil
+ case protoreflect.Sfixed32Kind:
+ if wtyp != protowire.Fixed32Type {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return val, 0, errDecode
+ }
+ return protoreflect.ValueOfInt32(int32(v)), n, nil
+ case protoreflect.Fixed32Kind:
+ if wtyp != protowire.Fixed32Type {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return val, 0, errDecode
+ }
+ return protoreflect.ValueOfUint32(uint32(v)), n, nil
+ case protoreflect.FloatKind:
+ if wtyp != protowire.Fixed32Type {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return val, 0, errDecode
+ }
+ return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), n, nil
+ case protoreflect.Sfixed64Kind:
+ if wtyp != protowire.Fixed64Type {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return val, 0, errDecode
+ }
+ return protoreflect.ValueOfInt64(int64(v)), n, nil
+ case protoreflect.Fixed64Kind:
+ if wtyp != protowire.Fixed64Type {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return val, 0, errDecode
+ }
+ return protoreflect.ValueOfUint64(v), n, nil
+ case protoreflect.DoubleKind:
+ if wtyp != protowire.Fixed64Type {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return val, 0, errDecode
+ }
+ return protoreflect.ValueOfFloat64(math.Float64frombits(v)), n, nil
+ case protoreflect.StringKind:
+ if wtyp != protowire.BytesType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return val, 0, errDecode
+ }
+ if strs.EnforceUTF8(fd) && !utf8.Valid(v) {
+ return protoreflect.Value{}, 0, errors.InvalidUTF8(string(fd.FullName()))
+ }
+ return protoreflect.ValueOfString(string(v)), n, nil
+ case protoreflect.BytesKind:
+ if wtyp != protowire.BytesType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return val, 0, errDecode
+ }
+ return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), n, nil
+ case protoreflect.MessageKind:
+ if wtyp != protowire.BytesType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return val, 0, errDecode
+ }
+ return protoreflect.ValueOfBytes(v), n, nil
+ case protoreflect.GroupKind:
+ if wtyp != protowire.StartGroupType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeGroup(fd.Number(), b)
+ if n < 0 {
+ return val, 0, errDecode
+ }
+ return protoreflect.ValueOfBytes(v), n, nil
+ default:
+ return val, 0, errUnknown
+ }
+}
+
+func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list protoreflect.List, fd protoreflect.FieldDescriptor) (n int, err error) {
+ switch fd.Kind() {
+ case protoreflect.BoolKind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeVarint(buf)
+ if n < 0 {
+ return 0, errDecode
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v)))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.VarintType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v)))
+ return n, nil
+ case protoreflect.EnumKind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeVarint(buf)
+ if n < 0 {
+ return 0, errDecode
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.VarintType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)))
+ return n, nil
+ case protoreflect.Int32Kind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeVarint(buf)
+ if n < 0 {
+ return 0, errDecode
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfInt32(int32(v)))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.VarintType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ list.Append(protoreflect.ValueOfInt32(int32(v)))
+ return n, nil
+ case protoreflect.Sint32Kind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeVarint(buf)
+ if n < 0 {
+ return 0, errDecode
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.VarintType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))))
+ return n, nil
+ case protoreflect.Uint32Kind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeVarint(buf)
+ if n < 0 {
+ return 0, errDecode
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfUint32(uint32(v)))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.VarintType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ list.Append(protoreflect.ValueOfUint32(uint32(v)))
+ return n, nil
+ case protoreflect.Int64Kind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeVarint(buf)
+ if n < 0 {
+ return 0, errDecode
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfInt64(int64(v)))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.VarintType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ list.Append(protoreflect.ValueOfInt64(int64(v)))
+ return n, nil
+ case protoreflect.Sint64Kind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeVarint(buf)
+ if n < 0 {
+ return 0, errDecode
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.VarintType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)))
+ return n, nil
+ case protoreflect.Uint64Kind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeVarint(buf)
+ if n < 0 {
+ return 0, errDecode
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfUint64(v))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.VarintType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ list.Append(protoreflect.ValueOfUint64(v))
+ return n, nil
+ case protoreflect.Sfixed32Kind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeFixed32(buf)
+ if n < 0 {
+ return 0, errDecode
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfInt32(int32(v)))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.Fixed32Type {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ list.Append(protoreflect.ValueOfInt32(int32(v)))
+ return n, nil
+ case protoreflect.Fixed32Kind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeFixed32(buf)
+ if n < 0 {
+ return 0, errDecode
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfUint32(uint32(v)))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.Fixed32Type {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ list.Append(protoreflect.ValueOfUint32(uint32(v)))
+ return n, nil
+ case protoreflect.FloatKind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeFixed32(buf)
+ if n < 0 {
+ return 0, errDecode
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.Fixed32Type {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))))
+ return n, nil
+ case protoreflect.Sfixed64Kind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeFixed64(buf)
+ if n < 0 {
+ return 0, errDecode
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfInt64(int64(v)))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.Fixed64Type {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ list.Append(protoreflect.ValueOfInt64(int64(v)))
+ return n, nil
+ case protoreflect.Fixed64Kind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeFixed64(buf)
+ if n < 0 {
+ return 0, errDecode
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfUint64(v))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.Fixed64Type {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ list.Append(protoreflect.ValueOfUint64(v))
+ return n, nil
+ case protoreflect.DoubleKind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeFixed64(buf)
+ if n < 0 {
+ return 0, errDecode
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v)))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.Fixed64Type {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v)))
+ return n, nil
+ case protoreflect.StringKind:
+ if wtyp != protowire.BytesType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ if strs.EnforceUTF8(fd) && !utf8.Valid(v) {
+ return 0, errors.InvalidUTF8(string(fd.FullName()))
+ }
+ list.Append(protoreflect.ValueOfString(string(v)))
+ return n, nil
+ case protoreflect.BytesKind:
+ if wtyp != protowire.BytesType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...)))
+ return n, nil
+ case protoreflect.MessageKind:
+ if wtyp != protowire.BytesType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ m := list.NewElement()
+ if err := o.unmarshalMessage(v, m.Message()); err != nil {
+ return 0, err
+ }
+ list.Append(m)
+ return n, nil
+ case protoreflect.GroupKind:
+ if wtyp != protowire.StartGroupType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeGroup(fd.Number(), b)
+ if n < 0 {
+ return 0, errDecode
+ }
+ m := list.NewElement()
+ if err := o.unmarshalMessage(v, m.Message()); err != nil {
+ return 0, err
+ }
+ list.Append(m)
+ return n, nil
+ default:
+ return 0, errUnknown
+ }
+}
+
+// We append to an empty array rather than a nil []byte to get non-nil zero-length byte slices.
+var emptyBuf [0]byte
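As the generated unmarshalList shows, varint-family kinds accept both packed (length-delimited) and unpacked encodings. The sketch below uses only protowire to frame a packed repeated varint field and then consume it element by element, mirroring the inner loop above.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Pack three varints into one length-delimited payload for field number 1.
	var payload []byte
	for _, v := range []uint64{3, 270, 86942} {
		payload = protowire.AppendVarint(payload, v)
	}
	b := protowire.AppendTag(nil, 1, protowire.BytesType)
	b = protowire.AppendBytes(b, payload)

	// Decode: consume the tag, then the packed payload, then each element.
	_, _, n := protowire.ConsumeTag(b)
	buf, _ := protowire.ConsumeBytes(b[n:])
	for len(buf) > 0 {
		v, m := protowire.ConsumeVarint(buf)
		if m < 0 {
			panic("invalid wire-format data")
		}
		fmt.Println(v)
		buf = buf[m:]
	}
}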
diff --git a/vendor/google.golang.org/protobuf/proto/doc.go b/vendor/google.golang.org/protobuf/proto/doc.go
new file mode 100644
index 0000000..c52d8c4
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/doc.go
@@ -0,0 +1,94 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package proto provides functions operating on protocol buffer messages.
+//
+// For documentation on protocol buffers in general, see:
+//
+// https://developers.google.com/protocol-buffers
+//
+// For a tutorial on using protocol buffers with Go, see:
+//
+// https://developers.google.com/protocol-buffers/docs/gotutorial
+//
+// For a guide to generated Go protocol buffer code, see:
+//
+// https://developers.google.com/protocol-buffers/docs/reference/go-generated
+//
+//
+// Binary serialization
+//
+// This package contains functions to convert to and from the wire format,
+// an efficient binary serialization of protocol buffers.
+//
+// • Size reports the size of a message in the wire format.
+//
+// • Marshal converts a message to the wire format.
+// The MarshalOptions type provides more control over wire marshaling.
+//
+// • Unmarshal converts a message from the wire format.
+// The UnmarshalOptions type provides more control over wire unmarshaling.
+//
+//
+// Basic message operations
+//
+// • Clone makes a deep copy of a message.
+//
+// • Merge merges the content of a message into another.
+//
+// • Equal compares two messages. For more control over comparisons
+// and detailed reporting of differences, see package
+// "google.golang.org/protobuf/testing/protocmp".
+//
+// • Reset clears the content of a message.
+//
+// • CheckInitialized reports whether all required fields in a message are set.
+//
+//
+// Optional scalar constructors
+//
+// The API for some generated messages represents optional scalar fields
+// as pointers to a value. For example, an optional string field has the
+// Go type *string.
+//
+// • Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, and String
+// take a value and return a pointer to a new instance of it,
+// to simplify construction of optional field values.
+//
+// Generated enum types usually have an Enum method which performs the
+// same operation.
+//
+// Optional scalar fields are only supported in proto2.
+//
+//
+// Extension accessors
+//
+// • HasExtension, GetExtension, SetExtension, and ClearExtension
+// access extension field values in a protocol buffer message.
+//
+// Extension fields are only supported in proto2.
+//
+//
+// Related packages
+//
+// • Package "google.golang.org/protobuf/encoding/protojson" converts messages to
+// and from JSON.
+//
+// • Package "google.golang.org/protobuf/encoding/prototext" converts messages to
+// and from the text format.
+//
+// • Package "google.golang.org/protobuf/reflect/protoreflect" provides a
+// reflection interface for protocol buffer data types.
+//
+// • Package "google.golang.org/protobuf/testing/protocmp" provides features
+// to compare protocol buffer messages with the "github.com/google/go-cmp/cmp"
+// package.
+//
+// • Package "google.golang.org/protobuf/types/dynamicpb" provides a dynamic
+// message type, suitable for working with messages where the protocol buffer
+// type is only known at runtime.
+//
+// This module contains additional packages for more specialized use cases.
+// Consult the individual package documentation for details.
+package proto
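A small sketch of the optional scalar constructors mentioned in the package documentation: each helper copies its argument and returns a pointer, matching the *T representation that proto2 generated code uses for optional scalar fields.

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
)

func main() {
	name := proto.String("alice") // *string
	age := proto.Int32(30)        // *int32
	ok := proto.Bool(true)        // *bool
	fmt.Println(*name, *age, *ok)
}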
diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go
new file mode 100644
index 0000000..d18239c
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/encode.go
@@ -0,0 +1,319 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/internal/order"
+ "google.golang.org/protobuf/internal/pragma"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// MarshalOptions configures the marshaler.
+//
+// Example usage:
+// b, err := MarshalOptions{Deterministic: true}.Marshal(m)
+type MarshalOptions struct {
+ pragma.NoUnkeyedLiterals
+
+ // AllowPartial allows messages that have missing required fields to marshal
+ // without returning an error. If AllowPartial is false (the default),
+ // Marshal will return an error if there are any missing required fields.
+ AllowPartial bool
+
+ // Deterministic controls whether the same message will always be
+ // serialized to the same bytes within the same binary.
+ //
+ // Setting this option guarantees that repeated serialization of
+ // the same message will return the same bytes, and that different
+ // processes of the same binary (which may be executing on different
+ // machines) will serialize equal messages to the same bytes.
+ // It has no effect on the resulting size of the encoded message compared
+ // to a non-deterministic marshal.
+ //
+ // Note that the deterministic serialization is NOT canonical across
+ // languages. It is not guaranteed to remain stable over time. It is
+ // unstable across different builds with schema changes due to unknown
+ // fields. Users who need canonical serialization (e.g., persistent
+ // storage in a canonical form, fingerprinting, etc.) must define
+ // their own canonicalization specification and implement their own
+ // serializer rather than relying on this API.
+ //
+ // If deterministic serialization is requested, map entries will be
+ // sorted by keys in lexicographical order. This is an implementation
+ // detail and subject to change.
+ Deterministic bool
+
+ // UseCachedSize indicates that the result of a previous Size call
+ // may be reused.
+ //
+ // Setting this option asserts that:
+ //
+ // 1. Size has previously been called on this message with identical
+ // options (except for UseCachedSize itself).
+ //
+ // 2. The message and all its submessages have not changed in any
+ // way since the Size call.
+ //
+ // If either of these invariants is violated,
+ // the results are undefined and may include panics or corrupted output.
+ //
+ // Implementations MAY take this option into account to provide
+ // better performance, but there is no guarantee that they will do so.
+ // There is absolutely no guarantee that Size followed by Marshal with
+ // UseCachedSize set will perform equivalently to Marshal alone.
+ UseCachedSize bool
+}
+
+// Marshal returns the wire-format encoding of m.
+func Marshal(m Message) ([]byte, error) {
+ // Treat nil message interface as an empty message; nothing to output.
+ if m == nil {
+ return nil, nil
+ }
+
+ out, err := MarshalOptions{}.marshal(nil, m.ProtoReflect())
+ if len(out.Buf) == 0 && err == nil {
+ out.Buf = emptyBytesForMessage(m)
+ }
+ return out.Buf, err
+}
+
+// Marshal returns the wire-format encoding of m.
+func (o MarshalOptions) Marshal(m Message) ([]byte, error) {
+ // Treat nil message interface as an empty message; nothing to output.
+ if m == nil {
+ return nil, nil
+ }
+
+ out, err := o.marshal(nil, m.ProtoReflect())
+ if len(out.Buf) == 0 && err == nil {
+ out.Buf = emptyBytesForMessage(m)
+ }
+ return out.Buf, err
+}
+
+// emptyBytesForMessage returns a nil buffer if and only if m is invalid,
+// otherwise it returns a non-nil empty buffer.
+//
+// This is to assist the edge-case where user-code does the following:
+// m1.OptionalBytes, _ = proto.Marshal(m2)
+// where they expect the proto2 "optional_bytes" field to be populated
+// if and only if m2 is a valid message.
+func emptyBytesForMessage(m Message) []byte {
+ if m == nil || !m.ProtoReflect().IsValid() {
+ return nil
+ }
+ return emptyBuf[:]
+}
+
+// MarshalAppend appends the wire-format encoding of m to b,
+// returning the result.
+func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) {
+ // Treat nil message interface as an empty message; nothing to append.
+ if m == nil {
+ return b, nil
+ }
+
+ out, err := o.marshal(b, m.ProtoReflect())
+ return out.Buf, err
+}
+
+// MarshalState returns the wire-format encoding of a message.
+//
+// This method permits fine-grained control over the marshaler.
+// Most users should use Marshal instead.
+func (o MarshalOptions) MarshalState(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) {
+ return o.marshal(in.Buf, in.Message)
+}
+
+// marshal is a centralized function that all marshal operations go through.
+// For profiling purposes, avoid changing the name of this function or
+// introducing other code paths for marshal that do not go through this.
+func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoiface.MarshalOutput, err error) {
+ allowPartial := o.AllowPartial
+ o.AllowPartial = true
+ if methods := protoMethods(m); methods != nil && methods.Marshal != nil &&
+ !(o.Deterministic && methods.Flags&protoiface.SupportMarshalDeterministic == 0) {
+ in := protoiface.MarshalInput{
+ Message: m,
+ Buf: b,
+ }
+ if o.Deterministic {
+ in.Flags |= protoiface.MarshalDeterministic
+ }
+ if o.UseCachedSize {
+ in.Flags |= protoiface.MarshalUseCachedSize
+ }
+ if methods.Size != nil {
+ sout := methods.Size(protoiface.SizeInput{
+ Message: m,
+ Flags: in.Flags,
+ })
+ if cap(b) < len(b)+sout.Size {
+ in.Buf = make([]byte, len(b), growcap(cap(b), len(b)+sout.Size))
+ copy(in.Buf, b)
+ }
+ in.Flags |= protoiface.MarshalUseCachedSize
+ }
+ out, err = methods.Marshal(in)
+ } else {
+ out.Buf, err = o.marshalMessageSlow(b, m)
+ }
+ if err != nil {
+ return out, err
+ }
+ if allowPartial {
+ return out, nil
+ }
+ return out, checkInitialized(m)
+}
+
+func (o MarshalOptions) marshalMessage(b []byte, m protoreflect.Message) ([]byte, error) {
+ out, err := o.marshal(b, m)
+ return out.Buf, err
+}
+
+// growcap scales up the capacity of a slice.
+//
+// Given a slice with a current capacity of oldcap and a desired
+// capacity of wantcap, growcap returns a new capacity >= wantcap.
+//
+// The algorithm is mostly identical to the one used by append as of Go 1.14.
+func growcap(oldcap, wantcap int) (newcap int) {
+ if wantcap > oldcap*2 {
+ newcap = wantcap
+ } else if oldcap < 1024 {
+ // The Go 1.14 runtime takes this case when len(s) < 1024,
+ // not when cap(s) < 1024. The difference doesn't seem
+ // significant here.
+ newcap = oldcap * 2
+ } else {
+ newcap = oldcap
+ for 0 < newcap && newcap < wantcap {
+ newcap += newcap / 4
+ }
+ if newcap <= 0 {
+ newcap = wantcap
+ }
+ }
+ return newcap
+}
+
+func (o MarshalOptions) marshalMessageSlow(b []byte, m protoreflect.Message) ([]byte, error) {
+ if messageset.IsMessageSet(m.Descriptor()) {
+ return o.marshalMessageSet(b, m)
+ }
+ fieldOrder := order.AnyFieldOrder
+ if o.Deterministic {
+ // TODO: This should use a more natural ordering like NumberFieldOrder,
+ // but doing so breaks golden tests that make invalid assumptions about
+ // output stability of this implementation.
+ fieldOrder = order.LegacyFieldOrder
+ }
+ var err error
+ order.RangeFields(m, fieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ b, err = o.marshalField(b, fd, v)
+ return err == nil
+ })
+ if err != nil {
+ return b, err
+ }
+ b = append(b, m.GetUnknown()...)
+ return b, nil
+}
+
+func (o MarshalOptions) marshalField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) {
+ switch {
+ case fd.IsList():
+ return o.marshalList(b, fd, value.List())
+ case fd.IsMap():
+ return o.marshalMap(b, fd, value.Map())
+ default:
+ b = protowire.AppendTag(b, fd.Number(), wireTypes[fd.Kind()])
+ return o.marshalSingular(b, fd, value)
+ }
+}
+
+func (o MarshalOptions) marshalList(b []byte, fd protoreflect.FieldDescriptor, list protoreflect.List) ([]byte, error) {
+ if fd.IsPacked() && list.Len() > 0 {
+ b = protowire.AppendTag(b, fd.Number(), protowire.BytesType)
+ b, pos := appendSpeculativeLength(b)
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ var err error
+ b, err = o.marshalSingular(b, fd, list.Get(i))
+ if err != nil {
+ return b, err
+ }
+ }
+ b = finishSpeculativeLength(b, pos)
+ return b, nil
+ }
+
+ kind := fd.Kind()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ var err error
+ b = protowire.AppendTag(b, fd.Number(), wireTypes[kind])
+ b, err = o.marshalSingular(b, fd, list.Get(i))
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+}
+
+func (o MarshalOptions) marshalMap(b []byte, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) ([]byte, error) {
+ keyf := fd.MapKey()
+ valf := fd.MapValue()
+ keyOrder := order.AnyKeyOrder
+ if o.Deterministic {
+ keyOrder = order.GenericKeyOrder
+ }
+ var err error
+ order.RangeEntries(mapv, keyOrder, func(key protoreflect.MapKey, value protoreflect.Value) bool {
+ b = protowire.AppendTag(b, fd.Number(), protowire.BytesType)
+ var pos int
+ b, pos = appendSpeculativeLength(b)
+
+ b, err = o.marshalField(b, keyf, key.Value())
+ if err != nil {
+ return false
+ }
+ b, err = o.marshalField(b, valf, value)
+ if err != nil {
+ return false
+ }
+ b = finishSpeculativeLength(b, pos)
+ return true
+ })
+ return b, err
+}
+
+// When encoding length-prefixed fields, we speculatively set aside some number of bytes
+// for the length, encode the data, and then encode the length (shifting the data if necessary
+// to make room).
+const speculativeLength = 1
+
+func appendSpeculativeLength(b []byte) ([]byte, int) {
+ pos := len(b)
+ b = append(b, "\x00\x00\x00\x00"[:speculativeLength]...)
+ return b, pos
+}
+
+func finishSpeculativeLength(b []byte, pos int) []byte {
+ mlen := len(b) - pos - speculativeLength
+ msiz := protowire.SizeVarint(uint64(mlen))
+ if msiz != speculativeLength {
+ for i := 0; i < msiz-speculativeLength; i++ {
+ b = append(b, 0)
+ }
+ copy(b[pos+msiz:], b[pos+speculativeLength:])
+ b = b[:pos+msiz+mlen]
+ }
+ protowire.AppendVarint(b[:pos], uint64(mlen))
+ return b
+}
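A minimal sketch of the Deterministic option, assuming the well-known Struct type (which is backed by a map field): with the option set, repeated marshaling within the same binary yields byte-identical output because map entries are emitted in sorted key order.

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	s, err := structpb.NewStruct(map[string]interface{}{"b": 2, "a": 1, "c": 3})
	if err != nil {
		panic(err)
	}
	opts := proto.MarshalOptions{Deterministic: true}
	b1, _ := opts.Marshal(s)
	b2, _ := opts.Marshal(s)
	fmt.Println(string(b1) == string(b2)) // true: identical bytes on every call
}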
diff --git a/vendor/google.golang.org/protobuf/proto/encode_gen.go b/vendor/google.golang.org/protobuf/proto/encode_gen.go
new file mode 100644
index 0000000..185dacf
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/encode_gen.go
@@ -0,0 +1,97 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-types. DO NOT EDIT.
+
+package proto
+
+import (
+ "math"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/strs"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+var wireTypes = map[protoreflect.Kind]protowire.Type{
+ protoreflect.BoolKind: protowire.VarintType,
+ protoreflect.EnumKind: protowire.VarintType,
+ protoreflect.Int32Kind: protowire.VarintType,
+ protoreflect.Sint32Kind: protowire.VarintType,
+ protoreflect.Uint32Kind: protowire.VarintType,
+ protoreflect.Int64Kind: protowire.VarintType,
+ protoreflect.Sint64Kind: protowire.VarintType,
+ protoreflect.Uint64Kind: protowire.VarintType,
+ protoreflect.Sfixed32Kind: protowire.Fixed32Type,
+ protoreflect.Fixed32Kind: protowire.Fixed32Type,
+ protoreflect.FloatKind: protowire.Fixed32Type,
+ protoreflect.Sfixed64Kind: protowire.Fixed64Type,
+ protoreflect.Fixed64Kind: protowire.Fixed64Type,
+ protoreflect.DoubleKind: protowire.Fixed64Type,
+ protoreflect.StringKind: protowire.BytesType,
+ protoreflect.BytesKind: protowire.BytesType,
+ protoreflect.MessageKind: protowire.BytesType,
+ protoreflect.GroupKind: protowire.StartGroupType,
+}
+
+func (o MarshalOptions) marshalSingular(b []byte, fd protoreflect.FieldDescriptor, v protoreflect.Value) ([]byte, error) {
+ switch fd.Kind() {
+ case protoreflect.BoolKind:
+ b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool()))
+ case protoreflect.EnumKind:
+ b = protowire.AppendVarint(b, uint64(v.Enum()))
+ case protoreflect.Int32Kind:
+ b = protowire.AppendVarint(b, uint64(int32(v.Int())))
+ case protoreflect.Sint32Kind:
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int()))))
+ case protoreflect.Uint32Kind:
+ b = protowire.AppendVarint(b, uint64(uint32(v.Uint())))
+ case protoreflect.Int64Kind:
+ b = protowire.AppendVarint(b, uint64(v.Int()))
+ case protoreflect.Sint64Kind:
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int()))
+ case protoreflect.Uint64Kind:
+ b = protowire.AppendVarint(b, v.Uint())
+ case protoreflect.Sfixed32Kind:
+ b = protowire.AppendFixed32(b, uint32(v.Int()))
+ case protoreflect.Fixed32Kind:
+ b = protowire.AppendFixed32(b, uint32(v.Uint()))
+ case protoreflect.FloatKind:
+ b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float())))
+ case protoreflect.Sfixed64Kind:
+ b = protowire.AppendFixed64(b, uint64(v.Int()))
+ case protoreflect.Fixed64Kind:
+ b = protowire.AppendFixed64(b, v.Uint())
+ case protoreflect.DoubleKind:
+ b = protowire.AppendFixed64(b, math.Float64bits(v.Float()))
+ case protoreflect.StringKind:
+ if strs.EnforceUTF8(fd) && !utf8.ValidString(v.String()) {
+ return b, errors.InvalidUTF8(string(fd.FullName()))
+ }
+ b = protowire.AppendString(b, v.String())
+ case protoreflect.BytesKind:
+ b = protowire.AppendBytes(b, v.Bytes())
+ case protoreflect.MessageKind:
+ var pos int
+ var err error
+ b, pos = appendSpeculativeLength(b)
+ b, err = o.marshalMessage(b, v.Message())
+ if err != nil {
+ return b, err
+ }
+ b = finishSpeculativeLength(b, pos)
+ case protoreflect.GroupKind:
+ var err error
+ b, err = o.marshalMessage(b, v.Message())
+ if err != nil {
+ return b, err
+ }
+ b = protowire.AppendVarint(b, protowire.EncodeTag(fd.Number(), protowire.EndGroupType))
+ default:
+ return b, errors.New("invalid kind %v", fd.Kind())
+ }
+ return b, nil
+}
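The generated marshalSingular maps each field kind onto a protowire append call; the sketch below encodes a single bool field (number 1) by hand with the same primitives.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Bool uses the varint wire type; the value is encoded as 0 or 1.
	b := protowire.AppendTag(nil, 1, protowire.VarintType)
	b = protowire.AppendVarint(b, protowire.EncodeBool(true))
	fmt.Printf("% x\n", b) // 08 01
}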
diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go
new file mode 100644
index 0000000..4dba2b9
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/equal.go
@@ -0,0 +1,167 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "bytes"
+ "math"
+ "reflect"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// Equal reports whether two messages are equal.
+// If two messages marshal to the same bytes under deterministic serialization,
+// then Equal is guaranteed to report true.
+//
+// Two messages are equal if they belong to the same message descriptor,
+// have the same set of populated known and extension field values,
+// and the same set of unknown fields values. If either of the top-level
+// messages are invalid, then Equal reports true only if both are invalid.
+//
+// Scalar values are compared with the equivalent of the == operator in Go,
+// except bytes values which are compared using bytes.Equal and
+// floating point values which specially treat NaNs as equal.
+// Message values are compared by recursively calling Equal.
+// Lists are equal if each element value is also equal.
+// Maps are equal if they have the same set of keys, where the pair of values
+// for each key is also equal.
+func Equal(x, y Message) bool {
+ if x == nil || y == nil {
+ return x == nil && y == nil
+ }
+ mx := x.ProtoReflect()
+ my := y.ProtoReflect()
+ if mx.IsValid() != my.IsValid() {
+ return false
+ }
+ return equalMessage(mx, my)
+}
+
+// equalMessage compares two messages.
+func equalMessage(mx, my pref.Message) bool {
+ if mx.Descriptor() != my.Descriptor() {
+ return false
+ }
+
+ nx := 0
+ equal := true
+ mx.Range(func(fd pref.FieldDescriptor, vx pref.Value) bool {
+ nx++
+ vy := my.Get(fd)
+ equal = my.Has(fd) && equalField(fd, vx, vy)
+ return equal
+ })
+ if !equal {
+ return false
+ }
+ ny := 0
+ my.Range(func(fd pref.FieldDescriptor, vx pref.Value) bool {
+ ny++
+ return true
+ })
+ if nx != ny {
+ return false
+ }
+
+ return equalUnknown(mx.GetUnknown(), my.GetUnknown())
+}
+
+// equalField compares two fields.
+func equalField(fd pref.FieldDescriptor, x, y pref.Value) bool {
+ switch {
+ case fd.IsList():
+ return equalList(fd, x.List(), y.List())
+ case fd.IsMap():
+ return equalMap(fd, x.Map(), y.Map())
+ default:
+ return equalValue(fd, x, y)
+ }
+}
+
+// equalMap compares two maps.
+func equalMap(fd pref.FieldDescriptor, x, y pref.Map) bool {
+ if x.Len() != y.Len() {
+ return false
+ }
+ equal := true
+ x.Range(func(k pref.MapKey, vx pref.Value) bool {
+ vy := y.Get(k)
+ equal = y.Has(k) && equalValue(fd.MapValue(), vx, vy)
+ return equal
+ })
+ return equal
+}
+
+// equalList compares two lists.
+func equalList(fd pref.FieldDescriptor, x, y pref.List) bool {
+ if x.Len() != y.Len() {
+ return false
+ }
+ for i := x.Len() - 1; i >= 0; i-- {
+ if !equalValue(fd, x.Get(i), y.Get(i)) {
+ return false
+ }
+ }
+ return true
+}
+
+// equalValue compares two singular values.
+func equalValue(fd pref.FieldDescriptor, x, y pref.Value) bool {
+ switch fd.Kind() {
+ case pref.BoolKind:
+ return x.Bool() == y.Bool()
+ case pref.EnumKind:
+ return x.Enum() == y.Enum()
+ case pref.Int32Kind, pref.Sint32Kind,
+ pref.Int64Kind, pref.Sint64Kind,
+ pref.Sfixed32Kind, pref.Sfixed64Kind:
+ return x.Int() == y.Int()
+ case pref.Uint32Kind, pref.Uint64Kind,
+ pref.Fixed32Kind, pref.Fixed64Kind:
+ return x.Uint() == y.Uint()
+ case pref.FloatKind, pref.DoubleKind:
+ fx := x.Float()
+ fy := y.Float()
+ if math.IsNaN(fx) || math.IsNaN(fy) {
+ return math.IsNaN(fx) && math.IsNaN(fy)
+ }
+ return fx == fy
+ case pref.StringKind:
+ return x.String() == y.String()
+ case pref.BytesKind:
+ return bytes.Equal(x.Bytes(), y.Bytes())
+ case pref.MessageKind, pref.GroupKind:
+ return equalMessage(x.Message(), y.Message())
+ default:
+ return x.Interface() == y.Interface()
+ }
+}
+
+// equalUnknown compares unknown fields by direct comparison on the raw bytes
+// of each individual field number.
+func equalUnknown(x, y pref.RawFields) bool {
+ if len(x) != len(y) {
+ return false
+ }
+ if bytes.Equal([]byte(x), []byte(y)) {
+ return true
+ }
+
+ mx := make(map[pref.FieldNumber]pref.RawFields)
+ my := make(map[pref.FieldNumber]pref.RawFields)
+ for len(x) > 0 {
+ fnum, _, n := protowire.ConsumeField(x)
+ mx[fnum] = append(mx[fnum], x[:n]...)
+ x = x[n:]
+ }
+ for len(y) > 0 {
+ fnum, _, n := protowire.ConsumeField(y)
+ my[fnum] = append(my[fnum], y[:n]...)
+ y = y[n:]
+ }
+ return reflect.DeepEqual(mx, my)
+}
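A minimal sketch of the NaN rule in equalValue, assuming the well-known DoubleValue wrapper: Equal treats two NaN values as equal even though Go's == operator does not.

package main

import (
	"fmt"
	"math"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	x := wrapperspb.Double(math.NaN())
	y := wrapperspb.Double(math.NaN())
	fmt.Println(x.Value == y.Value) // false: NaN != NaN under ==
	fmt.Println(proto.Equal(x, y))  // true: Equal treats NaNs as equal
}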
diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go
new file mode 100644
index 0000000..5f293cd
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/extension.go
@@ -0,0 +1,92 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// HasExtension reports whether an extension field is populated.
+// It returns false if m is invalid or if xt does not extend m.
+func HasExtension(m Message, xt protoreflect.ExtensionType) bool {
+ // Treat nil message interface as an empty message; no populated fields.
+ if m == nil {
+ return false
+ }
+
+ // As a special case, we report invalid or mismatching descriptors
+ // as always not being populated (since they aren't).
+ if xt == nil || m.ProtoReflect().Descriptor() != xt.TypeDescriptor().ContainingMessage() {
+ return false
+ }
+
+ return m.ProtoReflect().Has(xt.TypeDescriptor())
+}
+
+// ClearExtension clears an extension field such that subsequent
+// HasExtension calls return false.
+// It panics if m is invalid or if xt does not extend m.
+func ClearExtension(m Message, xt protoreflect.ExtensionType) {
+ m.ProtoReflect().Clear(xt.TypeDescriptor())
+}
+
+// GetExtension retrieves the value for an extension field.
+// If the field is unpopulated, it returns the default value for
+// scalars and an immutable, empty value for lists or messages.
+// It panics if xt does not extend m.
+func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} {
+ // Treat nil message interface as an empty message; return the default.
+ if m == nil {
+ return xt.InterfaceOf(xt.Zero())
+ }
+
+ return xt.InterfaceOf(m.ProtoReflect().Get(xt.TypeDescriptor()))
+}
+
+// SetExtension stores the value of an extension field.
+// It panics if m is invalid, xt does not extend m, or if the type of v
+// is invalid for the specified extension field.
+func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) {
+ xd := xt.TypeDescriptor()
+ pv := xt.ValueOf(v)
+
+ // Specially treat an invalid list, map, or message as clear.
+ isValid := true
+ switch {
+ case xd.IsList():
+ isValid = pv.List().IsValid()
+ case xd.IsMap():
+ isValid = pv.Map().IsValid()
+ case xd.Message() != nil:
+ isValid = pv.Message().IsValid()
+ }
+ if !isValid {
+ m.ProtoReflect().Clear(xd)
+ return
+ }
+
+ m.ProtoReflect().Set(xd, pv)
+}
+
+// RangeExtensions iterates over every populated extension field in m in an
+// undefined order, calling f for each extension type and value encountered.
+// It returns immediately if f returns false.
+// While iterating, mutating operations may only be performed
+// on the current extension field.
+func RangeExtensions(m Message, f func(protoreflect.ExtensionType, interface{}) bool) {
+ // Treat nil message interface as an empty message; nothing to range over.
+ if m == nil {
+ return
+ }
+
+ m.ProtoReflect().Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ if fd.IsExtension() {
+ xt := fd.(protoreflect.ExtensionTypeDescriptor).Type()
+ vi := xt.InterfaceOf(v)
+ return f(xt, vi)
+ }
+ return true
+ })
+}
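A hedged usage sketch of the extension accessors; examplepb, Greeting, and E_MyOption are hypothetical generated identifiers standing in for a real proto2 extension, since this module does not define extensions of its own.

// Assumes a hypothetical generated package "examplepb" containing a
// proto2 message Greeting and a string extension E_MyOption.
m := &examplepb.Greeting{}
proto.SetExtension(m, examplepb.E_MyOption, "hello")
if proto.HasExtension(m, examplepb.E_MyOption) {
	s := proto.GetExtension(m, examplepb.E_MyOption).(string)
	fmt.Println(s) // hello
}
proto.ClearExtension(m, examplepb.E_MyOption)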
diff --git a/vendor/google.golang.org/protobuf/proto/merge.go b/vendor/google.golang.org/protobuf/proto/merge.go
new file mode 100644
index 0000000..d761ab3
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/merge.go
@@ -0,0 +1,139 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "fmt"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// Merge merges src into dst, which must be a message with the same descriptor.
+//
+// Populated scalar fields in src are copied to dst, while populated
+// singular messages in src are merged into dst by recursively calling Merge.
+// The elements of every list field in src are appended to the corresponding
+// list fields in dst. The entries of every map field in src are copied into
+// the corresponding map field in dst, possibly replacing existing entries.
+// The unknown fields of src are appended to the unknown fields of dst.
+//
+// It is semantically equivalent to unmarshaling the encoded form of src
+// into dst with the UnmarshalOptions.Merge option specified.
+func Merge(dst, src Message) {
+ // TODO: Should nil src be treated as semantically equivalent to an
+ // untyped, read-only, empty message? What about a nil dst?
+
+ dstMsg, srcMsg := dst.ProtoReflect(), src.ProtoReflect()
+ if dstMsg.Descriptor() != srcMsg.Descriptor() {
+ if got, want := dstMsg.Descriptor().FullName(), srcMsg.Descriptor().FullName(); got != want {
+ panic(fmt.Sprintf("descriptor mismatch: %v != %v", got, want))
+ }
+ panic("descriptor mismatch")
+ }
+ mergeOptions{}.mergeMessage(dstMsg, srcMsg)
+}
+
+// Clone returns a deep copy of m.
+// If the top-level message is invalid, it returns an invalid message as well.
+func Clone(m Message) Message {
+ // NOTE: Most usages of Clone assume the following properties:
+ // t := reflect.TypeOf(m)
+ // t == reflect.TypeOf(m.ProtoReflect().New().Interface())
+ // t == reflect.TypeOf(m.ProtoReflect().Type().Zero().Interface())
+ //
+ // Embedding protobuf messages breaks this since the parent type will have
+ // a forwarded ProtoReflect method, but the Interface method will return
+ // the underlying embedded message type.
+ if m == nil {
+ return nil
+ }
+ src := m.ProtoReflect()
+ if !src.IsValid() {
+ return src.Type().Zero().Interface()
+ }
+ dst := src.New()
+ mergeOptions{}.mergeMessage(dst, src)
+ return dst.Interface()
+}
+
+// mergeOptions provides a namespace for merge functions, and can be
+// exported in the future if we add user-visible merge options.
+type mergeOptions struct{}
+
+func (o mergeOptions) mergeMessage(dst, src protoreflect.Message) {
+ methods := protoMethods(dst)
+ if methods != nil && methods.Merge != nil {
+ in := protoiface.MergeInput{
+ Destination: dst,
+ Source: src,
+ }
+ out := methods.Merge(in)
+ if out.Flags&protoiface.MergeComplete != 0 {
+ return
+ }
+ }
+
+ if !dst.IsValid() {
+ panic(fmt.Sprintf("cannot merge into invalid %v message", dst.Descriptor().FullName()))
+ }
+
+ src.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ switch {
+ case fd.IsList():
+ o.mergeList(dst.Mutable(fd).List(), v.List(), fd)
+ case fd.IsMap():
+ o.mergeMap(dst.Mutable(fd).Map(), v.Map(), fd.MapValue())
+ case fd.Message() != nil:
+ o.mergeMessage(dst.Mutable(fd).Message(), v.Message())
+ case fd.Kind() == protoreflect.BytesKind:
+ dst.Set(fd, o.cloneBytes(v))
+ default:
+ dst.Set(fd, v)
+ }
+ return true
+ })
+
+ if len(src.GetUnknown()) > 0 {
+ dst.SetUnknown(append(dst.GetUnknown(), src.GetUnknown()...))
+ }
+}
+
+func (o mergeOptions) mergeList(dst, src protoreflect.List, fd protoreflect.FieldDescriptor) {
+ // Merge semantics appends to the end of the existing list.
+ for i, n := 0, src.Len(); i < n; i++ {
+ switch v := src.Get(i); {
+ case fd.Message() != nil:
+ dstv := dst.NewElement()
+ o.mergeMessage(dstv.Message(), v.Message())
+ dst.Append(dstv)
+ case fd.Kind() == protoreflect.BytesKind:
+ dst.Append(o.cloneBytes(v))
+ default:
+ dst.Append(v)
+ }
+ }
+}
+
+func (o mergeOptions) mergeMap(dst, src protoreflect.Map, fd protoreflect.FieldDescriptor) {
+ // Merge semantics replaces, rather than merges into, existing entries.
+ src.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
+ switch {
+ case fd.Message() != nil:
+ dstv := dst.NewValue()
+ o.mergeMessage(dstv.Message(), v.Message())
+ dst.Set(k, dstv)
+ case fd.Kind() == protoreflect.BytesKind:
+ dst.Set(k, o.cloneBytes(v))
+ default:
+ dst.Set(k, v)
+ }
+ return true
+ })
+}
+
+func (o mergeOptions) cloneBytes(v protoreflect.Value) protoreflect.Value {
+ return protoreflect.ValueOfBytes(append([]byte{}, v.Bytes()...))
+}
diff --git a/vendor/google.golang.org/protobuf/proto/messageset.go b/vendor/google.golang.org/protobuf/proto/messageset.go
new file mode 100644
index 0000000..312d5d4
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/messageset.go
@@ -0,0 +1,93 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/flags"
+ "google.golang.org/protobuf/internal/order"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+func (o MarshalOptions) sizeMessageSet(m protoreflect.Message) (size int) {
+ m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ size += messageset.SizeField(fd.Number())
+ size += protowire.SizeTag(messageset.FieldMessage)
+ size += protowire.SizeBytes(o.size(v.Message()))
+ return true
+ })
+ size += messageset.SizeUnknown(m.GetUnknown())
+ return size
+}
+
+func (o MarshalOptions) marshalMessageSet(b []byte, m protoreflect.Message) ([]byte, error) {
+ if !flags.ProtoLegacy {
+ return b, errors.New("no support for message_set_wire_format")
+ }
+ fieldOrder := order.AnyFieldOrder
+ if o.Deterministic {
+ fieldOrder = order.NumberFieldOrder
+ }
+ var err error
+ order.RangeFields(m, fieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ b, err = o.marshalMessageSetField(b, fd, v)
+ return err == nil
+ })
+ if err != nil {
+ return b, err
+ }
+ return messageset.AppendUnknown(b, m.GetUnknown())
+}
+
+func (o MarshalOptions) marshalMessageSetField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) {
+ b = messageset.AppendFieldStart(b, fd.Number())
+ b = protowire.AppendTag(b, messageset.FieldMessage, protowire.BytesType)
+ b = protowire.AppendVarint(b, uint64(o.Size(value.Message().Interface())))
+ b, err := o.marshalMessage(b, value.Message())
+ if err != nil {
+ return b, err
+ }
+ b = messageset.AppendFieldEnd(b)
+ return b, nil
+}
+
+func (o UnmarshalOptions) unmarshalMessageSet(b []byte, m protoreflect.Message) error {
+ if !flags.ProtoLegacy {
+ return errors.New("no support for message_set_wire_format")
+ }
+ return messageset.Unmarshal(b, false, func(num protowire.Number, v []byte) error {
+ err := o.unmarshalMessageSetField(m, num, v)
+ if err == errUnknown {
+ unknown := m.GetUnknown()
+ unknown = protowire.AppendTag(unknown, num, protowire.BytesType)
+ unknown = protowire.AppendBytes(unknown, v)
+ m.SetUnknown(unknown)
+ return nil
+ }
+ return err
+ })
+}
+
+func (o UnmarshalOptions) unmarshalMessageSetField(m protoreflect.Message, num protowire.Number, v []byte) error {
+ md := m.Descriptor()
+ if !md.ExtensionRanges().Has(num) {
+ return errUnknown
+ }
+ xt, err := o.Resolver.FindExtensionByNumber(md.FullName(), num)
+ if err == protoregistry.NotFound {
+ return errUnknown
+ }
+ if err != nil {
+ return errors.New("%v: unable to resolve extension %v: %v", md.FullName(), num, err)
+ }
+ xd := xt.TypeDescriptor()
+ if err := o.unmarshalMessage(v, m.Mutable(xd).Message()); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/google.golang.org/protobuf/proto/proto.go b/vendor/google.golang.org/protobuf/proto/proto.go
new file mode 100644
index 0000000..1f0d183
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/proto.go
@@ -0,0 +1,43 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// Message is the top-level interface that all messages must implement.
+// It provides access to a reflective view of a message.
+// Any implementation of this interface may be used with all functions in the
+// protobuf module that accept a Message, except where otherwise specified.
+//
+// This is the v2 interface definition for protobuf messages.
+// The v1 interface definition is "github.com/golang/protobuf/proto".Message.
+//
+// To convert a v1 message to a v2 message,
+// use "github.com/golang/protobuf/proto".MessageV2.
+// To convert a v2 message to a v1 message,
+// use "github.com/golang/protobuf/proto".MessageV1.
+type Message = protoreflect.ProtoMessage
+
+// Error matches all errors produced by packages in the protobuf module.
+//
+// That is, errors.Is(err, Error) reports whether an error is produced
+// by this module.
+var Error error
+
+func init() {
+ Error = errors.Error
+}
+
+// MessageName returns the full name of m.
+// If m is nil, it returns an empty string.
+func MessageName(m Message) protoreflect.FullName {
+ if m == nil {
+ return ""
+ }
+ return m.ProtoReflect().Descriptor().FullName()
+}
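A brief sketch of MessageName and the Error sentinel in use; durationpb stands in for an application message:

package main

import (
	"errors"
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	var d durationpb.Duration
	fmt.Println(proto.MessageName(&d)) // google.protobuf.Duration

	// 0x0F is a tag for field 1 with wire type 7, which is invalid, so
	// Unmarshal fails; the error matches the module-wide proto.Error sentinel.
	err := proto.Unmarshal([]byte{0x0F}, &d)
	fmt.Println(errors.Is(err, proto.Error)) // true
}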
diff --git a/vendor/google.golang.org/protobuf/proto/proto_methods.go b/vendor/google.golang.org/protobuf/proto/proto_methods.go
new file mode 100644
index 0000000..d8dd604
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/proto_methods.go
@@ -0,0 +1,19 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The protoreflect build tag disables use of fast-path methods.
+// +build !protoreflect
+
+package proto
+
+import (
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+const hasProtoMethods = true
+
+func protoMethods(m protoreflect.Message) *protoiface.Methods {
+ return m.ProtoMethods()
+}
diff --git a/vendor/google.golang.org/protobuf/proto/proto_reflect.go b/vendor/google.golang.org/protobuf/proto/proto_reflect.go
new file mode 100644
index 0000000..b103d43
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/proto_reflect.go
@@ -0,0 +1,19 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The protoreflect build tag disables use of fast-path methods.
+// +build protoreflect
+
+package proto
+
+import (
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+const hasProtoMethods = false
+
+func protoMethods(m protoreflect.Message) *protoiface.Methods {
+ return nil
+}
diff --git a/vendor/google.golang.org/protobuf/proto/reset.go b/vendor/google.golang.org/protobuf/proto/reset.go
new file mode 100644
index 0000000..3d7f894
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/reset.go
@@ -0,0 +1,43 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "fmt"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// Reset clears every field in the message.
+// The resulting message shares no observable memory with its previous state
+// other than the memory for the message itself.
+func Reset(m Message) {
+ if mr, ok := m.(interface{ Reset() }); ok && hasProtoMethods {
+ mr.Reset()
+ return
+ }
+ resetMessage(m.ProtoReflect())
+}
+
+func resetMessage(m protoreflect.Message) {
+ if !m.IsValid() {
+ panic(fmt.Sprintf("cannot reset invalid %v message", m.Descriptor().FullName()))
+ }
+
+ // Clear all known fields.
+ fds := m.Descriptor().Fields()
+ for i := 0; i < fds.Len(); i++ {
+ m.Clear(fds.Get(i))
+ }
+
+ // Clear extension fields.
+ m.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+ m.Clear(fd)
+ return true
+ })
+
+ // Clear unknown fields.
+ m.SetUnknown(nil)
+}
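A small sketch of Reset clearing a populated message (durationpb used only as a convenient concrete type):

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	d := durationpb.New(90 * time.Second)
	fmt.Println(d.GetSeconds()) // 90

	// Reset clears known fields, extension fields, and unknown fields.
	proto.Reset(d)
	fmt.Println(d.GetSeconds()) // 0
}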
diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go
new file mode 100644
index 0000000..554b9c6
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/size.go
@@ -0,0 +1,97 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// Size returns the size in bytes of the wire-format encoding of m.
+func Size(m Message) int {
+ return MarshalOptions{}.Size(m)
+}
+
+// Size returns the size in bytes of the wire-format encoding of m.
+func (o MarshalOptions) Size(m Message) int {
+ // Treat a nil message interface as an empty message; nothing to output.
+ if m == nil {
+ return 0
+ }
+
+ return o.size(m.ProtoReflect())
+}
+
+// size is a centralized function that all size operations go through.
+// For profiling purposes, avoid changing the name of this function or
+// introducing other code paths for size that do not go through this.
+func (o MarshalOptions) size(m protoreflect.Message) (size int) {
+ methods := protoMethods(m)
+ if methods != nil && methods.Size != nil {
+ out := methods.Size(protoiface.SizeInput{
+ Message: m,
+ })
+ return out.Size
+ }
+ if methods != nil && methods.Marshal != nil {
+ // This is not efficient, but we don't have any choice.
+ // This case is mainly used for legacy types with a Marshal method.
+ out, _ := methods.Marshal(protoiface.MarshalInput{
+ Message: m,
+ })
+ return len(out.Buf)
+ }
+ return o.sizeMessageSlow(m)
+}
+
+func (o MarshalOptions) sizeMessageSlow(m protoreflect.Message) (size int) {
+ if messageset.IsMessageSet(m.Descriptor()) {
+ return o.sizeMessageSet(m)
+ }
+ m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ size += o.sizeField(fd, v)
+ return true
+ })
+ size += len(m.GetUnknown())
+ return size
+}
+
+func (o MarshalOptions) sizeField(fd protoreflect.FieldDescriptor, value protoreflect.Value) (size int) {
+ num := fd.Number()
+ switch {
+ case fd.IsList():
+ return o.sizeList(num, fd, value.List())
+ case fd.IsMap():
+ return o.sizeMap(num, fd, value.Map())
+ default:
+ return protowire.SizeTag(num) + o.sizeSingular(num, fd.Kind(), value)
+ }
+}
+
+func (o MarshalOptions) sizeList(num protowire.Number, fd protoreflect.FieldDescriptor, list protoreflect.List) (size int) {
+ if fd.IsPacked() && list.Len() > 0 {
+ content := 0
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ content += o.sizeSingular(num, fd.Kind(), list.Get(i))
+ }
+ return protowire.SizeTag(num) + protowire.SizeBytes(content)
+ }
+
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ size += protowire.SizeTag(num) + o.sizeSingular(num, fd.Kind(), list.Get(i))
+ }
+ return size
+}
+
+func (o MarshalOptions) sizeMap(num protowire.Number, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) (size int) {
+ mapv.Range(func(key protoreflect.MapKey, value protoreflect.Value) bool {
+ size += protowire.SizeTag(num)
+ size += protowire.SizeBytes(o.sizeField(fd.MapKey(), key.Value()) + o.sizeField(fd.MapValue(), value))
+ return true
+ })
+ return size
+}
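A quick sketch showing that, for the default options, Size agrees with the length of the marshaled output:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	m := durationpb.New(90 * time.Second)
	b, err := proto.Marshal(m)
	if err != nil {
		panic(err)
	}
	// Size computes the wire-format length without producing the bytes.
	fmt.Println(proto.Size(m), len(b)) // both print the same number
}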
diff --git a/vendor/google.golang.org/protobuf/proto/size_gen.go b/vendor/google.golang.org/protobuf/proto/size_gen.go
new file mode 100644
index 0000000..3cf61a8
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/size_gen.go
@@ -0,0 +1,55 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-types. DO NOT EDIT.
+
+package proto
+
+import (
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+func (o MarshalOptions) sizeSingular(num protowire.Number, kind protoreflect.Kind, v protoreflect.Value) int {
+ switch kind {
+ case protoreflect.BoolKind:
+ return protowire.SizeVarint(protowire.EncodeBool(v.Bool()))
+ case protoreflect.EnumKind:
+ return protowire.SizeVarint(uint64(v.Enum()))
+ case protoreflect.Int32Kind:
+ return protowire.SizeVarint(uint64(int32(v.Int())))
+ case protoreflect.Sint32Kind:
+ return protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int()))))
+ case protoreflect.Uint32Kind:
+ return protowire.SizeVarint(uint64(uint32(v.Uint())))
+ case protoreflect.Int64Kind:
+ return protowire.SizeVarint(uint64(v.Int()))
+ case protoreflect.Sint64Kind:
+ return protowire.SizeVarint(protowire.EncodeZigZag(v.Int()))
+ case protoreflect.Uint64Kind:
+ return protowire.SizeVarint(v.Uint())
+ case protoreflect.Sfixed32Kind:
+ return protowire.SizeFixed32()
+ case protoreflect.Fixed32Kind:
+ return protowire.SizeFixed32()
+ case protoreflect.FloatKind:
+ return protowire.SizeFixed32()
+ case protoreflect.Sfixed64Kind:
+ return protowire.SizeFixed64()
+ case protoreflect.Fixed64Kind:
+ return protowire.SizeFixed64()
+ case protoreflect.DoubleKind:
+ return protowire.SizeFixed64()
+ case protoreflect.StringKind:
+ return protowire.SizeBytes(len(v.String()))
+ case protoreflect.BytesKind:
+ return protowire.SizeBytes(len(v.Bytes()))
+ case protoreflect.MessageKind:
+ return protowire.SizeBytes(o.size(v.Message()))
+ case protoreflect.GroupKind:
+ return protowire.SizeGroup(num, o.size(v.Message()))
+ default:
+ return 0
+ }
+}
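The per-kind sizes above come directly from the protowire helpers; for example, a singular varint field costs one tag varint plus the value varint, which can be checked by hand:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Field number 1 with value 300: a 1-byte tag plus a 2-byte varint.
	size := protowire.SizeTag(1) + protowire.SizeVarint(300)
	fmt.Println(size) // 3
}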
diff --git a/vendor/google.golang.org/protobuf/proto/wrappers.go b/vendor/google.golang.org/protobuf/proto/wrappers.go
new file mode 100644
index 0000000..653b12c
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/wrappers.go
@@ -0,0 +1,29 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+// Bool stores v in a new bool value and returns a pointer to it.
+func Bool(v bool) *bool { return &v }
+
+// Int32 stores v in a new int32 value and returns a pointer to it.
+func Int32(v int32) *int32 { return &v }
+
+// Int64 stores v in a new int64 value and returns a pointer to it.
+func Int64(v int64) *int64 { return &v }
+
+// Float32 stores v in a new float32 value and returns a pointer to it.
+func Float32(v float32) *float32 { return &v }
+
+// Float64 stores v in a new float64 value and returns a pointer to it.
+func Float64(v float64) *float64 { return &v }
+
+// Uint32 stores v in a new uint32 value and returns a pointer to it.
+func Uint32(v uint32) *uint32 { return &v }
+
+// Uint64 stores v in a new uint64 value and returns a pointer to it.
+func Uint64(v uint64) *uint64 { return &v }
+
+// String stores v in a new string value and returns a pointer to it.
+func String(v string) *string { return &v }
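These helpers exist because proto2-style scalar fields (and proto3 optional fields) are generated as pointer types; a short sketch using descriptorpb, whose fields follow proto2 semantics:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	fdp := &descriptorpb.FileDescriptorProto{
		Name:    proto.String("example.proto"),
		Syntax:  proto.String("proto3"),
		Package: proto.String("example"),
	}
	fmt.Println(fdp.GetName(), fdp.GetSyntax(), fdp.GetPackage())
}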
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
new file mode 100644
index 0000000..e4dfb12
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
@@ -0,0 +1,276 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protodesc provides functionality for converting
+// FileDescriptorProto messages to/from protoreflect.FileDescriptor values.
+//
+// The google.protobuf.FileDescriptorProto is a protobuf message that describes
+// the type information for a .proto file in a form that is easily serializable.
+// The protoreflect.FileDescriptor is a more structured representation of
+// the FileDescriptorProto message where references and remote dependencies
+// can be directly followed.
+package protodesc
+
+import (
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/internal/pragma"
+ "google.golang.org/protobuf/internal/strs"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+
+ "google.golang.org/protobuf/types/descriptorpb"
+)
+
+// Resolver is the resolver used by NewFile to resolve dependencies.
+// The enums and messages provided must belong to some parent file,
+// which is also registered.
+//
+// It is implemented by protoregistry.Files.
+type Resolver interface {
+ FindFileByPath(string) (protoreflect.FileDescriptor, error)
+ FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error)
+}
+
+// FileOptions configures the construction of file descriptors.
+type FileOptions struct {
+ pragma.NoUnkeyedLiterals
+
+ // AllowUnresolvable configures New to permissively allow unresolvable
+ // file, enum, or message dependencies. Unresolved dependencies are replaced
+ // by placeholder equivalents.
+ //
+ // The following dependencies may be left unresolved:
+ // • Resolving an imported file.
+ // • Resolving the type for a message field or extension field.
+ // If the kind of the field is unknown, then a placeholder is used for both
+ // the Enum and Message accessors on the protoreflect.FieldDescriptor.
+ // • Resolving an enum value set as the default for an optional enum field.
+ // If unresolvable, the protoreflect.FieldDescriptor.Default is set to the
+ // first value in the associated enum (or zero if the enum dependency is
+ // also unresolvable). The protoreflect.FieldDescriptor.DefaultEnumValue
+ // is populated with a placeholder.
+ // • Resolving the extended message type for an extension field.
+ // • Resolving the input or output message type for a service method.
+ //
+ // If the unresolved dependency uses a relative name,
+ // then the placeholder will contain an invalid FullName with a "*." prefix,
+ // indicating that the starting prefix of the full name is unknown.
+ AllowUnresolvable bool
+}
+
+// NewFile creates a new protoreflect.FileDescriptor from the provided
+// file descriptor message. See FileOptions.New for more information.
+func NewFile(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) {
+ return FileOptions{}.New(fd, r)
+}
+
+// NewFiles creates a new protoregistry.Files from the provided
+// FileDescriptorSet message. See FileOptions.NewFiles for more information.
+func NewFiles(fd *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) {
+ return FileOptions{}.NewFiles(fd)
+}
+
+// New creates a new protoreflect.FileDescriptor from the provided
+// file descriptor message. The file must represent a valid proto file according
+// to protobuf semantics. The returned descriptor is a deep copy of the input.
+//
+// Any imported files, enum types, or message types referenced in the file are
+// resolved using the provided registry. When looking up an import file path,
+// the path must be unique. The newly created file descriptor is not registered
+// back into the provided file registry.
+func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) {
+ if r == nil {
+ r = (*protoregistry.Files)(nil) // empty resolver
+ }
+
+ // Handle the file descriptor content.
+ f := &filedesc.File{L2: &filedesc.FileL2{}}
+ switch fd.GetSyntax() {
+ case "proto2", "":
+ f.L1.Syntax = protoreflect.Proto2
+ case "proto3":
+ f.L1.Syntax = protoreflect.Proto3
+ default:
+ return nil, errors.New("invalid syntax: %q", fd.GetSyntax())
+ }
+ f.L1.Path = fd.GetName()
+ if f.L1.Path == "" {
+ return nil, errors.New("file path must be populated")
+ }
+ f.L1.Package = protoreflect.FullName(fd.GetPackage())
+ if !f.L1.Package.IsValid() && f.L1.Package != "" {
+ return nil, errors.New("invalid package: %q", f.L1.Package)
+ }
+ if opts := fd.GetOptions(); opts != nil {
+ opts = proto.Clone(opts).(*descriptorpb.FileOptions)
+ f.L2.Options = func() protoreflect.ProtoMessage { return opts }
+ }
+
+ f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency()))
+ for _, i := range fd.GetPublicDependency() {
+ if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsPublic {
+ return nil, errors.New("invalid or duplicate public import index: %d", i)
+ }
+ f.L2.Imports[i].IsPublic = true
+ }
+ for _, i := range fd.GetWeakDependency() {
+ if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsWeak {
+ return nil, errors.New("invalid or duplicate weak import index: %d", i)
+ }
+ f.L2.Imports[i].IsWeak = true
+ }
+ imps := importSet{f.Path(): true}
+ for i, path := range fd.GetDependency() {
+ imp := &f.L2.Imports[i]
+ f, err := r.FindFileByPath(path)
+ if err == protoregistry.NotFound && (o.AllowUnresolvable || imp.IsWeak) {
+ f = filedesc.PlaceholderFile(path)
+ } else if err != nil {
+ return nil, errors.New("could not resolve import %q: %v", path, err)
+ }
+ imp.FileDescriptor = f
+
+ if imps[imp.Path()] {
+ return nil, errors.New("already imported %q", path)
+ }
+ imps[imp.Path()] = true
+ }
+ for i := range fd.GetDependency() {
+ imp := &f.L2.Imports[i]
+ imps.importPublic(imp.Imports())
+ }
+
+ // Handle source locations.
+ f.L2.Locations.File = f
+ for _, loc := range fd.GetSourceCodeInfo().GetLocation() {
+ var l protoreflect.SourceLocation
+ // TODO: Validate that the path points to an actual declaration?
+ l.Path = protoreflect.SourcePath(loc.GetPath())
+ s := loc.GetSpan()
+ switch len(s) {
+ case 3:
+ l.StartLine, l.StartColumn, l.EndLine, l.EndColumn = int(s[0]), int(s[1]), int(s[0]), int(s[2])
+ case 4:
+ l.StartLine, l.StartColumn, l.EndLine, l.EndColumn = int(s[0]), int(s[1]), int(s[2]), int(s[3])
+ default:
+ return nil, errors.New("invalid span: %v", s)
+ }
+ // TODO: Validate that the span information is sensible?
+ // See https://github.com/protocolbuffers/protobuf/issues/6378.
+ if false && (l.EndLine < l.StartLine || l.StartLine < 0 || l.StartColumn < 0 || l.EndColumn < 0 ||
+ (l.StartLine == l.EndLine && l.EndColumn <= l.StartColumn)) {
+ return nil, errors.New("invalid span: %v", s)
+ }
+ l.LeadingDetachedComments = loc.GetLeadingDetachedComments()
+ l.LeadingComments = loc.GetLeadingComments()
+ l.TrailingComments = loc.GetTrailingComments()
+ f.L2.Locations.List = append(f.L2.Locations.List, l)
+ }
+
+ // Step 1: Allocate and derive the names for all declarations.
+ // This copies all fields from the descriptor proto except:
+ // google.protobuf.FieldDescriptorProto.type_name
+ // google.protobuf.FieldDescriptorProto.default_value
+ // google.protobuf.FieldDescriptorProto.oneof_index
+ // google.protobuf.FieldDescriptorProto.extendee
+ // google.protobuf.MethodDescriptorProto.input
+ // google.protobuf.MethodDescriptorProto.output
+ var err error
+ sb := new(strs.Builder)
+ r1 := make(descsByName)
+ if f.L1.Enums.List, err = r1.initEnumDeclarations(fd.GetEnumType(), f, sb); err != nil {
+ return nil, err
+ }
+ if f.L1.Messages.List, err = r1.initMessagesDeclarations(fd.GetMessageType(), f, sb); err != nil {
+ return nil, err
+ }
+ if f.L1.Extensions.List, err = r1.initExtensionDeclarations(fd.GetExtension(), f, sb); err != nil {
+ return nil, err
+ }
+ if f.L1.Services.List, err = r1.initServiceDeclarations(fd.GetService(), f, sb); err != nil {
+ return nil, err
+ }
+
+ // Step 2: Resolve every dependency reference not handled by step 1.
+ r2 := &resolver{local: r1, remote: r, imports: imps, allowUnresolvable: o.AllowUnresolvable}
+ if err := r2.resolveMessageDependencies(f.L1.Messages.List, fd.GetMessageType()); err != nil {
+ return nil, err
+ }
+ if err := r2.resolveExtensionDependencies(f.L1.Extensions.List, fd.GetExtension()); err != nil {
+ return nil, err
+ }
+ if err := r2.resolveServiceDependencies(f.L1.Services.List, fd.GetService()); err != nil {
+ return nil, err
+ }
+
+ // Step 3: Validate every enum, message, and extension declaration.
+ if err := validateEnumDeclarations(f.L1.Enums.List, fd.GetEnumType()); err != nil {
+ return nil, err
+ }
+ if err := validateMessageDeclarations(f.L1.Messages.List, fd.GetMessageType()); err != nil {
+ return nil, err
+ }
+ if err := validateExtensionDeclarations(f.L1.Extensions.List, fd.GetExtension()); err != nil {
+ return nil, err
+ }
+
+ return f, nil
+}
+
+type importSet map[string]bool
+
+func (is importSet) importPublic(imps protoreflect.FileImports) {
+ for i := 0; i < imps.Len(); i++ {
+ if imp := imps.Get(i); imp.IsPublic {
+ is[imp.Path()] = true
+ is.importPublic(imp.Imports())
+ }
+ }
+}
+
+// NewFiles creates a new protoregistry.Files from the provided
+// FileDescriptorSet message. The descriptor set must include only
+// valid files according to protobuf semantics. The returned descriptors
+// are a deep copy of the input.
+func (o FileOptions) NewFiles(fds *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) {
+ files := make(map[string]*descriptorpb.FileDescriptorProto)
+ for _, fd := range fds.File {
+ if _, ok := files[fd.GetName()]; ok {
+ return nil, errors.New("file appears multiple times: %q", fd.GetName())
+ }
+ files[fd.GetName()] = fd
+ }
+ r := &protoregistry.Files{}
+ for _, fd := range files {
+ if err := o.addFileDeps(r, fd, files); err != nil {
+ return nil, err
+ }
+ }
+ return r, nil
+}
+func (o FileOptions) addFileDeps(r *protoregistry.Files, fd *descriptorpb.FileDescriptorProto, files map[string]*descriptorpb.FileDescriptorProto) error {
+ // Set the entry to nil while descending into a file's dependencies to detect cycles.
+ files[fd.GetName()] = nil
+ for _, dep := range fd.Dependency {
+ depfd, ok := files[dep]
+ if depfd == nil {
+ if ok {
+ return errors.New("import cycle in file: %q", dep)
+ }
+ continue
+ }
+ if err := o.addFileDeps(r, depfd, files); err != nil {
+ return err
+ }
+ }
+ // Delete the entry once dependencies are processed.
+ delete(files, fd.GetName())
+ f, err := o.New(fd, r)
+ if err != nil {
+ return err
+ }
+ return r.RegisterFile(f)
+}
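A compact sketch of NewFile building a descriptor for a single-message file with no imports, so a nil Resolver suffices:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	fdp := &descriptorpb.FileDescriptorProto{
		Name:    proto.String("example.proto"),
		Syntax:  proto.String("proto3"),
		Package: proto.String("example"),
		MessageType: []*descriptorpb.DescriptorProto{{
			Name: proto.String("Item"),
			Field: []*descriptorpb.FieldDescriptorProto{{
				Name:   proto.String("id"),
				Number: proto.Int32(1),
				Label:  descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum(),
				Type:   descriptorpb.FieldDescriptorProto_TYPE_INT64.Enum(),
			}},
		}},
	}
	fd, err := protodesc.NewFile(fdp, nil) // nil resolver: no dependencies to resolve
	if err != nil {
		panic(err)
	}
	fmt.Println(fd.Messages().Get(0).FullName()) // example.Item
}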
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
new file mode 100644
index 0000000..37efda1
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
@@ -0,0 +1,248 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protodesc
+
+import (
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/internal/strs"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ "google.golang.org/protobuf/types/descriptorpb"
+)
+
+type descsByName map[protoreflect.FullName]protoreflect.Descriptor
+
+func (r descsByName) initEnumDeclarations(eds []*descriptorpb.EnumDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (es []filedesc.Enum, err error) {
+ es = make([]filedesc.Enum, len(eds)) // allocate up-front to ensure stable pointers
+ for i, ed := range eds {
+ e := &es[i]
+ e.L2 = new(filedesc.EnumL2)
+ if e.L0, err = r.makeBase(e, parent, ed.GetName(), i, sb); err != nil {
+ return nil, err
+ }
+ if opts := ed.GetOptions(); opts != nil {
+ opts = proto.Clone(opts).(*descriptorpb.EnumOptions)
+ e.L2.Options = func() protoreflect.ProtoMessage { return opts }
+ }
+ for _, s := range ed.GetReservedName() {
+ e.L2.ReservedNames.List = append(e.L2.ReservedNames.List, protoreflect.Name(s))
+ }
+ for _, rr := range ed.GetReservedRange() {
+ e.L2.ReservedRanges.List = append(e.L2.ReservedRanges.List, [2]protoreflect.EnumNumber{
+ protoreflect.EnumNumber(rr.GetStart()),
+ protoreflect.EnumNumber(rr.GetEnd()),
+ })
+ }
+ if e.L2.Values.List, err = r.initEnumValuesFromDescriptorProto(ed.GetValue(), e, sb); err != nil {
+ return nil, err
+ }
+ }
+ return es, nil
+}
+
+func (r descsByName) initEnumValuesFromDescriptorProto(vds []*descriptorpb.EnumValueDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (vs []filedesc.EnumValue, err error) {
+ vs = make([]filedesc.EnumValue, len(vds)) // allocate up-front to ensure stable pointers
+ for i, vd := range vds {
+ v := &vs[i]
+ if v.L0, err = r.makeBase(v, parent, vd.GetName(), i, sb); err != nil {
+ return nil, err
+ }
+ if opts := vd.GetOptions(); opts != nil {
+ opts = proto.Clone(opts).(*descriptorpb.EnumValueOptions)
+ v.L1.Options = func() protoreflect.ProtoMessage { return opts }
+ }
+ v.L1.Number = protoreflect.EnumNumber(vd.GetNumber())
+ }
+ return vs, nil
+}
+
+func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ms []filedesc.Message, err error) {
+ ms = make([]filedesc.Message, len(mds)) // allocate up-front to ensure stable pointers
+ for i, md := range mds {
+ m := &ms[i]
+ m.L2 = new(filedesc.MessageL2)
+ if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil {
+ return nil, err
+ }
+ if opts := md.GetOptions(); opts != nil {
+ opts = proto.Clone(opts).(*descriptorpb.MessageOptions)
+ m.L2.Options = func() protoreflect.ProtoMessage { return opts }
+ m.L1.IsMapEntry = opts.GetMapEntry()
+ m.L1.IsMessageSet = opts.GetMessageSetWireFormat()
+ }
+ for _, s := range md.GetReservedName() {
+ m.L2.ReservedNames.List = append(m.L2.ReservedNames.List, protoreflect.Name(s))
+ }
+ for _, rr := range md.GetReservedRange() {
+ m.L2.ReservedRanges.List = append(m.L2.ReservedRanges.List, [2]protoreflect.FieldNumber{
+ protoreflect.FieldNumber(rr.GetStart()),
+ protoreflect.FieldNumber(rr.GetEnd()),
+ })
+ }
+ for _, xr := range md.GetExtensionRange() {
+ m.L2.ExtensionRanges.List = append(m.L2.ExtensionRanges.List, [2]protoreflect.FieldNumber{
+ protoreflect.FieldNumber(xr.GetStart()),
+ protoreflect.FieldNumber(xr.GetEnd()),
+ })
+ var optsFunc func() protoreflect.ProtoMessage
+ if opts := xr.GetOptions(); opts != nil {
+ opts = proto.Clone(opts).(*descriptorpb.ExtensionRangeOptions)
+ optsFunc = func() protoreflect.ProtoMessage { return opts }
+ }
+ m.L2.ExtensionRangeOptions = append(m.L2.ExtensionRangeOptions, optsFunc)
+ }
+ if m.L2.Fields.List, err = r.initFieldsFromDescriptorProto(md.GetField(), m, sb); err != nil {
+ return nil, err
+ }
+ if m.L2.Oneofs.List, err = r.initOneofsFromDescriptorProto(md.GetOneofDecl(), m, sb); err != nil {
+ return nil, err
+ }
+ if m.L1.Enums.List, err = r.initEnumDeclarations(md.GetEnumType(), m, sb); err != nil {
+ return nil, err
+ }
+ if m.L1.Messages.List, err = r.initMessagesDeclarations(md.GetNestedType(), m, sb); err != nil {
+ return nil, err
+ }
+ if m.L1.Extensions.List, err = r.initExtensionDeclarations(md.GetExtension(), m, sb); err != nil {
+ return nil, err
+ }
+ }
+ return ms, nil
+}
+
+func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (fs []filedesc.Field, err error) {
+ fs = make([]filedesc.Field, len(fds)) // allocate up-front to ensure stable pointers
+ for i, fd := range fds {
+ f := &fs[i]
+ if f.L0, err = r.makeBase(f, parent, fd.GetName(), i, sb); err != nil {
+ return nil, err
+ }
+ f.L1.IsProto3Optional = fd.GetProto3Optional()
+ if opts := fd.GetOptions(); opts != nil {
+ opts = proto.Clone(opts).(*descriptorpb.FieldOptions)
+ f.L1.Options = func() protoreflect.ProtoMessage { return opts }
+ f.L1.IsWeak = opts.GetWeak()
+ f.L1.HasPacked = opts.Packed != nil
+ f.L1.IsPacked = opts.GetPacked()
+ }
+ f.L1.Number = protoreflect.FieldNumber(fd.GetNumber())
+ f.L1.Cardinality = protoreflect.Cardinality(fd.GetLabel())
+ if fd.Type != nil {
+ f.L1.Kind = protoreflect.Kind(fd.GetType())
+ }
+ if fd.JsonName != nil {
+ f.L1.StringName.InitJSON(fd.GetJsonName())
+ }
+ }
+ return fs, nil
+}
+
+func (r descsByName) initOneofsFromDescriptorProto(ods []*descriptorpb.OneofDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (os []filedesc.Oneof, err error) {
+ os = make([]filedesc.Oneof, len(ods)) // allocate up-front to ensure stable pointers
+ for i, od := range ods {
+ o := &os[i]
+ if o.L0, err = r.makeBase(o, parent, od.GetName(), i, sb); err != nil {
+ return nil, err
+ }
+ if opts := od.GetOptions(); opts != nil {
+ opts = proto.Clone(opts).(*descriptorpb.OneofOptions)
+ o.L1.Options = func() protoreflect.ProtoMessage { return opts }
+ }
+ }
+ return os, nil
+}
+
+func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (xs []filedesc.Extension, err error) {
+ xs = make([]filedesc.Extension, len(xds)) // allocate up-front to ensure stable pointers
+ for i, xd := range xds {
+ x := &xs[i]
+ x.L2 = new(filedesc.ExtensionL2)
+ if x.L0, err = r.makeBase(x, parent, xd.GetName(), i, sb); err != nil {
+ return nil, err
+ }
+ if opts := xd.GetOptions(); opts != nil {
+ opts = proto.Clone(opts).(*descriptorpb.FieldOptions)
+ x.L2.Options = func() protoreflect.ProtoMessage { return opts }
+ x.L2.IsPacked = opts.GetPacked()
+ }
+ x.L1.Number = protoreflect.FieldNumber(xd.GetNumber())
+ x.L1.Cardinality = protoreflect.Cardinality(xd.GetLabel())
+ if xd.Type != nil {
+ x.L1.Kind = protoreflect.Kind(xd.GetType())
+ }
+ if xd.JsonName != nil {
+ x.L2.StringName.InitJSON(xd.GetJsonName())
+ }
+ }
+ return xs, nil
+}
+
+func (r descsByName) initServiceDeclarations(sds []*descriptorpb.ServiceDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ss []filedesc.Service, err error) {
+ ss = make([]filedesc.Service, len(sds)) // allocate up-front to ensure stable pointers
+ for i, sd := range sds {
+ s := &ss[i]
+ s.L2 = new(filedesc.ServiceL2)
+ if s.L0, err = r.makeBase(s, parent, sd.GetName(), i, sb); err != nil {
+ return nil, err
+ }
+ if opts := sd.GetOptions(); opts != nil {
+ opts = proto.Clone(opts).(*descriptorpb.ServiceOptions)
+ s.L2.Options = func() protoreflect.ProtoMessage { return opts }
+ }
+ if s.L2.Methods.List, err = r.initMethodsFromDescriptorProto(sd.GetMethod(), s, sb); err != nil {
+ return nil, err
+ }
+ }
+ return ss, nil
+}
+
+func (r descsByName) initMethodsFromDescriptorProto(mds []*descriptorpb.MethodDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ms []filedesc.Method, err error) {
+ ms = make([]filedesc.Method, len(mds)) // allocate up-front to ensure stable pointers
+ for i, md := range mds {
+ m := &ms[i]
+ if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil {
+ return nil, err
+ }
+ if opts := md.GetOptions(); opts != nil {
+ opts = proto.Clone(opts).(*descriptorpb.MethodOptions)
+ m.L1.Options = func() protoreflect.ProtoMessage { return opts }
+ }
+ m.L1.IsStreamingClient = md.GetClientStreaming()
+ m.L1.IsStreamingServer = md.GetServerStreaming()
+ }
+ return ms, nil
+}
+
+func (r descsByName) makeBase(child, parent protoreflect.Descriptor, name string, idx int, sb *strs.Builder) (filedesc.BaseL0, error) {
+ if !protoreflect.Name(name).IsValid() {
+ return filedesc.BaseL0{}, errors.New("descriptor %q has an invalid nested name: %q", parent.FullName(), name)
+ }
+
+ // Derive the full name of the child.
+ // Note that enum values are a sibling to the enum parent in the namespace.
+ var fullName protoreflect.FullName
+ if _, ok := parent.(protoreflect.EnumDescriptor); ok {
+ fullName = sb.AppendFullName(parent.FullName().Parent(), protoreflect.Name(name))
+ } else {
+ fullName = sb.AppendFullName(parent.FullName(), protoreflect.Name(name))
+ }
+ if _, ok := r[fullName]; ok {
+ return filedesc.BaseL0{}, errors.New("descriptor %q already declared", fullName)
+ }
+ r[fullName] = child
+
+ // TODO: Verify that the full name does not already exist in the resolver?
+ // This is not as critical since most usages of NewFile will register
+ // the created file back into the registry, which will perform this check.
+
+ return filedesc.BaseL0{
+ FullName: fullName,
+ ParentFile: parent.ParentFile().(*filedesc.File),
+ Parent: parent,
+ Index: idx,
+ }, nil
+}
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go
new file mode 100644
index 0000000..cebb36c
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go
@@ -0,0 +1,286 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protodesc
+
+import (
+ "google.golang.org/protobuf/internal/encoding/defval"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+
+ "google.golang.org/protobuf/types/descriptorpb"
+)
+
+// resolver is a wrapper around a local registry of declarations within the file
+// and the remote resolver. The remote resolver is restricted to only return
+// descriptors that have been imported.
+type resolver struct {
+ local descsByName
+ remote Resolver
+ imports importSet
+
+ allowUnresolvable bool
+}
+
+func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) (err error) {
+ for i, md := range mds {
+ m := &ms[i]
+ for j, fd := range md.GetField() {
+ f := &m.L2.Fields.List[j]
+ if f.L1.Cardinality == protoreflect.Required {
+ m.L2.RequiredNumbers.List = append(m.L2.RequiredNumbers.List, f.L1.Number)
+ }
+ if fd.OneofIndex != nil {
+ k := int(fd.GetOneofIndex())
+ if !(0 <= k && k < len(md.GetOneofDecl())) {
+ return errors.New("message field %q has an invalid oneof index: %d", f.FullName(), k)
+ }
+ o := &m.L2.Oneofs.List[k]
+ f.L1.ContainingOneof = o
+ o.L1.Fields.List = append(o.L1.Fields.List, f)
+ }
+
+ if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil {
+ return errors.New("message field %q cannot resolve type: %v", f.FullName(), err)
+ }
+ if fd.DefaultValue != nil {
+ v, ev, err := unmarshalDefault(fd.GetDefaultValue(), f, r.allowUnresolvable)
+ if err != nil {
+ return errors.New("message field %q has invalid default: %v", f.FullName(), err)
+ }
+ f.L1.Default = filedesc.DefaultValue(v, ev)
+ }
+ }
+
+ if err := r.resolveMessageDependencies(m.L1.Messages.List, md.GetNestedType()); err != nil {
+ return err
+ }
+ if err := r.resolveExtensionDependencies(m.L1.Extensions.List, md.GetExtension()); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *resolver) resolveExtensionDependencies(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) (err error) {
+ for i, xd := range xds {
+ x := &xs[i]
+ if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee()), false); err != nil {
+ return errors.New("extension field %q cannot resolve extendee: %v", x.FullName(), err)
+ }
+ if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), partialName(xd.GetTypeName()), false); err != nil {
+ return errors.New("extension field %q cannot resolve type: %v", x.FullName(), err)
+ }
+ if xd.DefaultValue != nil {
+ v, ev, err := unmarshalDefault(xd.GetDefaultValue(), x, r.allowUnresolvable)
+ if err != nil {
+ return errors.New("extension field %q has invalid default: %v", x.FullName(), err)
+ }
+ x.L2.Default = filedesc.DefaultValue(v, ev)
+ }
+ }
+ return nil
+}
+
+func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*descriptorpb.ServiceDescriptorProto) (err error) {
+ for i, sd := range sds {
+ s := &ss[i]
+ for j, md := range sd.GetMethod() {
+ m := &s.L2.Methods.List[j]
+ m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType()), false)
+ if err != nil {
+ return errors.New("service method %q cannot resolve input: %v", m.FullName(), err)
+ }
+ m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType()), false)
+ if err != nil {
+ return errors.New("service method %q cannot resolve output: %v", m.FullName(), err)
+ }
+ }
+ }
+ return nil
+}
+
+// findTarget finds an enum or message descriptor if k is an enum, message,
+// group, or unknown. If unknown, and the name could be resolved, the
+// returned kind is set based on the type of the resolved descriptor.
+func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) {
+ switch k {
+ case protoreflect.EnumKind:
+ ed, err := r.findEnumDescriptor(scope, ref, isWeak)
+ if err != nil {
+ return 0, nil, nil, err
+ }
+ return k, ed, nil, nil
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ md, err := r.findMessageDescriptor(scope, ref, isWeak)
+ if err != nil {
+ return 0, nil, nil, err
+ }
+ return k, nil, md, nil
+ case 0:
+ // Handle unspecified kinds (possible with parsers that operate
+ // on a per-file basis without knowledge of dependencies).
+ d, err := r.findDescriptor(scope, ref)
+ if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) {
+ return k, filedesc.PlaceholderEnum(ref.FullName()), filedesc.PlaceholderMessage(ref.FullName()), nil
+ } else if err == protoregistry.NotFound {
+ return 0, nil, nil, errors.New("%q not found", ref.FullName())
+ } else if err != nil {
+ return 0, nil, nil, err
+ }
+ switch d := d.(type) {
+ case protoreflect.EnumDescriptor:
+ return protoreflect.EnumKind, d, nil, nil
+ case protoreflect.MessageDescriptor:
+ return protoreflect.MessageKind, nil, d, nil
+ default:
+ return 0, nil, nil, errors.New("unknown kind")
+ }
+ default:
+ if ref != "" {
+ return 0, nil, nil, errors.New("target name cannot be specified for %v", k)
+ }
+ if !k.IsValid() {
+ return 0, nil, nil, errors.New("invalid kind: %d", k)
+ }
+ return k, nil, nil, nil
+ }
+}
+
+// findDescriptor finds the descriptor by name,
+// which may be a relative name within some scope.
+//
+// Suppose the scope was "fizz.buzz" and the reference was "Foo.Bar",
+// then the following full names are searched:
+// * fizz.buzz.Foo.Bar
+// * fizz.Foo.Bar
+// * Foo.Bar
+func (r *resolver) findDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.Descriptor, error) {
+ if !ref.IsValid() {
+ return nil, errors.New("invalid name reference: %q", ref)
+ }
+ if ref.IsFull() {
+ scope, ref = "", ref[1:]
+ }
+ var foundButNotImported protoreflect.Descriptor
+ for {
+ // Derive the full name to search.
+ s := protoreflect.FullName(ref)
+ if scope != "" {
+ s = scope + "." + s
+ }
+
+ // Check the current file for the descriptor.
+ if d, ok := r.local[s]; ok {
+ return d, nil
+ }
+
+ // Check the remote registry for the descriptor.
+ d, err := r.remote.FindDescriptorByName(s)
+ if err == nil {
+ // Only allow descriptors covered by one of the imports.
+ if r.imports[d.ParentFile().Path()] {
+ return d, nil
+ }
+ foundButNotImported = d
+ } else if err != protoregistry.NotFound {
+ return nil, errors.Wrap(err, "%q", s)
+ }
+
+ // Continue on at a higher level of scoping.
+ if scope == "" {
+ if d := foundButNotImported; d != nil {
+ return nil, errors.New("resolved %q, but %q is not imported", d.FullName(), d.ParentFile().Path())
+ }
+ return nil, protoregistry.NotFound
+ }
+ scope = scope.Parent()
+ }
+}
+
+func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.EnumDescriptor, error) {
+ d, err := r.findDescriptor(scope, ref)
+ if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) {
+ return filedesc.PlaceholderEnum(ref.FullName()), nil
+ } else if err == protoregistry.NotFound {
+ return nil, errors.New("%q not found", ref.FullName())
+ } else if err != nil {
+ return nil, err
+ }
+ ed, ok := d.(protoreflect.EnumDescriptor)
+ if !ok {
+ return nil, errors.New("resolved %q, but it is not an enum", d.FullName())
+ }
+ return ed, nil
+}
+
+func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.MessageDescriptor, error) {
+ d, err := r.findDescriptor(scope, ref)
+ if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) {
+ return filedesc.PlaceholderMessage(ref.FullName()), nil
+ } else if err == protoregistry.NotFound {
+ return nil, errors.New("%q not found", ref.FullName())
+ } else if err != nil {
+ return nil, err
+ }
+ md, ok := d.(protoreflect.MessageDescriptor)
+ if !ok {
+ return nil, errors.New("resolved %q, but it is not an message", d.FullName())
+ }
+ return md, nil
+}
+
+// partialName is the partial name. A leading dot means that the name is full,
+// otherwise the name is relative to some current scope.
+// See google.protobuf.FieldDescriptorProto.type_name.
+type partialName string
+
+func (s partialName) IsFull() bool {
+ return len(s) > 0 && s[0] == '.'
+}
+
+func (s partialName) IsValid() bool {
+ if s.IsFull() {
+ return protoreflect.FullName(s[1:]).IsValid()
+ }
+ return protoreflect.FullName(s).IsValid()
+}
+
+const unknownPrefix = "*."
+
+// FullName converts the partial name to a full name on a best-effort basis.
+// If relative, it creates an invalid full name, using a "*." prefix
+// to indicate that the start of the full name is unknown.
+func (s partialName) FullName() protoreflect.FullName {
+ if s.IsFull() {
+ return protoreflect.FullName(s[1:])
+ }
+ return protoreflect.FullName(unknownPrefix + s)
+}
+
+func unmarshalDefault(s string, fd protoreflect.FieldDescriptor, allowUnresolvable bool) (protoreflect.Value, protoreflect.EnumValueDescriptor, error) {
+ var evs protoreflect.EnumValueDescriptors
+ if fd.Enum() != nil {
+ evs = fd.Enum().Values()
+ }
+ v, ev, err := defval.Unmarshal(s, fd.Kind(), evs, defval.Descriptor)
+ if err != nil && allowUnresolvable && evs != nil && protoreflect.Name(s).IsValid() {
+ v = protoreflect.ValueOfEnum(0)
+ if evs.Len() > 0 {
+ v = protoreflect.ValueOfEnum(evs.Get(0).Number())
+ }
+ ev = filedesc.PlaceholderEnumValue(fd.Enum().FullName().Parent().Append(protoreflect.Name(s)))
+ } else if err != nil {
+ return v, ev, err
+ }
+ if fd.Syntax() == protoreflect.Proto3 {
+ return v, ev, errors.New("cannot be specified under proto3 semantics")
+ }
+ if fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind || fd.Cardinality() == protoreflect.Repeated {
+ return v, ev, errors.New("cannot be specified on composite types")
+ }
+ return v, ev, nil
+}
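A standalone illustration (not part of the vendored code) of the scope-walking candidate order that findDescriptor's comment describes, using plain string manipulation:

package main

import (
	"fmt"
	"strings"
)

// candidates lists the full names tried when resolving a relative reference
// within a scope, from the innermost scope outward.
func candidates(scope, ref string) []string {
	var out []string
	for scope != "" {
		out = append(out, scope+"."+ref)
		if i := strings.LastIndex(scope, "."); i >= 0 {
			scope = scope[:i]
		} else {
			scope = ""
		}
	}
	return append(out, ref)
}

func main() {
	fmt.Println(candidates("fizz.buzz", "Foo.Bar"))
	// [fizz.buzz.Foo.Bar fizz.Foo.Bar Foo.Bar]
}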
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go
new file mode 100644
index 0000000..9af1d56
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go
@@ -0,0 +1,374 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protodesc
+
+import (
+ "strings"
+ "unicode"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/internal/flags"
+ "google.golang.org/protobuf/internal/genid"
+ "google.golang.org/protobuf/internal/strs"
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ "google.golang.org/protobuf/types/descriptorpb"
+)
+
+func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescriptorProto) error {
+ for i, ed := range eds {
+ e := &es[i]
+ if err := e.L2.ReservedNames.CheckValid(); err != nil {
+ return errors.New("enum %q reserved names has %v", e.FullName(), err)
+ }
+ if err := e.L2.ReservedRanges.CheckValid(); err != nil {
+ return errors.New("enum %q reserved ranges has %v", e.FullName(), err)
+ }
+ if len(ed.GetValue()) == 0 {
+ return errors.New("enum %q must contain at least one value declaration", e.FullName())
+ }
+ allowAlias := ed.GetOptions().GetAllowAlias()
+ foundAlias := false
+ for i := 0; i < e.Values().Len(); i++ {
+ v1 := e.Values().Get(i)
+ if v2 := e.Values().ByNumber(v1.Number()); v1 != v2 {
+ foundAlias = true
+ if !allowAlias {
+ return errors.New("enum %q has conflicting non-aliased values on number %d: %q with %q", e.FullName(), v1.Number(), v1.Name(), v2.Name())
+ }
+ }
+ }
+ if allowAlias && !foundAlias {
+ return errors.New("enum %q allows aliases, but none were found", e.FullName())
+ }
+ if e.Syntax() == protoreflect.Proto3 {
+ if v := e.Values().Get(0); v.Number() != 0 {
+ return errors.New("enum %q using proto3 semantics must have zero number for the first value", v.FullName())
+ }
+ // Verify that value names in proto3 do not conflict if the
+ // case-insensitive prefix is removed.
+ // See protoc v3.8.0: src/google/protobuf/descriptor.cc:4991-5055
+ names := map[string]protoreflect.EnumValueDescriptor{}
+ prefix := strings.Replace(strings.ToLower(string(e.Name())), "_", "", -1)
+ for i := 0; i < e.Values().Len(); i++ {
+ v1 := e.Values().Get(i)
+ s := strs.EnumValueName(strs.TrimEnumPrefix(string(v1.Name()), prefix))
+ if v2, ok := names[s]; ok && v1.Number() != v2.Number() {
+ return errors.New("enum %q using proto3 semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name())
+ }
+ names[s] = v1
+ }
+ }
+
+ for j, vd := range ed.GetValue() {
+ v := &e.L2.Values.List[j]
+ if vd.Number == nil {
+ return errors.New("enum value %q must have a specified number", v.FullName())
+ }
+ if e.L2.ReservedNames.Has(v.Name()) {
+ return errors.New("enum value %q must not use reserved name", v.FullName())
+ }
+ if e.L2.ReservedRanges.Has(v.Number()) {
+ return errors.New("enum value %q must not use reserved number %d", v.FullName(), v.Number())
+ }
+ }
+ }
+ return nil
+}
+
+func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error {
+ for i, md := range mds {
+ m := &ms[i]
+
+ // Handle the message descriptor itself.
+ isMessageSet := md.GetOptions().GetMessageSetWireFormat()
+ if err := m.L2.ReservedNames.CheckValid(); err != nil {
+ return errors.New("message %q reserved names has %v", m.FullName(), err)
+ }
+ if err := m.L2.ReservedRanges.CheckValid(isMessageSet); err != nil {
+ return errors.New("message %q reserved ranges has %v", m.FullName(), err)
+ }
+ if err := m.L2.ExtensionRanges.CheckValid(isMessageSet); err != nil {
+ return errors.New("message %q extension ranges has %v", m.FullName(), err)
+ }
+ if err := (*filedesc.FieldRanges).CheckOverlap(&m.L2.ReservedRanges, &m.L2.ExtensionRanges); err != nil {
+ return errors.New("message %q reserved and extension ranges has %v", m.FullName(), err)
+ }
+ for i := 0; i < m.Fields().Len(); i++ {
+ f1 := m.Fields().Get(i)
+ if f2 := m.Fields().ByNumber(f1.Number()); f1 != f2 {
+ return errors.New("message %q has conflicting fields: %q with %q", m.FullName(), f1.Name(), f2.Name())
+ }
+ }
+ if isMessageSet && !flags.ProtoLegacy {
+ return errors.New("message %q is a MessageSet, which is a legacy proto1 feature that is no longer supported", m.FullName())
+ }
+ if isMessageSet && (m.Syntax() != protoreflect.Proto2 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) {
+ return errors.New("message %q is an invalid proto1 MessageSet", m.FullName())
+ }
+ if m.Syntax() == protoreflect.Proto3 {
+ if m.ExtensionRanges().Len() > 0 {
+ return errors.New("message %q using proto3 semantics cannot have extension ranges", m.FullName())
+ }
+ // Verify that field names in proto3 do not conflict if lowercased
+ // with all underscores removed.
+ // See protoc v3.8.0: src/google/protobuf/descriptor.cc:5830-5847
+ names := map[string]protoreflect.FieldDescriptor{}
+ for i := 0; i < m.Fields().Len(); i++ {
+ f1 := m.Fields().Get(i)
+ s := strings.Replace(strings.ToLower(string(f1.Name())), "_", "", -1)
+ if f2, ok := names[s]; ok {
+ return errors.New("message %q using proto3 semantics has conflict: %q with %q", m.FullName(), f1.Name(), f2.Name())
+ }
+ names[s] = f1
+ }
+ }
+
+ for j, fd := range md.GetField() {
+ f := &m.L2.Fields.List[j]
+ if m.L2.ReservedNames.Has(f.Name()) {
+ return errors.New("message field %q must not use reserved name", f.FullName())
+ }
+ if !f.Number().IsValid() {
+ return errors.New("message field %q has an invalid number: %d", f.FullName(), f.Number())
+ }
+ if !f.Cardinality().IsValid() {
+ return errors.New("message field %q has an invalid cardinality: %d", f.FullName(), f.Cardinality())
+ }
+ if m.L2.ReservedRanges.Has(f.Number()) {
+ return errors.New("message field %q must not use reserved number %d", f.FullName(), f.Number())
+ }
+ if m.L2.ExtensionRanges.Has(f.Number()) {
+ return errors.New("message field %q with number %d in extension range", f.FullName(), f.Number())
+ }
+ if fd.Extendee != nil {
+ return errors.New("message field %q may not have extendee: %q", f.FullName(), fd.GetExtendee())
+ }
+ if f.L1.IsProto3Optional {
+ if f.Syntax() != protoreflect.Proto3 {
+ return errors.New("message field %q under proto3 optional semantics must be specified in the proto3 syntax", f.FullName())
+ }
+ if f.Cardinality() != protoreflect.Optional {
+ return errors.New("message field %q under proto3 optional semantics must have optional cardinality", f.FullName())
+ }
+ if f.ContainingOneof() != nil && f.ContainingOneof().Fields().Len() != 1 {
+ return errors.New("message field %q under proto3 optional semantics must be within a single element oneof", f.FullName())
+ }
+ }
+ if f.IsWeak() && !flags.ProtoLegacy {
+ return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName())
+ }
+ if f.IsWeak() && (f.Syntax() != protoreflect.Proto2 || !isOptionalMessage(f) || f.ContainingOneof() != nil) {
+ return errors.New("message field %q may only be weak for an optional message", f.FullName())
+ }
+ if f.IsPacked() && !isPackable(f) {
+ return errors.New("message field %q is not packable", f.FullName())
+ }
+ if err := checkValidGroup(f); err != nil {
+ return errors.New("message field %q is an invalid group: %v", f.FullName(), err)
+ }
+ if err := checkValidMap(f); err != nil {
+ return errors.New("message field %q is an invalid map: %v", f.FullName(), err)
+ }
+ if f.Syntax() == protoreflect.Proto3 {
+ if f.Cardinality() == protoreflect.Required {
+ return errors.New("message field %q using proto3 semantics cannot be required", f.FullName())
+ }
+ if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().Syntax() != protoreflect.Proto3 {
+ return errors.New("message field %q using proto3 semantics may only depend on a proto3 enum", f.FullName())
+ }
+ }
+ }
+ seenSynthetic := false // synthetic oneofs for proto3 optional must come after real oneofs
+ for j := range md.GetOneofDecl() {
+ o := &m.L2.Oneofs.List[j]
+ if o.Fields().Len() == 0 {
+ return errors.New("message oneof %q must contain at least one field declaration", o.FullName())
+ }
+ if n := o.Fields().Len(); n-1 != (o.Fields().Get(n-1).Index() - o.Fields().Get(0).Index()) {
+ return errors.New("message oneof %q must have consecutively declared fields", o.FullName())
+ }
+
+ if o.IsSynthetic() {
+ seenSynthetic = true
+ continue
+ }
+ if !o.IsSynthetic() && seenSynthetic {
+ return errors.New("message oneof %q must be declared before synthetic oneofs", o.FullName())
+ }
+
+ for i := 0; i < o.Fields().Len(); i++ {
+ f := o.Fields().Get(i)
+ if f.Cardinality() != protoreflect.Optional {
+ return errors.New("message field %q belongs in a oneof and must be optional", f.FullName())
+ }
+ if f.IsWeak() {
+ return errors.New("message field %q belongs in a oneof and must not be a weak reference", f.FullName())
+ }
+ }
+ }
+
+ if err := validateEnumDeclarations(m.L1.Enums.List, md.GetEnumType()); err != nil {
+ return err
+ }
+ if err := validateMessageDeclarations(m.L1.Messages.List, md.GetNestedType()); err != nil {
+ return err
+ }
+ if err := validateExtensionDeclarations(m.L1.Extensions.List, md.GetExtension()); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func validateExtensionDeclarations(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error {
+ for i, xd := range xds {
+ x := &xs[i]
+ // NOTE: Avoid using the IsValid method since extensions to MessageSet
+ // may have a field number higher than normal. This check only verifies
+ // that the number is not negative or reserved. We check again later
+ // if we know that the extendee is definitely not a MessageSet.
+ if n := x.Number(); n < 0 || (protowire.FirstReservedNumber <= n && n <= protowire.LastReservedNumber) {
+ return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number())
+ }
+ if !x.Cardinality().IsValid() || x.Cardinality() == protoreflect.Required {
+ return errors.New("extension field %q has an invalid cardinality: %d", x.FullName(), x.Cardinality())
+ }
+ if xd.JsonName != nil {
+ // A bug in older versions of protoc would always populate the
+ // "json_name" option for extensions when it is meaningless.
+ // When it did so, it would always use the camel-cased field name.
+ if xd.GetJsonName() != strs.JSONCamelCase(string(x.Name())) {
+ return errors.New("extension field %q may not have an explicitly set JSON name: %q", x.FullName(), xd.GetJsonName())
+ }
+ }
+ if xd.OneofIndex != nil {
+ return errors.New("extension field %q may not be part of a oneof", x.FullName())
+ }
+ if md := x.ContainingMessage(); !md.IsPlaceholder() {
+ if !md.ExtensionRanges().Has(x.Number()) {
+ return errors.New("extension field %q extends %q with non-extension field number: %d", x.FullName(), md.FullName(), x.Number())
+ }
+ isMessageSet := md.Options().(*descriptorpb.MessageOptions).GetMessageSetWireFormat()
+ if isMessageSet && !isOptionalMessage(x) {
+ return errors.New("extension field %q extends MessageSet and must be an optional message", x.FullName())
+ }
+ if !isMessageSet && !x.Number().IsValid() {
+ return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number())
+ }
+ }
+ if xd.GetOptions().GetWeak() {
+ return errors.New("extension field %q cannot be a weak reference", x.FullName())
+ }
+ if x.IsPacked() && !isPackable(x) {
+ return errors.New("extension field %q is not packable", x.FullName())
+ }
+ if err := checkValidGroup(x); err != nil {
+ return errors.New("extension field %q is an invalid group: %v", x.FullName(), err)
+ }
+ if md := x.Message(); md != nil && md.IsMapEntry() {
+ return errors.New("extension field %q cannot be a map entry", x.FullName())
+ }
+ if x.Syntax() == protoreflect.Proto3 {
+ switch x.ContainingMessage().FullName() {
+ case (*descriptorpb.FileOptions)(nil).ProtoReflect().Descriptor().FullName():
+ case (*descriptorpb.EnumOptions)(nil).ProtoReflect().Descriptor().FullName():
+ case (*descriptorpb.EnumValueOptions)(nil).ProtoReflect().Descriptor().FullName():
+ case (*descriptorpb.MessageOptions)(nil).ProtoReflect().Descriptor().FullName():
+ case (*descriptorpb.FieldOptions)(nil).ProtoReflect().Descriptor().FullName():
+ case (*descriptorpb.OneofOptions)(nil).ProtoReflect().Descriptor().FullName():
+ case (*descriptorpb.ExtensionRangeOptions)(nil).ProtoReflect().Descriptor().FullName():
+ case (*descriptorpb.ServiceOptions)(nil).ProtoReflect().Descriptor().FullName():
+ case (*descriptorpb.MethodOptions)(nil).ProtoReflect().Descriptor().FullName():
+ default:
+ return errors.New("extension field %q cannot be declared in proto3 unless extended descriptor options", x.FullName())
+ }
+ }
+ }
+ return nil
+}
+
+// isOptionalMessage reports whether this is an optional message.
+// If the kind is unknown, it is assumed to be a message.
+func isOptionalMessage(fd protoreflect.FieldDescriptor) bool {
+ return (fd.Kind() == 0 || fd.Kind() == protoreflect.MessageKind) && fd.Cardinality() == protoreflect.Optional
+}
+
+// isPackable checks whether the pack option can be specified.
+func isPackable(fd protoreflect.FieldDescriptor) bool {
+ switch fd.Kind() {
+ case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind:
+ return false
+ }
+ return fd.IsList()
+}
+
+// checkValidGroup reports whether fd is a valid group according to the same
+// rules that protoc imposes.
+func checkValidGroup(fd protoreflect.FieldDescriptor) error {
+ md := fd.Message()
+ switch {
+ case fd.Kind() != protoreflect.GroupKind:
+ return nil
+ case fd.Syntax() != protoreflect.Proto2:
+ return errors.New("invalid under proto2 semantics")
+ case md == nil || md.IsPlaceholder():
+ return errors.New("message must be resolvable")
+ case fd.FullName().Parent() != md.FullName().Parent():
+ return errors.New("message and field must be declared in the same scope")
+ case !unicode.IsUpper(rune(md.Name()[0])):
+ return errors.New("message name must start with an uppercase")
+ case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))):
+ return errors.New("field name must be lowercased form of the message name")
+ }
+ return nil
+}
+
+// checkValidMap checks whether the field is a valid map according to the same
+// rules that protoc imposes.
+// See protoc v3.8.0: src/google/protobuf/descriptor.cc:6045-6115
+func checkValidMap(fd protoreflect.FieldDescriptor) error {
+ md := fd.Message()
+ switch {
+ case md == nil || !md.IsMapEntry():
+ return nil
+ case fd.FullName().Parent() != md.FullName().Parent():
+ return errors.New("message and field must be declared in the same scope")
+ case md.Name() != protoreflect.Name(strs.MapEntryName(string(fd.Name()))):
+ return errors.New("incorrect implicit map entry name")
+ case fd.Cardinality() != protoreflect.Repeated:
+ return errors.New("field must be repeated")
+ case md.Fields().Len() != 2:
+ return errors.New("message must have exactly two fields")
+ case md.ExtensionRanges().Len() > 0:
+ return errors.New("message must not have any extension ranges")
+ case md.Enums().Len()+md.Messages().Len()+md.Extensions().Len() > 0:
+ return errors.New("message must not have any nested declarations")
+ }
+ kf := md.Fields().Get(0)
+ vf := md.Fields().Get(1)
+ switch {
+ case kf.Name() != genid.MapEntry_Key_field_name || kf.Number() != genid.MapEntry_Key_field_number || kf.Cardinality() != protoreflect.Optional || kf.ContainingOneof() != nil || kf.HasDefault():
+ return errors.New("invalid key field")
+ case vf.Name() != genid.MapEntry_Value_field_name || vf.Number() != genid.MapEntry_Value_field_number || vf.Cardinality() != protoreflect.Optional || vf.ContainingOneof() != nil || vf.HasDefault():
+ return errors.New("invalid value field")
+ }
+ switch kf.Kind() {
+ case protoreflect.BoolKind: // bool
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: // int32
+ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: // int64
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: // uint32
+ case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: // uint64
+ case protoreflect.StringKind: // string
+ default:
+ return errors.New("invalid key kind: %v", kf.Kind())
+ }
+ if e := vf.Enum(); e != nil && e.Values().Len() > 0 && e.Values().Get(0).Number() != 0 {
+ return errors.New("map enum value must have zero number for the first value")
+ }
+ return nil
+}
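+
+// Illustrative sketch (not part of the upstream source): the checks in this
+// file run when a descriptor is constructed, e.g. via NewFile in this package.
+// A proto3 file declaring a required field is expected to be rejected by the
+// cardinality check above. All names below are only for the example.
+//
+//	fdp := &descriptorpb.FileDescriptorProto{
+//		Name:   proto.String("example.proto"),
+//		Syntax: proto.String("proto3"),
+//		MessageType: []*descriptorpb.DescriptorProto{{
+//			Name: proto.String("M"),
+//			Field: []*descriptorpb.FieldDescriptorProto{{
+//				Name:   proto.String("f"),
+//				Number: proto.Int32(1),
+//				Label:  descriptorpb.FieldDescriptorProto_LABEL_REQUIRED.Enum(),
+//				Type:   descriptorpb.FieldDescriptorProto_TYPE_INT32.Enum(),
+//			}},
+//		}},
+//	}
+//	_, err := protodesc.NewFile(fdp, nil) // nil resolver: the example file has no imports
+//	// err is expected to be non-nil, reporting that a proto3 field cannot be required.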
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
new file mode 100644
index 0000000..a7c5cef
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
@@ -0,0 +1,252 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protodesc
+
+import (
+ "fmt"
+ "strings"
+
+ "google.golang.org/protobuf/internal/encoding/defval"
+ "google.golang.org/protobuf/internal/strs"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ "google.golang.org/protobuf/types/descriptorpb"
+)
+
+// ToFileDescriptorProto copies a protoreflect.FileDescriptor into a
+// google.protobuf.FileDescriptorProto message.
+func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto {
+ p := &descriptorpb.FileDescriptorProto{
+ Name: proto.String(file.Path()),
+ Options: proto.Clone(file.Options()).(*descriptorpb.FileOptions),
+ }
+ if file.Package() != "" {
+ p.Package = proto.String(string(file.Package()))
+ }
+ for i, imports := 0, file.Imports(); i < imports.Len(); i++ {
+ imp := imports.Get(i)
+ p.Dependency = append(p.Dependency, imp.Path())
+ if imp.IsPublic {
+ p.PublicDependency = append(p.PublicDependency, int32(i))
+ }
+ if imp.IsWeak {
+ p.WeakDependency = append(p.WeakDependency, int32(i))
+ }
+ }
+ for i, locs := 0, file.SourceLocations(); i < locs.Len(); i++ {
+ loc := locs.Get(i)
+ l := &descriptorpb.SourceCodeInfo_Location{}
+ l.Path = append(l.Path, loc.Path...)
+ if loc.StartLine == loc.EndLine {
+ l.Span = []int32{int32(loc.StartLine), int32(loc.StartColumn), int32(loc.EndColumn)}
+ } else {
+ l.Span = []int32{int32(loc.StartLine), int32(loc.StartColumn), int32(loc.EndLine), int32(loc.EndColumn)}
+ }
+ l.LeadingDetachedComments = append([]string(nil), loc.LeadingDetachedComments...)
+ if loc.LeadingComments != "" {
+ l.LeadingComments = proto.String(loc.LeadingComments)
+ }
+ if loc.TrailingComments != "" {
+ l.TrailingComments = proto.String(loc.TrailingComments)
+ }
+ if p.SourceCodeInfo == nil {
+ p.SourceCodeInfo = &descriptorpb.SourceCodeInfo{}
+ }
+ p.SourceCodeInfo.Location = append(p.SourceCodeInfo.Location, l)
+
+ }
+ for i, messages := 0, file.Messages(); i < messages.Len(); i++ {
+ p.MessageType = append(p.MessageType, ToDescriptorProto(messages.Get(i)))
+ }
+ for i, enums := 0, file.Enums(); i < enums.Len(); i++ {
+ p.EnumType = append(p.EnumType, ToEnumDescriptorProto(enums.Get(i)))
+ }
+ for i, services := 0, file.Services(); i < services.Len(); i++ {
+ p.Service = append(p.Service, ToServiceDescriptorProto(services.Get(i)))
+ }
+ for i, exts := 0, file.Extensions(); i < exts.Len(); i++ {
+ p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i)))
+ }
+ if syntax := file.Syntax(); syntax != protoreflect.Proto2 {
+ p.Syntax = proto.String(file.Syntax().String())
+ }
+ return p
+}
+
+// ToDescriptorProto copies a protoreflect.MessageDescriptor into a
+// google.protobuf.DescriptorProto message.
+func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto {
+ p := &descriptorpb.DescriptorProto{
+ Name: proto.String(string(message.Name())),
+ Options: proto.Clone(message.Options()).(*descriptorpb.MessageOptions),
+ }
+ for i, fields := 0, message.Fields(); i < fields.Len(); i++ {
+ p.Field = append(p.Field, ToFieldDescriptorProto(fields.Get(i)))
+ }
+ for i, exts := 0, message.Extensions(); i < exts.Len(); i++ {
+ p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i)))
+ }
+ for i, messages := 0, message.Messages(); i < messages.Len(); i++ {
+ p.NestedType = append(p.NestedType, ToDescriptorProto(messages.Get(i)))
+ }
+ for i, enums := 0, message.Enums(); i < enums.Len(); i++ {
+ p.EnumType = append(p.EnumType, ToEnumDescriptorProto(enums.Get(i)))
+ }
+ for i, xranges := 0, message.ExtensionRanges(); i < xranges.Len(); i++ {
+ xrange := xranges.Get(i)
+ p.ExtensionRange = append(p.ExtensionRange, &descriptorpb.DescriptorProto_ExtensionRange{
+ Start: proto.Int32(int32(xrange[0])),
+ End: proto.Int32(int32(xrange[1])),
+ Options: proto.Clone(message.ExtensionRangeOptions(i)).(*descriptorpb.ExtensionRangeOptions),
+ })
+ }
+ for i, oneofs := 0, message.Oneofs(); i < oneofs.Len(); i++ {
+ p.OneofDecl = append(p.OneofDecl, ToOneofDescriptorProto(oneofs.Get(i)))
+ }
+ for i, ranges := 0, message.ReservedRanges(); i < ranges.Len(); i++ {
+ rrange := ranges.Get(i)
+ p.ReservedRange = append(p.ReservedRange, &descriptorpb.DescriptorProto_ReservedRange{
+ Start: proto.Int32(int32(rrange[0])),
+ End: proto.Int32(int32(rrange[1])),
+ })
+ }
+ for i, names := 0, message.ReservedNames(); i < names.Len(); i++ {
+ p.ReservedName = append(p.ReservedName, string(names.Get(i)))
+ }
+ return p
+}
+
+// ToFieldDescriptorProto copies a protoreflect.FieldDescriptor into a
+// google.protobuf.FieldDescriptorProto message.
+func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.FieldDescriptorProto {
+ p := &descriptorpb.FieldDescriptorProto{
+ Name: proto.String(string(field.Name())),
+ Number: proto.Int32(int32(field.Number())),
+ Label: descriptorpb.FieldDescriptorProto_Label(field.Cardinality()).Enum(),
+ Options: proto.Clone(field.Options()).(*descriptorpb.FieldOptions),
+ }
+ if field.IsExtension() {
+ p.Extendee = fullNameOf(field.ContainingMessage())
+ }
+ if field.Kind().IsValid() {
+ p.Type = descriptorpb.FieldDescriptorProto_Type(field.Kind()).Enum()
+ }
+ if field.Enum() != nil {
+ p.TypeName = fullNameOf(field.Enum())
+ }
+ if field.Message() != nil {
+ p.TypeName = fullNameOf(field.Message())
+ }
+ if field.HasJSONName() {
+ // A bug in older versions of protoc would always populate the
+ // "json_name" option for extensions when it is meaningless.
+ // When it did so, it would always use the camel-cased field name.
+ if field.IsExtension() {
+ p.JsonName = proto.String(strs.JSONCamelCase(string(field.Name())))
+ } else {
+ p.JsonName = proto.String(field.JSONName())
+ }
+ }
+ if field.Syntax() == protoreflect.Proto3 && field.HasOptionalKeyword() {
+ p.Proto3Optional = proto.Bool(true)
+ }
+ if field.HasDefault() {
+ def, err := defval.Marshal(field.Default(), field.DefaultEnumValue(), field.Kind(), defval.Descriptor)
+ if err != nil && field.DefaultEnumValue() != nil {
+ def = string(field.DefaultEnumValue().Name()) // occurs for unresolved enum values
+ } else if err != nil {
+ panic(fmt.Sprintf("%v: %v", field.FullName(), err))
+ }
+ p.DefaultValue = proto.String(def)
+ }
+ if oneof := field.ContainingOneof(); oneof != nil {
+ p.OneofIndex = proto.Int32(int32(oneof.Index()))
+ }
+ return p
+}
+
+// ToOneofDescriptorProto copies a protoreflect.OneofDescriptor into a
+// google.protobuf.OneofDescriptorProto message.
+func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.OneofDescriptorProto {
+ return &descriptorpb.OneofDescriptorProto{
+ Name: proto.String(string(oneof.Name())),
+ Options: proto.Clone(oneof.Options()).(*descriptorpb.OneofOptions),
+ }
+}
+
+// ToEnumDescriptorProto copies a protoreflect.EnumDescriptor into a
+// google.protobuf.EnumDescriptorProto message.
+func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumDescriptorProto {
+ p := &descriptorpb.EnumDescriptorProto{
+ Name: proto.String(string(enum.Name())),
+ Options: proto.Clone(enum.Options()).(*descriptorpb.EnumOptions),
+ }
+ for i, values := 0, enum.Values(); i < values.Len(); i++ {
+ p.Value = append(p.Value, ToEnumValueDescriptorProto(values.Get(i)))
+ }
+ for i, ranges := 0, enum.ReservedRanges(); i < ranges.Len(); i++ {
+ rrange := ranges.Get(i)
+ p.ReservedRange = append(p.ReservedRange, &descriptorpb.EnumDescriptorProto_EnumReservedRange{
+ Start: proto.Int32(int32(rrange[0])),
+ End: proto.Int32(int32(rrange[1])),
+ })
+ }
+ for i, names := 0, enum.ReservedNames(); i < names.Len(); i++ {
+ p.ReservedName = append(p.ReservedName, string(names.Get(i)))
+ }
+ return p
+}
+
+// ToEnumValueDescriptorProto copies a protoreflect.EnumValueDescriptor into a
+// google.protobuf.EnumValueDescriptorProto message.
+func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descriptorpb.EnumValueDescriptorProto {
+ return &descriptorpb.EnumValueDescriptorProto{
+ Name: proto.String(string(value.Name())),
+ Number: proto.Int32(int32(value.Number())),
+ Options: proto.Clone(value.Options()).(*descriptorpb.EnumValueOptions),
+ }
+}
+
+// ToServiceDescriptorProto copies a protoreflect.ServiceDescriptor into a
+// google.protobuf.ServiceDescriptorProto message.
+func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descriptorpb.ServiceDescriptorProto {
+ p := &descriptorpb.ServiceDescriptorProto{
+ Name: proto.String(string(service.Name())),
+ Options: proto.Clone(service.Options()).(*descriptorpb.ServiceOptions),
+ }
+ for i, methods := 0, service.Methods(); i < methods.Len(); i++ {
+ p.Method = append(p.Method, ToMethodDescriptorProto(methods.Get(i)))
+ }
+ return p
+}
+
+// ToMethodDescriptorProto copies a protoreflect.MethodDescriptor into a
+// google.protobuf.MethodDescriptorProto message.
+func ToMethodDescriptorProto(method protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto {
+ p := &descriptorpb.MethodDescriptorProto{
+ Name: proto.String(string(method.Name())),
+ InputType: fullNameOf(method.Input()),
+ OutputType: fullNameOf(method.Output()),
+ Options: proto.Clone(method.Options()).(*descriptorpb.MethodOptions),
+ }
+ if method.IsStreamingClient() {
+ p.ClientStreaming = proto.Bool(true)
+ }
+ if method.IsStreamingServer() {
+ p.ServerStreaming = proto.Bool(true)
+ }
+ return p
+}
+
+func fullNameOf(d protoreflect.Descriptor) *string {
+ if d == nil {
+ return nil
+ }
+ if strings.HasPrefix(string(d.FullName()), unknownPrefix) {
+ return proto.String(string(d.FullName()[len(unknownPrefix):]))
+ }
+ return proto.String("." + string(d.FullName()))
+}
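+
+// Illustrative sketch (not part of the upstream source): converting the file
+// descriptor of a linked-in well-known type back into descriptorpb form.
+// The durationpb reference is only for the example.
+//
+//	md := (&durationpb.Duration{}).ProtoReflect().Descriptor()
+//	fdp := ToFileDescriptorProto(md.ParentFile())
+//	_ = fdp.GetName() // "google/protobuf/duration.proto"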
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
new file mode 100644
index 0000000..6be5d16
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
@@ -0,0 +1,77 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protoreflect
+
+import (
+ "google.golang.org/protobuf/internal/pragma"
+)
+
+// The following types are used by the fast-path Message.ProtoMethods method.
+//
+// To avoid polluting the public protoreflect API with types used only by
+// low-level implementations, the canonical definitions of these types are
+// in the runtime/protoiface package. The definitions here and in protoiface
+// must be kept in sync.
+type (
+ methods = struct {
+ pragma.NoUnkeyedLiterals
+ Flags supportFlags
+ Size func(sizeInput) sizeOutput
+ Marshal func(marshalInput) (marshalOutput, error)
+ Unmarshal func(unmarshalInput) (unmarshalOutput, error)
+ Merge func(mergeInput) mergeOutput
+ CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error)
+ }
+ supportFlags = uint64
+ sizeInput = struct {
+ pragma.NoUnkeyedLiterals
+ Message Message
+ Flags uint8
+ }
+ sizeOutput = struct {
+ pragma.NoUnkeyedLiterals
+ Size int
+ }
+ marshalInput = struct {
+ pragma.NoUnkeyedLiterals
+ Message Message
+ Buf []byte
+ Flags uint8
+ }
+ marshalOutput = struct {
+ pragma.NoUnkeyedLiterals
+ Buf []byte
+ }
+ unmarshalInput = struct {
+ pragma.NoUnkeyedLiterals
+ Message Message
+ Buf []byte
+ Flags uint8
+ Resolver interface {
+ FindExtensionByName(field FullName) (ExtensionType, error)
+ FindExtensionByNumber(message FullName, field FieldNumber) (ExtensionType, error)
+ }
+ }
+ unmarshalOutput = struct {
+ pragma.NoUnkeyedLiterals
+ Flags uint8
+ }
+ mergeInput = struct {
+ pragma.NoUnkeyedLiterals
+ Source Message
+ Destination Message
+ }
+ mergeOutput = struct {
+ pragma.NoUnkeyedLiterals
+ Flags uint8
+ }
+ checkInitializedInput = struct {
+ pragma.NoUnkeyedLiterals
+ Message Message
+ }
+ checkInitializedOutput = struct {
+ pragma.NoUnkeyedLiterals
+ }
+)
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
new file mode 100644
index 0000000..dd85915
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
@@ -0,0 +1,504 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protoreflect provides interfaces to dynamically manipulate messages.
+//
+// This package includes type descriptors which describe the structure of types
+// defined in proto source files and value interfaces which provide the
+// ability to examine and manipulate the contents of messages.
+//
+//
+// Protocol Buffer Descriptors
+//
+// Protobuf descriptors (e.g., EnumDescriptor or MessageDescriptor)
+// are immutable objects that represent protobuf type information.
+// They are wrappers around the messages declared in descriptor.proto.
+// Protobuf descriptors alone lack any information regarding Go types.
+//
+// Enums and messages generated by this module implement Enum and ProtoMessage,
+// where the Descriptor and ProtoReflect.Descriptor accessors respectively
+// return the protobuf descriptor for the values.
+//
+// The protobuf descriptor interfaces are not meant to be implemented by
+// user code since they might need to be extended in the future to support
+// additions to the protobuf language.
+// The "google.golang.org/protobuf/reflect/protodesc" package converts between
+// google.protobuf.DescriptorProto messages and protobuf descriptors.
+//
+//
+// Go Type Descriptors
+//
+// A type descriptor (e.g., EnumType or MessageType) is a constructor for
+// a concrete Go type that represents the associated protobuf descriptor.
+// There is commonly a one-to-one relationship between protobuf descriptors and
+// Go type descriptors, but it can potentially be a one-to-many relationship.
+//
+// Enums and messages generated by this module implement Enum and ProtoMessage,
+// where the Type and ProtoReflect.Type accessors respectively
+// return the protobuf descriptor for the values.
+//
+// The "google.golang.org/protobuf/types/dynamicpb" package can be used to
+// create Go type descriptors from protobuf descriptors.
+//
+//
+// Value Interfaces
+//
+// The Enum and Message interfaces provide a reflective view over an
+// enum or message instance. For enums, it provides the ability to retrieve
+// the enum value number for any concrete enum type. For messages, it provides
+// the ability to access or manipulate fields of the message.
+//
+// To convert a proto.Message to a protoreflect.Message, use the
+// former's ProtoReflect method. Since the ProtoReflect method is new to the
+// v2 message interface, it may not be present on older message implementations.
+// The "github.com/golang/protobuf/proto".MessageReflect function can be used
+// to obtain a reflective view on older messages.
+//
+//
+// Relationships
+//
+// The following diagrams demonstrate the relationships between
+// various types declared in this package.
+//
+//
+// ┌───────────────────────────────────┐
+// V │
+// ┌────────────── New(n) ─────────────┐ │
+// │ │ │
+// │ ┌──── Descriptor() ──┐ │ ┌── Number() ──┐ │
+// │ │ V V │ V │
+// ╔════════════╗ ╔════════════════╗ ╔════════╗ ╔════════════╗
+// ║ EnumType ║ ║ EnumDescriptor ║ ║ Enum ║ ║ EnumNumber ║
+// ╚════════════╝ ╚════════════════╝ ╚════════╝ ╚════════════╝
+// Λ Λ │ │
+// │ └─── Descriptor() ──┘ │
+// │ │
+// └────────────────── Type() ───────┘
+//
+// • An EnumType describes a concrete Go enum type.
+// It has an EnumDescriptor and can construct an Enum instance.
+//
+// • An EnumDescriptor describes an abstract protobuf enum type.
+//
+// • An Enum is a concrete enum instance. Generated enums implement Enum.
+//
+//
+// ┌──────────────── New() ─────────────────┐
+// │ │
+// │ ┌─── Descriptor() ─────┐ │ ┌── Interface() ───┐
+// │ │ V V │ V
+// ╔═════════════╗ ╔═══════════════════╗ ╔═════════╗ ╔══════════════╗
+// ║ MessageType ║ ║ MessageDescriptor ║ ║ Message ║ ║ ProtoMessage ║
+// ╚═════════════╝ ╚═══════════════════╝ ╚═════════╝ ╚══════════════╝
+// Λ Λ │ │ Λ │
+// │ └──── Descriptor() ────┘ │ └─ ProtoReflect() ─┘
+// │ │
+// └─────────────────── Type() ─────────┘
+//
+// • A MessageType describes a concrete Go message type.
+// It has a MessageDescriptor and can construct a Message instance.
+//
+// • A MessageDescriptor describes an abstract protobuf message type.
+//
+// • A Message is a concrete message instance. Generated messages implement
+// ProtoMessage, which can convert to/from a Message.
+//
+//
+// ┌── TypeDescriptor() ──┐ ┌───── Descriptor() ─────┐
+// │ V │ V
+// ╔═══════════════╗ ╔═════════════════════════╗ ╔═════════════════════╗
+// ║ ExtensionType ║ ║ ExtensionTypeDescriptor ║ ║ ExtensionDescriptor ║
+// ╚═══════════════╝ ╚═════════════════════════╝ ╚═════════════════════╝
+// Λ │ │ Λ │ Λ
+// └─────── Type() ───────┘ │ └─── may implement ────┘ │
+// │ │
+// └────── implements ────────┘
+//
+// • An ExtensionType describes a concrete Go implementation of an extension.
+// It has an ExtensionTypeDescriptor and can convert to/from
+// abstract Values and Go values.
+//
+// • An ExtensionTypeDescriptor is an ExtensionDescriptor
+// which also has an ExtensionType.
+//
+// • An ExtensionDescriptor describes an abstract protobuf extension field and
+// may not always be an ExtensionTypeDescriptor.
+package protoreflect
+
+import (
+ "fmt"
+ "strings"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/pragma"
+)
+
+type doNotImplement pragma.DoNotImplement
+
+// ProtoMessage is the top-level interface that all proto messages implement.
+// This is declared in the protoreflect package to avoid a cyclic dependency;
+// use the proto.Message type instead, which aliases this type.
+type ProtoMessage interface{ ProtoReflect() Message }
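+
+// Illustrative sketch (not part of the upstream source): obtaining a
+// reflective view of a generated message, as described in the package
+// documentation above. The durationpb reference is only for the example.
+//
+//	m := &durationpb.Duration{Seconds: 1}
+//	rm := m.ProtoReflect()         // a protoreflect.Message view of m
+//	_ = rm.Descriptor().FullName() // "google.protobuf.Duration"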
+
+// Syntax is the language version of the proto file.
+type Syntax syntax
+
+type syntax int8 // keep exact type opaque as the int type may change
+
+const (
+ Proto2 Syntax = 2
+ Proto3 Syntax = 3
+)
+
+// IsValid reports whether the syntax is valid.
+func (s Syntax) IsValid() bool {
+ switch s {
+ case Proto2, Proto3:
+ return true
+ default:
+ return false
+ }
+}
+
+// String returns s as a proto source identifier (e.g., "proto2").
+func (s Syntax) String() string {
+ switch s {
+ case Proto2:
+ return "proto2"
+ case Proto3:
+ return "proto3"
+ default:
+ return fmt.Sprintf("<unknown:%d>", s)
+ }
+}
+
+// GoString returns s as a Go source identifier (e.g., "Proto2").
+func (s Syntax) GoString() string {
+ switch s {
+ case Proto2:
+ return "Proto2"
+ case Proto3:
+ return "Proto3"
+ default:
+ return fmt.Sprintf("Syntax(%d)", s)
+ }
+}
+
+// Cardinality determines whether a field is optional, required, or repeated.
+type Cardinality cardinality
+
+type cardinality int8 // keep exact type opaque as the int type may change
+
+// Constants as defined by the google.protobuf.Cardinality enumeration.
+const (
+ Optional Cardinality = 1 // appears zero or one times
+ Required Cardinality = 2 // appears exactly one time; invalid with Proto3
+ Repeated Cardinality = 3 // appears zero or more times
+)
+
+// IsValid reports whether the cardinality is valid.
+func (c Cardinality) IsValid() bool {
+ switch c {
+ case Optional, Required, Repeated:
+ return true
+ default:
+ return false
+ }
+}
+
+// String returns c as a proto source identifier (e.g., "optional").
+func (c Cardinality) String() string {
+ switch c {
+ case Optional:
+ return "optional"
+ case Required:
+ return "required"
+ case Repeated:
+ return "repeated"
+ default:
+ return fmt.Sprintf("<unknown:%d>", c)
+ }
+}
+
+// GoString returns c as a Go source identifier (e.g., "Optional").
+func (c Cardinality) GoString() string {
+ switch c {
+ case Optional:
+ return "Optional"
+ case Required:
+ return "Required"
+ case Repeated:
+ return "Repeated"
+ default:
+ return fmt.Sprintf("Cardinality(%d)", c)
+ }
+}
+
+// Kind indicates the basic proto kind of a field.
+type Kind kind
+
+type kind int8 // keep exact type opaque as the int type may change
+
+// Constants as defined by the google.protobuf.Field.Kind enumeration.
+const (
+ BoolKind Kind = 8
+ EnumKind Kind = 14
+ Int32Kind Kind = 5
+ Sint32Kind Kind = 17
+ Uint32Kind Kind = 13
+ Int64Kind Kind = 3
+ Sint64Kind Kind = 18
+ Uint64Kind Kind = 4
+ Sfixed32Kind Kind = 15
+ Fixed32Kind Kind = 7
+ FloatKind Kind = 2
+ Sfixed64Kind Kind = 16
+ Fixed64Kind Kind = 6
+ DoubleKind Kind = 1
+ StringKind Kind = 9
+ BytesKind Kind = 12
+ MessageKind Kind = 11
+ GroupKind Kind = 10
+)
+
+// IsValid reports whether the kind is valid.
+func (k Kind) IsValid() bool {
+ switch k {
+ case BoolKind, EnumKind,
+ Int32Kind, Sint32Kind, Uint32Kind,
+ Int64Kind, Sint64Kind, Uint64Kind,
+ Sfixed32Kind, Fixed32Kind, FloatKind,
+ Sfixed64Kind, Fixed64Kind, DoubleKind,
+ StringKind, BytesKind, MessageKind, GroupKind:
+ return true
+ default:
+ return false
+ }
+}
+
+// String returns k as a proto source identifier (e.g., "bool").
+func (k Kind) String() string {
+ switch k {
+ case BoolKind:
+ return "bool"
+ case EnumKind:
+ return "enum"
+ case Int32Kind:
+ return "int32"
+ case Sint32Kind:
+ return "sint32"
+ case Uint32Kind:
+ return "uint32"
+ case Int64Kind:
+ return "int64"
+ case Sint64Kind:
+ return "sint64"
+ case Uint64Kind:
+ return "uint64"
+ case Sfixed32Kind:
+ return "sfixed32"
+ case Fixed32Kind:
+ return "fixed32"
+ case FloatKind:
+ return "float"
+ case Sfixed64Kind:
+ return "sfixed64"
+ case Fixed64Kind:
+ return "fixed64"
+ case DoubleKind:
+ return "double"
+ case StringKind:
+ return "string"
+ case BytesKind:
+ return "bytes"
+ case MessageKind:
+ return "message"
+ case GroupKind:
+ return "group"
+ default:
+ return fmt.Sprintf("<unknown:%d>", k)
+ }
+}
+
+// GoString returns k as a Go source identifier (e.g., "BoolKind").
+func (k Kind) GoString() string {
+ switch k {
+ case BoolKind:
+ return "BoolKind"
+ case EnumKind:
+ return "EnumKind"
+ case Int32Kind:
+ return "Int32Kind"
+ case Sint32Kind:
+ return "Sint32Kind"
+ case Uint32Kind:
+ return "Uint32Kind"
+ case Int64Kind:
+ return "Int64Kind"
+ case Sint64Kind:
+ return "Sint64Kind"
+ case Uint64Kind:
+ return "Uint64Kind"
+ case Sfixed32Kind:
+ return "Sfixed32Kind"
+ case Fixed32Kind:
+ return "Fixed32Kind"
+ case FloatKind:
+ return "FloatKind"
+ case Sfixed64Kind:
+ return "Sfixed64Kind"
+ case Fixed64Kind:
+ return "Fixed64Kind"
+ case DoubleKind:
+ return "DoubleKind"
+ case StringKind:
+ return "StringKind"
+ case BytesKind:
+ return "BytesKind"
+ case MessageKind:
+ return "MessageKind"
+ case GroupKind:
+ return "GroupKind"
+ default:
+ return fmt.Sprintf("Kind(%d)", k)
+ }
+}
+
+// FieldNumber is the field number in a message.
+type FieldNumber = protowire.Number
+
+// FieldNumbers represent a list of field numbers.
+type FieldNumbers interface {
+ // Len reports the number of fields in the list.
+ Len() int
+ // Get returns the ith field number. It panics if out of bounds.
+ Get(i int) FieldNumber
+ // Has reports whether n is within the list of fields.
+ Has(n FieldNumber) bool
+
+ doNotImplement
+}
+
+// FieldRanges represent a list of field number ranges.
+type FieldRanges interface {
+ // Len reports the number of ranges in the list.
+ Len() int
+ // Get returns the ith range. It panics if out of bounds.
+ Get(i int) [2]FieldNumber // start inclusive; end exclusive
+ // Has reports whether n is within any of the ranges.
+ Has(n FieldNumber) bool
+
+ doNotImplement
+}
+
+// EnumNumber is the numeric value for an enum.
+type EnumNumber int32
+
+// EnumRanges represent a list of enum number ranges.
+type EnumRanges interface {
+ // Len reports the number of ranges in the list.
+ Len() int
+ // Get returns the ith range. It panics if out of bounds.
+ Get(i int) [2]EnumNumber // start inclusive; end inclusive
+ // Has reports whether n is within any of the ranges.
+ Has(n EnumNumber) bool
+
+ doNotImplement
+}
+
+// Name is the short name for a proto declaration. This is not the name
+// as used in Go source code, which might not be identical to the proto name.
+type Name string // e.g., "Kind"
+
+// IsValid reports whether s is a syntactically valid name.
+// An empty name is invalid.
+func (s Name) IsValid() bool {
+ return consumeIdent(string(s)) == len(s)
+}
+
+// Names represent a list of names.
+type Names interface {
+ // Len reports the number of names in the list.
+ Len() int
+ // Get returns the ith name. It panics if out of bounds.
+ Get(i int) Name
+ // Has reports whether s matches any names in the list.
+ Has(s Name) bool
+
+ doNotImplement
+}
+
+// FullName is a qualified name that uniquely identifies a proto declaration.
+// A qualified name is the concatenation of the proto package along with the
+// fully-declared name (i.e., name of parent preceding the name of the child),
+// with a '.' delimiter placed between each Name.
+//
+// This should not have any leading or trailing dots.
+type FullName string // e.g., "google.protobuf.Field.Kind"
+
+// IsValid reports whether s is a syntactically valid full name.
+// An empty full name is invalid.
+func (s FullName) IsValid() bool {
+ i := consumeIdent(string(s))
+ if i < 0 {
+ return false
+ }
+ for len(s) > i {
+ if s[i] != '.' {
+ return false
+ }
+ i++
+ n := consumeIdent(string(s[i:]))
+ if n < 0 {
+ return false
+ }
+ i += n
+ }
+ return true
+}
+
+func consumeIdent(s string) (i int) {
+ if len(s) == 0 || !isLetter(s[i]) {
+ return -1
+ }
+ i++
+ for len(s) > i && isLetterDigit(s[i]) {
+ i++
+ }
+ return i
+}
+func isLetter(c byte) bool {
+ return c == '_' || ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z')
+}
+func isLetterDigit(c byte) bool {
+ return isLetter(c) || ('0' <= c && c <= '9')
+}
+
+// Name returns the short name, which is the last identifier segment.
+// A single segment FullName is the Name itself.
+func (n FullName) Name() Name {
+ if i := strings.LastIndexByte(string(n), '.'); i >= 0 {
+ return Name(n[i+1:])
+ }
+ return Name(n)
+}
+
+// Parent returns the full name with the trailing identifier removed.
+// A single segment FullName has no parent.
+func (n FullName) Parent() FullName {
+ if i := strings.LastIndexByte(string(n), '.'); i >= 0 {
+ return n[:i]
+ }
+ return ""
+}
+
+// Append returns the qualified name appended with the provided short name.
+//
+// Invariant: n == n.Parent().Append(n.Name()) // assuming n is valid
+func (n FullName) Append(s Name) FullName {
+ if n == "" {
+ return FullName(s)
+ }
+ return n + "." + FullName(s)
+}
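+
+// Illustrative sketch (not part of the upstream source): navigating a
+// FullName with the accessors above.
+//
+//	n := FullName("google.protobuf.Field.Kind")
+//	_ = n.Name()                         // "Kind"
+//	_ = n.Parent()                       // "google.protobuf.Field"
+//	_ = n.Parent().Append(n.Name()) == n // true, per the Append invariant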
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go
new file mode 100644
index 0000000..121ba3a
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go
@@ -0,0 +1,128 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protoreflect
+
+import (
+ "strconv"
+)
+
+// SourceLocations is a list of source locations.
+type SourceLocations interface {
+ // Len reports the number of source locations in the proto file.
+ Len() int
+ // Get returns the ith SourceLocation. It panics if out of bounds.
+ Get(int) SourceLocation
+
+ // ByPath returns the SourceLocation for the given path,
+ // returning the first location if multiple exist for the same path.
+ // If multiple locations exist for the same path,
+ // then SourceLocation.Next index can be used to identify the
+ // index of the next SourceLocation.
+ // If no location exists for this path, it returns the zero value.
+ ByPath(path SourcePath) SourceLocation
+
+ // ByDescriptor returns the SourceLocation for the given descriptor,
+ // returning the first location if multiple exist for the same path.
+ // If no location exists for this descriptor, it returns the zero value.
+ ByDescriptor(desc Descriptor) SourceLocation
+
+ doNotImplement
+}
+
+// SourceLocation describes a source location and
+// corresponds with the google.protobuf.SourceCodeInfo.Location message.
+type SourceLocation struct {
+ // Path is the path to the declaration from the root file descriptor.
+ // The contents of this slice must not be mutated.
+ Path SourcePath
+
+ // StartLine and StartColumn are the zero-indexed starting location
+ // in the source file for the declaration.
+ StartLine, StartColumn int
+ // EndLine and EndColumn are the zero-indexed ending location
+ // in the source file for the declaration.
+ // In the descriptor.proto, the end line may be omitted if it is identical
+ // to the start line. Here, it is always populated.
+ EndLine, EndColumn int
+
+ // LeadingDetachedComments are the leading detached comments
+ // for the declaration. The contents of this slice must not be mutated.
+ LeadingDetachedComments []string
+ // LeadingComments is the leading attached comment for the declaration.
+ LeadingComments string
+ // TrailingComments is the trailing attached comment for the declaration.
+ TrailingComments string
+
+ // Next is an index into SourceLocations for the next source location that
+ // has the same Path. It is zero if there is no next location.
+ Next int
+}
+
+// SourcePath identifies part of a file descriptor for a source location.
+// The SourcePath is a sequence of either field numbers or indexes into
+// a repeated field that form a path starting from the root file descriptor.
+//
+// See google.protobuf.SourceCodeInfo.Location.path.
+type SourcePath []int32
+
+// Equal reports whether p1 equals p2.
+func (p1 SourcePath) Equal(p2 SourcePath) bool {
+ if len(p1) != len(p2) {
+ return false
+ }
+ for i := range p1 {
+ if p1[i] != p2[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// String formats the path in a humanly readable manner.
+// The output is guaranteed to be deterministic,
+// making it suitable for use as a key into a Go map.
+// It is not guaranteed to be stable as the exact output could change
+// in a future version of this module.
+//
+// Example output:
+// .message_type[6].nested_type[15].field[3]
+func (p SourcePath) String() string {
+ b := p.appendFileDescriptorProto(nil)
+ for _, i := range p {
+ b = append(b, '.')
+ b = strconv.AppendInt(b, int64(i), 10)
+ }
+ return string(b)
+}
+
+type appendFunc func(*SourcePath, []byte) []byte
+
+func (p *SourcePath) appendSingularField(b []byte, name string, f appendFunc) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ b = append(b, '.')
+ b = append(b, name...)
+ *p = (*p)[1:]
+ if f != nil {
+ b = f(p, b)
+ }
+ return b
+}
+
+func (p *SourcePath) appendRepeatedField(b []byte, name string, f appendFunc) []byte {
+ b = p.appendSingularField(b, name, nil)
+ if len(*p) == 0 || (*p)[0] < 0 {
+ return b
+ }
+ b = append(b, '[')
+ b = strconv.AppendUint(b, uint64((*p)[0]), 10)
+ b = append(b, ']')
+ *p = (*p)[1:]
+ if f != nil {
+ b = f(p, b)
+ }
+ return b
+}
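+
+// Illustrative sketch (not part of the upstream source): rendering a source
+// path with String. The path below addresses field index 3 of nested message
+// index 15 inside top-level message index 6.
+//
+//	p := SourcePath{4, 6, 3, 15, 2, 3}
+//	_ = p.String() // ".message_type[6].nested_type[15].field[3]"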
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
new file mode 100644
index 0000000..b03c122
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
@@ -0,0 +1,461 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package protoreflect
+
+func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "name", nil)
+ case 2:
+ b = p.appendSingularField(b, "package", nil)
+ case 3:
+ b = p.appendRepeatedField(b, "dependency", nil)
+ case 10:
+ b = p.appendRepeatedField(b, "public_dependency", nil)
+ case 11:
+ b = p.appendRepeatedField(b, "weak_dependency", nil)
+ case 4:
+ b = p.appendRepeatedField(b, "message_type", (*SourcePath).appendDescriptorProto)
+ case 5:
+ b = p.appendRepeatedField(b, "enum_type", (*SourcePath).appendEnumDescriptorProto)
+ case 6:
+ b = p.appendRepeatedField(b, "service", (*SourcePath).appendServiceDescriptorProto)
+ case 7:
+ b = p.appendRepeatedField(b, "extension", (*SourcePath).appendFieldDescriptorProto)
+ case 8:
+ b = p.appendSingularField(b, "options", (*SourcePath).appendFileOptions)
+ case 9:
+ b = p.appendSingularField(b, "source_code_info", (*SourcePath).appendSourceCodeInfo)
+ case 12:
+ b = p.appendSingularField(b, "syntax", nil)
+ }
+ return b
+}
+
+func (p *SourcePath) appendDescriptorProto(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "name", nil)
+ case 2:
+ b = p.appendRepeatedField(b, "field", (*SourcePath).appendFieldDescriptorProto)
+ case 6:
+ b = p.appendRepeatedField(b, "extension", (*SourcePath).appendFieldDescriptorProto)
+ case 3:
+ b = p.appendRepeatedField(b, "nested_type", (*SourcePath).appendDescriptorProto)
+ case 4:
+ b = p.appendRepeatedField(b, "enum_type", (*SourcePath).appendEnumDescriptorProto)
+ case 5:
+ b = p.appendRepeatedField(b, "extension_range", (*SourcePath).appendDescriptorProto_ExtensionRange)
+ case 8:
+ b = p.appendRepeatedField(b, "oneof_decl", (*SourcePath).appendOneofDescriptorProto)
+ case 7:
+ b = p.appendSingularField(b, "options", (*SourcePath).appendMessageOptions)
+ case 9:
+ b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendDescriptorProto_ReservedRange)
+ case 10:
+ b = p.appendRepeatedField(b, "reserved_name", nil)
+ }
+ return b
+}
+
+func (p *SourcePath) appendEnumDescriptorProto(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "name", nil)
+ case 2:
+ b = p.appendRepeatedField(b, "value", (*SourcePath).appendEnumValueDescriptorProto)
+ case 3:
+ b = p.appendSingularField(b, "options", (*SourcePath).appendEnumOptions)
+ case 4:
+ b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendEnumDescriptorProto_EnumReservedRange)
+ case 5:
+ b = p.appendRepeatedField(b, "reserved_name", nil)
+ }
+ return b
+}
+
+func (p *SourcePath) appendServiceDescriptorProto(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "name", nil)
+ case 2:
+ b = p.appendRepeatedField(b, "method", (*SourcePath).appendMethodDescriptorProto)
+ case 3:
+ b = p.appendSingularField(b, "options", (*SourcePath).appendServiceOptions)
+ }
+ return b
+}
+
+func (p *SourcePath) appendFieldDescriptorProto(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "name", nil)
+ case 3:
+ b = p.appendSingularField(b, "number", nil)
+ case 4:
+ b = p.appendSingularField(b, "label", nil)
+ case 5:
+ b = p.appendSingularField(b, "type", nil)
+ case 6:
+ b = p.appendSingularField(b, "type_name", nil)
+ case 2:
+ b = p.appendSingularField(b, "extendee", nil)
+ case 7:
+ b = p.appendSingularField(b, "default_value", nil)
+ case 9:
+ b = p.appendSingularField(b, "oneof_index", nil)
+ case 10:
+ b = p.appendSingularField(b, "json_name", nil)
+ case 8:
+ b = p.appendSingularField(b, "options", (*SourcePath).appendFieldOptions)
+ case 17:
+ b = p.appendSingularField(b, "proto3_optional", nil)
+ }
+ return b
+}
+
+func (p *SourcePath) appendFileOptions(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "java_package", nil)
+ case 8:
+ b = p.appendSingularField(b, "java_outer_classname", nil)
+ case 10:
+ b = p.appendSingularField(b, "java_multiple_files", nil)
+ case 20:
+ b = p.appendSingularField(b, "java_generate_equals_and_hash", nil)
+ case 27:
+ b = p.appendSingularField(b, "java_string_check_utf8", nil)
+ case 9:
+ b = p.appendSingularField(b, "optimize_for", nil)
+ case 11:
+ b = p.appendSingularField(b, "go_package", nil)
+ case 16:
+ b = p.appendSingularField(b, "cc_generic_services", nil)
+ case 17:
+ b = p.appendSingularField(b, "java_generic_services", nil)
+ case 18:
+ b = p.appendSingularField(b, "py_generic_services", nil)
+ case 42:
+ b = p.appendSingularField(b, "php_generic_services", nil)
+ case 23:
+ b = p.appendSingularField(b, "deprecated", nil)
+ case 31:
+ b = p.appendSingularField(b, "cc_enable_arenas", nil)
+ case 36:
+ b = p.appendSingularField(b, "objc_class_prefix", nil)
+ case 37:
+ b = p.appendSingularField(b, "csharp_namespace", nil)
+ case 39:
+ b = p.appendSingularField(b, "swift_prefix", nil)
+ case 40:
+ b = p.appendSingularField(b, "php_class_prefix", nil)
+ case 41:
+ b = p.appendSingularField(b, "php_namespace", nil)
+ case 44:
+ b = p.appendSingularField(b, "php_metadata_namespace", nil)
+ case 45:
+ b = p.appendSingularField(b, "ruby_package", nil)
+ case 999:
+ b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
+ }
+ return b
+}
+
+func (p *SourcePath) appendSourceCodeInfo(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendRepeatedField(b, "location", (*SourcePath).appendSourceCodeInfo_Location)
+ }
+ return b
+}
+
+func (p *SourcePath) appendDescriptorProto_ExtensionRange(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "start", nil)
+ case 2:
+ b = p.appendSingularField(b, "end", nil)
+ case 3:
+ b = p.appendSingularField(b, "options", (*SourcePath).appendExtensionRangeOptions)
+ }
+ return b
+}
+
+func (p *SourcePath) appendOneofDescriptorProto(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "name", nil)
+ case 2:
+ b = p.appendSingularField(b, "options", (*SourcePath).appendOneofOptions)
+ }
+ return b
+}
+
+func (p *SourcePath) appendMessageOptions(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "message_set_wire_format", nil)
+ case 2:
+ b = p.appendSingularField(b, "no_standard_descriptor_accessor", nil)
+ case 3:
+ b = p.appendSingularField(b, "deprecated", nil)
+ case 7:
+ b = p.appendSingularField(b, "map_entry", nil)
+ case 999:
+ b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
+ }
+ return b
+}
+
+func (p *SourcePath) appendDescriptorProto_ReservedRange(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "start", nil)
+ case 2:
+ b = p.appendSingularField(b, "end", nil)
+ }
+ return b
+}
+
+func (p *SourcePath) appendEnumValueDescriptorProto(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "name", nil)
+ case 2:
+ b = p.appendSingularField(b, "number", nil)
+ case 3:
+ b = p.appendSingularField(b, "options", (*SourcePath).appendEnumValueOptions)
+ }
+ return b
+}
+
+func (p *SourcePath) appendEnumOptions(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 2:
+ b = p.appendSingularField(b, "allow_alias", nil)
+ case 3:
+ b = p.appendSingularField(b, "deprecated", nil)
+ case 999:
+ b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
+ }
+ return b
+}
+
+func (p *SourcePath) appendEnumDescriptorProto_EnumReservedRange(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "start", nil)
+ case 2:
+ b = p.appendSingularField(b, "end", nil)
+ }
+ return b
+}
+
+func (p *SourcePath) appendMethodDescriptorProto(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "name", nil)
+ case 2:
+ b = p.appendSingularField(b, "input_type", nil)
+ case 3:
+ b = p.appendSingularField(b, "output_type", nil)
+ case 4:
+ b = p.appendSingularField(b, "options", (*SourcePath).appendMethodOptions)
+ case 5:
+ b = p.appendSingularField(b, "client_streaming", nil)
+ case 6:
+ b = p.appendSingularField(b, "server_streaming", nil)
+ }
+ return b
+}
+
+func (p *SourcePath) appendServiceOptions(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 33:
+ b = p.appendSingularField(b, "deprecated", nil)
+ case 999:
+ b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
+ }
+ return b
+}
+
+func (p *SourcePath) appendFieldOptions(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "ctype", nil)
+ case 2:
+ b = p.appendSingularField(b, "packed", nil)
+ case 6:
+ b = p.appendSingularField(b, "jstype", nil)
+ case 5:
+ b = p.appendSingularField(b, "lazy", nil)
+ case 3:
+ b = p.appendSingularField(b, "deprecated", nil)
+ case 10:
+ b = p.appendSingularField(b, "weak", nil)
+ case 999:
+ b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
+ }
+ return b
+}
+
+func (p *SourcePath) appendUninterpretedOption(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 2:
+ b = p.appendRepeatedField(b, "name", (*SourcePath).appendUninterpretedOption_NamePart)
+ case 3:
+ b = p.appendSingularField(b, "identifier_value", nil)
+ case 4:
+ b = p.appendSingularField(b, "positive_int_value", nil)
+ case 5:
+ b = p.appendSingularField(b, "negative_int_value", nil)
+ case 6:
+ b = p.appendSingularField(b, "double_value", nil)
+ case 7:
+ b = p.appendSingularField(b, "string_value", nil)
+ case 8:
+ b = p.appendSingularField(b, "aggregate_value", nil)
+ }
+ return b
+}
+
+func (p *SourcePath) appendSourceCodeInfo_Location(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendRepeatedField(b, "path", nil)
+ case 2:
+ b = p.appendRepeatedField(b, "span", nil)
+ case 3:
+ b = p.appendSingularField(b, "leading_comments", nil)
+ case 4:
+ b = p.appendSingularField(b, "trailing_comments", nil)
+ case 6:
+ b = p.appendRepeatedField(b, "leading_detached_comments", nil)
+ }
+ return b
+}
+
+func (p *SourcePath) appendExtensionRangeOptions(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 999:
+ b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
+ }
+ return b
+}
+
+func (p *SourcePath) appendOneofOptions(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 999:
+ b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
+ }
+ return b
+}
+
+func (p *SourcePath) appendEnumValueOptions(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "deprecated", nil)
+ case 999:
+ b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
+ }
+ return b
+}
+
+func (p *SourcePath) appendMethodOptions(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 33:
+ b = p.appendSingularField(b, "deprecated", nil)
+ case 34:
+ b = p.appendSingularField(b, "idempotency_level", nil)
+ case 999:
+ b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
+ }
+ return b
+}
+
+func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "name_part", nil)
+ case 2:
+ b = p.appendSingularField(b, "is_extension", nil)
+ }
+ return b
+}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
new file mode 100644
index 0000000..8e53c44
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
@@ -0,0 +1,665 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protoreflect
+
+// Descriptor provides a set of accessors that are common to every descriptor.
+// Each descriptor type wraps the equivalent google.protobuf.XXXDescriptorProto,
+// but provides efficient lookup and immutability.
+//
+// Each descriptor is comparable. Equality implies that the two types are
+// exactly identical. However, it is possible for the same semantically
+// identical proto type to be represented by multiple type descriptors.
+//
+// For example, suppose we have t1 and t2 which are both MessageDescriptors.
+// If t1 == t2, then the types are definitely equal and all accessors return
+// the same information. However, if t1 != t2, then it is still possible that
+// they still represent the same proto type (e.g., t1.FullName == t2.FullName).
+// This can occur if a descriptor type is created dynamically, or multiple
+// versions of the same proto type are accidentally linked into the Go binary.
+type Descriptor interface {
+ // ParentFile returns the parent file descriptor that this descriptor
+ // is declared within. The parent file for the file descriptor is itself.
+ //
+ // Support for this functionality is optional and may return nil.
+ ParentFile() FileDescriptor
+
+ // Parent returns the parent containing this descriptor declaration.
+ // The following shows the mapping from child type to possible parent types:
+ //
+ // ╔═════════════════════╤═══════════════════════════════════╗
+ // ║ Child type │ Possible parent types ║
+ // ╠═════════════════════╪═══════════════════════════════════╣
+ // ║ FileDescriptor │ nil ║
+ // ║ MessageDescriptor │ FileDescriptor, MessageDescriptor ║
+ // ║ FieldDescriptor │ FileDescriptor, MessageDescriptor ║
+ // ║ OneofDescriptor │ MessageDescriptor ║
+ // ║ EnumDescriptor │ FileDescriptor, MessageDescriptor ║
+ // ║ EnumValueDescriptor │ EnumDescriptor ║
+ // ║ ServiceDescriptor │ FileDescriptor ║
+ // ║ MethodDescriptor │ ServiceDescriptor ║
+ // ╚═════════════════════╧═══════════════════════════════════╝
+ //
+ // Support for this functionality is optional and may return nil.
+ Parent() Descriptor
+
+ // Index returns the index of this descriptor within its parent.
+ // It returns 0 if the descriptor does not have a parent or if the parent
+ // is unknown.
+ Index() int
+
+ // Syntax is the protobuf syntax.
+ Syntax() Syntax // e.g., Proto2 or Proto3
+
+ // Name is the short name of the declaration (i.e., FullName.Name).
+ Name() Name // e.g., "Any"
+
+ // FullName is the fully-qualified name of the declaration.
+ //
+ // The FullName is a concatenation of the full name of the type that this
+ // type is declared within and the declaration name. For example,
+ // field "foo_field" in message "proto.package.MyMessage" is
+ // uniquely identified as "proto.package.MyMessage.foo_field".
+ // Enum values are an exception to the rule (see EnumValueDescriptor).
+ FullName() FullName // e.g., "google.protobuf.Any"
+
+ // IsPlaceholder reports whether type information is missing since a
+ // dependency is not resolved, in which case only name information is known.
+ //
+ // Placeholder types may only be returned by the following accessors
+ // as a result of unresolved dependencies or weak imports:
+ //
+ // ╔═══════════════════════════════════╤═════════════════════╗
+ // ║ Accessor │ Descriptor ║
+ // ╠═══════════════════════════════════╪═════════════════════╣
+ // ║ FileImports.FileDescriptor │ FileDescriptor ║
+ // ║ FieldDescriptor.Enum │ EnumDescriptor ║
+ // ║ FieldDescriptor.Message │ MessageDescriptor ║
+ // ║ FieldDescriptor.DefaultEnumValue │ EnumValueDescriptor ║
+ // ║ FieldDescriptor.ContainingMessage │ MessageDescriptor ║
+ // ║ MethodDescriptor.Input │ MessageDescriptor ║
+ // ║ MethodDescriptor.Output │ MessageDescriptor ║
+ // ╚═══════════════════════════════════╧═════════════════════╝
+ //
+ // If true, only Name and FullName are valid.
+ // For FileDescriptor, the Path is also valid.
+ IsPlaceholder() bool
+
+ // Options returns the descriptor options. The caller must not modify
+ // the returned value.
+ //
+ // To avoid a dependency cycle, this function returns a proto.Message value.
+ // The proto message type returned for each descriptor type is as follows:
+ // ╔═════════════════════╤══════════════════════════════════════════╗
+ // ║ Go type │ Protobuf message type ║
+ // ╠═════════════════════╪══════════════════════════════════════════╣
+ // ║ FileDescriptor │ google.protobuf.FileOptions ║
+ // ║ EnumDescriptor │ google.protobuf.EnumOptions ║
+ // ║ EnumValueDescriptor │ google.protobuf.EnumValueOptions ║
+ // ║ MessageDescriptor │ google.protobuf.MessageOptions ║
+ // ║ FieldDescriptor │ google.protobuf.FieldOptions ║
+ // ║ OneofDescriptor │ google.protobuf.OneofOptions ║
+ // ║ ServiceDescriptor │ google.protobuf.ServiceOptions ║
+ // ║ MethodDescriptor │ google.protobuf.MethodOptions ║
+ // ╚═════════════════════╧══════════════════════════════════════════╝
+ //
+ // This method returns a typed nil-pointer if no options are present.
+ // The caller must import the descriptorpb package to use this.
+ Options() ProtoMessage
+
+ doNotImplement
+}
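+
+// Illustrative sketch (not part of the upstream source): reading descriptor
+// options requires asserting to the concrete descriptorpb type, as noted in
+// the Options documentation. Here md is assumed to be a MessageDescriptor.
+//
+//	opts := md.Options().(*descriptorpb.MessageOptions)
+//	_ = opts.GetDeprecated()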
+
+// FileDescriptor describes the types in a complete proto file and
+// corresponds with the google.protobuf.FileDescriptorProto message.
+//
+// Top-level declarations:
+// EnumDescriptor, MessageDescriptor, FieldDescriptor, and/or ServiceDescriptor.
+type FileDescriptor interface {
+ Descriptor // Descriptor.FullName is identical to Package
+
+ // Path returns the file name, relative to the source tree root.
+ Path() string // e.g., "path/to/file.proto"
+ // Package returns the protobuf package namespace.
+ Package() FullName // e.g., "google.protobuf"
+
+ // Imports is a list of imported proto files.
+ Imports() FileImports
+
+ // Enums is a list of the top-level enum declarations.
+ Enums() EnumDescriptors
+ // Messages is a list of the top-level message declarations.
+ Messages() MessageDescriptors
+ // Extensions is a list of the top-level extension declarations.
+ Extensions() ExtensionDescriptors
+ // Services is a list of the top-level service declarations.
+ Services() ServiceDescriptors
+
+ // SourceLocations is a list of source locations.
+ SourceLocations() SourceLocations
+
+ isFileDescriptor
+}
+type isFileDescriptor interface{ ProtoType(FileDescriptor) }
+
+// FileImports is a list of file imports.
+type FileImports interface {
+ // Len reports the number of files imported by this proto file.
+ Len() int
+ // Get returns the ith FileImport. It panics if out of bounds.
+ Get(i int) FileImport
+
+ doNotImplement
+}
+
+// FileImport is the declaration for a proto file import.
+type FileImport struct {
+ // FileDescriptor is the file type for the given import.
+ // It is a placeholder descriptor if IsWeak is set or if a dependency has
+ // not been regenerated to implement the new reflection APIs.
+ FileDescriptor
+
+ // IsPublic reports whether this is a public import, which causes this file
+ // to alias declarations within the imported file. The intended use case
+ // for this feature is the ability to move proto files without breaking
+ // existing dependencies.
+ //
+ // The current file and the imported file must be within the same proto package.
+ IsPublic bool
+
+ // IsWeak reports whether this is a weak import, which does not impose
+ // a direct dependency on the target file.
+ //
+ // Weak imports are a legacy proto1 feature. Equivalent behavior is
+ // achieved using proto2 extension fields or proto3 Any messages.
+ IsWeak bool
+}
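
A hedged sketch of how FileDescriptor, FileImports, and FileImport fit together (the function name is hypothetical); note that FileImport embeds FileDescriptor, so Path is available directly on the import entry.

    import (
    	"fmt"

    	"google.golang.org/protobuf/reflect/protoreflect"
    )

    // describeFile prints a file's package, its imports, and its top-level messages.
    func describeFile(fd protoreflect.FileDescriptor) {
    	fmt.Printf("file %s (package %s)\n", fd.Path(), fd.Package())
    	imps := fd.Imports()
    	for i := 0; i < imps.Len(); i++ {
    		imp := imps.Get(i)
    		fmt.Printf("  import %s (public=%v, weak=%v)\n", imp.Path(), imp.IsPublic, imp.IsWeak)
    	}
    	msgs := fd.Messages()
    	for i := 0; i < msgs.Len(); i++ {
    		fmt.Printf("  message %s\n", msgs.Get(i).FullName())
    	}
    }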
+
+// MessageDescriptor describes a message and
+// corresponds with the google.protobuf.DescriptorProto message.
+//
+// Nested declarations:
+// FieldDescriptor, OneofDescriptor, ExtensionDescriptor, EnumDescriptor,
+// and/or MessageDescriptor.
+type MessageDescriptor interface {
+ Descriptor
+
+ // IsMapEntry indicates that this is an auto-generated message type to
+ // represent the entry type for a map field.
+ //
+ // Map entry messages have only two fields:
+ // • a "key" field with a field number of 1
+ // • a "value" field with a field number of 2
+ // The key and value types are determined by these two fields.
+ //
+ // If IsMapEntry is true, it implies that FieldDescriptor.IsMap is true
+ // for some field with this message type.
+ IsMapEntry() bool
+
+ // Fields is a list of nested field declarations.
+ Fields() FieldDescriptors
+ // Oneofs is a list of nested oneof declarations.
+ Oneofs() OneofDescriptors
+
+ // ReservedNames is a list of reserved field names.
+ ReservedNames() Names
+ // ReservedRanges is a list of reserved ranges of field numbers.
+ ReservedRanges() FieldRanges
+ // RequiredNumbers is a list of required field numbers.
+ // In Proto3, it is always an empty list.
+ RequiredNumbers() FieldNumbers
+ // ExtensionRanges is the field ranges used for extension fields.
+ // In Proto3, it is always an empty list of ranges.
+ ExtensionRanges() FieldRanges
+ // ExtensionRangeOptions returns the ith extension range options.
+ //
+ // To avoid a dependency cycle, this method returns a proto.Message value,
+ // which always contains a google.protobuf.ExtensionRangeOptions message.
+ // This method returns a typed nil-pointer if no options are present.
+ // The caller must import the descriptorpb package to use this.
+ ExtensionRangeOptions(i int) ProtoMessage
+
+ // Enums is a list of nested enum declarations.
+ Enums() EnumDescriptors
+ // Messages is a list of nested message declarations.
+ Messages() MessageDescriptors
+ // Extensions is a list of nested extension declarations.
+ Extensions() ExtensionDescriptors
+
+ isMessageDescriptor
+}
+type isMessageDescriptor interface{ ProtoType(MessageDescriptor) }
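
As a small, hedged example of the accessors above (the helper name is hypothetical), listing a message's own fields and flagging synthetic map-entry types:

    import (
    	"fmt"

    	"google.golang.org/protobuf/reflect/protoreflect"
    )

    // listFields prints the number, name, and kind of every field declared in md.
    func listFields(md protoreflect.MessageDescriptor) {
    	if md.IsMapEntry() {
    		fmt.Printf("%s is an auto-generated map-entry message\n", md.FullName())
    	}
    	fds := md.Fields()
    	for i := 0; i < fds.Len(); i++ {
    		fd := fds.Get(i)
    		fmt.Printf("  %d: %s (%v)\n", fd.Number(), fd.Name(), fd.Kind())
    	}
    }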
+
+// MessageType encapsulates a MessageDescriptor with a concrete Go implementation.
+// It is recommended that implementations of this interface also implement the
+// MessageFieldTypes interface.
+type MessageType interface {
+ // New returns a newly allocated empty message.
+ // It may return nil for synthetic messages representing a map entry.
+ New() Message
+
+ // Zero returns an empty, read-only message.
+ // It may return nil for synthetic messages representing a map entry.
+ Zero() Message
+
+ // Descriptor returns the message descriptor.
+ //
+ // Invariant: t.Descriptor() == t.New().Descriptor()
+ Descriptor() MessageDescriptor
+}
+
+// MessageFieldTypes extends a MessageType by providing type information
+// regarding enums and messages referenced by the message fields.
+type MessageFieldTypes interface {
+ MessageType
+
+ // Enum returns the EnumType for the ith field in Descriptor.Fields.
+ // It returns nil if the ith field is not an enum kind.
+ // It panics if out of bounds.
+ //
+ // Invariant: mt.Enum(i).Descriptor() == mt.Descriptor().Fields(i).Enum()
+ Enum(i int) EnumType
+
+ // Message returns the MessageType for the ith field in Descriptor.Fields.
+ // It returns nil if the ith field is not a message or group kind.
+ // It panics if out of bounds.
+ //
+ // Invariant: mt.Message(i).Descriptor() == mt.Descriptor().Fields(i).Message()
+ Message(i int) MessageType
+}
+
+// MessageDescriptors is a list of message declarations.
+type MessageDescriptors interface {
+ // Len reports the number of messages.
+ Len() int
+ // Get returns the ith MessageDescriptor. It panics if out of bounds.
+ Get(i int) MessageDescriptor
+ // ByName returns the MessageDescriptor for a message named s.
+ // It returns nil if not found.
+ ByName(s Name) MessageDescriptor
+
+ doNotImplement
+}
+
+// FieldDescriptor describes a field within a message and
+// corresponds with the google.protobuf.FieldDescriptorProto message.
+//
+// It is used for both normal fields defined within the parent message
+// (e.g., MessageDescriptor.Fields) and fields that extend some remote message
+// (e.g., FileDescriptor.Extensions or MessageDescriptor.Extensions).
+type FieldDescriptor interface {
+ Descriptor
+
+ // Number reports the unique number for this field.
+ Number() FieldNumber
+ // Cardinality reports the cardinality for this field.
+ Cardinality() Cardinality
+ // Kind reports the basic kind for this field.
+ Kind() Kind
+
+ // HasJSONName reports whether this field has an explicitly set JSON name.
+ HasJSONName() bool
+
+ // JSONName reports the name used for JSON serialization.
+ // It is usually the camel-cased form of the field name.
+ // Extension fields are represented by the full name surrounded by brackets.
+ JSONName() string
+
+ // TextName reports the name used for text serialization.
+ // It is usually the name of the field, except that groups use the name
+ // of the inlined message, and extension fields are represented by the
+ // full name surrounded by brackets.
+ TextName() string
+
+ // HasPresence reports whether the field distinguishes between unpopulated
+ // and default values.
+ HasPresence() bool
+
+ // IsExtension reports whether this is an extension field. If false,
+ // then Parent and ContainingMessage refer to the same message.
+ // Otherwise, ContainingMessage and Parent likely differ.
+ IsExtension() bool
+
+ // HasOptionalKeyword reports whether the "optional" keyword was explicitly
+ // specified in the source .proto file.
+ HasOptionalKeyword() bool
+
+ // IsWeak reports whether this is a weak field, which does not impose a
+ // direct dependency on the target type.
+ // If true, then Message returns a placeholder type.
+ IsWeak() bool
+
+ // IsPacked reports whether repeated primitive numeric kinds should be
+ // serialized using a packed encoding.
+ // If true, then it implies Cardinality is Repeated.
+ IsPacked() bool
+
+ // IsList reports whether this field represents a list,
+ // where the value type for the associated field is a List.
+ // It is equivalent to checking whether Cardinality is Repeated and
+ // that IsMap reports false.
+ IsList() bool
+
+ // IsMap reports whether this field represents a map,
+ // where the value type for the associated field is a Map.
+ // It is equivalent to checking whether Cardinality is Repeated,
+ // that the Kind is MessageKind, and that Message.IsMapEntry reports true.
+ IsMap() bool
+
+ // MapKey returns the field descriptor for the key in the map entry.
+ // It returns nil if IsMap reports false.
+ MapKey() FieldDescriptor
+
+ // MapValue returns the field descriptor for the value in the map entry.
+ // It returns nil if IsMap reports false.
+ MapValue() FieldDescriptor
+
+ // HasDefault reports whether this field has a default value.
+ HasDefault() bool
+
+ // Default returns the default value for scalar fields.
+ // For proto2, it is the default value as specified in the proto file,
+ // or the zero value if unspecified.
+ // For proto3, it is always the zero value of the scalar.
+ // The Value type is determined by the Kind.
+ Default() Value
+
+ // DefaultEnumValue returns the enum value descriptor for the default value
+ // of an enum field, and is nil for any other kind of field.
+ DefaultEnumValue() EnumValueDescriptor
+
+ // ContainingOneof is the containing oneof that this field belongs to,
+ // and is nil if this field is not part of a oneof.
+ ContainingOneof() OneofDescriptor
+
+ // ContainingMessage is the containing message that this field belongs to.
+ // For extension fields, this may not necessarily be the parent message
+ // that the field is declared within.
+ ContainingMessage() MessageDescriptor
+
+ // Enum is the enum descriptor if Kind is EnumKind.
+ // It returns nil for any other Kind.
+ Enum() EnumDescriptor
+
+ // Message is the message descriptor if Kind is
+ // MessageKind or GroupKind. It returns nil for any other Kind.
+ Message() MessageDescriptor
+
+ isFieldDescriptor
+}
+type isFieldDescriptor interface{ ProtoType(FieldDescriptor) }
+
+// FieldDescriptors is a list of field declarations.
+type FieldDescriptors interface {
+ // Len reports the number of fields.
+ Len() int
+ // Get returns the ith FieldDescriptor. It panics if out of bounds.
+ Get(i int) FieldDescriptor
+ // ByName returns the FieldDescriptor for a field named s.
+ // It returns nil if not found.
+ ByName(s Name) FieldDescriptor
+ // ByJSONName returns the FieldDescriptor for a field with s as the JSON name.
+ // It returns nil if not found.
+ ByJSONName(s string) FieldDescriptor
+ // ByTextName returns the FieldDescriptor for a field with s as the text name.
+ // It returns nil if not found.
+ ByTextName(s string) FieldDescriptor
+ // ByNumber returns the FieldDescriptor for a field numbered n.
+ // It returns nil if not found.
+ ByNumber(n FieldNumber) FieldDescriptor
+
+ doNotImplement
+}
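
IsMap, IsList, and Kind are the usual entry points when deciding how to handle a field's value. A hedged sketch (the helper name is hypothetical):

    import (
    	"fmt"

    	"google.golang.org/protobuf/reflect/protoreflect"
    )

    // describeField summarizes how a field's value is represented in reflection.
    func describeField(fd protoreflect.FieldDescriptor) string {
    	switch {
    	case fd.IsMap():
    		return fmt.Sprintf("map<%v, %v>", fd.MapKey().Kind(), fd.MapValue().Kind())
    	case fd.IsList():
    		return fmt.Sprintf("repeated %v", fd.Kind())
    	default:
    		return fmt.Sprintf("singular %v (explicit presence: %v)", fd.Kind(), fd.HasPresence())
    	}
    }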
+
+// OneofDescriptor describes a oneof field set within a given message and
+// corresponds with the google.protobuf.OneofDescriptorProto message.
+type OneofDescriptor interface {
+ Descriptor
+
+ // IsSynthetic reports whether this is a synthetic oneof created to support
+ // proto3 optional semantics. If true, Fields contains exactly one field
+ // with HasOptionalKeyword specified.
+ IsSynthetic() bool
+
+ // Fields is a list of fields belonging to this oneof.
+ Fields() FieldDescriptors
+
+ isOneofDescriptor
+}
+type isOneofDescriptor interface{ ProtoType(OneofDescriptor) }
+
+// OneofDescriptors is a list of oneof declarations.
+type OneofDescriptors interface {
+ // Len reports the number of oneof fields.
+ Len() int
+ // Get returns the ith OneofDescriptor. It panics if out of bounds.
+ Get(i int) OneofDescriptor
+ // ByName returns the OneofDescriptor for a oneof named s.
+ // It returns nil if not found.
+ ByName(s Name) OneofDescriptor
+
+ doNotImplement
+}
+
+// ExtensionDescriptor is an alias of FieldDescriptor for documentation.
+type ExtensionDescriptor = FieldDescriptor
+
+// ExtensionTypeDescriptor is an ExtensionDescriptor with an associated ExtensionType.
+type ExtensionTypeDescriptor interface {
+ ExtensionDescriptor
+
+ // Type returns the associated ExtensionType.
+ Type() ExtensionType
+
+ // Descriptor returns the plain ExtensionDescriptor without the
+ // associated ExtensionType.
+ Descriptor() ExtensionDescriptor
+}
+
+// ExtensionDescriptors is a list of extension declarations.
+type ExtensionDescriptors interface {
+ // Len reports the number of fields.
+ Len() int
+ // Get returns the ith ExtensionDescriptor. It panics if out of bounds.
+ Get(i int) ExtensionDescriptor
+ // ByName returns the ExtensionDescriptor for a field named s.
+ // It returns nil if not found.
+ ByName(s Name) ExtensionDescriptor
+
+ doNotImplement
+}
+
+// ExtensionType encapsulates an ExtensionDescriptor with a concrete
+// Go implementation. The nested field descriptor must be for an extension field.
+//
+// While a normal field is a member of the parent message that it is declared
+// within (see Descriptor.Parent), an extension field is a member of some other
+// target message (see ExtensionDescriptor.ContainingMessage) and may have no
+// relationship with the parent. However, the full name of an extension field is
+// relative to the parent that it is declared within.
+//
+// For example:
+// syntax = "proto2";
+// package example;
+// message FooMessage {
+// extensions 100 to max;
+// }
+// message BarMessage {
+// extend FooMessage { optional BarMessage bar_field = 100; }
+// }
+//
+// Field "bar_field" is an extension of FooMessage, but its full name is
+// "example.BarMessage.bar_field" instead of "example.FooMessage.bar_field".
+type ExtensionType interface {
+ // New returns a new value for the field.
+ // For scalars, this returns the default value in native Go form.
+ // For composite types, this returns a new, empty, mutable value.
+ New() Value
+
+ // Zero returns a new value for the field.
+ // For scalars, this returns the default value in native Go form.
+ // For composite types, this returns an empty, read-only message, list, or map.
+ Zero() Value
+
+ // TypeDescriptor returns the extension type descriptor.
+ TypeDescriptor() ExtensionTypeDescriptor
+
+ // ValueOf wraps the input and returns it as a Value.
+ // ValueOf panics if the input value is invalid or not the appropriate type.
+ //
+ // ValueOf is more extensive than protoreflect.ValueOf for a given field's
+ // value as it has more type information available.
+ ValueOf(interface{}) Value
+
+ // InterfaceOf completely unwraps the Value to the underlying Go type.
+ // InterfaceOf panics if the input is nil or does not represent the
+ // appropriate underlying Go type. For composite types, it panics if the
+ // value is not mutable.
+ //
+ // InterfaceOf is able to unwrap the Value further than Value.Interface
+ // as it has more type information available.
+ InterfaceOf(Value) interface{}
+
+ // IsValidValue reports whether the Value is valid to assign to the field.
+ IsValidValue(Value) bool
+
+ // IsValidInterface reports whether the input is valid to assign to the field.
+ IsValidInterface(interface{}) bool
+}
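
A hedged sketch of assigning an extension purely through reflection (the helper name is hypothetical); in practice the proto package's SetExtension helper wraps this pattern. Note that the ExtensionTypeDescriptor doubles as the FieldDescriptor key for Set.

    import "google.golang.org/protobuf/reflect/protoreflect"

    // setExtension validates v against the extension's Go type and stores it on m.
    func setExtension(m protoreflect.Message, xt protoreflect.ExtensionType, v interface{}) {
    	if !xt.IsValidInterface(v) {
    		panic("value is not assignable to this extension field")
    	}
    	m.Set(xt.TypeDescriptor(), xt.ValueOf(v))
    }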
+
+// EnumDescriptor describes an enum and
+// corresponds with the google.protobuf.EnumDescriptorProto message.
+//
+// Nested declarations:
+// EnumValueDescriptor.
+type EnumDescriptor interface {
+ Descriptor
+
+ // Values is a list of nested enum value declarations.
+ Values() EnumValueDescriptors
+
+ // ReservedNames is a list of reserved enum names.
+ ReservedNames() Names
+ // ReservedRanges is a list of reserved ranges of enum numbers.
+ ReservedRanges() EnumRanges
+
+ isEnumDescriptor
+}
+type isEnumDescriptor interface{ ProtoType(EnumDescriptor) }
+
+// EnumType encapsulates an EnumDescriptor with a concrete Go implementation.
+type EnumType interface {
+ // New returns an instance of this enum type with its value set to n.
+ New(n EnumNumber) Enum
+
+ // Descriptor returns the enum descriptor.
+ //
+ // Invariant: t.Descriptor() == t.New(0).Descriptor()
+ Descriptor() EnumDescriptor
+}
+
+// EnumDescriptors is a list of enum declarations.
+type EnumDescriptors interface {
+ // Len reports the number of enum types.
+ Len() int
+ // Get returns the ith EnumDescriptor. It panics if out of bounds.
+ Get(i int) EnumDescriptor
+ // ByName returns the EnumDescriptor for an enum named s.
+ // It returns nil if not found.
+ ByName(s Name) EnumDescriptor
+
+ doNotImplement
+}
+
+// EnumValueDescriptor describes an enum value and
+// corresponds with the google.protobuf.EnumValueDescriptorProto message.
+//
+// All other proto declarations are in the namespace of the parent.
+// However, enum values do not follow this rule and are within the namespace
+// of the parent's parent (i.e., they are a sibling of the containing enum).
+// Thus, a value named "FOO_VALUE" declared within an enum uniquely identified
+// as "proto.package.MyEnum" has a full name of "proto.package.FOO_VALUE".
+type EnumValueDescriptor interface {
+ Descriptor
+
+ // Number returns the enum value as an integer.
+ Number() EnumNumber
+
+ isEnumValueDescriptor
+}
+type isEnumValueDescriptor interface{ ProtoType(EnumValueDescriptor) }
+
+// EnumValueDescriptors is a list of enum value declarations.
+type EnumValueDescriptors interface {
+ // Len reports the number of enum values.
+ Len() int
+ // Get returns the ith EnumValueDescriptor. It panics if out of bounds.
+ Get(i int) EnumValueDescriptor
+ // ByName returns the EnumValueDescriptor for the enum value named s.
+ // It returns nil if not found.
+ ByName(s Name) EnumValueDescriptor
+ // ByNumber returns the EnumValueDescriptor for the enum value numbered n.
+ // If multiple have the same number, the first one defined is returned
+ // It returns nil if not found.
+ ByNumber(n EnumNumber) EnumValueDescriptor
+
+ doNotImplement
+}
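
A hedged sketch (the helper name is hypothetical) of the common number-to-name lookup using the accessors above:

    import (
    	"fmt"

    	"google.golang.org/protobuf/reflect/protoreflect"
    )

    // enumName maps an enum number back to its declared name, falling back to the
    // numeric form for numbers with no declared value.
    func enumName(ed protoreflect.EnumDescriptor, n protoreflect.EnumNumber) string {
    	if vd := ed.Values().ByNumber(n); vd != nil {
    		return string(vd.Name())
    	}
    	return fmt.Sprintf("%d", n)
    }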
+
+// ServiceDescriptor describes a service and
+// corresponds with the google.protobuf.ServiceDescriptorProto message.
+//
+// Nested declarations: MethodDescriptor.
+type ServiceDescriptor interface {
+ Descriptor
+
+ // Methods is a list of nested method declarations.
+ Methods() MethodDescriptors
+
+ isServiceDescriptor
+}
+type isServiceDescriptor interface{ ProtoType(ServiceDescriptor) }
+
+// ServiceDescriptors is a list of service declarations.
+type ServiceDescriptors interface {
+ // Len reports the number of services.
+ Len() int
+ // Get returns the ith ServiceDescriptor. It panics if out of bounds.
+ Get(i int) ServiceDescriptor
+ // ByName returns the ServiceDescriptor for a service named s.
+ // It returns nil if not found.
+ ByName(s Name) ServiceDescriptor
+
+ doNotImplement
+}
+
+// MethodDescriptor describes a method and
+// corresponds with the google.protobuf.MethodDescriptorProto message.
+type MethodDescriptor interface {
+ Descriptor
+
+ // Input is the input message descriptor.
+ Input() MessageDescriptor
+ // Output is the output message descriptor.
+ Output() MessageDescriptor
+ // IsStreamingClient reports whether the client streams multiple messages.
+ IsStreamingClient() bool
+ // IsStreamingServer reports whether the server streams multiple messages.
+ IsStreamingServer() bool
+
+ isMethodDescriptor
+}
+type isMethodDescriptor interface{ ProtoType(MethodDescriptor) }
+
+// MethodDescriptors is a list of method declarations.
+type MethodDescriptors interface {
+ // Len reports the number of methods.
+ Len() int
+ // Get returns the ith MethodDescriptor. It panics if out of bounds.
+ Get(i int) MethodDescriptor
+ // ByName returns the MethodDescriptor for a service method named s.
+ // It returns nil if not found.
+ ByName(s Name) MethodDescriptor
+
+ doNotImplement
+}
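
Tying the service and method descriptors together, a hedged sketch (the helper name is hypothetical) that enumerates every RPC declared in a file:

    import (
    	"fmt"

    	"google.golang.org/protobuf/reflect/protoreflect"
    )

    // listMethods prints each method's input, output, and streaming shape.
    func listMethods(fd protoreflect.FileDescriptor) {
    	sds := fd.Services()
    	for i := 0; i < sds.Len(); i++ {
    		mds := sds.Get(i).Methods()
    		for j := 0; j < mds.Len(); j++ {
    			md := mds.Get(j)
    			fmt.Printf("%s: %s -> %s (client stream=%v, server stream=%v)\n",
    				md.FullName(), md.Input().FullName(), md.Output().FullName(),
    				md.IsStreamingClient(), md.IsStreamingServer())
    		}
    	}
    }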
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
new file mode 100644
index 0000000..f319810
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
@@ -0,0 +1,285 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protoreflect
+
+import "google.golang.org/protobuf/encoding/protowire"
+
+// Enum is a reflection interface for a concrete enum value,
+// which provides type information and a getter for the enum number.
+// Enum does not provide a mutable API since enums are commonly backed by
+// Go constants, which are not addressable.
+type Enum interface {
+ // Descriptor returns the enum descriptor, which contains only the protobuf
+ // type information for the enum.
+ Descriptor() EnumDescriptor
+
+ // Type returns the enum type, which encapsulates both Go and protobuf
+ // type information. If the Go type information is not needed,
+ // it is recommended that the enum descriptor be used instead.
+ Type() EnumType
+
+ // Number returns the enum value as an integer.
+ Number() EnumNumber
+}
+
+// Message is a reflective interface for a concrete message value,
+// encapsulating both type and value information for the message.
+//
+// Accessors and mutators for individual fields are keyed by FieldDescriptor.
+// For non-extension fields, the descriptor must exactly match the
+// field known by the parent message.
+// For extension fields, the descriptor must implement ExtensionTypeDescriptor,
+// extend the parent message (i.e., have the same message FullName), and
+// be within the parent's extension range.
+//
+// Each field Value can be a scalar or a composite type (Message, List, or Map).
+// See Value for the Go types associated with a FieldDescriptor.
+// Providing a Value that is invalid or of an incorrect type panics.
+type Message interface {
+ // Descriptor returns the message descriptor, which contains only the protobuf
+ // type information for the message.
+ Descriptor() MessageDescriptor
+
+ // Type returns the message type, which encapsulates both Go and protobuf
+ // type information. If the Go type information is not needed,
+ // it is recommended that the message descriptor be used instead.
+ Type() MessageType
+
+ // New returns a newly allocated and mutable empty message.
+ New() Message
+
+ // Interface unwraps the message reflection interface and
+ // returns the underlying ProtoMessage interface.
+ Interface() ProtoMessage
+
+ // Range iterates over every populated field in an undefined order,
+ // calling f for each field descriptor and value encountered.
+ // Range returns immediately if f returns false.
+ // While iterating, mutating operations may only be performed
+ // on the current field descriptor.
+ Range(f func(FieldDescriptor, Value) bool)
+
+ // Has reports whether a field is populated.
+ //
+ // Some fields have the property of nullability where it is possible to
+ // distinguish between the default value of a field and whether the field
+ // was explicitly populated with the default value. Singular message fields,
+ // member fields of a oneof, and proto2 scalar fields are nullable. Such
+ // fields are populated only if explicitly set.
+ //
+ // In other cases (aside from the nullable cases above),
+ // a proto3 scalar field is populated if it contains a non-zero value, and
+ // a repeated field is populated if it is non-empty.
+ Has(FieldDescriptor) bool
+
+ // Clear clears the field such that a subsequent Has call reports false.
+ //
+ // Clearing an extension field clears both the extension type and value
+ // associated with the given field number.
+ //
+ // Clear is a mutating operation and unsafe for concurrent use.
+ Clear(FieldDescriptor)
+
+ // Get retrieves the value for a field.
+ //
+ // For unpopulated scalars, it returns the default value, where
+ // the default value of a bytes scalar is guaranteed to be a copy.
+ // For unpopulated composite types, it returns an empty, read-only view
+ // of the value; to obtain a mutable reference, use Mutable.
+ Get(FieldDescriptor) Value
+
+ // Set stores the value for a field.
+ //
+ // For a field belonging to a oneof, it implicitly clears any other field
+ // that may be currently set within the same oneof.
+ // For extension fields, it implicitly stores the provided ExtensionType.
+ // When setting a composite type, it is unspecified whether the stored value
+ // aliases the source's memory in any way. If the composite value is an
+ // empty, read-only value, then it panics.
+ //
+ // Set is a mutating operation and unsafe for concurrent use.
+ Set(FieldDescriptor, Value)
+
+ // Mutable returns a mutable reference to a composite type.
+ //
+ // If the field is unpopulated, it may allocate a composite value.
+ // For a field belonging to a oneof, it implicitly clears any other field
+ // that may be currently set within the same oneof.
+ // For extension fields, it implicitly stores the provided ExtensionType
+ // if not already stored.
+ // It panics if the field does not contain a composite type.
+ //
+ // Mutable is a mutating operation and unsafe for concurrent use.
+ Mutable(FieldDescriptor) Value
+
+ // NewField returns a new value that is assignable to the field
+ // for the given descriptor. For scalars, this returns the default value.
+ // For lists, maps, and messages, this returns a new, empty, mutable value.
+ NewField(FieldDescriptor) Value
+
+ // WhichOneof reports which field within the oneof is populated,
+ // returning nil if none are populated.
+ // It panics if the oneof descriptor does not belong to this message.
+ WhichOneof(OneofDescriptor) FieldDescriptor
+
+ // GetUnknown retrieves the entire list of unknown fields.
+ // The caller may only mutate the contents of the RawFields
+ // if the mutated bytes are stored back into the message with SetUnknown.
+ GetUnknown() RawFields
+
+ // SetUnknown stores an entire list of unknown fields.
+ // The raw fields must be syntactically valid according to the wire format.
+ // An implementation may panic if this is not the case.
+ // Once stored, the caller must not mutate the content of the RawFields.
+ // An empty RawFields may be passed to clear the fields.
+ //
+ // SetUnknown is a mutating operation and unsafe for concurrent use.
+ SetUnknown(RawFields)
+
+ // IsValid reports whether the message is valid.
+ //
+ // An invalid message is an empty, read-only value.
+ //
+ // An invalid message often corresponds to a nil pointer of the concrete
+ // message type, but the details are implementation dependent.
+ // Validity is not part of the protobuf data model, and may not
+ // be preserved in marshaling or other operations.
+ IsValid() bool
+
+ // ProtoMethods returns optional fast-path implementations of various operations.
+ // This method may return nil.
+ //
+ // The returned methods type is identical to
+ // "google.golang.org/protobuf/runtime/protoiface".Methods.
+ // Consult the protoiface package documentation for details.
+ ProtoMethods() *methods
+}
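
A hedged sketch (the helper name is hypothetical) of the Range/Clear contract described above: mutating the field currently being visited is permitted, so clearing it inside the callback is safe.

    import "google.golang.org/protobuf/reflect/protoreflect"

    // clearStrings clears every populated singular string field of m.
    func clearStrings(m protoreflect.Message) {
    	m.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
    		if fd.Kind() == protoreflect.StringKind && !fd.IsList() && !fd.IsMap() {
    			m.Clear(fd)
    		}
    		return true // keep iterating
    	})
    }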
+
+// RawFields is the raw bytes for an ordered sequence of fields.
+// Each field contains both the tag (representing field number and wire type),
+// and also the wire data itself.
+type RawFields []byte
+
+// IsValid reports whether b is syntactically correct wire format.
+func (b RawFields) IsValid() bool {
+ for len(b) > 0 {
+ _, _, n := protowire.ConsumeField(b)
+ if n < 0 {
+ return false
+ }
+ b = b[n:]
+ }
+ return true
+}
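
For example, a single varint field encoded with the protowire package passes this check (the helper name and the field number are arbitrary illustrations):

    import (
    	"google.golang.org/protobuf/encoding/protowire"
    	"google.golang.org/protobuf/reflect/protoreflect"
    )

    // rawVarintField encodes value v for field number 1 as a varint and returns
    // the bytes as RawFields, which IsValid accepts as well-formed wire format.
    func rawVarintField(v uint64) protoreflect.RawFields {
    	b := protowire.AppendTag(nil, 1, protowire.VarintType)
    	b = protowire.AppendVarint(b, v)
    	return protoreflect.RawFields(b)
    }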
+
+// List is a zero-indexed, ordered list.
+// The element Value type is determined by FieldDescriptor.Kind.
+// Providing a Value that is invalid or of an incorrect type panics.
+type List interface {
+ // Len reports the number of entries in the List.
+ // Get, Set, and Truncate panic with out of bound indexes.
+ Len() int
+
+ // Get retrieves the value at the given index.
+ // It never returns an invalid value.
+ Get(int) Value
+
+ // Set stores a value for the given index.
+ // When setting a composite type, it is unspecified whether the set
+ // value aliases the source's memory in any way.
+ //
+ // Set is a mutating operation and unsafe for concurrent use.
+ Set(int, Value)
+
+ // Append appends the provided value to the end of the list.
+ // When appending a composite type, it is unspecified whether the appended
+ // value aliases the source's memory in any way.
+ //
+ // Append is a mutating operation and unsafe for concurrent use.
+ Append(Value)
+
+ // AppendMutable appends a new, empty, mutable message value to the end
+ // of the list and returns it.
+ // It panics if the list does not contain a message type.
+ AppendMutable() Value
+
+ // Truncate truncates the list to a smaller length.
+ //
+ // Truncate is a mutating operation and unsafe for concurrent use.
+ Truncate(int)
+
+ // NewElement returns a new value for a list element.
+ // For enums, this returns the first enum value.
+ // For other scalars, this returns the zero value.
+ // For messages, this returns a new, empty, mutable value.
+ NewElement() Value
+
+ // IsValid reports whether the list is valid.
+ //
+ // An invalid list is an empty, read-only value.
+ //
+ // Validity is not part of the protobuf data model, and may not
+ // be preserved in marshaling or other operations.
+ IsValid() bool
+}
+
+// Map is an unordered, associative map.
+// The entry MapKey type is determined by FieldDescriptor.MapKey.Kind.
+// The entry Value type is determined by FieldDescriptor.MapValue.Kind.
+// Providing a MapKey or Value that is invalid or of an incorrect type panics.
+type Map interface {
+ // Len reports the number of elements in the map.
+ Len() int
+
+ // Range iterates over every map entry in an undefined order,
+ // calling f for each key and value encountered.
+ // Range calls f Len times unless f returns false, which stops iteration.
+ // While iterating, mutating operations may only be performed
+ // on the current map key.
+ Range(f func(MapKey, Value) bool)
+
+ // Has reports whether an entry with the given key is in the map.
+ Has(MapKey) bool
+
+ // Clear clears the entry associated with the given key.
+ // The operation does nothing if there is no entry associated with the key.
+ //
+ // Clear is a mutating operation and unsafe for concurrent use.
+ Clear(MapKey)
+
+ // Get retrieves the value for an entry with the given key.
+ // It returns an invalid value for non-existent entries.
+ Get(MapKey) Value
+
+ // Set stores the value for an entry with the given key.
+ // It panics when given a key or value that is invalid or the wrong type.
+ // When setting a composite type, it is unspecified whether the set
+ // value aliases the source's memory in any way.
+ //
+ // Set is a mutating operation and unsafe for concurrent use.
+ Set(MapKey, Value)
+
+ // Mutable retrieves a mutable reference to the entry for the given key.
+ // If no entry exists for the key, it creates a new, empty, mutable value
+ // and stores it as the entry for the key.
+ // It panics if the map value is not a message.
+ Mutable(MapKey) Value
+
+ // NewValue returns a new value assignable as a map value.
+ // For enums, this returns the first enum value.
+ // For other scalars, this returns the zero value.
+ // For messages, this returns a new, empty, mutable value.
+ NewValue() Value
+
+ // IsValid reports whether the map is valid.
+ //
+ // An invalid map is an empty, read-only value.
+ //
+ // An invalid map often corresponds to a nil Go map value,
+ // but the details are implementation dependent.
+ // Validity is not part of the protobuf data model, and may not
+ // be preserved in marshaling or other operations.
+ IsValid() bool
+}
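
A hedged sketch of iterating a map field's entries (the helper name is hypothetical; it assumes the map value kind is an integer, since Value.Int panics otherwise):

    import "google.golang.org/protobuf/reflect/protoreflect"

    // countNonZero counts entries whose integer value is non-zero.
    // Iteration order is undefined, but the count is deterministic.
    func countNonZero(m protoreflect.Map) int {
    	var n int
    	m.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
    		if v.Int() != 0 { // assumes an int32/int64 map value kind
    			n++
    		}
    		return true
    	})
    	return n
    }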
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
new file mode 100644
index 0000000..918e685
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
@@ -0,0 +1,59 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build purego appengine
+
+package protoreflect
+
+import "google.golang.org/protobuf/internal/pragma"
+
+type valueType int
+
+const (
+ nilType valueType = iota
+ boolType
+ int32Type
+ int64Type
+ uint32Type
+ uint64Type
+ float32Type
+ float64Type
+ stringType
+ bytesType
+ enumType
+ ifaceType
+)
+
+// value is a union where only one type can be represented at a time.
+// This uses a distinct field for each type. This is type safe in Go, but
+// occupies more memory than necessary (72B).
+type value struct {
+ pragma.DoNotCompare // 0B
+
+ typ valueType // 8B
+ num uint64 // 8B
+ str string // 16B
+ bin []byte // 24B
+ iface interface{} // 16B
+}
+
+func valueOfString(v string) Value {
+ return Value{typ: stringType, str: v}
+}
+func valueOfBytes(v []byte) Value {
+ return Value{typ: bytesType, bin: v}
+}
+func valueOfIface(v interface{}) Value {
+ return Value{typ: ifaceType, iface: v}
+}
+
+func (v Value) getString() string {
+ return v.str
+}
+func (v Value) getBytes() []byte {
+ return v.bin
+}
+func (v Value) getIface() interface{} {
+ return v.iface
+}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
new file mode 100644
index 0000000..5a34147
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
@@ -0,0 +1,411 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protoreflect
+
+import (
+ "fmt"
+ "math"
+)
+
+// Value is a union where only one Go type may be set at a time.
+// The Value is used to represent all possible values a field may take.
+// The following shows which Go type is used to represent each proto Kind:
+//
+// ╔════════════╤═════════════════════════════════════╗
+// ║ Go type │ Protobuf kind ║
+// ╠════════════╪═════════════════════════════════════╣
+// ║ bool │ BoolKind ║
+// ║ int32 │ Int32Kind, Sint32Kind, Sfixed32Kind ║
+// ║ int64 │ Int64Kind, Sint64Kind, Sfixed64Kind ║
+// ║ uint32 │ Uint32Kind, Fixed32Kind ║
+// ║ uint64 │ Uint64Kind, Fixed64Kind ║
+// ║ float32 │ FloatKind ║
+// ║ float64 │ DoubleKind ║
+// ║ string │ StringKind ║
+// ║ []byte │ BytesKind ║
+// ║ EnumNumber │ EnumKind ║
+// ║ Message │ MessageKind, GroupKind ║
+// ╚════════════╧═════════════════════════════════════╝
+//
+// Multiple protobuf Kinds may be represented by a single Go type if the type
+// can losslessly represent the information for the proto kind. For example,
+// Int64Kind, Sint64Kind, and Sfixed64Kind are all represented by int64,
+// but use different integer encoding methods.
+//
+// The List or Map types are used if the field cardinality is repeated.
+// A field is a List if FieldDescriptor.IsList reports true.
+// A field is a Map if FieldDescriptor.IsMap reports true.
+//
+// Converting to/from a Value and a concrete Go value panics on type mismatch.
+// For example, ValueOf("hello").Int() panics because this attempts to
+// retrieve an int64 from a string.
+type Value value
+
+// The protoreflect API uses a custom Value union type instead of interface{}
+// to keep the future open for performance optimizations. Using an interface{}
+// always incurs an allocation for primitives (e.g., int64) since it needs to
+// be boxed on the heap (as interfaces can only contain pointers natively).
+// Instead, we represent the Value union as a flat struct that internally keeps
+// track of which type is set. Using unsafe, the Value union can be reduced
+// down to 24B, which is identical in size to a slice.
+//
+// The latest compiler (Go1.11) currently suffers from some limitations:
+// • With inlining, the compiler should be able to statically prove that
+// only one of these switch cases are taken and inline one specific case.
+// See https://golang.org/issue/22310.
+
+// ValueOf returns a Value initialized with the concrete value stored in v.
+// This panics if the type does not match one of the allowed types in the
+// Value union.
+func ValueOf(v interface{}) Value {
+ switch v := v.(type) {
+ case nil:
+ return Value{}
+ case bool:
+ return ValueOfBool(v)
+ case int32:
+ return ValueOfInt32(v)
+ case int64:
+ return ValueOfInt64(v)
+ case uint32:
+ return ValueOfUint32(v)
+ case uint64:
+ return ValueOfUint64(v)
+ case float32:
+ return ValueOfFloat32(v)
+ case float64:
+ return ValueOfFloat64(v)
+ case string:
+ return ValueOfString(v)
+ case []byte:
+ return ValueOfBytes(v)
+ case EnumNumber:
+ return ValueOfEnum(v)
+ case Message, List, Map:
+ return valueOfIface(v)
+ case ProtoMessage:
+ panic(fmt.Sprintf("invalid proto.Message(%T) type, expected a protoreflect.Message type", v))
+ default:
+ panic(fmt.Sprintf("invalid type: %T", v))
+ }
+}
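
A short, hedged illustration of the round-trip behavior (the function name is hypothetical): a Value remembers exactly which Go type it was built from, and asking for a different one panics.

    import (
    	"fmt"

    	"google.golang.org/protobuf/reflect/protoreflect"
    )

    func valueRoundTrip() {
    	v := protoreflect.ValueOf(int64(7))
    	fmt.Println(v.Int())              // 7
    	fmt.Printf("%T\n", v.Interface()) // int64
    	// v.Map() would panic: type mismatch: cannot convert int64 to map
    }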
+
+// ValueOfBool returns a new boolean value.
+func ValueOfBool(v bool) Value {
+ if v {
+ return Value{typ: boolType, num: 1}
+ } else {
+ return Value{typ: boolType, num: 0}
+ }
+}
+
+// ValueOfInt32 returns a new int32 value.
+func ValueOfInt32(v int32) Value {
+ return Value{typ: int32Type, num: uint64(v)}
+}
+
+// ValueOfInt64 returns a new int64 value.
+func ValueOfInt64(v int64) Value {
+ return Value{typ: int64Type, num: uint64(v)}
+}
+
+// ValueOfUint32 returns a new uint32 value.
+func ValueOfUint32(v uint32) Value {
+ return Value{typ: uint32Type, num: uint64(v)}
+}
+
+// ValueOfUint64 returns a new uint64 value.
+func ValueOfUint64(v uint64) Value {
+ return Value{typ: uint64Type, num: v}
+}
+
+// ValueOfFloat32 returns a new float32 value.
+func ValueOfFloat32(v float32) Value {
+ return Value{typ: float32Type, num: uint64(math.Float64bits(float64(v)))}
+}
+
+// ValueOfFloat64 returns a new float64 value.
+func ValueOfFloat64(v float64) Value {
+ return Value{typ: float64Type, num: uint64(math.Float64bits(float64(v)))}
+}
+
+// ValueOfString returns a new string value.
+func ValueOfString(v string) Value {
+ return valueOfString(v)
+}
+
+// ValueOfBytes returns a new bytes value.
+func ValueOfBytes(v []byte) Value {
+ return valueOfBytes(v[:len(v):len(v)])
+}
+
+// ValueOfEnum returns a new enum value.
+func ValueOfEnum(v EnumNumber) Value {
+ return Value{typ: enumType, num: uint64(v)}
+}
+
+// ValueOfMessage returns a new Message value.
+func ValueOfMessage(v Message) Value {
+ return valueOfIface(v)
+}
+
+// ValueOfList returns a new List value.
+func ValueOfList(v List) Value {
+ return valueOfIface(v)
+}
+
+// ValueOfMap returns a new Map value.
+func ValueOfMap(v Map) Value {
+ return valueOfIface(v)
+}
+
+// IsValid reports whether v is populated with a value.
+func (v Value) IsValid() bool {
+ return v.typ != nilType
+}
+
+// Interface returns v as an interface{}.
+//
+// Invariant: v == ValueOf(v).Interface()
+func (v Value) Interface() interface{} {
+ switch v.typ {
+ case nilType:
+ return nil
+ case boolType:
+ return v.Bool()
+ case int32Type:
+ return int32(v.Int())
+ case int64Type:
+ return int64(v.Int())
+ case uint32Type:
+ return uint32(v.Uint())
+ case uint64Type:
+ return uint64(v.Uint())
+ case float32Type:
+ return float32(v.Float())
+ case float64Type:
+ return float64(v.Float())
+ case stringType:
+ return v.String()
+ case bytesType:
+ return v.Bytes()
+ case enumType:
+ return v.Enum()
+ default:
+ return v.getIface()
+ }
+}
+
+func (v Value) typeName() string {
+ switch v.typ {
+ case nilType:
+ return "nil"
+ case boolType:
+ return "bool"
+ case int32Type:
+ return "int32"
+ case int64Type:
+ return "int64"
+ case uint32Type:
+ return "uint32"
+ case uint64Type:
+ return "uint64"
+ case float32Type:
+ return "float32"
+ case float64Type:
+ return "float64"
+ case stringType:
+ return "string"
+ case bytesType:
+ return "bytes"
+ case enumType:
+ return "enum"
+ default:
+ switch v := v.getIface().(type) {
+ case Message:
+ return "message"
+ case List:
+ return "list"
+ case Map:
+ return "map"
+ default:
+ return fmt.Sprintf("<unknown: %T>", v)
+ }
+ }
+}
+
+func (v Value) panicMessage(what string) string {
+ return fmt.Sprintf("type mismatch: cannot convert %v to %s", v.typeName(), what)
+}
+
+// Bool returns v as a bool and panics if the type is not a bool.
+func (v Value) Bool() bool {
+ switch v.typ {
+ case boolType:
+ return v.num > 0
+ default:
+ panic(v.panicMessage("bool"))
+ }
+}
+
+// Int returns v as an int64 and panics if the type is not an int32 or int64.
+func (v Value) Int() int64 {
+ switch v.typ {
+ case int32Type, int64Type:
+ return int64(v.num)
+ default:
+ panic(v.panicMessage("int"))
+ }
+}
+
+// Uint returns v as a uint64 and panics if the type is not a uint32 or uint64.
+func (v Value) Uint() uint64 {
+ switch v.typ {
+ case uint32Type, uint64Type:
+ return uint64(v.num)
+ default:
+ panic(v.panicMessage("uint"))
+ }
+}
+
+// Float returns v as a float64 and panics if the type is not a float32 or float64.
+func (v Value) Float() float64 {
+ switch v.typ {
+ case float32Type, float64Type:
+ return math.Float64frombits(uint64(v.num))
+ default:
+ panic(v.panicMessage("float"))
+ }
+}
+
+// String returns v as a string. Since this method implements fmt.Stringer,
+// this returns the formatted string value for any non-string type.
+func (v Value) String() string {
+ switch v.typ {
+ case stringType:
+ return v.getString()
+ default:
+ return fmt.Sprint(v.Interface())
+ }
+}
+
+// Bytes returns v as a []byte and panics if the type is not a []byte.
+func (v Value) Bytes() []byte {
+ switch v.typ {
+ case bytesType:
+ return v.getBytes()
+ default:
+ panic(v.panicMessage("bytes"))
+ }
+}
+
+// Enum returns v as an EnumNumber and panics if the type is not an EnumNumber.
+func (v Value) Enum() EnumNumber {
+ switch v.typ {
+ case enumType:
+ return EnumNumber(v.num)
+ default:
+ panic(v.panicMessage("enum"))
+ }
+}
+
+// Message returns v as a Message and panics if the type is not a Message.
+func (v Value) Message() Message {
+ switch vi := v.getIface().(type) {
+ case Message:
+ return vi
+ default:
+ panic(v.panicMessage("message"))
+ }
+}
+
+// List returns v as a List and panics if the type is not a List.
+func (v Value) List() List {
+ switch vi := v.getIface().(type) {
+ case List:
+ return vi
+ default:
+ panic(v.panicMessage("list"))
+ }
+}
+
+// Map returns v as a Map and panics if the type is not a Map.
+func (v Value) Map() Map {
+ switch vi := v.getIface().(type) {
+ case Map:
+ return vi
+ default:
+ panic(v.panicMessage("map"))
+ }
+}
+
+// MapKey returns v as a MapKey and panics for invalid MapKey types.
+func (v Value) MapKey() MapKey {
+ switch v.typ {
+ case boolType, int32Type, int64Type, uint32Type, uint64Type, stringType:
+ return MapKey(v)
+ default:
+ panic(v.panicMessage("map key"))
+ }
+}
+
+// MapKey is used to index maps, where the Go type of the MapKey must match
+// the specified key Kind (see MessageDescriptor.IsMapEntry).
+// The following shows what Go type is used to represent each proto Kind:
+//
+// ╔═════════╤═════════════════════════════════════╗
+// ║ Go type │ Protobuf kind ║
+// ╠═════════╪═════════════════════════════════════╣
+// ║ bool │ BoolKind ║
+// ║ int32 │ Int32Kind, Sint32Kind, Sfixed32Kind ║
+// ║ int64 │ Int64Kind, Sint64Kind, Sfixed64Kind ║
+// ║ uint32 │ Uint32Kind, Fixed32Kind ║
+// ║ uint64 │ Uint64Kind, Fixed64Kind ║
+// ║ string │ StringKind ║
+// ╚═════════╧═════════════════════════════════════╝
+//
+// A MapKey is constructed and accessed through a Value:
+// k := ValueOf("hash").MapKey() // convert string to MapKey
+// s := k.String() // convert MapKey to string
+//
+// The MapKey is a strict subset of valid types used in Value;
+// converting a Value to a MapKey with an invalid type panics.
+type MapKey value
+
+// IsValid reports whether k is populated with a value.
+func (k MapKey) IsValid() bool {
+ return Value(k).IsValid()
+}
+
+// Interface returns k as an interface{}.
+func (k MapKey) Interface() interface{} {
+ return Value(k).Interface()
+}
+
+// Bool returns k as a bool and panics if the type is not a bool.
+func (k MapKey) Bool() bool {
+ return Value(k).Bool()
+}
+
+// Int returns k as an int64 and panics if the type is not an int32 or int64.
+func (k MapKey) Int() int64 {
+ return Value(k).Int()
+}
+
+// Uint returns k as a uint64 and panics if the type is not a uint32 or uint64.
+func (k MapKey) Uint() uint64 {
+ return Value(k).Uint()
+}
+
+// String returns k as a string. Since this method implements fmt.Stringer,
+// this returns the formatted string value for any non-string type.
+func (k MapKey) String() string {
+ return Value(k).String()
+}
+
+// Value returns k as a Value.
+func (k MapKey) Value() Value {
+ return Value(k)
+}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
new file mode 100644
index 0000000..c45debd
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
@@ -0,0 +1,98 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !purego,!appengine
+
+package protoreflect
+
+import (
+ "unsafe"
+
+ "google.golang.org/protobuf/internal/pragma"
+)
+
+type (
+ stringHeader struct {
+ Data unsafe.Pointer
+ Len int
+ }
+ sliceHeader struct {
+ Data unsafe.Pointer
+ Len int
+ Cap int
+ }
+ ifaceHeader struct {
+ Type unsafe.Pointer
+ Data unsafe.Pointer
+ }
+)
+
+var (
+ nilType = typeOf(nil)
+ boolType = typeOf(*new(bool))
+ int32Type = typeOf(*new(int32))
+ int64Type = typeOf(*new(int64))
+ uint32Type = typeOf(*new(uint32))
+ uint64Type = typeOf(*new(uint64))
+ float32Type = typeOf(*new(float32))
+ float64Type = typeOf(*new(float64))
+ stringType = typeOf(*new(string))
+ bytesType = typeOf(*new([]byte))
+ enumType = typeOf(*new(EnumNumber))
+)
+
+// typeOf returns a pointer to the Go type information.
+// The pointer is comparable and equal if and only if the types are identical.
+func typeOf(t interface{}) unsafe.Pointer {
+ return (*ifaceHeader)(unsafe.Pointer(&t)).Type
+}
+
+// value is a union where only one type can be represented at a time.
+// The struct is 24B large on 64-bit systems and requires the minimum storage
+// necessary to represent each possible type.
+//
+// The Go GC needs to be able to scan variables containing pointers.
+// As such, pointers and non-pointers cannot be intermixed.
+type value struct {
+ pragma.DoNotCompare // 0B
+
+ // typ stores the type of the value as a pointer to the Go type.
+ typ unsafe.Pointer // 8B
+
+ // ptr stores the data pointer for a String, Bytes, or interface value.
+ ptr unsafe.Pointer // 8B
+
+ // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or
+ // Enum value as a raw uint64.
+ //
+ // It is also used to store the length of a String or Bytes value;
+ // the capacity is ignored.
+ num uint64 // 8B
+}
+
+func valueOfString(v string) Value {
+ p := (*stringHeader)(unsafe.Pointer(&v))
+ return Value{typ: stringType, ptr: p.Data, num: uint64(len(v))}
+}
+func valueOfBytes(v []byte) Value {
+ p := (*sliceHeader)(unsafe.Pointer(&v))
+ return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))}
+}
+func valueOfIface(v interface{}) Value {
+ p := (*ifaceHeader)(unsafe.Pointer(&v))
+ return Value{typ: p.Type, ptr: p.Data}
+}
+
+func (v Value) getString() (x string) {
+ *(*stringHeader)(unsafe.Pointer(&x)) = stringHeader{Data: v.ptr, Len: int(v.num)}
+ return x
+}
+func (v Value) getBytes() (x []byte) {
+ *(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)}
+ return x
+}
+func (v Value) getIface() (x interface{}) {
+ *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr}
+ return x
+}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
new file mode 100644
index 0000000..66dcbcd
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
@@ -0,0 +1,869 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protoregistry provides data structures to register and lookup
+// protobuf descriptor types.
+//
+// The Files registry contains file descriptors and provides the ability
+// to iterate over the files or lookup a specific descriptor within the files.
+// Files only contains protobuf descriptors and has no understanding of Go
+// type information that may be associated with each descriptor.
+//
+// The Types registry contains descriptor types for which there is a known
+// Go type associated with that descriptor. It provides the ability to iterate
+// over the registered types or lookup a type by name.
+package protoregistry
+
+import (
+ "fmt"
+ "os"
+ "strings"
+ "sync"
+
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/flags"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// conflictPolicy configures the policy for handling registration conflicts.
+//
+// It can be over-written at compile time with a linker-initialized variable:
+// go build -ldflags "-X google.golang.org/protobuf/reflect/protoregistry.conflictPolicy=warn"
+//
+// It can be over-written at program execution with an environment variable:
+// GOLANG_PROTOBUF_REGISTRATION_CONFLICT=warn ./main
+//
+// Neither of the above are covered by the compatibility promise and
+// may be removed in a future release of this module.
+var conflictPolicy = "panic" // "panic" | "warn" | "ignore"
+
+// ignoreConflict reports whether to ignore a registration conflict
+// given the descriptor being registered and the error.
+// It is a variable so that the behavior is easily overridden in another file.
+var ignoreConflict = func(d protoreflect.Descriptor, err error) bool {
+ const env = "GOLANG_PROTOBUF_REGISTRATION_CONFLICT"
+ const faq = "https://developers.google.com/protocol-buffers/docs/reference/go/faq#namespace-conflict"
+ policy := conflictPolicy
+ if v := os.Getenv(env); v != "" {
+ policy = v
+ }
+ switch policy {
+ case "panic":
+ panic(fmt.Sprintf("%v\nSee %v\n", err, faq))
+ case "warn":
+ fmt.Fprintf(os.Stderr, "WARNING: %v\nSee %v\n\n", err, faq)
+ return true
+ case "ignore":
+ return true
+ default:
+ panic("invalid " + env + " value: " + os.Getenv(env))
+ }
+}
+
+var globalMutex sync.RWMutex
+
+// GlobalFiles is a global registry of file descriptors.
+var GlobalFiles *Files = new(Files)
+
+// GlobalTypes is the registry used by default for type lookups
+// unless a local registry is provided by the user.
+var GlobalTypes *Types = new(Types)
+
+// NotFound is a sentinel error value to indicate that the type was not found.
+//
+// Since registry lookup can happen in the critical performance path, resolvers
+// must return this exact error value, not an error wrapping it.
+var NotFound = errors.New("not found")
+
+// Files is a registry for looking up or iterating over files and the
+// descriptors contained within them.
+// The Find and Range methods are safe for concurrent use.
+type Files struct {
+ // The map of descsByName contains:
+ // EnumDescriptor
+ // EnumValueDescriptor
+ // MessageDescriptor
+ // ExtensionDescriptor
+ // ServiceDescriptor
+ // *packageDescriptor
+ //
+ // Note that files are stored as a slice, since a package may contain
+ // multiple files. Only top-level declarations are registered.
+ // Note that enum values are in the top-level since they are in the same
+ // scope as the parent enum.
+ descsByName map[protoreflect.FullName]interface{}
+ filesByPath map[string]protoreflect.FileDescriptor
+}
+
+type packageDescriptor struct {
+ files []protoreflect.FileDescriptor
+}
+
+// RegisterFile registers the provided file descriptor.
+//
+// If any descriptor within the file conflicts with the descriptor of any
+// previously registered file (e.g., two enums with the same full name),
+// then the file is not registered and an error is returned.
+//
+// It is permitted for multiple files to have the same file path.
+func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error {
+ if r == GlobalFiles {
+ globalMutex.Lock()
+ defer globalMutex.Unlock()
+ }
+ if r.descsByName == nil {
+ r.descsByName = map[protoreflect.FullName]interface{}{
+ "": &packageDescriptor{},
+ }
+ r.filesByPath = make(map[string]protoreflect.FileDescriptor)
+ }
+ path := file.Path()
+ if prev := r.filesByPath[path]; prev != nil {
+ r.checkGenProtoConflict(path)
+ err := errors.New("file %q is already registered", file.Path())
+ err = amendErrorWithCaller(err, prev, file)
+ if r == GlobalFiles && ignoreConflict(file, err) {
+ err = nil
+ }
+ return err
+ }
+
+ for name := file.Package(); name != ""; name = name.Parent() {
+ switch prev := r.descsByName[name]; prev.(type) {
+ case nil, *packageDescriptor:
+ default:
+ err := errors.New("file %q has a package name conflict over %v", file.Path(), name)
+ err = amendErrorWithCaller(err, prev, file)
+ if r == GlobalFiles && ignoreConflict(file, err) {
+ err = nil
+ }
+ return err
+ }
+ }
+ var err error
+ var hasConflict bool
+ rangeTopLevelDescriptors(file, func(d protoreflect.Descriptor) {
+ if prev := r.descsByName[d.FullName()]; prev != nil {
+ hasConflict = true
+ err = errors.New("file %q has a name conflict over %v", file.Path(), d.FullName())
+ err = amendErrorWithCaller(err, prev, file)
+ if r == GlobalFiles && ignoreConflict(d, err) {
+ err = nil
+ }
+ }
+ })
+ if hasConflict {
+ return err
+ }
+
+ for name := file.Package(); name != ""; name = name.Parent() {
+ if r.descsByName[name] == nil {
+ r.descsByName[name] = &packageDescriptor{}
+ }
+ }
+ p := r.descsByName[file.Package()].(*packageDescriptor)
+ p.files = append(p.files, file)
+ rangeTopLevelDescriptors(file, func(d protoreflect.Descriptor) {
+ r.descsByName[d.FullName()] = d
+ })
+ r.filesByPath[path] = file
+ return nil
+}
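
A hedged sketch of using a private Files registry instead of GlobalFiles (the helper name is hypothetical; fd is assumed to come from generated code, e.g. a message descriptor's ParentFile):

    import (
    	"google.golang.org/protobuf/reflect/protoreflect"
    	"google.golang.org/protobuf/reflect/protoregistry"
    )

    // newLocalRegistry registers a single file into a fresh, non-global registry,
    // avoiding the global conflict policy entirely.
    func newLocalRegistry(fd protoreflect.FileDescriptor) (*protoregistry.Files, error) {
    	files := new(protoregistry.Files)
    	if err := files.RegisterFile(fd); err != nil {
    		return nil, err
    	}
    	return files, nil
    }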
+
+// Several well-known types were hosted in the google.golang.org/genproto module
+// but were later moved to this module. To avoid a weak dependency on the
+// genproto module (and its relatively large set of transitive dependencies),
+// we rely on a registration conflict to determine whether the genproto version
+// is too old (i.e., does not contain aliases to the new type declarations).
+func (r *Files) checkGenProtoConflict(path string) {
+ if r != GlobalFiles {
+ return
+ }
+ var prevPath string
+ const prevModule = "google.golang.org/genproto"
+ const prevVersion = "cb27e3aa (May 26th, 2020)"
+ switch path {
+ case "google/protobuf/field_mask.proto":
+ prevPath = prevModule + "/protobuf/field_mask"
+ case "google/protobuf/api.proto":
+ prevPath = prevModule + "/protobuf/api"
+ case "google/protobuf/type.proto":
+ prevPath = prevModule + "/protobuf/ptype"
+ case "google/protobuf/source_context.proto":
+ prevPath = prevModule + "/protobuf/source_context"
+ default:
+ return
+ }
+ pkgName := strings.TrimSuffix(strings.TrimPrefix(path, "google/protobuf/"), ".proto")
+ pkgName = strings.Replace(pkgName, "_", "", -1) + "pb" // e.g., "field_mask" => "fieldmaskpb"
+ currPath := "google.golang.org/protobuf/types/known/" + pkgName
+ panic(fmt.Sprintf(""+
+ "duplicate registration of %q\n"+
+ "\n"+
+ "The generated definition for this file has moved:\n"+
+ "\tfrom: %q\n"+
+ "\tto: %q\n"+
+ "A dependency on the %q module must\n"+
+ "be at version %v or higher.\n"+
+ "\n"+
+ "Upgrade the dependency by running:\n"+
+ "\tgo get -u %v\n",
+ path, prevPath, currPath, prevModule, prevVersion, prevPath))
+}
+
+// FindDescriptorByName looks up a descriptor by the full name.
+//
+// This returns (nil, NotFound) if not found.
+func (r *Files) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) {
+ if r == nil {
+ return nil, NotFound
+ }
+ if r == GlobalFiles {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ prefix := name
+ suffix := nameSuffix("")
+ for prefix != "" {
+ if d, ok := r.descsByName[prefix]; ok {
+ switch d := d.(type) {
+ case protoreflect.EnumDescriptor:
+ if d.FullName() == name {
+ return d, nil
+ }
+ case protoreflect.EnumValueDescriptor:
+ if d.FullName() == name {
+ return d, nil
+ }
+ case protoreflect.MessageDescriptor:
+ if d.FullName() == name {
+ return d, nil
+ }
+ if d := findDescriptorInMessage(d, suffix); d != nil && d.FullName() == name {
+ return d, nil
+ }
+ case protoreflect.ExtensionDescriptor:
+ if d.FullName() == name {
+ return d, nil
+ }
+ case protoreflect.ServiceDescriptor:
+ if d.FullName() == name {
+ return d, nil
+ }
+ if d := d.Methods().ByName(suffix.Pop()); d != nil && d.FullName() == name {
+ return d, nil
+ }
+ }
+ return nil, NotFound
+ }
+ prefix = prefix.Parent()
+ suffix = nameSuffix(name[len(prefix)+len("."):])
+ }
+ return nil, NotFound
+}
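
A hedged sketch of a typical lookup against the global registry (the helper name is hypothetical), narrowing the generic Descriptor to a MessageDescriptor:

    import (
    	"fmt"

    	"google.golang.org/protobuf/reflect/protoreflect"
    	"google.golang.org/protobuf/reflect/protoregistry"
    )

    // lookupMessage resolves a full name and asserts that it names a message.
    func lookupMessage(name protoreflect.FullName) (protoreflect.MessageDescriptor, error) {
    	d, err := protoregistry.GlobalFiles.FindDescriptorByName(name)
    	if err != nil {
    		return nil, err // protoregistry.NotFound if the name is unregistered
    	}
    	md, ok := d.(protoreflect.MessageDescriptor)
    	if !ok {
    		return nil, fmt.Errorf("%v is not a message", name)
    	}
    	return md, nil
    }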
+
+func findDescriptorInMessage(md protoreflect.MessageDescriptor, suffix nameSuffix) protoreflect.Descriptor {
+ name := suffix.Pop()
+ if suffix == "" {
+ if ed := md.Enums().ByName(name); ed != nil {
+ return ed
+ }
+ for i := md.Enums().Len() - 1; i >= 0; i-- {
+ if vd := md.Enums().Get(i).Values().ByName(name); vd != nil {
+ return vd
+ }
+ }
+ if xd := md.Extensions().ByName(name); xd != nil {
+ return xd
+ }
+ if fd := md.Fields().ByName(name); fd != nil {
+ return fd
+ }
+ if od := md.Oneofs().ByName(name); od != nil {
+ return od
+ }
+ }
+ if md := md.Messages().ByName(name); md != nil {
+ if suffix == "" {
+ return md
+ }
+ return findDescriptorInMessage(md, suffix)
+ }
+ return nil
+}
+
+type nameSuffix string
+
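+// Pop removes and returns the leading name segment up to the first '.',
+// advancing the suffix past it.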
+func (s *nameSuffix) Pop() (name protoreflect.Name) {
+ if i := strings.IndexByte(string(*s), '.'); i >= 0 {
+ name, *s = protoreflect.Name((*s)[:i]), (*s)[i+1:]
+ } else {
+ name, *s = protoreflect.Name((*s)), ""
+ }
+ return name
+}
+
+// FindFileByPath looks up a file by the path.
+//
+// This returns (nil, NotFound) if not found.
+func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) {
+ if r == nil {
+ return nil, NotFound
+ }
+ if r == GlobalFiles {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ if fd, ok := r.filesByPath[path]; ok {
+ return fd, nil
+ }
+ return nil, NotFound
+}
+
+// NumFiles reports the number of registered files.
+func (r *Files) NumFiles() int {
+ if r == nil {
+ return 0
+ }
+ if r == GlobalFiles {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ return len(r.filesByPath)
+}
+
+// RangeFiles iterates over all registered files while f returns true.
+// The iteration order is undefined.
+func (r *Files) RangeFiles(f func(protoreflect.FileDescriptor) bool) {
+ if r == nil {
+ return
+ }
+ if r == GlobalFiles {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ for _, file := range r.filesByPath {
+ if !f(file) {
+ return
+ }
+ }
+}
+
+// NumFilesByPackage reports the number of registered files in a proto package.
+func (r *Files) NumFilesByPackage(name protoreflect.FullName) int {
+ if r == nil {
+ return 0
+ }
+ if r == GlobalFiles {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ p, ok := r.descsByName[name].(*packageDescriptor)
+ if !ok {
+ return 0
+ }
+ return len(p.files)
+}
+
+// RangeFilesByPackage iterates over all registered files in a given proto package
+// while f returns true. The iteration order is undefined.
+func (r *Files) RangeFilesByPackage(name protoreflect.FullName, f func(protoreflect.FileDescriptor) bool) {
+ if r == nil {
+ return
+ }
+ if r == GlobalFiles {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ p, ok := r.descsByName[name].(*packageDescriptor)
+ if !ok {
+ return
+ }
+ for _, file := range p.files {
+ if !f(file) {
+ return
+ }
+ }
+}
+
+// rangeTopLevelDescriptors iterates over all top-level descriptors in a file
+// which will be directly entered into the registry.
+func rangeTopLevelDescriptors(fd protoreflect.FileDescriptor, f func(protoreflect.Descriptor)) {
+ eds := fd.Enums()
+ for i := eds.Len() - 1; i >= 0; i-- {
+ f(eds.Get(i))
+ vds := eds.Get(i).Values()
+ for i := vds.Len() - 1; i >= 0; i-- {
+ f(vds.Get(i))
+ }
+ }
+ mds := fd.Messages()
+ for i := mds.Len() - 1; i >= 0; i-- {
+ f(mds.Get(i))
+ }
+ xds := fd.Extensions()
+ for i := xds.Len() - 1; i >= 0; i-- {
+ f(xds.Get(i))
+ }
+ sds := fd.Services()
+ for i := sds.Len() - 1; i >= 0; i-- {
+ f(sds.Get(i))
+ }
+}
+
+// MessageTypeResolver is an interface for looking up messages.
+//
+// A compliant implementation must deterministically return the same type
+// if no error is encountered.
+//
+// The Types type implements this interface.
+type MessageTypeResolver interface {
+ // FindMessageByName looks up a message by its full name.
+ // E.g., "google.protobuf.Any"
+ //
+	// This returns (nil, NotFound) if not found.
+ FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error)
+
+ // FindMessageByURL looks up a message by a URL identifier.
+ // See documentation on google.protobuf.Any.type_url for the URL format.
+ //
+ // This returns (nil, NotFound) if not found.
+ FindMessageByURL(url string) (protoreflect.MessageType, error)
+}
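+
+// Example (illustrative sketch): resolving the message type referenced by a
+// google.protobuf.Any type URL through the global registry, assuming the
+// corresponding generated package has registered the message:
+//
+//	mt, err := protoregistry.GlobalTypes.FindMessageByURL("type.googleapis.com/google.protobuf.Duration")
+//	if err != nil {
+//		// handle NotFound or a wrong-type error
+//	}
+//	msg := mt.New().Interface() // freshly allocated proto.Message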
+
+// ExtensionTypeResolver is an interface for looking up extensions.
+//
+// A compliant implementation must deterministically return the same type
+// if no error is encountered.
+//
+// The Types type implements this interface.
+type ExtensionTypeResolver interface {
+	// FindExtensionByName looks up an extension field by the field's full name.
+ // Note that this is the full name of the field as determined by
+ // where the extension is declared and is unrelated to the full name of the
+ // message being extended.
+ //
+ // This returns (nil, NotFound) if not found.
+ FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error)
+
+	// FindExtensionByNumber looks up an extension field by the field number
+ // within some parent message, identified by full name.
+ //
+ // This returns (nil, NotFound) if not found.
+ FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error)
+}
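+
+// Example (illustrative sketch): using a registry as the extension resolver
+// for unmarshaling, assuming a populated *Types value named myTypes:
+//
+//	opts := proto.UnmarshalOptions{Resolver: myTypes}
+//	if err := opts.Unmarshal(data, msg); err != nil {
+//		// handle error
+//	}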
+
+var (
+ _ MessageTypeResolver = (*Types)(nil)
+ _ ExtensionTypeResolver = (*Types)(nil)
+)
+
+// Types is a registry for looking up or iterating over descriptor types.
+// The Find and Range methods are safe for concurrent use.
+type Types struct {
+ typesByName typesByName
+ extensionsByMessage extensionsByMessage
+
+ numEnums int
+ numMessages int
+ numExtensions int
+}
+
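+// typesByName maps a descriptor full name to an EnumType, MessageType, or
+// ExtensionType; extensionsByMessage additionally indexes extension types by
+// the containing message's full name and field number.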
+type (
+ typesByName map[protoreflect.FullName]interface{}
+ extensionsByMessage map[protoreflect.FullName]extensionsByNumber
+ extensionsByNumber map[protoreflect.FieldNumber]protoreflect.ExtensionType
+)
+
+// RegisterMessage registers the provided message type.
+//
+// If a naming conflict occurs, the type is not registered and an error is returned.
+func (r *Types) RegisterMessage(mt protoreflect.MessageType) error {
+ // Under rare circumstances getting the descriptor might recursively
+ // examine the registry, so fetch it before locking.
+ md := mt.Descriptor()
+
+ if r == GlobalTypes {
+ globalMutex.Lock()
+ defer globalMutex.Unlock()
+ }
+
+ if err := r.register("message", md, mt); err != nil {
+ return err
+ }
+ r.numMessages++
+ return nil
+}
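+
+// Example (illustrative sketch): populating a local registry with a generated
+// message type, assuming the durationpb package is imported:
+//
+//	var types protoregistry.Types
+//	if err := types.RegisterMessage((&durationpb.Duration{}).ProtoReflect().Type()); err != nil {
+//		// handle the naming conflict
+//	}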
+
+// RegisterEnum registers the provided enum type.
+//
+// If a naming conflict occurs, the type is not registered and an error is returned.
+func (r *Types) RegisterEnum(et protoreflect.EnumType) error {
+ // Under rare circumstances getting the descriptor might recursively
+ // examine the registry, so fetch it before locking.
+ ed := et.Descriptor()
+
+ if r == GlobalTypes {
+ globalMutex.Lock()
+ defer globalMutex.Unlock()
+ }
+
+ if err := r.register("enum", ed, et); err != nil {
+ return err
+ }
+ r.numEnums++
+ return nil
+}
+
+// RegisterExtension registers the provided extension type.
+//
+// If a naming conflict occurs, the type is not registered and an error is returned.
+func (r *Types) RegisterExtension(xt protoreflect.ExtensionType) error {
+ // Under rare circumstances getting the descriptor might recursively
+ // examine the registry, so fetch it before locking.
+ //
+ // A known case where this can happen: Fetching the TypeDescriptor for a
+ // legacy ExtensionDesc can consult the global registry.
+ xd := xt.TypeDescriptor()
+
+ if r == GlobalTypes {
+ globalMutex.Lock()
+ defer globalMutex.Unlock()
+ }
+
+ field := xd.Number()
+ message := xd.ContainingMessage().FullName()
+ if prev := r.extensionsByMessage[message][field]; prev != nil {
+ err := errors.New("extension number %d is already registered on message %v", field, message)
+ err = amendErrorWithCaller(err, prev, xt)
+ if !(r == GlobalTypes && ignoreConflict(xd, err)) {
+ return err
+ }
+ }
+
+ if err := r.register("extension", xd, xt); err != nil {
+ return err
+ }
+ if r.extensionsByMessage == nil {
+ r.extensionsByMessage = make(extensionsByMessage)
+ }
+ if r.extensionsByMessage[message] == nil {
+ r.extensionsByMessage[message] = make(extensionsByNumber)
+ }
+ r.extensionsByMessage[message][field] = xt
+ r.numExtensions++
+ return nil
+}
+
+func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interface{}) error {
+ name := desc.FullName()
+ prev := r.typesByName[name]
+ if prev != nil {
+ err := errors.New("%v %v is already registered", kind, name)
+ err = amendErrorWithCaller(err, prev, typ)
+ if !(r == GlobalTypes && ignoreConflict(desc, err)) {
+ return err
+ }
+ }
+ if r.typesByName == nil {
+ r.typesByName = make(typesByName)
+ }
+ r.typesByName[name] = typ
+ return nil
+}
+
+// FindEnumByName looks up an enum by its full name.
+// E.g., "google.protobuf.Field.Kind".
+//
+// This returns (nil, NotFound) if not found.
+func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumType, error) {
+ if r == nil {
+ return nil, NotFound
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ if v := r.typesByName[enum]; v != nil {
+ if et, _ := v.(protoreflect.EnumType); et != nil {
+ return et, nil
+ }
+ return nil, errors.New("found wrong type: got %v, want enum", typeName(v))
+ }
+ return nil, NotFound
+}
+
+// FindMessageByName looks up a message by its full name,
+// e.g. "google.protobuf.Any".
+//
+// This returns (nil, NotFound) if not found.
+func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) {
+ if r == nil {
+ return nil, NotFound
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ if v := r.typesByName[message]; v != nil {
+ if mt, _ := v.(protoreflect.MessageType); mt != nil {
+ return mt, nil
+ }
+ return nil, errors.New("found wrong type: got %v, want message", typeName(v))
+ }
+ return nil, NotFound
+}
+
+// FindMessageByURL looks up a message by a URL identifier.
+// See documentation on google.protobuf.Any.type_url for the URL format.
+//
+// This returns (nil, NotFound) if not found.
+func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) {
+ // This function is similar to FindMessageByName but
+ // truncates anything before and including '/' in the URL.
+ if r == nil {
+ return nil, NotFound
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ message := protoreflect.FullName(url)
+ if i := strings.LastIndexByte(url, '/'); i >= 0 {
+ message = message[i+len("/"):]
+ }
+
+ if v := r.typesByName[message]; v != nil {
+ if mt, _ := v.(protoreflect.MessageType); mt != nil {
+ return mt, nil
+ }
+ return nil, errors.New("found wrong type: got %v, want message", typeName(v))
+ }
+ return nil, NotFound
+}
+
+// FindExtensionByName looks up an extension field by the field's full name.
+// Note that this is the full name of the field as determined by
+// where the extension is declared and is unrelated to the full name of the
+// message being extended.
+//
+// This returns (nil, NotFound) if not found.
+func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
+ if r == nil {
+ return nil, NotFound
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ if v := r.typesByName[field]; v != nil {
+ if xt, _ := v.(protoreflect.ExtensionType); xt != nil {
+ return xt, nil
+ }
+
+ // MessageSet extensions are special in that the name of the extension
+ // is the name of the message type used to extend the MessageSet.
+ // This naming scheme is used by text and JSON serialization.
+ //
+ // This feature is protected by the ProtoLegacy flag since MessageSets
+ // are a proto1 feature that is long deprecated.
+ if flags.ProtoLegacy {
+ if _, ok := v.(protoreflect.MessageType); ok {
+ field := field.Append(messageset.ExtensionName)
+ if v := r.typesByName[field]; v != nil {
+ if xt, _ := v.(protoreflect.ExtensionType); xt != nil {
+ if messageset.IsMessageSetExtension(xt.TypeDescriptor()) {
+ return xt, nil
+ }
+ }
+ }
+ }
+ }
+
+ return nil, errors.New("found wrong type: got %v, want extension", typeName(v))
+ }
+ return nil, NotFound
+}
+
+// FindExtensionByNumber looks up an extension field by the field number
+// within some parent message, identified by full name.
+//
+// This returns (nil, NotFound) if not found.
+func (r *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
+ if r == nil {
+ return nil, NotFound
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ if xt, ok := r.extensionsByMessage[message][field]; ok {
+ return xt, nil
+ }
+ return nil, NotFound
+}
+
+// NumEnums reports the number of registered enums.
+func (r *Types) NumEnums() int {
+ if r == nil {
+ return 0
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ return r.numEnums
+}
+
+// RangeEnums iterates over all registered enums while f returns true.
+// Iteration order is undefined.
+func (r *Types) RangeEnums(f func(protoreflect.EnumType) bool) {
+ if r == nil {
+ return
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ for _, typ := range r.typesByName {
+ if et, ok := typ.(protoreflect.EnumType); ok {
+ if !f(et) {
+ return
+ }
+ }
+ }
+}
+
+// NumMessages reports the number of registered messages.
+func (r *Types) NumMessages() int {
+ if r == nil {
+ return 0
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ return r.numMessages
+}
+
+// RangeMessages iterates over all registered messages while f returns true.
+// Iteration order is undefined.
+func (r *Types) RangeMessages(f func(protoreflect.MessageType) bool) {
+ if r == nil {
+ return
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ for _, typ := range r.typesByName {
+ if mt, ok := typ.(protoreflect.MessageType); ok {
+ if !f(mt) {
+ return
+ }
+ }
+ }
+}
+
+// NumExtensions reports the number of registered extensions.
+func (r *Types) NumExtensions() int {
+ if r == nil {
+ return 0
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ return r.numExtensions
+}
+
+// RangeExtensions iterates over all registered extensions while f returns true.
+// Iteration order is undefined.
+func (r *Types) RangeExtensions(f func(protoreflect.ExtensionType) bool) {
+ if r == nil {
+ return
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ for _, typ := range r.typesByName {
+ if xt, ok := typ.(protoreflect.ExtensionType); ok {
+ if !f(xt) {
+ return
+ }
+ }
+ }
+}
+
+// NumExtensionsByMessage reports the number of registered extensions for
+// a given message type.
+func (r *Types) NumExtensionsByMessage(message protoreflect.FullName) int {
+ if r == nil {
+ return 0
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ return len(r.extensionsByMessage[message])
+}
+
+// RangeExtensionsByMessage iterates over all registered extensions filtered
+// by a given message type while f returns true. Iteration order is undefined.
+func (r *Types) RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool) {
+ if r == nil {
+ return
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ for _, xt := range r.extensionsByMessage[message] {
+ if !f(xt) {
+ return
+ }
+ }
+}
+
+func typeName(t interface{}) string {
+ switch t.(type) {
+ case protoreflect.EnumType:
+ return "enum"
+ case protoreflect.MessageType:
+ return "message"
+ case protoreflect.ExtensionType:
+ return "extension"
+ default:
+ return fmt.Sprintf("%T", t)
+ }
+}
+
+func amendErrorWithCaller(err error, prev, curr interface{}) error {
+ prevPkg := goPackage(prev)
+ currPkg := goPackage(curr)
+ if prevPkg == "" || currPkg == "" || prevPkg == currPkg {
+ return err
+ }
+ return errors.New("%s\n\tpreviously from: %q\n\tcurrently from: %q", err, prevPkg, currPkg)
+}
+
+func goPackage(v interface{}) string {
+ switch d := v.(type) {
+ case protoreflect.EnumType:
+ v = d.Descriptor()
+ case protoreflect.MessageType:
+ v = d.Descriptor()
+ case protoreflect.ExtensionType:
+ v = d.TypeDescriptor()
+ }
+ if d, ok := v.(protoreflect.Descriptor); ok {
+ v = d.ParentFile()
+ }
+ if d, ok := v.(interface{ GoPackagePath() string }); ok {
+ return d.GoPackagePath()
+ }
+ return ""
+}
diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go b/vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go
new file mode 100644
index 0000000..c587276
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go
@@ -0,0 +1,15 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protoiface
+
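+// MessageV1 is the message interface as defined by the legacy
+// github.com/golang/protobuf API (Reset, String, ProtoMessage).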
+type MessageV1 interface {
+ Reset()
+ String() string
+ ProtoMessage()
+}
+
+type ExtensionRangeV1 struct {
+ Start, End int32 // both inclusive
+}
diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
new file mode 100644
index 0000000..32c04f6
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
@@ -0,0 +1,167 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protoiface contains types referenced or implemented by messages.
+//
+// WARNING: This package should only be imported by message implementations.
+// The functionality found in this package should be accessed through
+// higher-level abstractions provided by the proto package.
+package protoiface
+
+import (
+ "google.golang.org/protobuf/internal/pragma"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// Methods is a set of optional fast-path implementations of various operations.
+type Methods = struct {
+ pragma.NoUnkeyedLiterals
+
+ // Flags indicate support for optional features.
+ Flags SupportFlags
+
+ // Size returns the size in bytes of the wire-format encoding of a message.
+ // Marshal must be provided if a custom Size is provided.
+ Size func(SizeInput) SizeOutput
+
+ // Marshal formats a message in the wire-format encoding to the provided buffer.
+ // Size should be provided if a custom Marshal is provided.
+ // It must not return an error for a partial message.
+ Marshal func(MarshalInput) (MarshalOutput, error)
+
+ // Unmarshal parses the wire-format encoding and merges the result into a message.
+ // It must not reset the target message or return an error for a partial message.
+ Unmarshal func(UnmarshalInput) (UnmarshalOutput, error)
+
+ // Merge merges the contents of a source message into a destination message.
+ Merge func(MergeInput) MergeOutput
+
+ // CheckInitialized returns an error if any required fields in the message are not set.
+ CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error)
+}
+
+// SupportFlags indicate support for optional features.
+type SupportFlags = uint64
+
+const (
+ // SupportMarshalDeterministic reports whether MarshalOptions.Deterministic is supported.
+ SupportMarshalDeterministic SupportFlags = 1 << iota
+
+ // SupportUnmarshalDiscardUnknown reports whether UnmarshalOptions.DiscardUnknown is supported.
+ SupportUnmarshalDiscardUnknown
+)
+
+// SizeInput is input to the Size method.
+type SizeInput = struct {
+ pragma.NoUnkeyedLiterals
+
+ Message protoreflect.Message
+ Flags MarshalInputFlags
+}
+
+// SizeOutput is output from the Size method.
+type SizeOutput = struct {
+ pragma.NoUnkeyedLiterals
+
+ Size int
+}
+
+// MarshalInput is input to the Marshal method.
+type MarshalInput = struct {
+ pragma.NoUnkeyedLiterals
+
+ Message protoreflect.Message
+ Buf []byte // output is appended to this buffer
+ Flags MarshalInputFlags
+}
+
+// MarshalOutput is output from the Marshal method.
+type MarshalOutput = struct {
+ pragma.NoUnkeyedLiterals
+
+ Buf []byte // contains marshaled message
+}
+
+// MarshalInputFlags configure the marshaler.
+// Most flags correspond to fields in proto.MarshalOptions.
+type MarshalInputFlags = uint8
+
+const (
+ MarshalDeterministic MarshalInputFlags = 1 << iota
+ MarshalUseCachedSize
+)
+
+// UnmarshalInput is input to the Unmarshal method.
+type UnmarshalInput = struct {
+ pragma.NoUnkeyedLiterals
+
+ Message protoreflect.Message
+ Buf []byte // input buffer
+ Flags UnmarshalInputFlags
+ Resolver interface {
+ FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error)
+ FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error)
+ }
+}
+
+// UnmarshalOutput is output from the Unmarshal method.
+type UnmarshalOutput = struct {
+ pragma.NoUnkeyedLiterals
+
+ Flags UnmarshalOutputFlags
+}
+
+// UnmarshalInputFlags configure the unmarshaler.
+// Most flags correspond to fields in proto.UnmarshalOptions.
+type UnmarshalInputFlags = uint8
+
+const (
+ UnmarshalDiscardUnknown UnmarshalInputFlags = 1 << iota
+)
+
+// UnmarshalOutputFlags are output from the Unmarshal method.
+type UnmarshalOutputFlags = uint8
+
+const (
+ // UnmarshalInitialized may be set on return if all required fields are known to be set.
+ // If unset, then it does not necessarily indicate that the message is uninitialized,
+ // only that its status could not be confirmed.
+ UnmarshalInitialized UnmarshalOutputFlags = 1 << iota
+)
+
+// MergeInput is input to the Merge method.
+type MergeInput = struct {
+ pragma.NoUnkeyedLiterals
+
+ Source protoreflect.Message
+ Destination protoreflect.Message
+}
+
+// MergeOutput is output from the Merge method.
+type MergeOutput = struct {
+ pragma.NoUnkeyedLiterals
+
+ Flags MergeOutputFlags
+}
+
+// MergeOutputFlags are output from the Merge method.
+type MergeOutputFlags = uint8
+
+const (
+ // MergeComplete reports whether the merge was performed.
+ // If unset, the merger must have made no changes to the destination.
+ MergeComplete MergeOutputFlags = 1 << iota
+)
+
+// CheckInitializedInput is input to the CheckInitialized method.
+type CheckInitializedInput = struct {
+ pragma.NoUnkeyedLiterals
+
+ Message protoreflect.Message
+}
+
+// CheckInitializedOutput is output from the CheckInitialized method.
+type CheckInitializedOutput = struct {
+ pragma.NoUnkeyedLiterals
+}
diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go
new file mode 100644
index 0000000..4a1ab7f
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go
@@ -0,0 +1,44 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protoimpl contains the default implementation for messages
+// generated by protoc-gen-go.
+//
+// WARNING: This package should only ever be imported by generated messages.
+// The compatibility agreement covers nothing except for functionality needed
+// to keep existing generated messages operational. Breakages that occur due
+// to unauthorized usages of this package are not the author's responsibility.
+package protoimpl
+
+import (
+ "google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/internal/filetype"
+ "google.golang.org/protobuf/internal/impl"
+)
+
+// UnsafeEnabled specifies whether package unsafe can be used.
+const UnsafeEnabled = impl.UnsafeEnabled
+
+type (
+ // Types used by generated code in init functions.
+ DescBuilder = filedesc.Builder
+ TypeBuilder = filetype.Builder
+
+ // Types used by generated code to implement EnumType, MessageType, and ExtensionType.
+ EnumInfo = impl.EnumInfo
+ MessageInfo = impl.MessageInfo
+ ExtensionInfo = impl.ExtensionInfo
+
+ // Types embedded in generated messages.
+ MessageState = impl.MessageState
+ SizeCache = impl.SizeCache
+ WeakFields = impl.WeakFields
+ UnknownFields = impl.UnknownFields
+ ExtensionFields = impl.ExtensionFields
+ ExtensionFieldV1 = impl.ExtensionField
+
+ Pointer = impl.Pointer
+)
+
+var X impl.Export
diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go
new file mode 100644
index 0000000..ff094e1
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go
@@ -0,0 +1,56 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protoimpl
+
+import (
+ "google.golang.org/protobuf/internal/version"
+)
+
+const (
+ // MaxVersion is the maximum supported version for generated .pb.go files.
+ // It is always the current version of the module.
+ MaxVersion = version.Minor
+
+ // GenVersion is the runtime version required by generated .pb.go files.
+ // This is incremented when generated code relies on new functionality
+ // in the runtime.
+ GenVersion = 20
+
+ // MinVersion is the minimum supported version for generated .pb.go files.
+ // This is incremented when the runtime drops support for old code.
+ MinVersion = 0
+)
+
+// EnforceVersion is used by code generated by protoc-gen-go
+// to statically enforce minimum and maximum versions of this package.
+// A compilation failure implies either that:
+// * the runtime package is too old and needs to be updated OR
+// * the generated code is too old and needs to be regenerated.
+//
+// The runtime package can be upgraded by running:
+// go get google.golang.org/protobuf
+//
+// The generated code can be regenerated by running:
+// protoc --go_out=${PROTOC_GEN_GO_ARGS} ${PROTO_FILES}
+//
+// Example usage by generated code:
+// const (
+// // Verify that this generated code is sufficiently up-to-date.
+// _ = protoimpl.EnforceVersion(genVersion - protoimpl.MinVersion)
+// // Verify that runtime/protoimpl is sufficiently up-to-date.
+// _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - genVersion)
+// )
+//
+// The genVersion is the current minor version used to generate the code.
+// This compile-time check relies on negative integer overflow of a uint
+// being a compilation failure (guaranteed by the Go specification).
+type EnforceVersion uint
+
+// This enforces the following invariant:
+// MinVersion ≤ GenVersion ≤ MaxVersion
+const (
+ _ = EnforceVersion(GenVersion - MinVersion)
+ _ = EnforceVersion(MaxVersion - GenVersion)
+)
diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
new file mode 100644
index 0000000..f77239f
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
@@ -0,0 +1,4039 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+// Based on original Protocol Buffers design by
+// Sanjay Ghemawat, Jeff Dean, and others.
+//
+// The messages in this file describe the definitions found in .proto files.
+// A valid .proto file can be translated directly to a FileDescriptorProto
+// without any other information (e.g. without reading its imports).
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/descriptor.proto
+
+package descriptorpb
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoiface "google.golang.org/protobuf/runtime/protoiface"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+type FieldDescriptorProto_Type int32
+
+const (
+ // 0 is reserved for errors.
+ // Order is weird for historical reasons.
+ FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1
+ FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2
+ // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if
+ // negative values are likely.
+ FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3
+ FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4
+ // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if
+ // negative values are likely.
+ FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5
+ FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6
+ FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7
+ FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8
+ FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9
+ // Tag-delimited aggregate.
+ // Group type is deprecated and not supported in proto3. However, Proto3
+ // implementations should still be able to parse the group wire format and
+ // treat group fields as unknown fields.
+ FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10
+ FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 // Length-delimited aggregate.
+ // New in version 2.
+ FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12
+ FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13
+ FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14
+ FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15
+ FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16
+ FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 // Uses ZigZag encoding.
+ FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 // Uses ZigZag encoding.
+)
+
+// Enum value maps for FieldDescriptorProto_Type.
+var (
+ FieldDescriptorProto_Type_name = map[int32]string{
+ 1: "TYPE_DOUBLE",
+ 2: "TYPE_FLOAT",
+ 3: "TYPE_INT64",
+ 4: "TYPE_UINT64",
+ 5: "TYPE_INT32",
+ 6: "TYPE_FIXED64",
+ 7: "TYPE_FIXED32",
+ 8: "TYPE_BOOL",
+ 9: "TYPE_STRING",
+ 10: "TYPE_GROUP",
+ 11: "TYPE_MESSAGE",
+ 12: "TYPE_BYTES",
+ 13: "TYPE_UINT32",
+ 14: "TYPE_ENUM",
+ 15: "TYPE_SFIXED32",
+ 16: "TYPE_SFIXED64",
+ 17: "TYPE_SINT32",
+ 18: "TYPE_SINT64",
+ }
+ FieldDescriptorProto_Type_value = map[string]int32{
+ "TYPE_DOUBLE": 1,
+ "TYPE_FLOAT": 2,
+ "TYPE_INT64": 3,
+ "TYPE_UINT64": 4,
+ "TYPE_INT32": 5,
+ "TYPE_FIXED64": 6,
+ "TYPE_FIXED32": 7,
+ "TYPE_BOOL": 8,
+ "TYPE_STRING": 9,
+ "TYPE_GROUP": 10,
+ "TYPE_MESSAGE": 11,
+ "TYPE_BYTES": 12,
+ "TYPE_UINT32": 13,
+ "TYPE_ENUM": 14,
+ "TYPE_SFIXED32": 15,
+ "TYPE_SFIXED64": 16,
+ "TYPE_SINT32": 17,
+ "TYPE_SINT64": 18,
+ }
+)
+
+func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type {
+ p := new(FieldDescriptorProto_Type)
+ *p = x
+ return p
+}
+
+func (x FieldDescriptorProto_Type) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor()
+}
+
+func (FieldDescriptorProto_Type) Type() protoreflect.EnumType {
+ return &file_google_protobuf_descriptor_proto_enumTypes[0]
+}
+
+func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FieldDescriptorProto_Type) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = FieldDescriptorProto_Type(num)
+ return nil
+}
+
+// Deprecated: Use FieldDescriptorProto_Type.Descriptor instead.
+func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{4, 0}
+}
+
+type FieldDescriptorProto_Label int32
+
+const (
+ // 0 is reserved for errors
+ FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1
+ FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2
+ FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3
+)
+
+// Enum value maps for FieldDescriptorProto_Label.
+var (
+ FieldDescriptorProto_Label_name = map[int32]string{
+ 1: "LABEL_OPTIONAL",
+ 2: "LABEL_REQUIRED",
+ 3: "LABEL_REPEATED",
+ }
+ FieldDescriptorProto_Label_value = map[string]int32{
+ "LABEL_OPTIONAL": 1,
+ "LABEL_REQUIRED": 2,
+ "LABEL_REPEATED": 3,
+ }
+)
+
+func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label {
+ p := new(FieldDescriptorProto_Label)
+ *p = x
+ return p
+}
+
+func (x FieldDescriptorProto_Label) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor()
+}
+
+func (FieldDescriptorProto_Label) Type() protoreflect.EnumType {
+ return &file_google_protobuf_descriptor_proto_enumTypes[1]
+}
+
+func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FieldDescriptorProto_Label) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = FieldDescriptorProto_Label(num)
+ return nil
+}
+
+// Deprecated: Use FieldDescriptorProto_Label.Descriptor instead.
+func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{4, 1}
+}
+
+// Generated classes can be optimized for speed or code size.
+type FileOptions_OptimizeMode int32
+
+const (
+ FileOptions_SPEED FileOptions_OptimizeMode = 1 // Generate complete code for parsing, serialization,
+ // etc.
+ FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 // Use ReflectionOps to implement these methods.
+ FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 // Generate code using MessageLite and the lite runtime.
+)
+
+// Enum value maps for FileOptions_OptimizeMode.
+var (
+ FileOptions_OptimizeMode_name = map[int32]string{
+ 1: "SPEED",
+ 2: "CODE_SIZE",
+ 3: "LITE_RUNTIME",
+ }
+ FileOptions_OptimizeMode_value = map[string]int32{
+ "SPEED": 1,
+ "CODE_SIZE": 2,
+ "LITE_RUNTIME": 3,
+ }
+)
+
+func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode {
+ p := new(FileOptions_OptimizeMode)
+ *p = x
+ return p
+}
+
+func (x FileOptions_OptimizeMode) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor()
+}
+
+func (FileOptions_OptimizeMode) Type() protoreflect.EnumType {
+ return &file_google_protobuf_descriptor_proto_enumTypes[2]
+}
+
+func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FileOptions_OptimizeMode) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = FileOptions_OptimizeMode(num)
+ return nil
+}
+
+// Deprecated: Use FileOptions_OptimizeMode.Descriptor instead.
+func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{10, 0}
+}
+
+type FieldOptions_CType int32
+
+const (
+ // Default mode.
+ FieldOptions_STRING FieldOptions_CType = 0
+ FieldOptions_CORD FieldOptions_CType = 1
+ FieldOptions_STRING_PIECE FieldOptions_CType = 2
+)
+
+// Enum value maps for FieldOptions_CType.
+var (
+ FieldOptions_CType_name = map[int32]string{
+ 0: "STRING",
+ 1: "CORD",
+ 2: "STRING_PIECE",
+ }
+ FieldOptions_CType_value = map[string]int32{
+ "STRING": 0,
+ "CORD": 1,
+ "STRING_PIECE": 2,
+ }
+)
+
+func (x FieldOptions_CType) Enum() *FieldOptions_CType {
+ p := new(FieldOptions_CType)
+ *p = x
+ return p
+}
+
+func (x FieldOptions_CType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor()
+}
+
+func (FieldOptions_CType) Type() protoreflect.EnumType {
+ return &file_google_protobuf_descriptor_proto_enumTypes[3]
+}
+
+func (x FieldOptions_CType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FieldOptions_CType) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = FieldOptions_CType(num)
+ return nil
+}
+
+// Deprecated: Use FieldOptions_CType.Descriptor instead.
+func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 0}
+}
+
+type FieldOptions_JSType int32
+
+const (
+ // Use the default type.
+ FieldOptions_JS_NORMAL FieldOptions_JSType = 0
+ // Use JavaScript strings.
+ FieldOptions_JS_STRING FieldOptions_JSType = 1
+ // Use JavaScript numbers.
+ FieldOptions_JS_NUMBER FieldOptions_JSType = 2
+)
+
+// Enum value maps for FieldOptions_JSType.
+var (
+ FieldOptions_JSType_name = map[int32]string{
+ 0: "JS_NORMAL",
+ 1: "JS_STRING",
+ 2: "JS_NUMBER",
+ }
+ FieldOptions_JSType_value = map[string]int32{
+ "JS_NORMAL": 0,
+ "JS_STRING": 1,
+ "JS_NUMBER": 2,
+ }
+)
+
+func (x FieldOptions_JSType) Enum() *FieldOptions_JSType {
+ p := new(FieldOptions_JSType)
+ *p = x
+ return p
+}
+
+func (x FieldOptions_JSType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor()
+}
+
+func (FieldOptions_JSType) Type() protoreflect.EnumType {
+ return &file_google_protobuf_descriptor_proto_enumTypes[4]
+}
+
+func (x FieldOptions_JSType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FieldOptions_JSType) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = FieldOptions_JSType(num)
+ return nil
+}
+
+// Deprecated: Use FieldOptions_JSType.Descriptor instead.
+func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 1}
+}
+
+// Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
+// or neither? HTTP-based RPC implementations may choose the GET verb for safe
+// methods, and the PUT verb for idempotent methods, instead of the default POST.
+type MethodOptions_IdempotencyLevel int32
+
+const (
+ MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0
+ MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 // implies idempotent
+ MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 // idempotent, but may have side effects
+)
+
+// Enum value maps for MethodOptions_IdempotencyLevel.
+var (
+ MethodOptions_IdempotencyLevel_name = map[int32]string{
+ 0: "IDEMPOTENCY_UNKNOWN",
+ 1: "NO_SIDE_EFFECTS",
+ 2: "IDEMPOTENT",
+ }
+ MethodOptions_IdempotencyLevel_value = map[string]int32{
+ "IDEMPOTENCY_UNKNOWN": 0,
+ "NO_SIDE_EFFECTS": 1,
+ "IDEMPOTENT": 2,
+ }
+)
+
+func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel {
+ p := new(MethodOptions_IdempotencyLevel)
+ *p = x
+ return p
+}
+
+func (x MethodOptions_IdempotencyLevel) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor()
+}
+
+func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType {
+ return &file_google_protobuf_descriptor_proto_enumTypes[5]
+}
+
+func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = MethodOptions_IdempotencyLevel(num)
+ return nil
+}
+
+// Deprecated: Use MethodOptions_IdempotencyLevel.Descriptor instead.
+func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17, 0}
+}
+
+// The protocol compiler can output a FileDescriptorSet containing the .proto
+// files it parses.
+type FileDescriptorSet struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"`
+}
+
+func (x *FileDescriptorSet) Reset() {
+ *x = FileDescriptorSet{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FileDescriptorSet) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FileDescriptorSet) ProtoMessage() {}
+
+func (x *FileDescriptorSet) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FileDescriptorSet.ProtoReflect.Descriptor instead.
+func (*FileDescriptorSet) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *FileDescriptorSet) GetFile() []*FileDescriptorProto {
+ if x != nil {
+ return x.File
+ }
+ return nil
+}
+
+// Describes a complete .proto file.
+type FileDescriptorProto struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // file name, relative to root of source tree
+ Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` // e.g. "foo", "foo.bar", etc.
+ // Names of files imported by this file.
+ Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"`
+ // Indexes of the public imported files in the dependency list above.
+ PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"`
+ // Indexes of the weak imported files in the dependency list.
+ // For Google-internal migration only. Do not use.
+ WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"`
+ // All top-level definitions in this file.
+ MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"`
+ EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
+ Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"`
+ Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"`
+ Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
+ // This field contains optional information about the original source code.
+ // You may safely remove this entire field without harming runtime
+ // functionality of the descriptors -- the information is needed only by
+ // development tools.
+ SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"`
+ // The syntax of the proto file.
+ // The supported values are "proto2" and "proto3".
+ Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"`
+}
+
+func (x *FileDescriptorProto) Reset() {
+ *x = FileDescriptorProto{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FileDescriptorProto) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FileDescriptorProto) ProtoMessage() {}
+
+func (x *FileDescriptorProto) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FileDescriptorProto.ProtoReflect.Descriptor instead.
+func (*FileDescriptorProto) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *FileDescriptorProto) GetName() string {
+ if x != nil && x.Name != nil {
+ return *x.Name
+ }
+ return ""
+}
+
+func (x *FileDescriptorProto) GetPackage() string {
+ if x != nil && x.Package != nil {
+ return *x.Package
+ }
+ return ""
+}
+
+func (x *FileDescriptorProto) GetDependency() []string {
+ if x != nil {
+ return x.Dependency
+ }
+ return nil
+}
+
+func (x *FileDescriptorProto) GetPublicDependency() []int32 {
+ if x != nil {
+ return x.PublicDependency
+ }
+ return nil
+}
+
+func (x *FileDescriptorProto) GetWeakDependency() []int32 {
+ if x != nil {
+ return x.WeakDependency
+ }
+ return nil
+}
+
+func (x *FileDescriptorProto) GetMessageType() []*DescriptorProto {
+ if x != nil {
+ return x.MessageType
+ }
+ return nil
+}
+
+func (x *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto {
+ if x != nil {
+ return x.EnumType
+ }
+ return nil
+}
+
+func (x *FileDescriptorProto) GetService() []*ServiceDescriptorProto {
+ if x != nil {
+ return x.Service
+ }
+ return nil
+}
+
+func (x *FileDescriptorProto) GetExtension() []*FieldDescriptorProto {
+ if x != nil {
+ return x.Extension
+ }
+ return nil
+}
+
+func (x *FileDescriptorProto) GetOptions() *FileOptions {
+ if x != nil {
+ return x.Options
+ }
+ return nil
+}
+
+func (x *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo {
+ if x != nil {
+ return x.SourceCodeInfo
+ }
+ return nil
+}
+
+func (x *FileDescriptorProto) GetSyntax() string {
+ if x != nil && x.Syntax != nil {
+ return *x.Syntax
+ }
+ return ""
+}
+
+// Describes a message type.
+type DescriptorProto struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"`
+ Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"`
+ NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"`
+ EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
+ ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"`
+ OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"`
+ Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"`
+ ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
+ // Reserved field names, which may not be used by fields in the same message.
+ // A given name may only be reserved once.
+ ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+}
+
+func (x *DescriptorProto) Reset() {
+ *x = DescriptorProto{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DescriptorProto) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DescriptorProto) ProtoMessage() {}
+
+func (x *DescriptorProto) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DescriptorProto.ProtoReflect.Descriptor instead.
+func (*DescriptorProto) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *DescriptorProto) GetName() string {
+ if x != nil && x.Name != nil {
+ return *x.Name
+ }
+ return ""
+}
+
+func (x *DescriptorProto) GetField() []*FieldDescriptorProto {
+ if x != nil {
+ return x.Field
+ }
+ return nil
+}
+
+func (x *DescriptorProto) GetExtension() []*FieldDescriptorProto {
+ if x != nil {
+ return x.Extension
+ }
+ return nil
+}
+
+func (x *DescriptorProto) GetNestedType() []*DescriptorProto {
+ if x != nil {
+ return x.NestedType
+ }
+ return nil
+}
+
+func (x *DescriptorProto) GetEnumType() []*EnumDescriptorProto {
+ if x != nil {
+ return x.EnumType
+ }
+ return nil
+}
+
+func (x *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange {
+ if x != nil {
+ return x.ExtensionRange
+ }
+ return nil
+}
+
+func (x *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto {
+ if x != nil {
+ return x.OneofDecl
+ }
+ return nil
+}
+
+func (x *DescriptorProto) GetOptions() *MessageOptions {
+ if x != nil {
+ return x.Options
+ }
+ return nil
+}
+
+func (x *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange {
+ if x != nil {
+ return x.ReservedRange
+ }
+ return nil
+}
+
+func (x *DescriptorProto) GetReservedName() []string {
+ if x != nil {
+ return x.ReservedName
+ }
+ return nil
+}
+
+type ExtensionRangeOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+ extensionFields protoimpl.ExtensionFields
+
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+}
+
+func (x *ExtensionRangeOptions) Reset() {
+ *x = ExtensionRangeOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ExtensionRangeOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExtensionRangeOptions) ProtoMessage() {}
+
+func (x *ExtensionRangeOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExtensionRangeOptions.ProtoReflect.Descriptor instead.
+func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{3}
+}
+
+var extRange_ExtensionRangeOptions = []protoiface.ExtensionRangeV1{
+ {Start: 1000, End: 536870911},
+}
+
+// Deprecated: Use ExtensionRangeOptions.ProtoReflect.Descriptor.ExtensionRanges instead.
+func (*ExtensionRangeOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 {
+ return extRange_ExtensionRangeOptions
+}
+
+func (x *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if x != nil {
+ return x.UninterpretedOption
+ }
+ return nil
+}
+
+// Describes a field within a message.
+type FieldDescriptorProto struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"`
+ Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"`
+ // If type_name is set, this need not be set. If both this and type_name
+ // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
+ Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"`
+ // For message and enum types, this is the name of the type. If the name
+ // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping
+ // rules are used to find the type (i.e. first the nested types within this
+ // message are searched, then within the parent, on up to the root
+ // namespace).
+ TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"`
+ // For extensions, this is the name of the type being extended. It is
+ // resolved in the same manner as type_name.
+ Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"`
+ // For numeric types, contains the original text representation of the value.
+ // For booleans, "true" or "false".
+ // For strings, contains the default text contents (not escaped in any way).
+ // For bytes, contains the C escaped value. All bytes >= 128 are escaped.
+ // TODO(kenton): Base-64 encode?
+ DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"`
+ // If set, gives the index of a oneof in the containing type's oneof_decl
+ // list. This field is a member of that oneof.
+ OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"`
+	// JSON name of this field. The value is set by the protocol compiler. If the
+ // user has set a "json_name" option on this field, that option's value
+ // will be used. Otherwise, it's deduced from the field's name by converting
+ // it to camelCase.
+ JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"`
+ Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
+ // If true, this is a proto3 "optional". When a proto3 field is optional, it
+ // tracks presence regardless of field type.
+ //
+	// When proto3_optional is true, this field must belong to a oneof to
+ // signal to old proto3 clients that presence is tracked for this field. This
+ // oneof is known as a "synthetic" oneof, and this field must be its sole
+ // member (each proto3 optional field gets its own synthetic oneof). Synthetic
+ // oneofs exist in the descriptor only, and do not generate any API. Synthetic
+ // oneofs must be ordered after all "real" oneofs.
+ //
+ // For message fields, proto3_optional doesn't create any semantic change,
+ // since non-repeated message fields always track presence. However it still
+ // indicates the semantic detail of whether the user wrote "optional" or not.
+ // This can be useful for round-tripping the .proto file. For consistency we
+ // give message fields a synthetic oneof also, even though it is not required
+ // to track presence. This is especially important because the parser can't
+ // tell if a field is a message or an enum, so it must always create a
+ // synthetic oneof.
+ //
+ // Proto2 optional fields do not set this flag, because they already indicate
+ // optional with `LABEL_OPTIONAL`.
+ Proto3Optional *bool `protobuf:"varint,17,opt,name=proto3_optional,json=proto3Optional" json:"proto3_optional,omitempty"`
+}
+
+func (x *FieldDescriptorProto) Reset() {
+ *x = FieldDescriptorProto{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FieldDescriptorProto) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FieldDescriptorProto) ProtoMessage() {}
+
+func (x *FieldDescriptorProto) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FieldDescriptorProto.ProtoReflect.Descriptor instead.
+func (*FieldDescriptorProto) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *FieldDescriptorProto) GetName() string {
+ if x != nil && x.Name != nil {
+ return *x.Name
+ }
+ return ""
+}
+
+func (x *FieldDescriptorProto) GetNumber() int32 {
+ if x != nil && x.Number != nil {
+ return *x.Number
+ }
+ return 0
+}
+
+func (x *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label {
+ if x != nil && x.Label != nil {
+ return *x.Label
+ }
+ return FieldDescriptorProto_LABEL_OPTIONAL
+}
+
+func (x *FieldDescriptorProto) GetType() FieldDescriptorProto_Type {
+ if x != nil && x.Type != nil {
+ return *x.Type
+ }
+ return FieldDescriptorProto_TYPE_DOUBLE
+}
+
+func (x *FieldDescriptorProto) GetTypeName() string {
+ if x != nil && x.TypeName != nil {
+ return *x.TypeName
+ }
+ return ""
+}
+
+func (x *FieldDescriptorProto) GetExtendee() string {
+ if x != nil && x.Extendee != nil {
+ return *x.Extendee
+ }
+ return ""
+}
+
+func (x *FieldDescriptorProto) GetDefaultValue() string {
+ if x != nil && x.DefaultValue != nil {
+ return *x.DefaultValue
+ }
+ return ""
+}
+
+func (x *FieldDescriptorProto) GetOneofIndex() int32 {
+ if x != nil && x.OneofIndex != nil {
+ return *x.OneofIndex
+ }
+ return 0
+}
+
+func (x *FieldDescriptorProto) GetJsonName() string {
+ if x != nil && x.JsonName != nil {
+ return *x.JsonName
+ }
+ return ""
+}
+
+func (x *FieldDescriptorProto) GetOptions() *FieldOptions {
+ if x != nil {
+ return x.Options
+ }
+ return nil
+}
+
+func (x *FieldDescriptorProto) GetProto3Optional() bool {
+ if x != nil && x.Proto3Optional != nil {
+ return *x.Proto3Optional
+ }
+ return false
+}
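+
+// exampleInRealOneof is a hypothetical helper, not part of the generated API:
+// it sketches the distinction drawn in the proto3_optional comment above. A
+// field belongs to a user-declared ("real") oneof when oneof_index is set and
+// the field is not a proto3 optional, whose synthetic oneof exists only in
+// the descriptor.
+func exampleInRealOneof(fd *FieldDescriptorProto) bool {
+ return fd != nil && fd.OneofIndex != nil && !fd.GetProto3Optional()
+}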
+
+// Describes a oneof.
+type OneofDescriptorProto struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
+}
+
+func (x *OneofDescriptorProto) Reset() {
+ *x = OneofDescriptorProto{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *OneofDescriptorProto) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*OneofDescriptorProto) ProtoMessage() {}
+
+func (x *OneofDescriptorProto) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use OneofDescriptorProto.ProtoReflect.Descriptor instead.
+func (*OneofDescriptorProto) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *OneofDescriptorProto) GetName() string {
+ if x != nil && x.Name != nil {
+ return *x.Name
+ }
+ return ""
+}
+
+func (x *OneofDescriptorProto) GetOptions() *OneofOptions {
+ if x != nil {
+ return x.Options
+ }
+ return nil
+}
+
+// Describes an enum type.
+type EnumDescriptorProto struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
+ Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+ // Range of reserved numeric values. Reserved numeric values may not be used
+ // by enum values in the same enum declaration. Reserved ranges may not
+ // overlap.
+ ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
+ // Reserved enum value names, which may not be reused. A given name may only
+ // be reserved once.
+ ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+}
+
+func (x *EnumDescriptorProto) Reset() {
+ *x = EnumDescriptorProto{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EnumDescriptorProto) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EnumDescriptorProto) ProtoMessage() {}
+
+func (x *EnumDescriptorProto) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EnumDescriptorProto.ProtoReflect.Descriptor instead.
+func (*EnumDescriptorProto) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *EnumDescriptorProto) GetName() string {
+ if x != nil && x.Name != nil {
+ return *x.Name
+ }
+ return ""
+}
+
+func (x *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+func (x *EnumDescriptorProto) GetOptions() *EnumOptions {
+ if x != nil {
+ return x.Options
+ }
+ return nil
+}
+
+func (x *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange {
+ if x != nil {
+ return x.ReservedRange
+ }
+ return nil
+}
+
+func (x *EnumDescriptorProto) GetReservedName() []string {
+ if x != nil {
+ return x.ReservedName
+ }
+ return nil
+}
+
+// Describes a value within an enum.
+type EnumValueDescriptorProto struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"`
+ Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+}
+
+func (x *EnumValueDescriptorProto) Reset() {
+ *x = EnumValueDescriptorProto{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EnumValueDescriptorProto) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EnumValueDescriptorProto) ProtoMessage() {}
+
+func (x *EnumValueDescriptorProto) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EnumValueDescriptorProto.ProtoReflect.Descriptor instead.
+func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *EnumValueDescriptorProto) GetName() string {
+ if x != nil && x.Name != nil {
+ return *x.Name
+ }
+ return ""
+}
+
+func (x *EnumValueDescriptorProto) GetNumber() int32 {
+ if x != nil && x.Number != nil {
+ return *x.Number
+ }
+ return 0
+}
+
+func (x *EnumValueDescriptorProto) GetOptions() *EnumValueOptions {
+ if x != nil {
+ return x.Options
+ }
+ return nil
+}
+
+// Describes a service.
+type ServiceDescriptorProto struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"`
+ Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+}
+
+func (x *ServiceDescriptorProto) Reset() {
+ *x = ServiceDescriptorProto{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ServiceDescriptorProto) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServiceDescriptorProto) ProtoMessage() {}
+
+func (x *ServiceDescriptorProto) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServiceDescriptorProto.ProtoReflect.Descriptor instead.
+func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *ServiceDescriptorProto) GetName() string {
+ if x != nil && x.Name != nil {
+ return *x.Name
+ }
+ return ""
+}
+
+func (x *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto {
+ if x != nil {
+ return x.Method
+ }
+ return nil
+}
+
+func (x *ServiceDescriptorProto) GetOptions() *ServiceOptions {
+ if x != nil {
+ return x.Options
+ }
+ return nil
+}
+
+// Describes a method of a service.
+type MethodDescriptorProto struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ // Input and output type names. These are resolved in the same way as
+ // FieldDescriptorProto.type_name, but must refer to a message type.
+ InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"`
+ OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"`
+ Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"`
+ // Identifies if the client streams multiple client messages
+ ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"`
+ // Identifies if the server streams multiple server messages
+ ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"`
+}
+
+// Default values for MethodDescriptorProto fields.
+const (
+ Default_MethodDescriptorProto_ClientStreaming = bool(false)
+ Default_MethodDescriptorProto_ServerStreaming = bool(false)
+)
+
+func (x *MethodDescriptorProto) Reset() {
+ *x = MethodDescriptorProto{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MethodDescriptorProto) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MethodDescriptorProto) ProtoMessage() {}
+
+func (x *MethodDescriptorProto) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MethodDescriptorProto.ProtoReflect.Descriptor instead.
+func (*MethodDescriptorProto) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *MethodDescriptorProto) GetName() string {
+ if x != nil && x.Name != nil {
+ return *x.Name
+ }
+ return ""
+}
+
+func (x *MethodDescriptorProto) GetInputType() string {
+ if x != nil && x.InputType != nil {
+ return *x.InputType
+ }
+ return ""
+}
+
+func (x *MethodDescriptorProto) GetOutputType() string {
+ if x != nil && x.OutputType != nil {
+ return *x.OutputType
+ }
+ return ""
+}
+
+func (x *MethodDescriptorProto) GetOptions() *MethodOptions {
+ if x != nil {
+ return x.Options
+ }
+ return nil
+}
+
+func (x *MethodDescriptorProto) GetClientStreaming() bool {
+ if x != nil && x.ClientStreaming != nil {
+ return *x.ClientStreaming
+ }
+ return Default_MethodDescriptorProto_ClientStreaming
+}
+
+func (x *MethodDescriptorProto) GetServerStreaming() bool {
+ if x != nil && x.ServerStreaming != nil {
+ return *x.ServerStreaming
+ }
+ return Default_MethodDescriptorProto_ServerStreaming
+}
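+
+// exampleStreamingKind is a hypothetical helper, not part of the generated
+// API: it sketches how the two streaming flags combine to describe a method.
+// Both getters fall back to their declared false defaults when unset.
+func exampleStreamingKind(m *MethodDescriptorProto) string {
+ switch {
+ case m.GetClientStreaming() && m.GetServerStreaming():
+ return "bidirectional streaming"
+ case m.GetClientStreaming():
+ return "client streaming"
+ case m.GetServerStreaming():
+ return "server streaming"
+ default:
+ return "unary"
+ }
+}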
+
+type FileOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+ extensionFields protoimpl.ExtensionFields
+
+ // Sets the Java package where classes generated from this .proto will be
+ // placed. By default, the proto package is used, but this is often
+ // inappropriate because proto packages do not normally start with backwards
+ // domain names.
+ JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"`
+ // If set, all the classes from the .proto file are wrapped in a single
+ // outer class with the given name. This applies to both Proto1
+ // (equivalent to the old "--one_java_file" option) and Proto2 (where
+ // a .proto always translates to a single class, but you may want to
+ // explicitly choose the class name).
+ JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"`
+ // If set true, then the Java code generator will generate a separate .java
+ // file for each top-level message, enum, and service defined in the .proto
+ // file. Thus, these types will *not* be nested inside the outer class
+ // named by java_outer_classname. However, the outer class will still be
+ // generated to contain the file's getDescriptor() method as well as any
+ // top-level extensions defined in the file.
+ JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"`
+ // This option does nothing.
+ //
+ // Deprecated: Do not use.
+ JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"`
+ // If set true, then the Java2 code generator will generate code that
+ // throws an exception whenever an attempt is made to assign a non-UTF-8
+ // byte sequence to a string field.
+ // Message reflection will do the same.
+ // However, an extension field still accepts non-UTF-8 byte sequences.
+ // This option has no effect when used with the lite runtime.
+ JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"`
+ OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"`
+ // Sets the Go package where structs generated from this .proto will be
+ // placed. If omitted, the Go package will be derived from the following:
+ // - The basename of the package import path, if provided.
+ // - Otherwise, the package statement in the .proto file, if present.
+ // - Otherwise, the basename of the .proto file, without extension.
+ GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"`
+ // Should generic services be generated in each language? "Generic" services
+ // are not specific to any particular RPC system. They are generated by the
+ // main code generators in each language (without additional plugins).
+ // Generic services were the only kind of service generation supported by
+ // early versions of google.protobuf.
+ //
+ // Generic services are now considered deprecated in favor of using plugins
+ // that generate code specific to your particular RPC system. Therefore,
+ // these default to false. Old code which depends on generic services should
+ // explicitly set them to true.
+ CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"`
+ JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"`
+ PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"`
+ PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"`
+ // Is this file deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for everything in the file, or it will be completely ignored; at the very
+ // least, this is a formalization for deprecating files.
+ Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // Enables the use of arenas for the proto messages in this file. This applies
+ // only to generated classes for C++.
+ CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=1" json:"cc_enable_arenas,omitempty"`
+ // Sets the Objective-C class prefix which is prepended to all Objective-C
+ // generated classes from this .proto. There is no default.
+ ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"`
+ // Namespace for generated classes; defaults to the package.
+ CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"`
+ // By default, Swift generators will take the proto package and CamelCase it,
+ // replacing '.' with underscore, and use that to prefix the types/symbols
+ // defined. When this option is provided, they will use this value instead
+ // to prefix the types/symbols defined.
+ SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"`
+ // Sets the php class prefix which is prepended to all php generated classes
+ // from this .proto. Default is empty.
+ PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"`
+ // Use this option to change the namespace of php generated classes. Default
+ // is empty. When this option is empty, the package name will be used for
+ // determining the namespace.
+ PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"`
+ // Use this option to change the namespace of php generated metadata classes.
+ // Default is empty. When this option is empty, the proto file name will be
+ // used for determining the namespace.
+ PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"`
+ // Use this option to change the package of ruby generated classes. Default
+ // is empty. When this option is not set, the package name will be used for
+ // determining the ruby package.
+ RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"`
+ // The parser stores options it doesn't recognize here.
+ // See the documentation for the "Options" section above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+}
+
+// Default values for FileOptions fields.
+const (
+ Default_FileOptions_JavaMultipleFiles = bool(false)
+ Default_FileOptions_JavaStringCheckUtf8 = bool(false)
+ Default_FileOptions_OptimizeFor = FileOptions_SPEED
+ Default_FileOptions_CcGenericServices = bool(false)
+ Default_FileOptions_JavaGenericServices = bool(false)
+ Default_FileOptions_PyGenericServices = bool(false)
+ Default_FileOptions_PhpGenericServices = bool(false)
+ Default_FileOptions_Deprecated = bool(false)
+ Default_FileOptions_CcEnableArenas = bool(true)
+)
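+
+// exampleFileOptionDefaults is a hypothetical snippet, not part of the
+// generated API: it illustrates that the getters below fall back to these
+// declared defaults when a field is unset, so a zero-value FileOptions
+// reports SPEED for optimize_for and true for cc_enable_arenas.
+func exampleFileOptionDefaults() (FileOptions_OptimizeMode, bool) {
+ var opts FileOptions
+ return opts.GetOptimizeFor(), opts.GetCcEnableArenas()
+}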
+
+func (x *FileOptions) Reset() {
+ *x = FileOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FileOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FileOptions) ProtoMessage() {}
+
+func (x *FileOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FileOptions.ProtoReflect.Descriptor instead.
+func (*FileOptions) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{10}
+}
+
+var extRange_FileOptions = []protoiface.ExtensionRangeV1{
+ {Start: 1000, End: 536870911},
+}
+
+// Deprecated: Use FileOptions.ProtoReflect.Descriptor.ExtensionRanges instead.
+func (*FileOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 {
+ return extRange_FileOptions
+}
+
+func (x *FileOptions) GetJavaPackage() string {
+ if x != nil && x.JavaPackage != nil {
+ return *x.JavaPackage
+ }
+ return ""
+}
+
+func (x *FileOptions) GetJavaOuterClassname() string {
+ if x != nil && x.JavaOuterClassname != nil {
+ return *x.JavaOuterClassname
+ }
+ return ""
+}
+
+func (x *FileOptions) GetJavaMultipleFiles() bool {
+ if x != nil && x.JavaMultipleFiles != nil {
+ return *x.JavaMultipleFiles
+ }
+ return Default_FileOptions_JavaMultipleFiles
+}
+
+// Deprecated: Do not use.
+func (x *FileOptions) GetJavaGenerateEqualsAndHash() bool {
+ if x != nil && x.JavaGenerateEqualsAndHash != nil {
+ return *x.JavaGenerateEqualsAndHash
+ }
+ return false
+}
+
+func (x *FileOptions) GetJavaStringCheckUtf8() bool {
+ if x != nil && x.JavaStringCheckUtf8 != nil {
+ return *x.JavaStringCheckUtf8
+ }
+ return Default_FileOptions_JavaStringCheckUtf8
+}
+
+func (x *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode {
+ if x != nil && x.OptimizeFor != nil {
+ return *x.OptimizeFor
+ }
+ return Default_FileOptions_OptimizeFor
+}
+
+func (x *FileOptions) GetGoPackage() string {
+ if x != nil && x.GoPackage != nil {
+ return *x.GoPackage
+ }
+ return ""
+}
+
+func (x *FileOptions) GetCcGenericServices() bool {
+ if x != nil && x.CcGenericServices != nil {
+ return *x.CcGenericServices
+ }
+ return Default_FileOptions_CcGenericServices
+}
+
+func (x *FileOptions) GetJavaGenericServices() bool {
+ if x != nil && x.JavaGenericServices != nil {
+ return *x.JavaGenericServices
+ }
+ return Default_FileOptions_JavaGenericServices
+}
+
+func (x *FileOptions) GetPyGenericServices() bool {
+ if x != nil && x.PyGenericServices != nil {
+ return *x.PyGenericServices
+ }
+ return Default_FileOptions_PyGenericServices
+}
+
+func (x *FileOptions) GetPhpGenericServices() bool {
+ if x != nil && x.PhpGenericServices != nil {
+ return *x.PhpGenericServices
+ }
+ return Default_FileOptions_PhpGenericServices
+}
+
+func (x *FileOptions) GetDeprecated() bool {
+ if x != nil && x.Deprecated != nil {
+ return *x.Deprecated
+ }
+ return Default_FileOptions_Deprecated
+}
+
+func (x *FileOptions) GetCcEnableArenas() bool {
+ if x != nil && x.CcEnableArenas != nil {
+ return *x.CcEnableArenas
+ }
+ return Default_FileOptions_CcEnableArenas
+}
+
+func (x *FileOptions) GetObjcClassPrefix() string {
+ if x != nil && x.ObjcClassPrefix != nil {
+ return *x.ObjcClassPrefix
+ }
+ return ""
+}
+
+func (x *FileOptions) GetCsharpNamespace() string {
+ if x != nil && x.CsharpNamespace != nil {
+ return *x.CsharpNamespace
+ }
+ return ""
+}
+
+func (x *FileOptions) GetSwiftPrefix() string {
+ if x != nil && x.SwiftPrefix != nil {
+ return *x.SwiftPrefix
+ }
+ return ""
+}
+
+func (x *FileOptions) GetPhpClassPrefix() string {
+ if x != nil && x.PhpClassPrefix != nil {
+ return *x.PhpClassPrefix
+ }
+ return ""
+}
+
+func (x *FileOptions) GetPhpNamespace() string {
+ if x != nil && x.PhpNamespace != nil {
+ return *x.PhpNamespace
+ }
+ return ""
+}
+
+func (x *FileOptions) GetPhpMetadataNamespace() string {
+ if x != nil && x.PhpMetadataNamespace != nil {
+ return *x.PhpMetadataNamespace
+ }
+ return ""
+}
+
+func (x *FileOptions) GetRubyPackage() string {
+ if x != nil && x.RubyPackage != nil {
+ return *x.RubyPackage
+ }
+ return ""
+}
+
+func (x *FileOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if x != nil {
+ return x.UninterpretedOption
+ }
+ return nil
+}
+
+type MessageOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+ extensionFields protoimpl.ExtensionFields
+
+ // Set true to use the old proto1 MessageSet wire format for extensions.
+ // This is provided for backwards-compatibility with the MessageSet wire
+ // format. You should not use this for any other reason: It's less
+ // efficient, has fewer features, and is more complicated.
+ //
+ // The message must be defined exactly as follows:
+ // message Foo {
+ // option message_set_wire_format = true;
+ // extensions 4 to max;
+ // }
+ // Note that the message cannot have any defined fields; MessageSets only
+ // have extensions.
+ //
+ // All extensions of your type must be singular messages; e.g. they cannot
+ // be int32s, enums, or repeated messages.
+ //
+ // Because this is an option, the above two restrictions are not enforced by
+ // the protocol compiler.
+ MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"`
+ // Disables the generation of the standard "descriptor()" accessor, which can
+ // conflict with a field of the same name. This is meant to make migration
+ // from proto1 easier; new code should avoid fields named "descriptor".
+ NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"`
+ // Is this message deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the message, or it will be completely ignored; at the very least,
+ // this is a formalization for deprecating messages.
+ Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // Whether the message is an automatically generated map entry type for the
+ // maps field.
+ //
+ // For maps fields:
+ // map<KeyType, ValueType> map_field = 1;
+ // The parsed descriptor looks like:
+ // message MapFieldEntry {
+ // option map_entry = true;
+ // optional KeyType key = 1;
+ // optional ValueType value = 2;
+ // }
+ // repeated MapFieldEntry map_field = 1;
+ //
+ // Implementations may choose not to generate the map_entry=true message, but
+ // use a native map in the target language to hold the keys and values.
+ // The reflection APIs in such implementations still need to work as
+ // if the field is a repeated message field.
+ //
+ // NOTE: Do not set the option in .proto files. Always use the maps syntax
+ // instead. The option should only be implicitly set by the proto compiler
+ // parser.
+ MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+}
+
+// Default values for MessageOptions fields.
+const (
+ Default_MessageOptions_MessageSetWireFormat = bool(false)
+ Default_MessageOptions_NoStandardDescriptorAccessor = bool(false)
+ Default_MessageOptions_Deprecated = bool(false)
+)
+
+func (x *MessageOptions) Reset() {
+ *x = MessageOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MessageOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MessageOptions) ProtoMessage() {}
+
+func (x *MessageOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MessageOptions.ProtoReflect.Descriptor instead.
+func (*MessageOptions) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{11}
+}
+
+var extRange_MessageOptions = []protoiface.ExtensionRangeV1{
+ {Start: 1000, End: 536870911},
+}
+
+// Deprecated: Use MessageOptions.ProtoReflect.Descriptor.ExtensionRanges instead.
+func (*MessageOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 {
+ return extRange_MessageOptions
+}
+
+func (x *MessageOptions) GetMessageSetWireFormat() bool {
+ if x != nil && x.MessageSetWireFormat != nil {
+ return *x.MessageSetWireFormat
+ }
+ return Default_MessageOptions_MessageSetWireFormat
+}
+
+func (x *MessageOptions) GetNoStandardDescriptorAccessor() bool {
+ if x != nil && x.NoStandardDescriptorAccessor != nil {
+ return *x.NoStandardDescriptorAccessor
+ }
+ return Default_MessageOptions_NoStandardDescriptorAccessor
+}
+
+func (x *MessageOptions) GetDeprecated() bool {
+ if x != nil && x.Deprecated != nil {
+ return *x.Deprecated
+ }
+ return Default_MessageOptions_Deprecated
+}
+
+func (x *MessageOptions) GetMapEntry() bool {
+ if x != nil && x.MapEntry != nil {
+ return *x.MapEntry
+ }
+ return false
+}
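+
+// exampleIsMapEntry is a hypothetical helper, not part of the generated API:
+// as the map_entry comment above explains, the compiler marks auto-generated
+// map entry messages with map_entry = true, so tooling can skip them when
+// walking a message's nested types. Both getters are nil-safe.
+func exampleIsMapEntry(m *DescriptorProto) bool {
+ return m.GetOptions().GetMapEntry()
+}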
+
+func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if x != nil {
+ return x.UninterpretedOption
+ }
+ return nil
+}
+
+type FieldOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+ extensionFields protoimpl.ExtensionFields
+
+ // The ctype option instructs the C++ code generator to use a different
+ // representation of the field than it normally would. See the specific
+ // options below. This option is not yet implemented in the open source
+ // release -- sorry, we'll try to include it in a future version!
+ Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"`
+ // The packed option can be enabled for repeated primitive fields to enable
+ // a more efficient representation on the wire. Rather than repeatedly
+ // writing the tag and type for each element, the entire array is encoded as
+ // a single length-delimited blob. In proto3, only explicitly setting it to
+ // false will avoid using packed encoding.
+ Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"`
+ // The jstype option determines the JavaScript type used for values of the
+ // field. The option is permitted only for 64 bit integral and fixed types
+ // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
+ // is represented as a JavaScript string, which avoids loss of precision that
+ // can happen when a large value is converted to a floating point JavaScript
+ // number.
+ // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
+ // use the JavaScript "number" type. The behavior of the default option
+ // JS_NORMAL is implementation dependent.
+ //
+ // This option is an enum to permit additional types to be added, e.g.
+ // goog.math.Integer.
+ Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"`
+ // Should this field be parsed lazily? Lazy applies only to message-type
+ // fields. It means that when the outer message is initially parsed, the
+ // inner message's contents will not be parsed but instead stored in encoded
+ // form. The inner message will actually be parsed when it is first accessed.
+ //
+ // This is only a hint. Implementations are free to choose whether to use
+ // eager or lazy parsing regardless of the value of this option. However,
+ // setting this option true suggests that the protocol author believes that
+ // using lazy parsing on this field is worth the additional bookkeeping
+ // overhead typically needed to implement it.
+ //
+ // This option does not affect the public interface of any generated code;
+ // all method signatures remain the same. Furthermore, thread-safety of the
+ // interface is not affected by this option; const methods remain safe to
+ // call from multiple threads concurrently, while non-const methods continue
+ // to require exclusive access.
+ //
+ //
+ // Note that implementations may choose not to check required fields within
+ // a lazy sub-message. That is, calling IsInitialized() on the outer message
+ // may return true even if the inner message has missing required fields.
+ // This is necessary because otherwise the inner message would have to be
+ // parsed in order to perform the check, defeating the purpose of lazy
+ // parsing. An implementation which chooses not to check required fields
+ // must be consistent about it. That is, for any particular sub-message, the
+ // implementation must either *always* check its required fields, or *never*
+ // check its required fields, regardless of whether or not the message has
+ // been parsed.
+ Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"`
+ // Is this field deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for accessors, or it will be completely ignored; at the very least, this
+ // is a formalization for deprecating fields.
+ Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // For Google-internal migration only. Do not use.
+ Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+}
+
+// Default values for FieldOptions fields.
+const (
+ Default_FieldOptions_Ctype = FieldOptions_STRING
+ Default_FieldOptions_Jstype = FieldOptions_JS_NORMAL
+ Default_FieldOptions_Lazy = bool(false)
+ Default_FieldOptions_Deprecated = bool(false)
+ Default_FieldOptions_Weak = bool(false)
+)
+
+func (x *FieldOptions) Reset() {
+ *x = FieldOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FieldOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FieldOptions) ProtoMessage() {}
+
+func (x *FieldOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FieldOptions.ProtoReflect.Descriptor instead.
+func (*FieldOptions) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12}
+}
+
+var extRange_FieldOptions = []protoiface.ExtensionRangeV1{
+ {Start: 1000, End: 536870911},
+}
+
+// Deprecated: Use FieldOptions.ProtoReflect.Descriptor.ExtensionRanges instead.
+func (*FieldOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 {
+ return extRange_FieldOptions
+}
+
+func (x *FieldOptions) GetCtype() FieldOptions_CType {
+ if x != nil && x.Ctype != nil {
+ return *x.Ctype
+ }
+ return Default_FieldOptions_Ctype
+}
+
+func (x *FieldOptions) GetPacked() bool {
+ if x != nil && x.Packed != nil {
+ return *x.Packed
+ }
+ return false
+}
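+
+// examplePackedState is a hypothetical helper, not part of the generated API:
+// because an unset packed option differs from an explicit "packed = false" in
+// proto3 (see the comment on Packed above), consumers that care about the
+// distinction must inspect the pointer rather than rely on GetPacked alone.
+func examplePackedState(o *FieldOptions) string {
+ switch {
+ case o == nil || o.Packed == nil:
+ return "unspecified"
+ case *o.Packed:
+ return "packed = true"
+ default:
+ return "packed = false"
+ }
+}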
+
+func (x *FieldOptions) GetJstype() FieldOptions_JSType {
+ if x != nil && x.Jstype != nil {
+ return *x.Jstype
+ }
+ return Default_FieldOptions_Jstype
+}
+
+func (x *FieldOptions) GetLazy() bool {
+ if x != nil && x.Lazy != nil {
+ return *x.Lazy
+ }
+ return Default_FieldOptions_Lazy
+}
+
+func (x *FieldOptions) GetDeprecated() bool {
+ if x != nil && x.Deprecated != nil {
+ return *x.Deprecated
+ }
+ return Default_FieldOptions_Deprecated
+}
+
+func (x *FieldOptions) GetWeak() bool {
+ if x != nil && x.Weak != nil {
+ return *x.Weak
+ }
+ return Default_FieldOptions_Weak
+}
+
+func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if x != nil {
+ return x.UninterpretedOption
+ }
+ return nil
+}
+
+type OneofOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+ extensionFields protoimpl.ExtensionFields
+
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+}
+
+func (x *OneofOptions) Reset() {
+ *x = OneofOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *OneofOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*OneofOptions) ProtoMessage() {}
+
+func (x *OneofOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use OneofOptions.ProtoReflect.Descriptor instead.
+func (*OneofOptions) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{13}
+}
+
+var extRange_OneofOptions = []protoiface.ExtensionRangeV1{
+ {Start: 1000, End: 536870911},
+}
+
+// Deprecated: Use OneofOptions.ProtoReflect.Descriptor.ExtensionRanges instead.
+func (*OneofOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 {
+ return extRange_OneofOptions
+}
+
+func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if x != nil {
+ return x.UninterpretedOption
+ }
+ return nil
+}
+
+type EnumOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+ extensionFields protoimpl.ExtensionFields
+
+ // Set this option to true to allow mapping different tag names to the same
+ // value.
+ AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"`
+ // Is this enum deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the enum, or it will be completely ignored; at the very least, this
+ // is a formalization for deprecating enums.
+ Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+}
+
+// Default values for EnumOptions fields.
+const (
+ Default_EnumOptions_Deprecated = bool(false)
+)
+
+func (x *EnumOptions) Reset() {
+ *x = EnumOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EnumOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EnumOptions) ProtoMessage() {}
+
+func (x *EnumOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EnumOptions.ProtoReflect.Descriptor instead.
+func (*EnumOptions) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{14}
+}
+
+var extRange_EnumOptions = []protoiface.ExtensionRangeV1{
+ {Start: 1000, End: 536870911},
+}
+
+// Deprecated: Use EnumOptions.ProtoReflect.Descriptor.ExtensionRanges instead.
+func (*EnumOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 {
+ return extRange_EnumOptions
+}
+
+func (x *EnumOptions) GetAllowAlias() bool {
+ if x != nil && x.AllowAlias != nil {
+ return *x.AllowAlias
+ }
+ return false
+}
+
+func (x *EnumOptions) GetDeprecated() bool {
+ if x != nil && x.Deprecated != nil {
+ return *x.Deprecated
+ }
+ return Default_EnumOptions_Deprecated
+}
+
+func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if x != nil {
+ return x.UninterpretedOption
+ }
+ return nil
+}
+
+type EnumValueOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+ extensionFields protoimpl.ExtensionFields
+
+ // Is this enum value deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the enum value, or it will be completely ignored; at the very least,
+ // this is a formalization for deprecating enum values.
+ Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+}
+
+// Default values for EnumValueOptions fields.
+const (
+ Default_EnumValueOptions_Deprecated = bool(false)
+)
+
+func (x *EnumValueOptions) Reset() {
+ *x = EnumValueOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EnumValueOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EnumValueOptions) ProtoMessage() {}
+
+func (x *EnumValueOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EnumValueOptions.ProtoReflect.Descriptor instead.
+func (*EnumValueOptions) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{15}
+}
+
+var extRange_EnumValueOptions = []protoiface.ExtensionRangeV1{
+ {Start: 1000, End: 536870911},
+}
+
+// Deprecated: Use EnumValueOptions.ProtoReflect.Descriptor.ExtensionRanges instead.
+func (*EnumValueOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 {
+ return extRange_EnumValueOptions
+}
+
+func (x *EnumValueOptions) GetDeprecated() bool {
+ if x != nil && x.Deprecated != nil {
+ return *x.Deprecated
+ }
+ return Default_EnumValueOptions_Deprecated
+}
+
+func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if x != nil {
+ return x.UninterpretedOption
+ }
+ return nil
+}
+
+type ServiceOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+ extensionFields protoimpl.ExtensionFields
+
+ // Is this service deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the service, or it will be completely ignored; at the very least,
+ // this is a formalization for deprecating services.
+ Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+}
+
+// Default values for ServiceOptions fields.
+const (
+ Default_ServiceOptions_Deprecated = bool(false)
+)
+
+func (x *ServiceOptions) Reset() {
+ *x = ServiceOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ServiceOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServiceOptions) ProtoMessage() {}
+
+func (x *ServiceOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[16]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServiceOptions.ProtoReflect.Descriptor instead.
+func (*ServiceOptions) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{16}
+}
+
+var extRange_ServiceOptions = []protoiface.ExtensionRangeV1{
+ {Start: 1000, End: 536870911},
+}
+
+// Deprecated: Use ServiceOptions.ProtoReflect.Descriptor.ExtensionRanges instead.
+func (*ServiceOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 {
+ return extRange_ServiceOptions
+}
+
+func (x *ServiceOptions) GetDeprecated() bool {
+ if x != nil && x.Deprecated != nil {
+ return *x.Deprecated
+ }
+ return Default_ServiceOptions_Deprecated
+}
+
+func (x *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if x != nil {
+ return x.UninterpretedOption
+ }
+ return nil
+}
+
+type MethodOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+ extensionFields protoimpl.ExtensionFields
+
+ // Is this method deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the method, or it will be completely ignored; at the very least,
+ // this is a formalization for deprecating methods.
+ Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+}
+
+// Default values for MethodOptions fields.
+const (
+ Default_MethodOptions_Deprecated = bool(false)
+ Default_MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN
+)
+
+func (x *MethodOptions) Reset() {
+ *x = MethodOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MethodOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MethodOptions) ProtoMessage() {}
+
+func (x *MethodOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[17]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MethodOptions.ProtoReflect.Descriptor instead.
+func (*MethodOptions) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17}
+}
+
+var extRange_MethodOptions = []protoiface.ExtensionRangeV1{
+ {Start: 1000, End: 536870911},
+}
+
+// Deprecated: Use MethodOptions.ProtoReflect.Descriptor.ExtensionRanges instead.
+func (*MethodOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 {
+ return extRange_MethodOptions
+}
+
+func (x *MethodOptions) GetDeprecated() bool {
+ if x != nil && x.Deprecated != nil {
+ return *x.Deprecated
+ }
+ return Default_MethodOptions_Deprecated
+}
+
+func (x *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel {
+ if x != nil && x.IdempotencyLevel != nil {
+ return *x.IdempotencyLevel
+ }
+ return Default_MethodOptions_IdempotencyLevel
+}
+
+func (x *MethodOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if x != nil {
+ return x.UninterpretedOption
+ }
+ return nil
+}
+
+// A message representing an option the parser does not recognize. This only
+// appears in options protos created by the compiler::Parser class.
+// DescriptorPool resolves these when building Descriptor objects. Therefore,
+// options protos in descriptor objects (e.g. returned by Descriptor::options(),
+// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
+// in them.
+type UninterpretedOption struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"`
+ // The value of the uninterpreted option, in whatever type the tokenizer
+ // identified it as during parsing. Exactly one of these should be set.
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"`
+ PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"`
+ NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"`
+ DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"`
+ StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"`
+ AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"`
+}
+
+func (x *UninterpretedOption) Reset() {
+ *x = UninterpretedOption{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UninterpretedOption) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UninterpretedOption) ProtoMessage() {}
+
+func (x *UninterpretedOption) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[18]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UninterpretedOption.ProtoReflect.Descriptor instead.
+func (*UninterpretedOption) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{18}
+}
+
+func (x *UninterpretedOption) GetName() []*UninterpretedOption_NamePart {
+ if x != nil {
+ return x.Name
+ }
+ return nil
+}
+
+func (x *UninterpretedOption) GetIdentifierValue() string {
+ if x != nil && x.IdentifierValue != nil {
+ return *x.IdentifierValue
+ }
+ return ""
+}
+
+func (x *UninterpretedOption) GetPositiveIntValue() uint64 {
+ if x != nil && x.PositiveIntValue != nil {
+ return *x.PositiveIntValue
+ }
+ return 0
+}
+
+func (x *UninterpretedOption) GetNegativeIntValue() int64 {
+ if x != nil && x.NegativeIntValue != nil {
+ return *x.NegativeIntValue
+ }
+ return 0
+}
+
+func (x *UninterpretedOption) GetDoubleValue() float64 {
+ if x != nil && x.DoubleValue != nil {
+ return *x.DoubleValue
+ }
+ return 0
+}
+
+func (x *UninterpretedOption) GetStringValue() []byte {
+ if x != nil {
+ return x.StringValue
+ }
+ return nil
+}
+
+func (x *UninterpretedOption) GetAggregateValue() string {
+ if x != nil && x.AggregateValue != nil {
+ return *x.AggregateValue
+ }
+ return ""
+}
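+
+// exampleUninterpretedKind is a hypothetical helper, not part of the
+// generated API: since exactly one of the value fields should be set, a
+// chain of nil checks identifies which representation the tokenizer produced.
+func exampleUninterpretedKind(o *UninterpretedOption) string {
+ switch {
+ case o == nil:
+ return "nil"
+ case o.IdentifierValue != nil:
+ return "identifier"
+ case o.PositiveIntValue != nil:
+ return "positive int"
+ case o.NegativeIntValue != nil:
+ return "negative int"
+ case o.DoubleValue != nil:
+ return "double"
+ case o.StringValue != nil:
+ return "string"
+ case o.AggregateValue != nil:
+ return "aggregate"
+ default:
+ return "unset"
+ }
+}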
+
+// Encapsulates information about the original source file from which a
+// FileDescriptorProto was generated.
+type SourceCodeInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A Location identifies a piece of source code in a .proto file which
+ // corresponds to a particular definition. This information is intended
+ // to be useful to IDEs, code indexers, documentation generators, and similar
+ // tools.
+ //
+ // For example, say we have a file like:
+ // message Foo {
+ // optional string foo = 1;
+ // }
+ // Let's look at just the field definition:
+ // optional string foo = 1;
+ // ^ ^^ ^^ ^ ^^^
+ // a bc de f ghi
+ // We have the following locations:
+ // span path represents
+ // [a,i) [ 4, 0, 2, 0 ] The whole field definition.
+ // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional).
+ // [c,d) [ 4, 0, 2, 0, 5 ] The type (string).
+ // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo).
+ // [g,h) [ 4, 0, 2, 0, 3 ] The number (1).
+ //
+ // Notes:
+ // - A location may refer to a repeated field itself (i.e. not to any
+ // particular index within it). This is used whenever a set of elements are
+ // logically enclosed in a single code segment. For example, an entire
+ // extend block (possibly containing multiple extension definitions) will
+ // have an outer location whose path refers to the "extensions" repeated
+ // field without an index.
+ // - Multiple locations may have the same path. This happens when a single
+ // logical declaration is spread out across multiple places. The most
+ // obvious example is the "extend" block again -- there may be multiple
+ // extend blocks in the same scope, each of which will have the same path.
+ // - A location's span is not always a subset of its parent's span. For
+ // example, the "extendee" of an extension declaration appears at the
+ // beginning of the "extend" block and is shared by all extensions within
+ // the block.
+ // - Just because a location's span is a subset of some other location's span
+ // does not mean that it is a descendant. For example, a "group" defines
+ // both a type and a field in a single declaration. Thus, the locations
+ // corresponding to the type and field and their components will overlap.
+ // - Code which tries to interpret locations should probably be designed to
+ // ignore those that it doesn't understand, as more types of locations could
+ // be recorded in the future.
+ Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"`
+}
+
+func (x *SourceCodeInfo) Reset() {
+ *x = SourceCodeInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SourceCodeInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SourceCodeInfo) ProtoMessage() {}
+
+func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SourceCodeInfo.ProtoReflect.Descriptor instead.
+func (*SourceCodeInfo) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19}
+}
+
+func (x *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location {
+ if x != nil {
+ return x.Location
+ }
+ return nil
+}
+
+// Describes the relationship between generated code and its original source
+// file. A GeneratedCodeInfo message is associated with only one generated
+// source file, but may contain references to different source .proto files.
+type GeneratedCodeInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // An Annotation connects some span of text in generated code to an element
+ // of its generating .proto file.
+ Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"`
+}
+
+func (x *GeneratedCodeInfo) Reset() {
+ *x = GeneratedCodeInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GeneratedCodeInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GeneratedCodeInfo) ProtoMessage() {}
+
+func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GeneratedCodeInfo.ProtoReflect.Descriptor instead.
+func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20}
+}
+
+func (x *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation {
+ if x != nil {
+ return x.Annotation
+ }
+ return nil
+}
+
+type DescriptorProto_ExtensionRange struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive.
+ End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive.
+ Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+}
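+
+// Illustrative sketch, not generated code: per the field comments above,
+// Start is inclusive and End is exclusive, so a field number lies inside the
+// range when start <= n < end. The helper name extensionRangeContains is
+// hypothetical.
+func extensionRangeContains(r *DescriptorProto_ExtensionRange, fieldNumber int32) bool {
+ return fieldNumber >= r.GetStart() && fieldNumber < r.GetEnd()
+}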
+
+func (x *DescriptorProto_ExtensionRange) Reset() {
+ *x = DescriptorProto_ExtensionRange{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DescriptorProto_ExtensionRange) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DescriptorProto_ExtensionRange) ProtoMessage() {}
+
+func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DescriptorProto_ExtensionRange.ProtoReflect.Descriptor instead.
+func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{2, 0}
+}
+
+func (x *DescriptorProto_ExtensionRange) GetStart() int32 {
+ if x != nil && x.Start != nil {
+ return *x.Start
+ }
+ return 0
+}
+
+func (x *DescriptorProto_ExtensionRange) GetEnd() int32 {
+ if x != nil && x.End != nil {
+ return *x.End
+ }
+ return 0
+}
+
+func (x *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions {
+ if x != nil {
+ return x.Options
+ }
+ return nil
+}
+
+// Range of reserved tag numbers. Reserved tag numbers may not be used by
+// fields or extension ranges in the same message. Reserved ranges may
+// not overlap.
+type DescriptorProto_ReservedRange struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive.
+ End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive.
+}
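+
+// Illustrative sketch, not generated code: the comment above says reserved
+// ranges may not overlap. Two half-open [Start, End) ranges overlap exactly
+// when each one starts before the other ends. The helper name
+// reservedRangesOverlap is hypothetical.
+func reservedRangesOverlap(a, b *DescriptorProto_ReservedRange) bool {
+ return a.GetStart() < b.GetEnd() && b.GetStart() < a.GetEnd()
+}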
+
+func (x *DescriptorProto_ReservedRange) Reset() {
+ *x = DescriptorProto_ReservedRange{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DescriptorProto_ReservedRange) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DescriptorProto_ReservedRange) ProtoMessage() {}
+
+func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DescriptorProto_ReservedRange.ProtoReflect.Descriptor instead.
+func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{2, 1}
+}
+
+func (x *DescriptorProto_ReservedRange) GetStart() int32 {
+ if x != nil && x.Start != nil {
+ return *x.Start
+ }
+ return 0
+}
+
+func (x *DescriptorProto_ReservedRange) GetEnd() int32 {
+ if x != nil && x.End != nil {
+ return *x.End
+ }
+ return 0
+}
+
+// Range of reserved numeric values. Reserved values may not be used by
+// entries in the same enum. Reserved ranges may not overlap.
+//
+// Note that this is distinct from DescriptorProto.ReservedRange in that it
+// is inclusive such that it can appropriately represent the entire int32
+// domain.
+type EnumDescriptorProto_EnumReservedRange struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive.
+ End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Inclusive.
+}
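+
+// Illustrative sketch, not generated code: unlike DescriptorProto.ReservedRange,
+// End is inclusive here, so the check uses <= and the range can cover the
+// entire int32 domain. The helper name enumReservedRangeContains is
+// hypothetical.
+func enumReservedRangeContains(r *EnumDescriptorProto_EnumReservedRange, value int32) bool {
+ return value >= r.GetStart() && value <= r.GetEnd()
+}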
+
+func (x *EnumDescriptorProto_EnumReservedRange) Reset() {
+ *x = EnumDescriptorProto_EnumReservedRange{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EnumDescriptorProto_EnumReservedRange) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {}
+
+func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EnumDescriptorProto_EnumReservedRange.ProtoReflect.Descriptor instead.
+func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{6, 0}
+}
+
+func (x *EnumDescriptorProto_EnumReservedRange) GetStart() int32 {
+ if x != nil && x.Start != nil {
+ return *x.Start
+ }
+ return 0
+}
+
+func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 {
+ if x != nil && x.End != nil {
+ return *x.End
+ }
+ return 0
+}
+
+// The name of the uninterpreted option. Each string represents a segment in
+// a dot-separated name. is_extension is true iff a segment represents an
+// extension (denoted with parentheses in options specs in .proto files).
+// E.g., { ["foo", false], ["bar.baz", true], ["qux", false] } represents
+// "foo.(bar.baz).qux".
+type UninterpretedOption_NamePart struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"`
+ IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"`
+}
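+
+// Illustrative sketch, not generated code: reconstructs the dot-separated
+// option name described above, wrapping extension segments in parentheses,
+// e.g. {["foo", false], ["bar.baz", true], ["qux", false]} becomes
+// "foo.(bar.baz).qux". The helper name joinOptionName is hypothetical.
+func joinOptionName(parts []*UninterpretedOption_NamePart) string {
+ name := ""
+ for i, p := range parts {
+ if i > 0 {
+ name += "."
+ }
+ if p.GetIsExtension() {
+ name += "(" + p.GetNamePart() + ")"
+ } else {
+ name += p.GetNamePart()
+ }
+ }
+ return name
+}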
+
+func (x *UninterpretedOption_NamePart) Reset() {
+ *x = UninterpretedOption_NamePart{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UninterpretedOption_NamePart) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UninterpretedOption_NamePart) ProtoMessage() {}
+
+func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UninterpretedOption_NamePart.ProtoReflect.Descriptor instead.
+func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{18, 0}
+}
+
+func (x *UninterpretedOption_NamePart) GetNamePart() string {
+ if x != nil && x.NamePart != nil {
+ return *x.NamePart
+ }
+ return ""
+}
+
+func (x *UninterpretedOption_NamePart) GetIsExtension() bool {
+ if x != nil && x.IsExtension != nil {
+ return *x.IsExtension
+ }
+ return false
+}
+
+type SourceCodeInfo_Location struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Identifies which part of the FileDescriptorProto was defined at this
+ // location.
+ //
+ // Each element is a field number or an index. They form a path from
+ // the root FileDescriptorProto to the place where the definition appears. For
+ // example, this path:
+ // [ 4, 3, 2, 7, 1 ]
+ // refers to:
+ // file.message_type(3) // 4, 3
+ // .field(7) // 2, 7
+ // .name() // 1
+ // This is because FileDescriptorProto.message_type has field number 4:
+ // repeated DescriptorProto message_type = 4;
+ // and DescriptorProto.field has field number 2:
+ // repeated FieldDescriptorProto field = 2;
+ // and FieldDescriptorProto.name has field number 1:
+ // optional string name = 1;
+ //
+ // Thus, the above path gives the location of a field name. If we removed
+ // the last element:
+ // [ 4, 3, 2, 7 ]
+ // this path refers to the whole field declaration (from the beginning
+ // of the label to the terminating semicolon).
+ Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
+ // Always has exactly three or four elements: start line, start column,
+ // end line (optional, otherwise assumed same as start line), end column.
+ // These are packed into a single field for efficiency. Note that line
+ // and column numbers are zero-based -- typically you will want to add
+ // 1 to each before displaying to a user.
+ Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"`
+ // If this SourceCodeInfo represents a complete declaration, these are any
+ // comments appearing before and after the declaration which appear to be
+ // attached to the declaration.
+ //
+ // A series of line comments appearing on consecutive lines, with no other
+ // tokens appearing on those lines, will be treated as a single comment.
+ //
+ // leading_detached_comments will keep paragraphs of comments that appear
+ // before (but not connected to) the current element. Each paragraph,
+ // separated by empty lines, will be one comment element in the repeated
+ // field.
+ //
+ // Only the comment content is provided; comment markers (e.g. //) are
+ // stripped out. For block comments, leading whitespace and an asterisk
+ // will be stripped from the beginning of each line other than the first.
+ // Newlines are included in the output.
+ //
+ // Examples:
+ //
+ // optional int32 foo = 1; // Comment attached to foo.
+ // // Comment attached to bar.
+ // optional int32 bar = 2;
+ //
+ // optional string baz = 3;
+ // // Comment attached to baz.
+ // // Another line attached to baz.
+ //
+ // // Comment attached to qux.
+ // //
+ // // Another line attached to qux.
+ // optional double qux = 4;
+ //
+ // // Detached comment for corge. This is not leading or trailing comments
+ // // to qux or corge because there are blank lines separating it from
+ // // both.
+ //
+ // // Detached comment for corge paragraph 2.
+ //
+ // optional string corge = 5;
+ // /* Block comment attached
+ // * to corge. Leading asterisks
+ // * will be removed. */
+ // /* Block comment attached to
+ // * grault. */
+ // optional int32 grault = 6;
+ //
+ // // ignored detached comments.
+ LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"`
+ TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"`
+ LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"`
+}
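+
+// Illustrative sketch, not generated code: per the span comment above, a span
+// has three elements (end line assumed equal to start line) or four, and all
+// values are zero-based. The helper name spanBounds is hypothetical.
+func spanBounds(loc *SourceCodeInfo_Location) (startLine, startCol, endLine, endCol int32) {
+ s := loc.GetSpan()
+ switch len(s) {
+ case 3:
+ return s[0], s[1], s[0], s[2]
+ case 4:
+ return s[0], s[1], s[2], s[3]
+ default:
+ return 0, 0, 0, 0 // malformed span
+ }
+}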
+
+func (x *SourceCodeInfo_Location) Reset() {
+ *x = SourceCodeInfo_Location{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SourceCodeInfo_Location) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SourceCodeInfo_Location) ProtoMessage() {}
+
+func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SourceCodeInfo_Location.ProtoReflect.Descriptor instead.
+func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0}
+}
+
+func (x *SourceCodeInfo_Location) GetPath() []int32 {
+ if x != nil {
+ return x.Path
+ }
+ return nil
+}
+
+func (x *SourceCodeInfo_Location) GetSpan() []int32 {
+ if x != nil {
+ return x.Span
+ }
+ return nil
+}
+
+func (x *SourceCodeInfo_Location) GetLeadingComments() string {
+ if x != nil && x.LeadingComments != nil {
+ return *x.LeadingComments
+ }
+ return ""
+}
+
+func (x *SourceCodeInfo_Location) GetTrailingComments() string {
+ if x != nil && x.TrailingComments != nil {
+ return *x.TrailingComments
+ }
+ return ""
+}
+
+func (x *SourceCodeInfo_Location) GetLeadingDetachedComments() []string {
+ if x != nil {
+ return x.LeadingDetachedComments
+ }
+ return nil
+}
+
+type GeneratedCodeInfo_Annotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Identifies the element in the original source .proto file. This field
+ // is formatted the same as SourceCodeInfo.Location.path.
+ Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
+ // Identifies the filesystem path to the original source .proto.
+ SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"`
+ // Identifies the starting offset in bytes in the generated code
+ // that relates to the identified object.
+ Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"`
+ // Identifies the ending offset in bytes in the generated code that
+ // relates to the identified object. The end offset should be one past
+ // the last relevant byte (so the length of the text = end - begin).
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"`
+}
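+
+// Illustrative sketch, not generated code: Begin is a byte offset into the
+// generated file and End is one past the last relevant byte, so the annotated
+// text has length End - Begin. The helper name annotatedText is hypothetical.
+func annotatedText(generated string, a *GeneratedCodeInfo_Annotation) string {
+ begin, end := int(a.GetBegin()), int(a.GetEnd())
+ if begin < 0 || end > len(generated) || begin > end {
+ return ""
+ }
+ return generated[begin:end]
+}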
+
+func (x *GeneratedCodeInfo_Annotation) Reset() {
+ *x = GeneratedCodeInfo_Annotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GeneratedCodeInfo_Annotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GeneratedCodeInfo_Annotation) ProtoMessage() {}
+
+func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GeneratedCodeInfo_Annotation.ProtoReflect.Descriptor instead.
+func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0}
+}
+
+func (x *GeneratedCodeInfo_Annotation) GetPath() []int32 {
+ if x != nil {
+ return x.Path
+ }
+ return nil
+}
+
+func (x *GeneratedCodeInfo_Annotation) GetSourceFile() string {
+ if x != nil && x.SourceFile != nil {
+ return *x.SourceFile
+ }
+ return ""
+}
+
+func (x *GeneratedCodeInfo_Annotation) GetBegin() int32 {
+ if x != nil && x.Begin != nil {
+ return *x.Begin
+ }
+ return 0
+}
+
+func (x *GeneratedCodeInfo_Annotation) GetEnd() int32 {
+ if x != nil && x.End != nil {
+ return *x.End
+ }
+ return 0
+}
+
+var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor
+
+var file_google_protobuf_descriptor_proto_rawDesc = []byte{
+ 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x22, 0x4d, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69,
+ 0x6c, 0x65, 0x22, 0xe4, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18,
+ 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65,
+ 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65,
+ 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c,
+ 0x69, 0x63, 0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20,
+ 0x03, 0x28, 0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e,
+ 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65,
+ 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e,
+ 0x77, 0x65, 0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43,
+ 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
+ 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54,
+ 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65,
+ 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e,
+ 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+ 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
+ 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36,
+ 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
+ 0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
+ 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43,
+ 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73,
+ 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79,
+ 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74,
+ 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74,
+ 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d,
+ 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52,
+ 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61,
+ 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61,
+ 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63,
+ 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09,
+ 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
+ 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52,
+ 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65,
+ 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72,
+ 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65,
+ 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e,
+ 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78,
+ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d,
+ 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a,
+ 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74,
+ 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05,
+ 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x7c, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
+ 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58,
+ 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f,
+ 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
+ 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80,
+ 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05,
+ 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65,
+ 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c,
+ 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74,
+ 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c,
+ 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+ 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74,
+ 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
+ 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65,
+ 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65,
+ 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66,
+ 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65,
+ 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a,
+ 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73,
+ 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a,
+ 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
+ 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79,
+ 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c,
+ 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41,
+ 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36,
+ 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54,
+ 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54,
+ 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58,
+ 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46,
+ 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45,
+ 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f,
+ 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45,
+ 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45,
+ 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59,
+ 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59,
+ 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54,
+ 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59,
+ 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a,
+ 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10,
+ 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10,
+ 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34,
+ 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c,
+ 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12,
+ 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45,
+ 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50,
+ 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66,
+ 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
+ 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a,
+ 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
+ 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75,
+ 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61,
+ 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d,
+ 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
+ 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67,
+ 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65,
+ 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65,
+ 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73,
+ 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74,
+ 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74,
+ 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65,
+ 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
+ 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45,
+ 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52,
+ 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f,
+ 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64,
+ 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52,
+ 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12,
+ 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65,
+ 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c,
+ 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69,
+ 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10,
+ 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67,
+ 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73,
+ 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91,
+ 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21,
+ 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67,
+ 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f,
+ 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e,
+ 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74,
+ 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08,
+ 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c,
+ 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61,
+ 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61,
+ 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28,
+ 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68,
+ 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f,
+ 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08,
+ 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72,
+ 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c,
+ 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53,
+ 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f,
+ 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18,
+ 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65,
+ 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
+ 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
+ 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a,
+ 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63,
+ 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a,
+ 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69,
+ 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70,
+ 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12,
+ 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
+ 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64,
+ 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f,
+ 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20,
+ 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61,
+ 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a,
+ 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50,
+ 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f,
+ 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78,
+ 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65,
+ 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73,
+ 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70,
+ 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a,
+ 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61,
+ 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e,
+ 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79,
+ 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
+ 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75,
+ 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69,
+ 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a,
+ 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01,
+ 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12,
+ 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10,
+ 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26,
+ 0x10, 0x27, 0x22, 0xd1, 0x02, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+ 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d,
+ 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72,
+ 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61,
+ 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63,
+ 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
+ 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f,
+ 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65,
+ 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f,
+ 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
+ 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
+ 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e,
+ 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a,
+ 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09,
+ 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xe2, 0x03, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64,
+ 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52,
+ 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61,
+ 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b,
+ 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52,
+ 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c,
+ 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
+ 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63,
+ 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73,
+ 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a,
+ 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
+ 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e,
+ 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65,
+ 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75,
+ 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53,
+ 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10,
+ 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43,
+ 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a,
+ 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09,
+ 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a,
+ 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10,
+ 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f,
+ 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75,
+ 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69,
+ 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02,
+ 0x22, 0xc0, 0x01, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61,
+ 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65,
+ 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e,
+ 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65,
+ 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75,
+ 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08,
+ 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72,
+ 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
+ 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12,
+ 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
+ 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
+ 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80,
+ 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65,
+ 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
+ 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58,
+ 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f,
+ 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
+ 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80,
+ 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
+ 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
+ 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11,
+ 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65,
+ 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64,
+ 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65,
+ 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f,
+ 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69,
+ 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12,
+ 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
+ 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
+ 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65,
+ 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a,
+ 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b,
+ 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44,
+ 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49,
+ 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07,
+ 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74,
+ 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41,
+ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55,
+ 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65,
+ 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12,
+ 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69,
+ 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65,
+ 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65,
+ 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62,
+ 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b,
+ 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73,
+ 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28,
+ 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27,
+ 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61,
+ 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50,
+ 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74,
+ 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74,
+ 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73,
+ 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f,
+ 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a,
+ 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74,
+ 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74,
+ 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42,
+ 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61,
+ 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d,
+ 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67,
+ 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74,
+ 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74,
+ 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06,
+ 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74,
+ 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd1, 0x01,
+ 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49,
+ 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02,
+ 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67,
+ 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12,
+ 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e,
+ 0x64, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02,
+ 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e,
+}
+
+var (
+ file_google_protobuf_descriptor_proto_rawDescOnce sync.Once
+ file_google_protobuf_descriptor_proto_rawDescData = file_google_protobuf_descriptor_proto_rawDesc
+)
+
+func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte {
+ file_google_protobuf_descriptor_proto_rawDescOnce.Do(func() {
+ file_google_protobuf_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_descriptor_proto_rawDescData)
+ })
+ return file_google_protobuf_descriptor_proto_rawDescData
+}
+
+var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 6)
+var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 27)
+var file_google_protobuf_descriptor_proto_goTypes = []interface{}{
+ (FieldDescriptorProto_Type)(0), // 0: google.protobuf.FieldDescriptorProto.Type
+ (FieldDescriptorProto_Label)(0), // 1: google.protobuf.FieldDescriptorProto.Label
+ (FileOptions_OptimizeMode)(0), // 2: google.protobuf.FileOptions.OptimizeMode
+ (FieldOptions_CType)(0), // 3: google.protobuf.FieldOptions.CType
+ (FieldOptions_JSType)(0), // 4: google.protobuf.FieldOptions.JSType
+ (MethodOptions_IdempotencyLevel)(0), // 5: google.protobuf.MethodOptions.IdempotencyLevel
+ (*FileDescriptorSet)(nil), // 6: google.protobuf.FileDescriptorSet
+ (*FileDescriptorProto)(nil), // 7: google.protobuf.FileDescriptorProto
+ (*DescriptorProto)(nil), // 8: google.protobuf.DescriptorProto
+ (*ExtensionRangeOptions)(nil), // 9: google.protobuf.ExtensionRangeOptions
+ (*FieldDescriptorProto)(nil), // 10: google.protobuf.FieldDescriptorProto
+ (*OneofDescriptorProto)(nil), // 11: google.protobuf.OneofDescriptorProto
+ (*EnumDescriptorProto)(nil), // 12: google.protobuf.EnumDescriptorProto
+ (*EnumValueDescriptorProto)(nil), // 13: google.protobuf.EnumValueDescriptorProto
+ (*ServiceDescriptorProto)(nil), // 14: google.protobuf.ServiceDescriptorProto
+ (*MethodDescriptorProto)(nil), // 15: google.protobuf.MethodDescriptorProto
+ (*FileOptions)(nil), // 16: google.protobuf.FileOptions
+ (*MessageOptions)(nil), // 17: google.protobuf.MessageOptions
+ (*FieldOptions)(nil), // 18: google.protobuf.FieldOptions
+ (*OneofOptions)(nil), // 19: google.protobuf.OneofOptions
+ (*EnumOptions)(nil), // 20: google.protobuf.EnumOptions
+ (*EnumValueOptions)(nil), // 21: google.protobuf.EnumValueOptions
+ (*ServiceOptions)(nil), // 22: google.protobuf.ServiceOptions
+ (*MethodOptions)(nil), // 23: google.protobuf.MethodOptions
+ (*UninterpretedOption)(nil), // 24: google.protobuf.UninterpretedOption
+ (*SourceCodeInfo)(nil), // 25: google.protobuf.SourceCodeInfo
+ (*GeneratedCodeInfo)(nil), // 26: google.protobuf.GeneratedCodeInfo
+ (*DescriptorProto_ExtensionRange)(nil), // 27: google.protobuf.DescriptorProto.ExtensionRange
+ (*DescriptorProto_ReservedRange)(nil), // 28: google.protobuf.DescriptorProto.ReservedRange
+ (*EnumDescriptorProto_EnumReservedRange)(nil), // 29: google.protobuf.EnumDescriptorProto.EnumReservedRange
+ (*UninterpretedOption_NamePart)(nil), // 30: google.protobuf.UninterpretedOption.NamePart
+ (*SourceCodeInfo_Location)(nil), // 31: google.protobuf.SourceCodeInfo.Location
+ (*GeneratedCodeInfo_Annotation)(nil), // 32: google.protobuf.GeneratedCodeInfo.Annotation
+}
+var file_google_protobuf_descriptor_proto_depIdxs = []int32{
+ 7, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto
+ 8, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto
+ 12, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
+ 14, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto
+ 10, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
+ 16, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions
+ 25, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo
+ 10, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto
+ 10, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
+ 8, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto
+ 12, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
+ 27, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange
+ 11, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto
+ 17, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions
+ 28, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange
+ 24, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 1, // 16: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label
+ 0, // 17: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type
+ 18, // 18: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions
+ 19, // 19: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions
+ 13, // 20: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto
+ 20, // 21: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions
+ 29, // 22: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange
+ 21, // 23: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions
+ 15, // 24: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto
+ 22, // 25: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions
+ 23, // 26: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions
+ 2, // 27: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode
+ 24, // 28: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 24, // 29: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 3, // 30: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType
+ 4, // 31: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType
+ 24, // 32: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 24, // 33: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 24, // 34: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 24, // 35: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 24, // 36: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 5, // 37: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
+ 24, // 38: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 30, // 39: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
+ 31, // 40: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
+ 32, // 41: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
+ 9, // 42: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
+ 43, // [43:43] is the sub-list for method output_type
+ 43, // [43:43] is the sub-list for method input_type
+ 43, // [43:43] is the sub-list for extension type_name
+ 43, // [43:43] is the sub-list for extension extendee
+ 0, // [0:43] is the sub-list for field type_name
+}
+
+func init() { file_google_protobuf_descriptor_proto_init() }
+func file_google_protobuf_descriptor_proto_init() {
+ if File_google_protobuf_descriptor_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FileDescriptorSet); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FileDescriptorProto); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DescriptorProto); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ExtensionRangeOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ case 3:
+ return &v.extensionFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FieldDescriptorProto); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*OneofDescriptorProto); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EnumDescriptorProto); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EnumValueDescriptorProto); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ServiceDescriptorProto); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MethodDescriptorProto); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FileOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ case 3:
+ return &v.extensionFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MessageOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ case 3:
+ return &v.extensionFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FieldOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ case 3:
+ return &v.extensionFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*OneofOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ case 3:
+ return &v.extensionFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EnumOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ case 3:
+ return &v.extensionFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EnumValueOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ case 3:
+ return &v.extensionFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ServiceOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ case 3:
+ return &v.extensionFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MethodOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ case 3:
+ return &v.extensionFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UninterpretedOption); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SourceCodeInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GeneratedCodeInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DescriptorProto_ExtensionRange); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DescriptorProto_ReservedRange); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EnumDescriptorProto_EnumReservedRange); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UninterpretedOption_NamePart); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SourceCodeInfo_Location); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GeneratedCodeInfo_Annotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc,
+ NumEnums: 6,
+ NumMessages: 27,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_protobuf_descriptor_proto_goTypes,
+ DependencyIndexes: file_google_protobuf_descriptor_proto_depIdxs,
+ EnumInfos: file_google_protobuf_descriptor_proto_enumTypes,
+ MessageInfos: file_google_protobuf_descriptor_proto_msgTypes,
+ }.Build()
+ File_google_protobuf_descriptor_proto = out.File
+ file_google_protobuf_descriptor_proto_rawDesc = nil
+ file_google_protobuf_descriptor_proto_goTypes = nil
+ file_google_protobuf_descriptor_proto_depIdxs = nil
+}
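
The init above builds the descriptor for google/protobuf/descriptor.proto and registers it with the protobuf runtime's global registries. A minimal sketch (not part of the generated file; package main and the loop are illustrative assumptions) of how downstream code could resolve that descriptor again through protoregistry once the package has been imported for its registration side effect:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"
	_ "google.golang.org/protobuf/types/descriptorpb" // init() registers the file descriptor
)

func main() {
	// Look up the FileDescriptor registered under its proto path.
	fd, err := protoregistry.GlobalFiles.FindFileByPath("google/protobuf/descriptor.proto")
	if err != nil {
		panic(err)
	}
	// Walk the top-level messages declared in descriptor.proto.
	msgs := fd.Messages()
	for i := 0; i < msgs.Len(); i++ {
		fmt.Println(msgs.Get(i).FullName())
	}
}
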
diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
new file mode 100644
index 0000000..c9ae921
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
@@ -0,0 +1,390 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/timestamp.proto
+
+// Package timestamppb contains generated types for google/protobuf/timestamp.proto.
+//
+// The Timestamp message represents a timestamp,
+// an instant in time since the Unix epoch (January 1st, 1970).
+//
+//
+// Conversion to a Go Time
+//
+// The AsTime method can be used to convert a Timestamp message to a
+// standard Go time.Time value in UTC:
+//
+// t := ts.AsTime()
+// ... // make use of t as a time.Time
+//
+// Converting to a time.Time is a common operation so that the extensive
+// set of time-based operations provided by the time package can be leveraged.
+// See https://golang.org/pkg/time for more information.
+//
+// The AsTime method performs the conversion on a best-effort basis. Timestamps
+// with denormal values (e.g., nanoseconds outside the range 0 to 999,999,999, inclusive)
+// are normalized during the conversion to a time.Time. To manually check for
+// invalid Timestamps per the documented limitations in timestamp.proto,
+// additionally call the CheckValid method:
+//
+// if err := ts.CheckValid(); err != nil {
+// ... // handle error
+// }
+//
+//
+// Conversion from a Go Time
+//
+// The timestamppb.New function can be used to construct a Timestamp message
+// from a standard Go time.Time value:
+//
+// ts := timestamppb.New(t)
+// ... // make use of ts as a *timestamppb.Timestamp
+//
+// In order to construct a Timestamp representing the current time, use Now:
+//
+// ts := timestamppb.Now()
+// ... // make use of ts as a *timestamppb.Timestamp
+//
+package timestamppb
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+ time "time"
+)
+
+// A Timestamp represents a point in time independent of any time zone or local
+// calendar, encoded as a count of seconds and fractions of seconds at
+// nanosecond resolution. The count is relative to an epoch at UTC midnight on
+// January 1, 1970, in the proleptic Gregorian calendar which extends the
+// Gregorian calendar backwards to year one.
+//
+// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap
+// second table is needed for interpretation, using a [24-hour linear
+// smear](https://developers.google.com/time/smear).
+//
+// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By
+// restricting to that range, we ensure that we can convert to and from [RFC
+// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings.
+//
+// # Examples
+//
+// Example 1: Compute Timestamp from POSIX `time()`.
+//
+// Timestamp timestamp;
+// timestamp.set_seconds(time(NULL));
+// timestamp.set_nanos(0);
+//
+// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
+//
+// struct timeval tv;
+// gettimeofday(&tv, NULL);
+//
+// Timestamp timestamp;
+// timestamp.set_seconds(tv.tv_sec);
+// timestamp.set_nanos(tv.tv_usec * 1000);
+//
+// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
+//
+// FILETIME ft;
+// GetSystemTimeAsFileTime(&ft);
+// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+//
+// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
+// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
+// Timestamp timestamp;
+// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+//
+// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
+//
+// long millis = System.currentTimeMillis();
+//
+// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+// .setNanos((int) ((millis % 1000) * 1000000)).build();
+//
+//
+// Example 5: Compute Timestamp from Java `Instant.now()`.
+//
+// Instant now = Instant.now();
+//
+// Timestamp timestamp =
+// Timestamp.newBuilder().setSeconds(now.getEpochSecond())
+// .setNanos(now.getNano()).build();
+//
+//
+// Example 6: Compute Timestamp from current time in Python.
+//
+// timestamp = Timestamp()
+// timestamp.GetCurrentTime()
+//
+// # JSON Mapping
+//
+// In JSON format, the Timestamp type is encoded as a string in the
+// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
+// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
+// where {year} is always expressed using four digits while {month}, {day},
+// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
+// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
+// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
+// is required. A proto3 JSON serializer should always use UTC (as indicated by
+// "Z") when printing the Timestamp type and a proto3 JSON parser should be
+// able to accept both UTC and other timezones (as indicated by an offset).
+//
+// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
+// 01:30 UTC on January 15, 2017.
+//
+// In JavaScript, one can convert a Date object to this format using the
+// standard
+// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
+// method. In Python, a standard `datetime.datetime` object can be converted
+// to this format using
+// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
+// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
+// the Joda Time's [`ISODateTimeFormat.dateTime()`](
+// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
+// ) to obtain a formatter capable of generating timestamps in this format.
+//
+//
+type Timestamp struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Represents seconds of UTC time since Unix epoch
+ // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+ // 9999-12-31T23:59:59Z inclusive.
+ Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+ // Non-negative fractions of a second at nanosecond resolution. Negative
+ // second values with fractions must still have non-negative nanos values
+ // that count forward in time. Must be from 0 to 999,999,999
+ // inclusive.
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+}
+
+// Now constructs a new Timestamp from the current time.
+func Now() *Timestamp {
+ return New(time.Now())
+}
+
+// New constructs a new Timestamp from the provided time.Time.
+func New(t time.Time) *Timestamp {
+ return &Timestamp{Seconds: int64(t.Unix()), Nanos: int32(t.Nanosecond())}
+}
+
+// AsTime converts x to a time.Time.
+func (x *Timestamp) AsTime() time.Time {
+ return time.Unix(int64(x.GetSeconds()), int64(x.GetNanos())).UTC()
+}
+
+// IsValid reports whether the timestamp is valid.
+// It is equivalent to CheckValid == nil.
+func (x *Timestamp) IsValid() bool {
+ return x.check() == 0
+}
+
+// CheckValid returns an error if the timestamp is invalid.
+// In particular, it checks whether the value represents a date that is
+// in the range of 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive.
+// An error is reported for a nil Timestamp.
+func (x *Timestamp) CheckValid() error {
+ switch x.check() {
+ case invalidNil:
+ return protoimpl.X.NewError("invalid nil Timestamp")
+ case invalidUnderflow:
+ return protoimpl.X.NewError("timestamp (%v) before 0001-01-01", x)
+ case invalidOverflow:
+ return protoimpl.X.NewError("timestamp (%v) after 9999-12-31", x)
+ case invalidNanos:
+ return protoimpl.X.NewError("timestamp (%v) has out-of-range nanos", x)
+ default:
+ return nil
+ }
+}
+
+const (
+ _ = iota
+ invalidNil
+ invalidUnderflow
+ invalidOverflow
+ invalidNanos
+)
+
+func (x *Timestamp) check() uint {
+ const minTimestamp = -62135596800 // Seconds between 1970-01-01T00:00:00Z and 0001-01-01T00:00:00Z, inclusive
+ const maxTimestamp = +253402300799 // Seconds between 1970-01-01T00:00:00Z and 9999-12-31T23:59:59Z, inclusive
+ secs := x.GetSeconds()
+ nanos := x.GetNanos()
+ switch {
+ case x == nil:
+ return invalidNil
+ case secs < minTimestamp:
+ return invalidUnderflow
+ case secs > maxTimestamp:
+ return invalidOverflow
+ case nanos < 0 || nanos >= 1e9:
+ return invalidNanos
+ default:
+ return 0
+ }
+}
+
+func (x *Timestamp) Reset() {
+ *x = Timestamp{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Timestamp) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Timestamp) ProtoMessage() {}
+
+func (x *Timestamp) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Timestamp.ProtoReflect.Descriptor instead.
+func (*Timestamp) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_timestamp_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Timestamp) GetSeconds() int64 {
+ if x != nil {
+ return x.Seconds
+ }
+ return 0
+}
+
+func (x *Timestamp) GetNanos() int32 {
+ if x != nil {
+ return x.Nanos
+ }
+ return 0
+}
+
+var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor
+
+var file_google_protobuf_timestamp_proto_rawDesc = []byte{
+ 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x22, 0x3b, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12,
+ 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e,
+ 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42,
+ 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+ 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77,
+ 0x6e, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x70, 0x62, 0xf8, 0x01, 0x01,
+ 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f,
+ 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_protobuf_timestamp_proto_rawDescOnce sync.Once
+ file_google_protobuf_timestamp_proto_rawDescData = file_google_protobuf_timestamp_proto_rawDesc
+)
+
+func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte {
+ file_google_protobuf_timestamp_proto_rawDescOnce.Do(func() {
+ file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_timestamp_proto_rawDescData)
+ })
+ return file_google_protobuf_timestamp_proto_rawDescData
+}
+
+var file_google_protobuf_timestamp_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_protobuf_timestamp_proto_goTypes = []interface{}{
+ (*Timestamp)(nil), // 0: google.protobuf.Timestamp
+}
+var file_google_protobuf_timestamp_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_google_protobuf_timestamp_proto_init() }
+func file_google_protobuf_timestamp_proto_init() {
+ if File_google_protobuf_timestamp_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Timestamp); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_protobuf_timestamp_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_protobuf_timestamp_proto_goTypes,
+ DependencyIndexes: file_google_protobuf_timestamp_proto_depIdxs,
+ MessageInfos: file_google_protobuf_timestamp_proto_msgTypes,
+ }.Build()
+ File_google_protobuf_timestamp_proto = out.File
+ file_google_protobuf_timestamp_proto_rawDesc = nil
+ file_google_protobuf_timestamp_proto_goTypes = nil
+ file_google_protobuf_timestamp_proto_depIdxs = nil
+}
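
The timestamppb API defined above (New, Now, AsTime, CheckValid, IsValid) is typically used as in the following minimal sketch (not part of the generated file; package main and the sample values are illustrative assumptions):

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Build a Timestamp from a Go time.Time and convert it back to time.Time in UTC.
	ts := timestamppb.New(time.Date(2021, time.October, 1, 12, 0, 0, 500, time.UTC))
	fmt.Println(ts.AsTime()) // 2021-10-01 12:00:00.0000005 +0000 UTC

	// Now() captures the current instant; CheckValid reports nil/range/nanos errors.
	if err := timestamppb.Now().CheckValid(); err != nil {
		fmt.Println("invalid timestamp:", err)
	}

	// A nil Timestamp is reported as invalid rather than panicking.
	var nilTS *timestamppb.Timestamp
	fmt.Println(nilTS.IsValid()) // false
}
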
diff --git a/vendor/modules.txt b/vendor/modules.txt
new file mode 100644
index 0000000..f95f227
--- /dev/null
+++ b/vendor/modules.txt
@@ -0,0 +1,74 @@
+# github.com/cyrilix/robocar-base v0.1.4
+## explicit; go 1.17
+github.com/cyrilix/robocar-base/cli
+github.com/cyrilix/robocar-base/service
+# github.com/cyrilix/robocar-protobuf/go v1.0.3
+## explicit; go 1.17
+github.com/cyrilix/robocar-protobuf/go/events
+# github.com/eclipse/paho.mqtt.golang v1.3.5
+## explicit; go 1.14
+github.com/eclipse/paho.mqtt.golang
+github.com/eclipse/paho.mqtt.golang/packets
+# github.com/golang/protobuf v1.5.2
+## explicit; go 1.9
+github.com/golang/protobuf/proto
+github.com/golang/protobuf/ptypes/timestamp
+# github.com/gorilla/websocket v1.4.2
+## explicit; go 1.12
+github.com/gorilla/websocket
+# github.com/mattn/go-pointer v0.0.1
+## explicit
+github.com/mattn/go-pointer
+# github.com/mattn/go-tflite v1.0.2-0.20210524070808-ba19dc99415b
+## explicit; go 1.13
+github.com/mattn/go-tflite
+github.com/mattn/go-tflite/delegates
+github.com/mattn/go-tflite/delegates/edgetpu
+# go.uber.org/atomic v1.7.0
+## explicit; go 1.13
+go.uber.org/atomic
+# go.uber.org/multierr v1.6.0
+## explicit; go 1.12
+go.uber.org/multierr
+# go.uber.org/zap v1.19.1
+## explicit; go 1.13
+go.uber.org/zap
+go.uber.org/zap/buffer
+go.uber.org/zap/internal/bufferpool
+go.uber.org/zap/internal/color
+go.uber.org/zap/internal/exit
+go.uber.org/zap/zapcore
+# golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4
+## explicit; go 1.11
+golang.org/x/net/internal/socks
+golang.org/x/net/proxy
+# google.golang.org/protobuf v1.26.0
+## explicit; go 1.9
+google.golang.org/protobuf/encoding/prototext
+google.golang.org/protobuf/encoding/protowire
+google.golang.org/protobuf/internal/descfmt
+google.golang.org/protobuf/internal/descopts
+google.golang.org/protobuf/internal/detrand
+google.golang.org/protobuf/internal/encoding/defval
+google.golang.org/protobuf/internal/encoding/messageset
+google.golang.org/protobuf/internal/encoding/tag
+google.golang.org/protobuf/internal/encoding/text
+google.golang.org/protobuf/internal/errors
+google.golang.org/protobuf/internal/filedesc
+google.golang.org/protobuf/internal/filetype
+google.golang.org/protobuf/internal/flags
+google.golang.org/protobuf/internal/genid
+google.golang.org/protobuf/internal/impl
+google.golang.org/protobuf/internal/order
+google.golang.org/protobuf/internal/pragma
+google.golang.org/protobuf/internal/set
+google.golang.org/protobuf/internal/strs
+google.golang.org/protobuf/internal/version
+google.golang.org/protobuf/proto
+google.golang.org/protobuf/reflect/protodesc
+google.golang.org/protobuf/reflect/protoreflect
+google.golang.org/protobuf/reflect/protoregistry
+google.golang.org/protobuf/runtime/protoiface
+google.golang.org/protobuf/runtime/protoimpl
+google.golang.org/protobuf/types/descriptorpb
+google.golang.org/protobuf/types/known/timestamppb