diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..45d6168
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,33 @@
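+# Go toolchain image; its Go installation is copied into the build stage below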
+FROM golang:alpine as gobuilder
+
+
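+# Build stage: OpenCV 4.2 build image plus the Go toolchain, used to compile the binary with CGO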
+FROM cyrilix/opencv-buildstage:4.2.0 as builder
+
+LABEL maintainer="Cyrille Nofficial"
+
+COPY --from=gobuilder /usr/local/go /usr/local/go
+ENV GOPATH /go
+ENV PATH /usr/local/go/bin:$GOPATH/bin:$PATH
+
+RUN mkdir -p "/src $GOPATH/src" "$GOPATH/bin" && chmod -R 777 "$GOPATH"
+
+ENV PKG_CONFIG_PATH /usr/local/lib/pkgconfig:/usr/local/lib64/pkgconfig
+ENV CGO_CPPFLAGS -I/usr/local/include
+ENV CGO_CXXFLAGS "--std=c++1z"
+
+WORKDIR /src
+ADD . .
+
+RUN CGO_LDFLAGS="$(pkg-config --libs opencv4)" \
+    CGO_ENABLED=1 CGO_CPPFLAGS=${CGO_CPPFLAGS} CGO_CXXFLAGS=${CGO_CXXFLAGS} GOOS=${GOOS} GOARCH=${GOARCH} GOARM=${GOARM} go build -mod vendor -a ./cmd/rc-road/
+
+
+
+
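+# Runtime stage: minimal image with only the OpenCV runtime libraries and the compiled binary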
+FROM cyrilix/opencv-runtime:4.2.0
+
+ENV LD_LIBRARY_PATH /usr/local/lib:/usr/local/lib64
+
+USER 1234
+COPY --from=builder /src/rc-road /go/bin/rc-road
+ENTRYPOINT ["/go/bin/rc-road"]
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..088a7ac
--- /dev/null
+++ b/README.md
@@ -0,0 +1,10 @@
+# robocar-road
+
+Process camera frames to detect road contours
+
+## Docker
+
+To build images, run:
+```bash
+docker buildx build . --platform linux/arm/v7,linux/arm64,linux/amd64 -t cyrilix/robocar-road
+```
diff --git a/cmd/rc-road/rc-road.go b/cmd/rc-road/rc-road.go
new file mode 100644
index 0000000..790552d
--- /dev/null
+++ b/cmd/rc-road/rc-road.go
@@ -0,0 +1,56 @@
+package main
+
+import (
+ "flag"
+ "github.com/cyrilix/robocar-base/cli"
+ "github.com/cyrilix/robocar-road/part"
+ "log"
+ "os"
+)
+
+const (
+ DefaultClientId = "robocar-road"
+ DefaultHorizon = 20
+)
+
+func main() {
+ var mqttBroker, username, password, clientId string
+ var cameraTopic, roadTopic string
+ var horizon int
+
+ err := cli.SetIntDefaultValueFromEnv(&horizon, "HORIZON", DefaultHorizon)
+ if err != nil {
+ log.Printf("unable to parse horizon value arg: %v", err)
+ }
+
+ mqttQos := cli.InitIntFlag("MQTT_QOS", 0)
+ _, mqttRetain := os.LookupEnv("MQTT_RETAIN")
+
+ cli.InitMqttFlags(DefaultClientId, &mqttBroker, &username, &password, &clientId, &mqttQos, &mqttRetain)
+
+ flag.StringVar(&roadTopic, "mqtt-topic-road", os.Getenv("MQTT_TOPIC_ROAD"), "Mqtt topic to publish road detection result, use MQTT_TOPIC_ROAD if args not set")
+ flag.StringVar(&cameraTopic, "mqtt-topic-camera", os.Getenv("MQTT_TOPIC_CAMERA"), "Mqtt topic that contains camera frame values, use MQTT_TOPIC_CAMERA if args not set")
+ flag.IntVar(&horizon, "horizon", horizon, "Limit horizon in pixels from top, use HORIZON if args not set")
+
+ flag.Parse()
+ if len(os.Args) <= 1 {
+ flag.PrintDefaults()
+ os.Exit(1)
+ }
+
+ client, err := cli.Connect(mqttBroker, username, password, clientId)
+ if err != nil {
+ log.Fatalf("unable to connect to mqtt bus: %v", err)
+ }
+ defer client.Disconnect(50)
+
+ p := part.NewRoadPart(client, horizon, cameraTopic, roadTopic)
+ defer p.Stop()
+
+ cli.HandleExit(p)
+
+ err = p.Start()
+ if err != nil {
+ log.Fatalf("unable to start service: %v", err)
+ }
+}
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..e1d97ab
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,15 @@
+module github.com/cyrilix/robocar-road
+
+go 1.13
+
+require (
+ github.com/cyrilix/robocar-base v0.0.0-20200103000136-b08c9be9a69a
+ github.com/cyrilix/robocar-protobuf/go v0.0.0-20200103235248-776649d250ff
+ github.com/eclipse/paho.mqtt.golang v1.2.0
+ github.com/golang/protobuf v1.3.2
+ github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
+ github.com/sirupsen/logrus v1.4.2
+ gocv.io/x/gocv v0.22.0
+ golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 // indirect
+ golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8 // indirect
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..696a3a7
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,86 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
+github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/cyrilix/robocar-base v0.0.0-20200103000136-b08c9be9a69a h1:Gznzd8APE9C+rkN3ePlKajYhgmNaCO7aJQ2WeNvoSt8=
+github.com/cyrilix/robocar-base v0.0.0-20200103000136-b08c9be9a69a/go.mod h1:jRQ+lJAHKkdcjwS5vt2t5LX2zM+bxX+gKffixkc2lbA=
+github.com/cyrilix/robocar-protobuf/go v0.0.0-20200103235248-776649d250ff h1:o92c28z6MCBh+WohNO4pkpKHumWcYjnrg4iW9U79N7s=
+github.com/cyrilix/robocar-protobuf/go v0.0.0-20200103235248-776649d250ff/go.mod h1:I+i6Ujns+4DmRmmUej56MItlmT4K2zlMZ35vZrHEfQ4=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v0.7.3-0.20190506211059-b20a14b54661/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/go-redis/redis v6.15.6+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
+github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/testcontainers/testcontainers-go v0.0.9/go.mod h1:0Qe9qqjNZgxHzzdHPWwmQ2D49FFO7920hLdJ4yUJXJI=
+gocv.io/x/gocv v0.22.0 h1:pv+tcjcoW/xsaM/nfrzMK5PEEHYe2ND/LQRoyBpgjsg=
+gocv.io/x/gocv v0.22.0/go.mod h1:7Ju5KbPo+R85evmlhhKPVMwXtgDRNX/PtfVfbToSrLU=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181228144115-9a3f9b0469bb/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8 h1:JA8d3MPx/IToSyXZG/RhwYEtfrKO1Fxrqe8KrkiLXKM=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180810170437-e96c4e24768d/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gotest.tools v0.0.0-20181223230014-1083505acf35/go.mod h1:R//lfYlUuTOTfblYI3lGoAAAebUdzjvbmQsuB7Ykd90=
+honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/part/opencv.go b/part/opencv.go
new file mode 100644
index 0000000..912a563
--- /dev/null
+++ b/part/opencv.go
@@ -0,0 +1,176 @@
+package part
+
+import (
+ "github.com/cyrilix/robocar-protobuf/go/events"
+ log "github.com/sirupsen/logrus"
+ "gocv.io/x/gocv"
+ "image"
+ "image/color"
+)
+
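+// FILLED is the thickness value used by OpenCV to draw filled shapes.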
+const FILLED = -1
+
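+// RoadDetector extracts the road contour from a grayscale camera frame and fits an ellipse on it to estimate the road position.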
+type RoadDetector struct {
+ kernelSize int
+ morphoIterations int
+ approxPolyEpsilonFactor float64
+ previousBoundingBox *image.Rectangle
+ previousRoad *[]image.Point
+ thresholdLowerBound, thresholdUpperBound gocv.Mat
+}
+
+func (rd *RoadDetector) Close() error {
+	var err error
+ if err1 := rd.thresholdLowerBound.Close(); err1 != nil {
+ log.Errorf("unable to close thresholdLowerBound resource: %v", err1)
+ err = err1
+ }
+ if err2 := rd.thresholdUpperBound.Close(); err2 != nil {
+ log.Errorf("unable to close thresholdUpperBound resource: %v", err2)
+ err = err2
+ }
+ return err
+}
+
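+// NewRoadDetector returns a RoadDetector configured with default kernel size, morphological iterations, polygon approximation factor and threshold bounds.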
+func NewRoadDetector() *RoadDetector {
+
+ return &RoadDetector{
+ kernelSize: 4,
+ morphoIterations: 3,
+ approxPolyEpsilonFactor: 0.01,
+ thresholdLowerBound: gocv.NewMatFromScalar(gocv.NewScalar(120.0, 120.0, 120.0, 120.0), gocv.MatTypeCV8U),
+ thresholdUpperBound: gocv.NewMatFromScalar(gocv.NewScalar(250.0, 250.0, 250.0, 250.0), gocv.MatTypeCV8U),
+ }
+}
+
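+// DetectRoadContour applies dilate/erode passes and an inverted binary threshold to the grayscale frame, masks the area above horizonRow and returns the simplified road contour.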
+func (rd *RoadDetector) DetectRoadContour(imgGray *gocv.Mat, horizonRow int) *[]image.Point {
+
+	kernel := gocv.NewMatWithSizeFromScalar(gocv.NewScalar(1, 1, 1, 1), rd.kernelSize, rd.kernelSize, gocv.MatTypeCV8U)
+	defer func() {
+		if err := kernel.Close(); err != nil {
+			log.Warnf("unable to close kernel resource: %v", err)
+		}
+	}()
+
+	img := imgGray.Clone()
+	defer func() {
+		if err := img.Close(); err != nil {
+			log.Warnf("unable to close mat resource: %v", err)
+		}
+	}()
+
+ for i := rd.morphoIterations; i > 0; i-- {
+ gocv.Dilate(img, &img, kernel)
+ }
+ for i := rd.morphoIterations; i > 0; i-- {
+ gocv.Erode(img, &img, kernel)
+ }
+ gocv.Dilate(img, &img, kernel)
+
+ gocv.Threshold(img, &img, 180, 255, gocv.ThresholdBinaryInv)
+
+	// Mask everything above the horizon with a filled black rectangle
+	rectangle := image.Rect(0, 0, imgGray.Cols(), horizonRow)
+	gocv.Rectangle(&img, rectangle, color.RGBA{0, 0, 0, 0}, FILLED)
+
+ return rd.detectRoadContour(&img)
+}
+
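+// detectRoadContour keeps the external contour with the longest perimeter and simplifies it with ApproxPolyDP.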
+func (rd *RoadDetector) detectRoadContour(imgInversed *gocv.Mat) *[]image.Point {
+
+ var (
+ epsilon float64
+ cntr []image.Point
+ )
+
+ cntrs := gocv.FindContours(*imgInversed, gocv.RetrievalExternal, gocv.ChainApproxSimple)
+
+ if len(cntrs) == 0 {
+ emptyContours := make([]image.Point, 0)
+ return &emptyContours
+ } else if len(cntrs) == 1 {
+ epsilon = rd.approxPolyEpsilonFactor * gocv.ArcLength(cntrs[0], true)
+ cntr = cntrs[0]
+ } else {
+ // Search biggest contour
+ peris := make([]float64, len(cntrs))
+ maxArcIdx := 0
+ maxArcValue := 0.
+		for i, c := range cntrs {
+			peri := gocv.ArcLength(c, true)
+			peris[i] = peri
+			if peri > maxArcValue {
+				maxArcValue = peri
+				maxArcIdx = i
+			}
+		}
+		cntr = cntrs[maxArcIdx]
+		epsilon = rd.approxPolyEpsilonFactor * peris[maxArcIdx]
+ }
+ approx := gocv.ApproxPolyDP(cntr, epsilon, true)
+ return &approx
+}
+
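+// EllipseNotFound is returned when the contour contains too few points to fit an ellipse.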
+var EllipseNotFound = events.Ellipse{Confidence: 0.}
+
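+// ComputeEllipsis fits an ellipse on the road contour and attaches a confidence score computed from the position of the ellipse center.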
+func (rd *RoadDetector) ComputeEllipsis(road *[]image.Point) *events.Ellipse {
+ if len(*road) < 5 {
+ return &EllipseNotFound
+ }
+
+ rotatedRect := gocv.FitEllipse(*road)
+
+ trust := rd.computeTrustFromCenter(&rotatedRect.Center)
+ log.Debugf("Trust: %v", trust)
+
+ return &events.Ellipse{
+ Center: &events.Point{
+ X: int32(rotatedRect.Center.X),
+ Y: int32(rotatedRect.Center.Y),
+ },
+ Width: int32(rotatedRect.Width),
+ Height: int32(rotatedRect.Height),
+ Angle: float32(rotatedRect.Angle),
+ Confidence: rd.computeTrustFromCenter(&rotatedRect.Center),
+ }
+}
+
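+// computeTrustFromCenter returns 1.0 when the ellipse center lies inside a hardcoded safe zone and a lower confidence the further the center drifts out of it.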
+func (rd *RoadDetector) computeTrustFromCenter(ellipsisCenter *image.Point) float32 {
+ safeMinX := 48
+ safeMaxX := 115
+ safeMinY := 69
+ safeMaxY := 119
+
+ if safeMinX <= ellipsisCenter.X && ellipsisCenter.X <= safeMaxX && safeMinY <= ellipsisCenter.Y && ellipsisCenter.Y <= safeMaxY {
+ return 1.0
+ }
+
+ if safeMinX <= ellipsisCenter.X && ellipsisCenter.X <= safeMaxX {
+ return rd.computeTrustOnAxis(safeMaxY, safeMinY, ellipsisCenter.Y)
+ }
+
+ if safeMinY <= ellipsisCenter.Y && ellipsisCenter.Y <= safeMaxY {
+ return rd.computeTrustOnAxis(safeMaxX, safeMinX, ellipsisCenter.X)
+ }
+
+ return rd.computeTrustOnAxis(safeMaxY, safeMinY, ellipsisCenter.Y) * rd.computeTrustOnAxis(safeMaxX, safeMinX, ellipsisCenter.X)
+}
+
+func (rd *RoadDetector) computeTrustOnAxis(safeMax, safeMin, value int) float32 {
+ trust := 1.
+ if value > safeMax {
+ trust = 1. / float64(value-safeMax)
+ } else if value < safeMin {
+ trust = 1. / float64(safeMin-value)
+ }
+ trust = trust * 10.
+ if trust > 0.9 {
+ trust = 0.9
+ }
+ if trust < 0. {
+ trust = 0.
+ }
+ return float32(trust)
+
+}
diff --git a/part/opencv_test.go b/part/opencv_test.go
new file mode 100644
index 0000000..d6d15d3
--- /dev/null
+++ b/part/opencv_test.go
@@ -0,0 +1,193 @@
+package part
+
+import (
+ "fmt"
+ "github.com/cyrilix/robocar-protobuf/go/events"
+ log "github.com/sirupsen/logrus"
+ "gocv.io/x/gocv"
+ "image"
+ "image/color"
+ "testing"
+)
+
+func toGray(imgColor gocv.Mat) *gocv.Mat {
+ imgGray := gocv.NewMatWithSize(imgColor.Rows(), imgColor.Cols(), gocv.MatTypeCV8UC1)
+ gocv.CvtColor(imgColor, &imgGray, gocv.ColorRGBToGray)
+ return &imgGray
+}
+
+func image1() *gocv.Mat {
+ img := gocv.IMRead("testdata/image.jpg", gocv.IMReadColor)
+ return &img
+}
+
+func image2() *gocv.Mat {
+ img := gocv.IMRead("testdata/image2.jpg", gocv.IMReadColor)
+ return &img
+}
+
+func image3() *gocv.Mat {
+ img := gocv.IMRead("testdata/image3.jpg", gocv.IMReadColor)
+ return &img
+}
+
+func image4() *gocv.Mat {
+ img := gocv.IMRead("testdata/image4.jpg", gocv.IMReadColor)
+ return &img
+}
+
+func image5() *gocv.Mat {
+ img := gocv.IMRead("testdata/image5.jpg", gocv.IMReadColor)
+ return &img
+}
+
+func TestRoadDetection_DetectRoadContour(t *testing.T) {
+ rd := NewRoadDetector()
+
+ img1 := image1()
+ defer img1.Close()
+ img2 := image2()
+ defer img2.Close()
+ img3 := image3()
+ defer img3.Close()
+ img4 := image4()
+ defer img4.Close()
+ img5 := image5()
+ defer img5.Close()
+
+ cases := []struct {
+ name string
+ img *gocv.Mat
+ horizon int
+ expectedContour []image.Point
+ }{
+ {"image1", img1, 20,
+ []image.Point{image.Point{0, 45}, image.Point{0, 127}, image.Point{144, 127}, image.Point{95, 21}, image.Point{43, 21}},
+ },
+ {"image2", img2, 20,
+ []image.Point{{159,69}, {128,53}, {125,41}, {113,42}, {108,21}, {87,21}, {79,41}, {72,30}, {44,39}, {29,34}, {0,67}, {0,127}, {159,127}, {152,101},},
+ },
+ {"image3", img3, 20,
+ []image.Point{{97,21}, {59,127}, {159,127}, {159,36}, {138,21},},
+ },
+ {"image4", img4, 20,
+ []image.Point{{0,21}, {0,77}, {68,22}, {0,96}, {0,127}, {159,127}, {159,21},},
+ },
+ {"image5", img5, 20,
+ []image.Point{{159,32}, {100,36}, {29,60}, {0,79}, {0,127}, {159,127},},
+ },
+ }
+
+ for _, c := range cases {
+ imgGray := toGray(*c.img)
+ contours := rd.DetectRoadContour(imgGray, c.horizon)
+ imgGray.Close()
+
+ log.Infof("[%v] contour: %v", c.name, *contours)
+ if len(*contours) != len(c.expectedContour) {
+ t.Errorf("[%v] bad contour size: %v point(s), wants %v", c.name, len(*contours), len(c.expectedContour))
+ }
+ for idx, pt := range c.expectedContour {
+ if pt != (*contours)[idx] {
+ t.Errorf("[%v] bad point: %v, wants %v", c.name, (*contours)[idx], pt)
+ }
+ }
+ debugContour(*c.img, contours, fmt.Sprintf("/tmp/%v.jpg", c.name))
+ }
+}
+
+func debugContour(img gocv.Mat, contour *[]image.Point, imgPath string) {
+ imgColor := img.Clone()
+ defer imgColor.Close()
+
+ gocv.DrawContours(&imgColor, [][]image.Point{*contour,}, 0, color.RGBA{
+ R: 0,
+ G: 255,
+ B: 0,
+ A: 255,
+ }, 1)
+ gocv.IMWrite(imgPath, imgColor)
+}
+
+func TestRoadDetector_ComputeEllipsis(t *testing.T) {
+ rd := NewRoadDetector()
+
+ cases := []struct {
+ name string
+ contour []image.Point
+ expectedEllipse events.Ellipse
+ }{
+ {"image1",
+ []image.Point{image.Point{0, 45}, image.Point{0, 127}, image.Point{144, 127}, image.Point{95, 21}, image.Point{43, 21}},
+ events.Ellipse{
+ Center: &events.Point{
+ X: 71,
+ Y: 87,
+ },
+ Width: 139,
+ Height: 176,
+ Angle: 92.66927,
+ Confidence: 1.,
+ },
+ },
+ {"image2",
+ []image.Point{{159,69}, {128,53}, {125,41}, {113,42}, {108,21}, {87,21}, {79,41}, {72,30}, {44,39}, {29,34}, {0,67}, {0,127}, {159,127}, {152,101},},
+ events.Ellipse{
+ Center: &events.Point{
+ X: 77,
+ Y: 102,
+ },
+ Width: 152,
+ Height: 168,
+ Angle: 94.70433,
+ Confidence: 1.,
+ },
+ },
+ {"image3",
+ []image.Point{{97,21}, {59,127}, {159,127}, {159,36}, {138,21},},
+ events.Ellipse{
+ Center: &events.Point{
+ X: 112,
+ Y: 86,
+ },
+ Width: 122,
+ Height: 140,
+ Angle: 20.761106,
+ Confidence: 1.,
+ },
+ },
+ {"image4",
+ []image.Point{{0,21}, {0,77}, {68,22}, {0,96}, {0,127}, {159,127}, {159,21},},
+ events.Ellipse{
+ Center: &events.Point{
+ X: 86,
+ Y: 78,
+ },
+ Width: 154,
+ Height: 199,
+ Angle: 90.45744,
+ Confidence: 1.,
+ },
+ },
+ {"image5",
+ []image.Point{{159,32}, {100,36}, {29,60}, {0,79}, {0,127}, {159,127},},
+ events.Ellipse{
+ Center: &events.Point{
+ X: 109,
+ Y: 87,
+ },
+ Width: 103,
+ Height: 247,
+ Angle: 79.6229,
+ Confidence: 1.0,
+ },
+ },
+ }
+
+	for _, c := range cases {
+		ellipse := rd.ComputeEllipsis(&c.contour)
+		if ellipse.String() != c.expectedEllipse.String() {
+ t.Errorf("ComputeEllipsis(%v): %v, wants %v", c.name, ellipse.String(), c.expectedEllipse.String())
+ }
+ }
+}
diff --git a/part/part.go b/part/part.go
new file mode 100644
index 0000000..11a4df7
--- /dev/null
+++ b/part/part.go
@@ -0,0 +1,146 @@
+package part
+
+import (
+ "github.com/cyrilix/robocar-base/service"
+ "github.com/cyrilix/robocar-protobuf/go/events"
+ mqtt "github.com/eclipse/paho.mqtt.golang"
+ "github.com/golang/protobuf/proto"
+ log "github.com/sirupsen/logrus"
+ "gocv.io/x/gocv"
+)
+
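+// RoadPart subscribes to camera frames over MQTT, runs road detection on each frame and publishes the resulting RoadMessage events.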
+type RoadPart struct {
+ client mqtt.Client
+ frameChan chan frameToProcess
+ readyForNext chan interface{}
+ cancel chan interface{}
+ roadDetector *RoadDetector
+ horizon int
+ cameraTopic, roadTopic string
+}
+
+func NewRoadPart(client mqtt.Client, horizon int, cameraTopic, roadTopic string) *RoadPart {
+ return &RoadPart{
+ client: client,
+ frameChan: make(chan frameToProcess),
+ readyForNext: make(chan interface{}, 1),
+ cancel: make(chan interface{}),
+ roadDetector: NewRoadDetector(),
+ horizon: horizon,
+ cameraTopic: cameraTopic,
+ roadTopic: roadTopic,
+ }
+}
+
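+// Start registers the MQTT callback and loops over incoming frames, processing one frame at a time and dropping frames received while a detection is still running.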
+func (r *RoadPart) Start() error {
+ registerCallBacks(r)
+
+ ready := true
+ var frame = frameToProcess{}
+ defer func() {
+ if err := frame.Close(); err != nil {
+ log.Errorf("unable to close msg: %v", err)
+ }
+ }()
+
+ for {
+ select {
+ case f := <-r.frameChan:
+ log.Debug("new msg")
+ oldFrame := frame
+ frame = f
+ if err := oldFrame.Close(); err != nil {
+ log.Errorf("unable to close msg: %v", err)
+ }
+ if ready {
+ log.Debug("process msg")
+ go r.processFrame(&frame)
+ ready = false
+ }
+ case <-r.readyForNext:
+ ready = true
+ case <-r.cancel:
+ log.Infof("Stop service")
+ return nil
+ }
+ }
+}
+
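+// registerCallBacks is a package variable so that tests can stub the MQTT subscription.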
+var registerCallBacks = func(r *RoadPart) {
+ err := service.RegisterCallback(r.client, r.cameraTopic, r.OnFrame)
+ if err != nil {
+ log.Panicf("unable to register callback to topic %v:%v", r.cameraTopic, err)
+ }
+}
+
+// Stop closes the detector resources and unsubscribes from the camera topic.
+func (r *RoadPart) Stop() {
+	defer func() {
+		if err := r.roadDetector.Close(); err != nil {
+			log.Errorf("unable to close roadDetector: %v", err)
+		}
+	}()
+	close(r.readyForNext)
+	close(r.cancel)
+	service.StopService("road", r.client, r.cameraTopic)
+}
+
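+// OnFrame unmarshals the FrameMessage payload, decodes the image and queues it for processing.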
+func (r *RoadPart) OnFrame(_ mqtt.Client, msg mqtt.Message) {
+ var frameMsg events.FrameMessage
+ err := proto.Unmarshal(msg.Payload(), &frameMsg)
+ if err != nil {
+ log.Errorf("unable to unmarshal %T message: %v", frameMsg, err)
+ return
+ }
+
+ img, err := gocv.IMDecode(frameMsg.GetFrame(), gocv.IMReadUnchanged)
+ if err != nil {
+ log.Errorf("unable to decode image: %v", err)
+ return
+ }
+ frame := frameToProcess{
+ ref: frameMsg.GetId(),
+ Mat: img,
+ }
+ r.frameChan <- frame
+}
+
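+// frameToProcess associates a decoded image with the reference of the frame it comes from.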
+type frameToProcess struct {
+ ref *events.FrameRef
+ gocv.Mat
+}
+
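+// processFrame converts the frame to grayscale, detects the road contour and its ellipse, then publishes the result on the road topic.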
+func (r *RoadPart) processFrame(frame *frameToProcess) {
+	// Signal the main loop that a new frame can be processed once this one is done
+	// (readyForNext is buffered, so the send cannot block).
+	defer func() { r.readyForNext <- struct{}{} }()
+
+	img := frame.Mat
+	imgGray := gocv.NewMatWithSize(img.Rows(), img.Cols(), gocv.MatTypeCV8UC1)
+	defer func() {
+		if err := imgGray.Close(); err != nil {
+			log.Warnf("unable to close Mat resource: %v", err)
+		}
+	}()
+	gocv.CvtColor(img, &imgGray, gocv.ColorRGBToGray)
+
+ road := r.roadDetector.DetectRoadContour(&imgGray, r.horizon)
+ ellipse := r.roadDetector.ComputeEllipsis(road)
+
+ cntr := make([]*events.Point, 0, len(*road))
+ for _, pt := range *road {
+ cntr = append(cntr, &events.Point{X: int32(pt.X), Y: int32(pt.Y)})
+ }
+
+ msg := events.RoadMessage{
+ Contour: cntr,
+ Ellipse: ellipse,
+ FrameRef: frame.ref,
+ }
+
+ payload, err := proto.Marshal(&msg)
+ if err != nil {
+		log.Errorf("unable to marshal %T to protobuf: %v", msg, err)
+ return
+ }
+ publish(r.client, r.roadTopic, &payload)
+}
+
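+// publish is a package variable so that tests can capture published messages.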
+var publish = func(client mqtt.Client, topic string, payload *[]byte) {
+ client.Publish(topic, 0, false, *payload)
+}
diff --git a/part/part_test.go b/part/part_test.go
new file mode 100644
index 0000000..9dd63e3
--- /dev/null
+++ b/part/part_test.go
@@ -0,0 +1,118 @@
+package part
+
+import (
+ "fmt"
+ "github.com/cyrilix/robocar-base/testtools"
+ "github.com/cyrilix/robocar-protobuf/go/events"
+ mqtt "github.com/eclipse/paho.mqtt.golang"
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/ptypes/timestamp"
+ log "github.com/sirupsen/logrus"
+ "io/ioutil"
+ "sync"
+ "testing"
+ "time"
+)
+
+func TestRoadPart_OnFrame(t *testing.T) {
+ oldRegister := registerCallBacks
+ oldPublish := publish
+ defer func() {
+ registerCallBacks = oldRegister
+ publish = oldPublish
+ }()
+
+ registerCallBacks = func(_ *RoadPart) {}
+
+ var muEventsPublished sync.Mutex
+ eventsPublished := make(map[string][]byte)
+ publish = func(client mqtt.Client, topic string, payload *[]byte) {
+ muEventsPublished.Lock()
+ defer muEventsPublished.Unlock()
+ eventsPublished[topic] = *payload
+ }
+
+ cameraTopic := "topic/camera"
+ roadTopic := "topic/road"
+
+	rp := NewRoadPart(nil, 20, cameraTopic, roadTopic)
+ go func() {
+ if err := rp.Start(); err != nil {
+			// t.FailNow must not be called from a goroutine other than the one running the test
+			t.Errorf("unable to start roadPart: %v", err)
+ }
+ }()
+
+ cases := []struct {
+ name string
+ msg mqtt.Message
+ expectedCntr []*events.Point
+ expectedEllipse events.Ellipse
+ }{
+ {
+ name: "image1",
+ msg: loadFrame(t, cameraTopic, "image"),
+ expectedCntr: []*events.Point{&events.Point{X: 0, Y: int32(45)}, &events.Point{X: 0, Y: 127}, &events.Point{X: 144, Y: 127}, &events.Point{X: 95, Y: 21}, &events.Point{X: 43, Y: 21}},
+ expectedEllipse: events.Ellipse{Center: &events.Point{X: 71, Y: 87,}, Width: 139, Height: 176, Angle: 92.66927, Confidence: 1.,},
+ },
+ }
+
+ for _, c := range cases {
+ rp.OnFrame(nil, c.msg)
+
+ time.Sleep(20 * time.Millisecond)
+
+ var roadMsg events.RoadMessage
+ err := proto.Unmarshal(eventsPublished[roadTopic], &roadMsg)
+ if err != nil {
+ t.Errorf("unable to unmarshal response, bad return type: %v", err)
+ continue
+ }
+
+ if len(roadMsg.Contour) != len(c.expectedCntr) {
+ t.Errorf("[%v] bad nb point in road contour: %v, wants %v", c.name, len(roadMsg.Contour), len(c.expectedCntr))
+ }
+ for idx, pt := range roadMsg.Contour {
+ if pt.String() != c.expectedCntr[idx].String() {
+ t.Errorf("[%v] bad point at position %v: %v, wants %v", c.name, idx, pt, c.expectedCntr[idx])
+ }
+ }
+ if roadMsg.Ellipse.String() != c.expectedEllipse.String() {
+ t.Errorf("[%v] bad ellipse: %v, wants %v", c.name, roadMsg.Ellipse, c.expectedEllipse)
+ }
+ frameRef := frameRefFromPayload(c.msg.Payload())
+ if frameRef.String() != roadMsg.GetFrameRef().String() {
+ t.Errorf("[%v] invalid frameRef: %v, wants %v", c.name, roadMsg.GetFrameRef(), frameRef)
+ }
+ }
+}
+
+func frameRefFromPayload(payload []byte) *events.FrameRef {
+ var msg events.FrameMessage
+ err := proto.Unmarshal(payload, &msg)
+ if err != nil {
+		log.Errorf("unable to unmarshal %T msg: %v", msg, err)
+ }
+ return msg.GetId()
+}
+
+func loadFrame(t *testing.T, topic string, name string) mqtt.Message {
+ img, err := ioutil.ReadFile(fmt.Sprintf("testdata/%s.jpg", name))
+ if err != nil {
+ t.Fatalf("unable to load data test image: %v", err)
+ return nil
+ }
+ now := time.Now()
+ msg := events.FrameMessage{
+ Id: &events.FrameRef{
+ Name: name,
+ Id: name,
+			CreatedAt: &timestamp.Timestamp{
+ Seconds: now.Unix(),
+ Nanos: int32(now.Nanosecond()),
+ },
+ },
+ Frame: img,
+ }
+ return testtools.NewFakeMessageFromProtobuf(topic, &msg)
+}
diff --git a/part/testdata/image.jpg b/part/testdata/image.jpg
new file mode 100644
index 0000000..b34a0dd
Binary files /dev/null and b/part/testdata/image.jpg differ
diff --git a/part/testdata/image2.jpg b/part/testdata/image2.jpg
new file mode 100644
index 0000000..1c4a9c2
Binary files /dev/null and b/part/testdata/image2.jpg differ
diff --git a/part/testdata/image3.jpg b/part/testdata/image3.jpg
new file mode 100644
index 0000000..6e58bdb
Binary files /dev/null and b/part/testdata/image3.jpg differ
diff --git a/part/testdata/image4.jpg b/part/testdata/image4.jpg
new file mode 100644
index 0000000..7634cf8
Binary files /dev/null and b/part/testdata/image4.jpg differ
diff --git a/part/testdata/image5.jpg b/part/testdata/image5.jpg
new file mode 100644
index 0000000..d8783f2
Binary files /dev/null and b/part/testdata/image5.jpg differ
diff --git a/vendor/github.com/cyrilix/robocar-base/LICENSE b/vendor/github.com/cyrilix/robocar-base/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/cyrilix/robocar-base/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/cyrilix/robocar-base/cli/cli.go b/vendor/github.com/cyrilix/robocar-base/cli/cli.go
new file mode 100644
index 0000000..7cfaa8f
--- /dev/null
+++ b/vendor/github.com/cyrilix/robocar-base/cli/cli.go
@@ -0,0 +1,115 @@
+package cli
+
+import (
+ "flag"
+ "fmt"
+ "github.com/cyrilix/robocar-base/service"
+ MQTT "github.com/eclipse/paho.mqtt.golang"
+ "log"
+ "os"
+ "os/signal"
+ "strconv"
+ "syscall"
+)
+
+func SetDefaultValueFromEnv(value *string, key string, defaultValue string) {
+ if os.Getenv(key) != "" {
+ *value = os.Getenv(key)
+ } else {
+ *value = defaultValue
+ }
+}
+func SetIntDefaultValueFromEnv(value *int, key string, defaultValue int) error {
+ var sVal string
+ if os.Getenv(key) != "" {
+ sVal = os.Getenv(key)
+ val, err := strconv.Atoi(sVal)
+ if err != nil {
+ log.Printf("unable to convert string to int: %v", err)
+ return err
+ }
+ *value = val
+ } else {
+ *value = defaultValue
+ }
+ return nil
+}
+func SetFloat64DefaultValueFromEnv(value *float64, key string, defaultValue float64) error {
+ var sVal string
+ if os.Getenv(key) != "" {
+ sVal = os.Getenv(key)
+ val, err := strconv.ParseFloat(sVal, 64)
+ if err != nil {
+ log.Printf("unable to convert string to float: %v", err)
+ return err
+ }
+ *value = val
+ } else {
+ *value = defaultValue
+ }
+ return nil
+}
+
+func HandleExit(p service.Part) {
+ signals := make(chan os.Signal, 1)
+ signal.Notify(signals, os.Kill, os.Interrupt, syscall.SIGTERM)
+
+ go func() {
+ <-signals
+ p.Stop()
+ os.Exit(0)
+ }()
+}
+
+func InitMqttFlags(defaultClientId string, mqttBroker, username, password, clientId *string, mqttQos *int, mqttRetain *bool) {
+ SetDefaultValueFromEnv(clientId, "MQTT_CLIENT_ID", defaultClientId)
+ SetDefaultValueFromEnv(mqttBroker, "MQTT_BROKER", "tcp://127.0.0.1:1883")
+
+ flag.StringVar(mqttBroker, "mqtt-broker", *mqttBroker, "Broker Uri, use MQTT_BROKER env if arg not set")
+ flag.StringVar(username, "mqtt-username", os.Getenv("MQTT_USERNAME"), "Broker Username, use MQTT_USERNAME env if arg not set")
+ flag.StringVar(password, "mqtt-password", os.Getenv("MQTT_PASSWORD"), "Broker Password, MQTT_PASSWORD env if args not set")
+ flag.StringVar(clientId, "mqtt-client-id", *clientId, "Mqtt client id, use MQTT_CLIENT_ID env if args not set")
+ flag.IntVar(mqttQos, "mqtt-qos", *mqttQos, "Qos to pusblish message, use MQTT_QOS env if arg not set")
+ flag.BoolVar(mqttRetain, "mqtt-retain", *mqttRetain, "Retain mqtt message, if not set, true if MQTT_RETAIN env variable is set")
+}
+
+func InitIntFlag(key string, defValue int) int {
+ var value int
+ err := SetIntDefaultValueFromEnv(&value, key, defValue)
+ if err != nil {
+ log.Panicf("invalid int value: %v", err)
+ }
+ return value
+}
+
+func InitFloat64Flag(key string, defValue float64) float64 {
+ var value float64
+ err := SetFloat64DefaultValueFromEnv(&value, key, defValue)
+ if err != nil {
+ log.Panicf("invalid value: %v", err)
+ }
+ return value
+}
+
+func Connect(uri, username, password, clientId string) (MQTT.Client, error) {
+ //create a ClientOptions struct setting the broker address, clientid, turn
+ //off trace output and set the default message handler
+ opts := MQTT.NewClientOptions().AddBroker(uri)
+ opts.SetUsername(username)
+ opts.SetPassword(password)
+ opts.SetClientID(clientId)
+ opts.SetAutoReconnect(true)
+ opts.SetDefaultPublishHandler(
+ //define a function for the default message handler
+ func(client MQTT.Client, msg MQTT.Message) {
+ fmt.Printf("TOPIC: %s\n", msg.Topic())
+ fmt.Printf("MSG: %s\n", msg.Payload())
+ })
+
+ //create and start a client using the above ClientOptions
+ client := MQTT.NewClient(opts)
+ if token := client.Connect(); token.Wait() && token.Error() != nil {
+ return nil, fmt.Errorf("unable to connect to mqtt bus: %v", token.Error())
+ }
+ return client, nil
+}
diff --git a/vendor/github.com/cyrilix/robocar-base/service/part.go b/vendor/github.com/cyrilix/robocar-base/service/part.go
new file mode 100644
index 0000000..c370778
--- /dev/null
+++ b/vendor/github.com/cyrilix/robocar-base/service/part.go
@@ -0,0 +1,33 @@
+package service
+
+import (
+ "fmt"
+ mqtt "github.com/eclipse/paho.mqtt.golang"
+ "log"
+)
+
+func StopService(name string, client mqtt.Client, topics ...string) {
+ log.Printf("Stop %s service", name)
+ token := client.Unsubscribe(topics...)
+ token.Wait()
+ if token.Error() != nil {
+ log.Printf("unable to unsubscribe service: %v", token.Error())
+ }
+ client.Disconnect(50)
+}
+
+func RegisterCallback(client mqtt.Client, topic string, callback mqtt.MessageHandler) error {
+ log.Printf("Register callback on topic %v", topic)
+ token := client.Subscribe(topic, 0, callback)
+ token.Wait()
+ if token.Error() != nil {
+ return fmt.Errorf("unable to register callback on topic %s: %v", topic, token.Error())
+ }
+ return nil
+}
+
+type Part interface {
+ Start() error
+ Stop()
+}
+
diff --git a/vendor/github.com/cyrilix/robocar-base/testtools/testtools.go b/vendor/github.com/cyrilix/robocar-base/testtools/testtools.go
new file mode 100644
index 0000000..e49c3d9
--- /dev/null
+++ b/vendor/github.com/cyrilix/robocar-base/testtools/testtools.go
@@ -0,0 +1,60 @@
+package testtools
+
+import (
+ mqtt "github.com/eclipse/paho.mqtt.golang"
+ "github.com/golang/protobuf/proto"
+ log "github.com/sirupsen/logrus"
+)
+
+type fakeMessage struct {
+ qos byte
+ topic string
+ payload []byte
+ acked bool
+}
+
+func (f *fakeMessage) Duplicate() bool {
+ return false
+}
+
+func (f *fakeMessage) Qos() byte {
+ return f.qos
+}
+
+func (f *fakeMessage) Retained() bool {
+ return false
+}
+
+func (f *fakeMessage) Topic() string {
+ return f.topic
+}
+
+func (f *fakeMessage) MessageID() uint16 {
+ return 1234
+}
+
+func (f *fakeMessage) Payload() []byte {
+ return f.payload
+}
+
+func (f *fakeMessage) Ack() {
+ f.acked = true
+}
+
+func NewFakeMessage(topic string, payload []byte) mqtt.Message {
+ return &fakeMessage{
+ qos: 0,
+ topic: topic,
+ payload: payload,
+ acked: false,
+ }
+}
+
+func NewFakeMessageFromProtobuf(topic string, msg proto.Message) mqtt.Message {
+ payload, err := proto.Marshal(msg)
+ if err != nil {
+ log.Errorf("unable to marshal protobuf message %T: %v", msg, err)
+ return nil
+ }
+ return NewFakeMessage(topic, payload)
+}
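A sketch of how the fake message helper above can drive an mqtt.MessageHandler in a unit test without a broker; the handler and topic are illustrative.

```go
package road_test

import (
    "testing"

    "github.com/cyrilix/robocar-base/testtools"
    mqtt "github.com/eclipse/paho.mqtt.golang"
)

func TestHandlerReadsPayload(t *testing.T) {
    var got []byte
    handler := func(_ mqtt.Client, msg mqtt.Message) { got = msg.Payload() }

    // The fake message satisfies mqtt.Message, so no client is needed.
    msg := testtools.NewFakeMessage("road/frame", []byte("payload"))
    handler(nil, msg)

    if string(got) != "payload" {
        t.Errorf("handler got %q, want %q", got, "payload")
    }
}
```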
diff --git a/vendor/github.com/cyrilix/robocar-protobuf/go/LICENSE b/vendor/github.com/cyrilix/robocar-protobuf/go/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/cyrilix/robocar-protobuf/go/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/cyrilix/robocar-protobuf/go/events/events.pb.go b/vendor/github.com/cyrilix/robocar-protobuf/go/events/events.pb.go
new file mode 100644
index 0000000..85a6ad5
--- /dev/null
+++ b/vendor/github.com/cyrilix/robocar-protobuf/go/events/events.pb.go
@@ -0,0 +1,735 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: events/events.proto
+
+package events
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ timestamp "github.com/golang/protobuf/ptypes/timestamp"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type DriveMode int32
+
+const (
+ DriveMode_INVALID DriveMode = 0
+ DriveMode_USER DriveMode = 1
+ DriveMode_PILOT DriveMode = 2
+)
+
+var DriveMode_name = map[int32]string{
+ 0: "INVALID",
+ 1: "USER",
+ 2: "PILOT",
+}
+
+var DriveMode_value = map[string]int32{
+ "INVALID": 0,
+ "USER": 1,
+ "PILOT": 2,
+}
+
+func (x DriveMode) String() string {
+ return proto.EnumName(DriveMode_name, int32(x))
+}
+
+func (DriveMode) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_8ec31f2d2a3db598, []int{0}
+}
+
+type TypeObject int32
+
+const (
+ TypeObject_ANY TypeObject = 0
+ TypeObject_CAR TypeObject = 1
+ TypeObject_BUMP TypeObject = 2
+ TypeObject_PLOT TypeObject = 3
+)
+
+var TypeObject_name = map[int32]string{
+ 0: "ANY",
+ 1: "CAR",
+ 2: "BUMP",
+ 3: "PLOT",
+}
+
+var TypeObject_value = map[string]int32{
+ "ANY": 0,
+ "CAR": 1,
+ "BUMP": 2,
+ "PLOT": 3,
+}
+
+func (x TypeObject) String() string {
+ return proto.EnumName(TypeObject_name, int32(x))
+}
+
+func (TypeObject) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_8ec31f2d2a3db598, []int{1}
+}
+
+type FrameRef struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
+ CreatedAt *timestamp.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *FrameRef) Reset() { *m = FrameRef{} }
+func (m *FrameRef) String() string { return proto.CompactTextString(m) }
+func (*FrameRef) ProtoMessage() {}
+func (*FrameRef) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8ec31f2d2a3db598, []int{0}
+}
+
+func (m *FrameRef) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_FrameRef.Unmarshal(m, b)
+}
+func (m *FrameRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_FrameRef.Marshal(b, m, deterministic)
+}
+func (m *FrameRef) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_FrameRef.Merge(m, src)
+}
+func (m *FrameRef) XXX_Size() int {
+ return xxx_messageInfo_FrameRef.Size(m)
+}
+func (m *FrameRef) XXX_DiscardUnknown() {
+ xxx_messageInfo_FrameRef.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FrameRef proto.InternalMessageInfo
+
+func (m *FrameRef) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *FrameRef) GetId() string {
+ if m != nil {
+ return m.Id
+ }
+ return ""
+}
+
+func (m *FrameRef) GetCreatedAt() *timestamp.Timestamp {
+ if m != nil {
+ return m.CreatedAt
+ }
+ return nil
+}
+
+type FrameMessage struct {
+ Id *FrameRef `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ Frame []byte `protobuf:"bytes,2,opt,name=frame,proto3" json:"frame,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *FrameMessage) Reset() { *m = FrameMessage{} }
+func (m *FrameMessage) String() string { return proto.CompactTextString(m) }
+func (*FrameMessage) ProtoMessage() {}
+func (*FrameMessage) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8ec31f2d2a3db598, []int{1}
+}
+
+func (m *FrameMessage) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_FrameMessage.Unmarshal(m, b)
+}
+func (m *FrameMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_FrameMessage.Marshal(b, m, deterministic)
+}
+func (m *FrameMessage) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_FrameMessage.Merge(m, src)
+}
+func (m *FrameMessage) XXX_Size() int {
+ return xxx_messageInfo_FrameMessage.Size(m)
+}
+func (m *FrameMessage) XXX_DiscardUnknown() {
+ xxx_messageInfo_FrameMessage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FrameMessage proto.InternalMessageInfo
+
+func (m *FrameMessage) GetId() *FrameRef {
+ if m != nil {
+ return m.Id
+ }
+ return nil
+}
+
+func (m *FrameMessage) GetFrame() []byte {
+ if m != nil {
+ return m.Frame
+ }
+ return nil
+}
+
+type SteeringMessage struct {
+ Steering float32 `protobuf:"fixed32,1,opt,name=steering,proto3" json:"steering,omitempty"`
+ Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
+ FrameRef *FrameRef `protobuf:"bytes,3,opt,name=frame_ref,json=frameRef,proto3" json:"frame_ref,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SteeringMessage) Reset() { *m = SteeringMessage{} }
+func (m *SteeringMessage) String() string { return proto.CompactTextString(m) }
+func (*SteeringMessage) ProtoMessage() {}
+func (*SteeringMessage) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8ec31f2d2a3db598, []int{2}
+}
+
+func (m *SteeringMessage) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SteeringMessage.Unmarshal(m, b)
+}
+func (m *SteeringMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SteeringMessage.Marshal(b, m, deterministic)
+}
+func (m *SteeringMessage) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SteeringMessage.Merge(m, src)
+}
+func (m *SteeringMessage) XXX_Size() int {
+ return xxx_messageInfo_SteeringMessage.Size(m)
+}
+func (m *SteeringMessage) XXX_DiscardUnknown() {
+ xxx_messageInfo_SteeringMessage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SteeringMessage proto.InternalMessageInfo
+
+func (m *SteeringMessage) GetSteering() float32 {
+ if m != nil {
+ return m.Steering
+ }
+ return 0
+}
+
+func (m *SteeringMessage) GetConfidence() float32 {
+ if m != nil {
+ return m.Confidence
+ }
+ return 0
+}
+
+func (m *SteeringMessage) GetFrameRef() *FrameRef {
+ if m != nil {
+ return m.FrameRef
+ }
+ return nil
+}
+
+type ThrottleMessage struct {
+ Throttle float32 `protobuf:"fixed32,1,opt,name=throttle,proto3" json:"throttle,omitempty"`
+ Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
+ FrameRef *FrameRef `protobuf:"bytes,3,opt,name=frame_ref,json=frameRef,proto3" json:"frame_ref,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ThrottleMessage) Reset() { *m = ThrottleMessage{} }
+func (m *ThrottleMessage) String() string { return proto.CompactTextString(m) }
+func (*ThrottleMessage) ProtoMessage() {}
+func (*ThrottleMessage) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8ec31f2d2a3db598, []int{3}
+}
+
+func (m *ThrottleMessage) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ThrottleMessage.Unmarshal(m, b)
+}
+func (m *ThrottleMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ThrottleMessage.Marshal(b, m, deterministic)
+}
+func (m *ThrottleMessage) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ThrottleMessage.Merge(m, src)
+}
+func (m *ThrottleMessage) XXX_Size() int {
+ return xxx_messageInfo_ThrottleMessage.Size(m)
+}
+func (m *ThrottleMessage) XXX_DiscardUnknown() {
+ xxx_messageInfo_ThrottleMessage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ThrottleMessage proto.InternalMessageInfo
+
+func (m *ThrottleMessage) GetThrottle() float32 {
+ if m != nil {
+ return m.Throttle
+ }
+ return 0
+}
+
+func (m *ThrottleMessage) GetConfidence() float32 {
+ if m != nil {
+ return m.Confidence
+ }
+ return 0
+}
+
+func (m *ThrottleMessage) GetFrameRef() *FrameRef {
+ if m != nil {
+ return m.FrameRef
+ }
+ return nil
+}
+
+type DriveModeMessage struct {
+ DriveMode DriveMode `protobuf:"varint,1,opt,name=drive_mode,json=driveMode,proto3,enum=robocar.events.DriveMode" json:"drive_mode,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DriveModeMessage) Reset() { *m = DriveModeMessage{} }
+func (m *DriveModeMessage) String() string { return proto.CompactTextString(m) }
+func (*DriveModeMessage) ProtoMessage() {}
+func (*DriveModeMessage) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8ec31f2d2a3db598, []int{4}
+}
+
+func (m *DriveModeMessage) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DriveModeMessage.Unmarshal(m, b)
+}
+func (m *DriveModeMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DriveModeMessage.Marshal(b, m, deterministic)
+}
+func (m *DriveModeMessage) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DriveModeMessage.Merge(m, src)
+}
+func (m *DriveModeMessage) XXX_Size() int {
+ return xxx_messageInfo_DriveModeMessage.Size(m)
+}
+func (m *DriveModeMessage) XXX_DiscardUnknown() {
+ xxx_messageInfo_DriveModeMessage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DriveModeMessage proto.InternalMessageInfo
+
+func (m *DriveModeMessage) GetDriveMode() DriveMode {
+ if m != nil {
+ return m.DriveMode
+ }
+ return DriveMode_INVALID
+}
+
+type ObjectsMessage struct {
+ Objects []*Object `protobuf:"bytes,1,rep,name=objects,proto3" json:"objects,omitempty"`
+ FrameRef *FrameRef `protobuf:"bytes,2,opt,name=frame_ref,json=frameRef,proto3" json:"frame_ref,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ObjectsMessage) Reset() { *m = ObjectsMessage{} }
+func (m *ObjectsMessage) String() string { return proto.CompactTextString(m) }
+func (*ObjectsMessage) ProtoMessage() {}
+func (*ObjectsMessage) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8ec31f2d2a3db598, []int{5}
+}
+
+func (m *ObjectsMessage) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ObjectsMessage.Unmarshal(m, b)
+}
+func (m *ObjectsMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ObjectsMessage.Marshal(b, m, deterministic)
+}
+func (m *ObjectsMessage) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ObjectsMessage.Merge(m, src)
+}
+func (m *ObjectsMessage) XXX_Size() int {
+ return xxx_messageInfo_ObjectsMessage.Size(m)
+}
+func (m *ObjectsMessage) XXX_DiscardUnknown() {
+ xxx_messageInfo_ObjectsMessage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ObjectsMessage proto.InternalMessageInfo
+
+func (m *ObjectsMessage) GetObjects() []*Object {
+ if m != nil {
+ return m.Objects
+ }
+ return nil
+}
+
+func (m *ObjectsMessage) GetFrameRef() *FrameRef {
+ if m != nil {
+ return m.FrameRef
+ }
+ return nil
+}
+
+// BoundingBox that contains an object
+type Object struct {
+ Type TypeObject `protobuf:"varint,1,opt,name=type,proto3,enum=robocar.events.TypeObject" json:"type,omitempty"`
+ Left int32 `protobuf:"varint,2,opt,name=left,proto3" json:"left,omitempty"`
+ Top int32 `protobuf:"varint,3,opt,name=top,proto3" json:"top,omitempty"`
+ Right int32 `protobuf:"varint,4,opt,name=right,proto3" json:"right,omitempty"`
+ Bottom int32 `protobuf:"varint,5,opt,name=bottom,proto3" json:"bottom,omitempty"`
+ Confidence float32 `protobuf:"fixed32,6,opt,name=confidence,proto3" json:"confidence,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Object) Reset() { *m = Object{} }
+func (m *Object) String() string { return proto.CompactTextString(m) }
+func (*Object) ProtoMessage() {}
+func (*Object) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8ec31f2d2a3db598, []int{6}
+}
+
+func (m *Object) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Object.Unmarshal(m, b)
+}
+func (m *Object) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Object.Marshal(b, m, deterministic)
+}
+func (m *Object) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Object.Merge(m, src)
+}
+func (m *Object) XXX_Size() int {
+ return xxx_messageInfo_Object.Size(m)
+}
+func (m *Object) XXX_DiscardUnknown() {
+ xxx_messageInfo_Object.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Object proto.InternalMessageInfo
+
+func (m *Object) GetType() TypeObject {
+ if m != nil {
+ return m.Type
+ }
+ return TypeObject_ANY
+}
+
+func (m *Object) GetLeft() int32 {
+ if m != nil {
+ return m.Left
+ }
+ return 0
+}
+
+func (m *Object) GetTop() int32 {
+ if m != nil {
+ return m.Top
+ }
+ return 0
+}
+
+func (m *Object) GetRight() int32 {
+ if m != nil {
+ return m.Right
+ }
+ return 0
+}
+
+func (m *Object) GetBottom() int32 {
+ if m != nil {
+ return m.Bottom
+ }
+ return 0
+}
+
+func (m *Object) GetConfidence() float32 {
+ if m != nil {
+ return m.Confidence
+ }
+ return 0
+}
+
+type SwitchRecordMessage struct {
+ Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SwitchRecordMessage) Reset() { *m = SwitchRecordMessage{} }
+func (m *SwitchRecordMessage) String() string { return proto.CompactTextString(m) }
+func (*SwitchRecordMessage) ProtoMessage() {}
+func (*SwitchRecordMessage) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8ec31f2d2a3db598, []int{7}
+}
+
+func (m *SwitchRecordMessage) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SwitchRecordMessage.Unmarshal(m, b)
+}
+func (m *SwitchRecordMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SwitchRecordMessage.Marshal(b, m, deterministic)
+}
+func (m *SwitchRecordMessage) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SwitchRecordMessage.Merge(m, src)
+}
+func (m *SwitchRecordMessage) XXX_Size() int {
+ return xxx_messageInfo_SwitchRecordMessage.Size(m)
+}
+func (m *SwitchRecordMessage) XXX_DiscardUnknown() {
+ xxx_messageInfo_SwitchRecordMessage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SwitchRecordMessage proto.InternalMessageInfo
+
+func (m *SwitchRecordMessage) GetEnabled() bool {
+ if m != nil {
+ return m.Enabled
+ }
+ return false
+}
+
+// Road description
+type RoadMessage struct {
+ Contour []*Point `protobuf:"bytes,1,rep,name=contour,proto3" json:"contour,omitempty"`
+ Ellipse *Ellipse `protobuf:"bytes,2,opt,name=ellipse,proto3" json:"ellipse,omitempty"`
+ FrameRef *FrameRef `protobuf:"bytes,3,opt,name=frame_ref,json=frameRef,proto3" json:"frame_ref,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *RoadMessage) Reset() { *m = RoadMessage{} }
+func (m *RoadMessage) String() string { return proto.CompactTextString(m) }
+func (*RoadMessage) ProtoMessage() {}
+func (*RoadMessage) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8ec31f2d2a3db598, []int{8}
+}
+
+func (m *RoadMessage) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_RoadMessage.Unmarshal(m, b)
+}
+func (m *RoadMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_RoadMessage.Marshal(b, m, deterministic)
+}
+func (m *RoadMessage) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RoadMessage.Merge(m, src)
+}
+func (m *RoadMessage) XXX_Size() int {
+ return xxx_messageInfo_RoadMessage.Size(m)
+}
+func (m *RoadMessage) XXX_DiscardUnknown() {
+ xxx_messageInfo_RoadMessage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RoadMessage proto.InternalMessageInfo
+
+func (m *RoadMessage) GetContour() []*Point {
+ if m != nil {
+ return m.Contour
+ }
+ return nil
+}
+
+func (m *RoadMessage) GetEllipse() *Ellipse {
+ if m != nil {
+ return m.Ellipse
+ }
+ return nil
+}
+
+func (m *RoadMessage) GetFrameRef() *FrameRef {
+ if m != nil {
+ return m.FrameRef
+ }
+ return nil
+}
+
+type Point struct {
+ X int32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
+ Y int32 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Point) Reset() { *m = Point{} }
+func (m *Point) String() string { return proto.CompactTextString(m) }
+func (*Point) ProtoMessage() {}
+func (*Point) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8ec31f2d2a3db598, []int{9}
+}
+
+func (m *Point) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Point.Unmarshal(m, b)
+}
+func (m *Point) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Point.Marshal(b, m, deterministic)
+}
+func (m *Point) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Point.Merge(m, src)
+}
+func (m *Point) XXX_Size() int {
+ return xxx_messageInfo_Point.Size(m)
+}
+func (m *Point) XXX_DiscardUnknown() {
+ xxx_messageInfo_Point.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Point proto.InternalMessageInfo
+
+func (m *Point) GetX() int32 {
+ if m != nil {
+ return m.X
+ }
+ return 0
+}
+
+func (m *Point) GetY() int32 {
+ if m != nil {
+ return m.Y
+ }
+ return 0
+}
+
+type Ellipse struct {
+ Center *Point `protobuf:"bytes,1,opt,name=center,proto3" json:"center,omitempty"`
+ Width int32 `protobuf:"varint,2,opt,name=width,proto3" json:"width,omitempty"`
+ Height int32 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"`
+ Angle float32 `protobuf:"fixed32,4,opt,name=angle,proto3" json:"angle,omitempty"`
+ Confidence float32 `protobuf:"fixed32,5,opt,name=confidence,proto3" json:"confidence,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Ellipse) Reset() { *m = Ellipse{} }
+func (m *Ellipse) String() string { return proto.CompactTextString(m) }
+func (*Ellipse) ProtoMessage() {}
+func (*Ellipse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8ec31f2d2a3db598, []int{10}
+}
+
+func (m *Ellipse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Ellipse.Unmarshal(m, b)
+}
+func (m *Ellipse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Ellipse.Marshal(b, m, deterministic)
+}
+func (m *Ellipse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Ellipse.Merge(m, src)
+}
+func (m *Ellipse) XXX_Size() int {
+ return xxx_messageInfo_Ellipse.Size(m)
+}
+func (m *Ellipse) XXX_DiscardUnknown() {
+ xxx_messageInfo_Ellipse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Ellipse proto.InternalMessageInfo
+
+func (m *Ellipse) GetCenter() *Point {
+ if m != nil {
+ return m.Center
+ }
+ return nil
+}
+
+func (m *Ellipse) GetWidth() int32 {
+ if m != nil {
+ return m.Width
+ }
+ return 0
+}
+
+func (m *Ellipse) GetHeight() int32 {
+ if m != nil {
+ return m.Height
+ }
+ return 0
+}
+
+func (m *Ellipse) GetAngle() float32 {
+ if m != nil {
+ return m.Angle
+ }
+ return 0
+}
+
+func (m *Ellipse) GetConfidence() float32 {
+ if m != nil {
+ return m.Confidence
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterEnum("robocar.events.DriveMode", DriveMode_name, DriveMode_value)
+ proto.RegisterEnum("robocar.events.TypeObject", TypeObject_name, TypeObject_value)
+ proto.RegisterType((*FrameRef)(nil), "robocar.events.FrameRef")
+ proto.RegisterType((*FrameMessage)(nil), "robocar.events.FrameMessage")
+ proto.RegisterType((*SteeringMessage)(nil), "robocar.events.SteeringMessage")
+ proto.RegisterType((*ThrottleMessage)(nil), "robocar.events.ThrottleMessage")
+ proto.RegisterType((*DriveModeMessage)(nil), "robocar.events.DriveModeMessage")
+ proto.RegisterType((*ObjectsMessage)(nil), "robocar.events.ObjectsMessage")
+ proto.RegisterType((*Object)(nil), "robocar.events.Object")
+ proto.RegisterType((*SwitchRecordMessage)(nil), "robocar.events.SwitchRecordMessage")
+ proto.RegisterType((*RoadMessage)(nil), "robocar.events.RoadMessage")
+ proto.RegisterType((*Point)(nil), "robocar.events.Point")
+ proto.RegisterType((*Ellipse)(nil), "robocar.events.Ellipse")
+}
+
+func init() { proto.RegisterFile("events/events.proto", fileDescriptor_8ec31f2d2a3db598) }
+
+var fileDescriptor_8ec31f2d2a3db598 = []byte{
+ // 649 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x54, 0xcb, 0x6e, 0xd3, 0x5c,
+ 0x10, 0xae, 0x9d, 0x38, 0x76, 0x26, 0x55, 0x7e, 0xeb, 0xf4, 0xa7, 0x98, 0x2c, 0xa0, 0x32, 0x9b,
+ 0xa8, 0x52, 0x1d, 0x08, 0x42, 0x82, 0x65, 0x4a, 0x8b, 0x54, 0xa9, 0x97, 0xe8, 0x34, 0x45, 0x82,
+ 0x4d, 0xe4, 0xd8, 0xe3, 0xc4, 0xc8, 0xf1, 0x89, 0xec, 0xd3, 0x4b, 0xf6, 0x3c, 0x06, 0x0f, 0xc0,
+ 0x82, 0x87, 0x44, 0x3e, 0x97, 0xd0, 0x5a, 0xaa, 0x84, 0x90, 0x58, 0x65, 0xbe, 0x39, 0x33, 0xdf,
+ 0x7c, 0x73, 0x89, 0x61, 0x07, 0x6f, 0x30, 0xe7, 0xe5, 0x40, 0xfe, 0x04, 0xab, 0x82, 0x71, 0x46,
+ 0xba, 0x05, 0x9b, 0xb1, 0x28, 0x2c, 0x02, 0xe9, 0xed, 0xbd, 0x98, 0x33, 0x36, 0xcf, 0x70, 0x20,
+ 0x5e, 0x67, 0xd7, 0xc9, 0x80, 0xa7, 0x4b, 0x2c, 0x79, 0xb8, 0x5c, 0xc9, 0x04, 0x3f, 0x05, 0xe7,
+ 0x63, 0x11, 0x2e, 0x91, 0x62, 0x42, 0x08, 0x34, 0xf3, 0x70, 0x89, 0x9e, 0xb1, 0x67, 0xf4, 0xdb,
+ 0x54, 0xd8, 0xa4, 0x0b, 0x66, 0x1a, 0x7b, 0xa6, 0xf0, 0x98, 0x69, 0x4c, 0xde, 0x03, 0x44, 0x05,
+ 0x86, 0x1c, 0xe3, 0x69, 0xc8, 0xbd, 0xc6, 0x9e, 0xd1, 0xef, 0x0c, 0x7b, 0x81, 0xac, 0x12, 0xe8,
+ 0x2a, 0xc1, 0x44, 0x57, 0xa1, 0x6d, 0x15, 0x3d, 0xe2, 0xfe, 0x39, 0x6c, 0x8b, 0x52, 0x67, 0x58,
+ 0x96, 0xe1, 0x1c, 0x49, 0x5f, 0x50, 0x1b, 0x82, 0xc2, 0x0b, 0x1e, 0x0a, 0x0f, 0xb4, 0x28, 0x51,
+ 0xf4, 0x7f, 0xb0, 0x92, 0x0a, 0x0b, 0x1d, 0xdb, 0x54, 0x02, 0xff, 0x9b, 0x01, 0xff, 0x5d, 0x72,
+ 0xc4, 0x22, 0xcd, 0xe7, 0x9a, 0xb3, 0x07, 0x4e, 0xa9, 0x5c, 0x82, 0xd9, 0xa4, 0x1b, 0x4c, 0x9e,
+ 0x03, 0x44, 0x2c, 0x4f, 0xd2, 0x18, 0xf3, 0x48, 0x52, 0x99, 0xf4, 0x9e, 0x87, 0xbc, 0x85, 0xb6,
+ 0x20, 0x9e, 0x16, 0x98, 0xa8, 0xce, 0x1e, 0x97, 0xe5, 0x24, 0xca, 0x12, 0x32, 0x26, 0x8b, 0x82,
+ 0x71, 0x9e, 0xe1, 0x3d, 0x19, 0x5c, 0xb9, 0xb4, 0x0c, 0x8d, 0xff, 0x95, 0x8c, 0x53, 0x70, 0x8f,
+ 0x8a, 0xf4, 0x06, 0xcf, 0x58, 0xbc, 0x91, 0xf1, 0x0e, 0x20, 0xae, 0x7c, 0xd3, 0x25, 0x8b, 0xa5,
+ 0x90, 0xee, 0xf0, 0x59, 0x9d, 0x6b, 0x93, 0x45, 0xdb, 0xb1, 0x36, 0xfd, 0x35, 0x74, 0x2f, 0x66,
+ 0x5f, 0x31, 0xe2, 0xa5, 0xe6, 0x7a, 0x05, 0x36, 0x93, 0x1e, 0xcf, 0xd8, 0x6b, 0xf4, 0x3b, 0xc3,
+ 0xdd, 0x3a, 0x91, 0x4c, 0xa0, 0x3a, 0xec, 0x61, 0x23, 0xe6, 0x1f, 0x37, 0xf2, 0xd3, 0x80, 0x96,
+ 0xa4, 0x22, 0x01, 0x34, 0xf9, 0x7a, 0xa5, 0x95, 0xf7, 0xea, 0xc9, 0x93, 0xf5, 0x0a, 0x55, 0x51,
+ 0x11, 0x57, 0x1d, 0x70, 0x86, 0x09, 0x17, 0xc5, 0x2c, 0x2a, 0x6c, 0xe2, 0x42, 0x83, 0xb3, 0x95,
+ 0x18, 0xa4, 0x45, 0x2b, 0xb3, 0xba, 0xa6, 0x22, 0x9d, 0x2f, 0xb8, 0xd7, 0x14, 0x3e, 0x09, 0xc8,
+ 0x2e, 0xb4, 0x66, 0x8c, 0x73, 0xb6, 0xf4, 0x2c, 0xe1, 0x56, 0xa8, 0xb6, 0xae, 0x56, 0x7d, 0x5d,
+ 0xfe, 0x00, 0x76, 0x2e, 0x6f, 0x53, 0x1e, 0x2d, 0x28, 0x46, 0xac, 0x88, 0xf5, 0xb8, 0x3c, 0xb0,
+ 0x31, 0x0f, 0x67, 0x19, 0xca, 0x0b, 0x77, 0xa8, 0x86, 0xfe, 0x0f, 0x03, 0x3a, 0x94, 0x85, 0x9b,
+ 0xc8, 0x01, 0xd8, 0x11, 0xcb, 0x39, 0xbb, 0x2e, 0xd4, 0x60, 0x9f, 0xd4, 0xfb, 0x1c, 0xb3, 0x34,
+ 0xe7, 0x54, 0x47, 0x91, 0xd7, 0x60, 0x63, 0x96, 0xa5, 0xab, 0x12, 0xd5, 0x54, 0x9f, 0xd6, 0x13,
+ 0x8e, 0xe5, 0x33, 0xd5, 0x71, 0x7f, 0x7b, 0x53, 0x2f, 0xc1, 0x12, 0xb5, 0xc9, 0x36, 0x18, 0x77,
+ 0xa2, 0x0f, 0x8b, 0x1a, 0x77, 0x15, 0x5a, 0xab, 0x19, 0x1b, 0x6b, 0xff, 0xbb, 0x01, 0xb6, 0x2a,
+ 0x48, 0x0e, 0xa0, 0x15, 0x61, 0xce, 0xb1, 0x50, 0x7f, 0xeb, 0x47, 0x5a, 0x51, 0x41, 0xd5, 0x26,
+ 0x6e, 0xd3, 0x98, 0x2f, 0x14, 0x99, 0x04, 0xd5, 0x26, 0x16, 0x28, 0x16, 0x24, 0x97, 0xa6, 0x50,
+ 0x15, 0x1d, 0xe6, 0xf3, 0x0c, 0xc5, 0xde, 0x4c, 0x2a, 0x41, 0x6d, 0x3f, 0x56, 0x7d, 0x3f, 0xfb,
+ 0x07, 0xd0, 0xde, 0x5c, 0x38, 0xe9, 0x80, 0x7d, 0x72, 0xfe, 0x69, 0x74, 0x7a, 0x72, 0xe4, 0x6e,
+ 0x11, 0x07, 0x9a, 0x57, 0x97, 0xc7, 0xd4, 0x35, 0x48, 0x1b, 0xac, 0xf1, 0xc9, 0xe9, 0xc5, 0xc4,
+ 0x35, 0xf7, 0x87, 0x00, 0xbf, 0xcf, 0x8a, 0xd8, 0xd0, 0x18, 0x9d, 0x7f, 0x76, 0xb7, 0x2a, 0xe3,
+ 0xc3, 0xa8, 0x0a, 0x75, 0xa0, 0x79, 0x78, 0x75, 0x36, 0x76, 0xcd, 0xca, 0x1a, 0x57, 0x39, 0x8d,
+ 0x43, 0xe7, 0x4b, 0x4b, 0xb6, 0x37, 0x6b, 0x89, 0x2f, 0xe0, 0x9b, 0x5f, 0x01, 0x00, 0x00, 0xff,
+ 0xff, 0xed, 0xb2, 0x58, 0x43, 0x9c, 0x05, 0x00, 0x00,
+}
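A sketch of how one of the generated messages above can be built and serialized before publication; all field values are illustrative.

```go
package main

import (
    "log"

    "github.com/cyrilix/robocar-protobuf/go/events"
    "github.com/golang/protobuf/proto"
)

func main() {
    msg := &events.RoadMessage{
        Contour:  []*events.Point{{X: 10, Y: 20}, {X: 30, Y: 40}},
        Ellipse:  &events.Ellipse{Center: &events.Point{X: 20, Y: 30}, Width: 5, Height: 8, Angle: 0.3, Confidence: 0.9},
        FrameRef: &events.FrameRef{Name: "camera", Id: "frame-1"},
    }
    payload, err := proto.Marshal(msg)
    if err != nil {
        log.Fatalf("unable to marshal road message: %v", err)
    }
    // payload is now ready to be published, e.g. with client.Publish(topic, qos, retain, payload).
    log.Printf("road message is %d bytes", len(payload))
}
```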
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/.gitignore b/vendor/github.com/eclipse/paho.mqtt.golang/.gitignore
new file mode 100644
index 0000000..47bb0de
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/.gitignore
@@ -0,0 +1,36 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+*.msg
+*.lok
+
+samples/trivial
+samples/trivial2
+samples/sample
+samples/reconnect
+samples/ssl
+samples/custom_store
+samples/simple
+samples/stdinpub
+samples/stdoutsub
+samples/routing
\ No newline at end of file
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/CONTRIBUTING.md b/vendor/github.com/eclipse/paho.mqtt.golang/CONTRIBUTING.md
new file mode 100644
index 0000000..9791dc6
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/CONTRIBUTING.md
@@ -0,0 +1,56 @@
+Contributing to Paho
+====================
+
+Thanks for your interest in this project.
+
+Project description:
+--------------------
+
+The Paho project has been created to provide scalable open-source implementations of open and standard messaging protocols aimed at new, existing, and emerging applications for Machine-to-Machine (M2M) and Internet of Things (IoT).
+Paho reflects the inherent physical and cost constraints of device connectivity. Its objectives include effective levels of decoupling between devices and applications, designed to keep markets open and encourage the rapid growth of scalable Web and Enterprise middleware and applications. Paho is being kicked off with MQTT publish/subscribe client implementations for use on embedded platforms, along with corresponding server support as determined by the community.
+
+- https://projects.eclipse.org/projects/technology.paho
+
+Developer resources:
+--------------------
+
+Information regarding source code management, builds, coding standards, and more.
+
+- https://projects.eclipse.org/projects/technology.paho/developer
+
+Contributor License Agreement:
+------------------------------
+
+Before your contribution can be accepted by the project, you need to create and electronically sign the Eclipse Foundation Contributor License Agreement (CLA).
+
+- http://www.eclipse.org/legal/CLA.php
+
+Contributing Code:
+------------------
+
+The Go client is developed on GitHub; see GitHub's documentation on the process of forking and pull requests: https://help.github.com/categories/collaborating-on-projects-using-pull-requests/
+
+Git commit messages should follow the style described here;
+
+http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
+
+Contact:
+--------
+
+Contact the project developers via the project's "dev" list.
+
+- https://dev.eclipse.org/mailman/listinfo/paho-dev
+
+Search for bugs:
+----------------
+
+This project uses Github issues to track ongoing development and issues.
+
+- https://github.com/eclipse/paho.mqtt.golang/issues
+
+Create a new bug:
+-----------------
+
+Be sure to search for existing bugs before you create another one. Remember that contributions are always welcome!
+
+- https://github.com/eclipse/paho.mqtt.golang/issues
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/DISTRIBUTION b/vendor/github.com/eclipse/paho.mqtt.golang/DISTRIBUTION
new file mode 100644
index 0000000..34e4973
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/DISTRIBUTION
@@ -0,0 +1,15 @@
+
+
+Eclipse Distribution License - v 1.0
+
+Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors.
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+ Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+ Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+ Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/LICENSE b/vendor/github.com/eclipse/paho.mqtt.golang/LICENSE
new file mode 100644
index 0000000..aa7cc81
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/LICENSE
@@ -0,0 +1,87 @@
+Eclipse Public License - v 1.0
+
+THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
+
+1. DEFINITIONS
+
+"Contribution" means:
+
+a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and
+
+b) in the case of each subsequent Contributor:
+
+i) changes to the Program, and
+
+ii) additions to the Program;
+
+where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program.
+
+"Contributor" means any person or entity that distributes the Program.
+
+"Licensed Patents" mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program.
+
+"Program" means the Contributions distributed in accordance with this Agreement.
+
+"Recipient" means anyone who receives the Program under this Agreement, including all Contributors.
+
+2. GRANT OF RIGHTS
+
+a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form.
+
+b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder.
+
+c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program.
+
+d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement.
+
+3. REQUIREMENTS
+
+A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that:
+
+a) it complies with the terms and conditions of this Agreement; and
+
+b) its license agreement:
+
+i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose;
+
+ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits;
+
+iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and
+
+iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange.
+
+When the Program is made available in source code form:
+
+a) it must be made available under this Agreement; and
+
+b) a copy of this Agreement must be included with each copy of the Program.
+
+Contributors may not remove or alter any copyright notices contained within the Program.
+
+Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution.
+
+4. COMMERCIAL DISTRIBUTION
+
+Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense.
+
+For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages.
+
+5. NO WARRANTY
+
+EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement , including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations.
+
+6. DISCLAIMER OF LIABILITY
+
+EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+
+7. GENERAL
+
+If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable.
+
+If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed.
+
+All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive.
+
+Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved.
+
+This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation.
\ No newline at end of file
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/README.md b/vendor/github.com/eclipse/paho.mqtt.golang/README.md
new file mode 100644
index 0000000..81c7148
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/README.md
@@ -0,0 +1,67 @@
+
+[GoDoc](https://godoc.org/github.com/eclipse/paho.mqtt.golang)
+[Go Report Card](https://goreportcard.com/report/github.com/eclipse/paho.mqtt.golang)
+
+Eclipse Paho MQTT Go client
+===========================
+
+
+This repository contains the source code for the [Eclipse Paho](http://eclipse.org/paho) MQTT Go client library.
+
+This code builds a library which enables applications to connect to an [MQTT](http://mqtt.org) broker to publish messages, and to subscribe to topics and receive published messages.
+
+This library supports a fully asynchronous mode of operation.
+
+
+Installation and Build
+----------------------
+
+This client is designed to work with the standard Go tools, so installation is as easy as:
+
+```
+go get github.com/eclipse/paho.mqtt.golang
+```
+
+The client depends on Google's [websockets](https://godoc.org/golang.org/x/net/websocket) and [proxy](https://godoc.org/golang.org/x/net/proxy) packages,
+which are also easily installed with the commands:
+
+```
+go get golang.org/x/net/websocket
+go get golang.org/x/net/proxy
+```
+
+
+Usage and API
+-------------
+
+Detailed API documentation is available by using the godoc tool, or can be browsed online
+using the [godoc.org](http://godoc.org/github.com/eclipse/paho.mqtt.golang) service.
+
+Make use of the library by importing it in your Go client source code. For example,
+```
+import "github.com/eclipse/paho.mqtt.golang"
+```
+
+Samples are available in the `cmd` directory for reference.
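+
+A minimal subscribe example (broker address, client id and topic are illustrative):
+
+```
+package main
+
+import (
+    "fmt"
+
+    mqtt "github.com/eclipse/paho.mqtt.golang"
+)
+
+func main() {
+    opts := mqtt.NewClientOptions().AddBroker("tcp://localhost:1883").SetClientID("sample")
+    client := mqtt.NewClient(opts)
+    if token := client.Connect(); token.Wait() && token.Error() != nil {
+        panic(token.Error())
+    }
+    if token := client.Subscribe("some/topic", 0, func(_ mqtt.Client, m mqtt.Message) {
+        fmt.Printf("%s: %s\n", m.Topic(), m.Payload())
+    }); token.Wait() && token.Error() != nil {
+        panic(token.Error())
+    }
+    select {} // block forever; messages are handled in the callback
+}
+```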
+
+
+Runtime tracing
+---------------
+
+Tracing is enabled by assigning loggers (from the Go log package) to the logging endpoints ERROR, CRITICAL, WARN and DEBUG.
+
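+For example, to route client diagnostics to stderr (the logger configuration shown is just one possibility):
+
+```
+mqtt.ERROR = log.New(os.Stderr, "[ERROR] ", 0)
+mqtt.CRITICAL = log.New(os.Stderr, "[CRIT] ", 0)
+mqtt.WARN = log.New(os.Stderr, "[WARN] ", 0)
+mqtt.DEBUG = log.New(os.Stderr, "[DEBUG] ", 0)
+```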
+
+Reporting bugs
+--------------
+
+Please report bugs by raising issues for this project on GitHub: https://github.com/eclipse/paho.mqtt.golang/issues
+
+
+More information
+----------------
+
+Discussion of the Paho clients takes place on the [Eclipse paho-dev mailing list](https://dev.eclipse.org/mailman/listinfo/paho-dev).
+
+General questions about the MQTT protocol are discussed in the [MQTT Google Group](https://groups.google.com/forum/?hl=en-US&fromgroups#!forum/mqtt).
+
+There is much more information available via the [MQTT community site](http://mqtt.org).
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/about.html b/vendor/github.com/eclipse/paho.mqtt.golang/about.html
new file mode 100644
index 0000000..b183f41
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/about.html
@@ -0,0 +1,41 @@
+
+
+
+About
+
+
+About This Content
+
+December 9, 2013
+License
+
+The Eclipse Foundation makes available all content in this plug-in ("Content"). Unless otherwise
+indicated below, the Content is provided to you under the terms and conditions of the
+Eclipse Public License Version 1.0 ("EPL") and Eclipse Distribution License Version 1.0 ("EDL").
+A copy of the EPL is available at
+http://www.eclipse.org/legal/epl-v10.html
+and a copy of the EDL is available at
+http://www.eclipse.org/org/documents/edl-v10.php.
+For purposes of the EPL, "Program" will mean the Content.
+
+If you did not receive this Content directly from the Eclipse Foundation, the Content is
+being redistributed by another party ("Redistributor") and different terms and conditions may
+apply to your use of any object code in the Content. Check the Redistributor's license that was
+provided with the Content. If no such license exists, contact the Redistributor. Unless otherwise
+indicated below, the terms and conditions of the EPL still apply to any source code in the Content
+and such source code may be obtained at http://www.eclipse.org.
+
+
+ Third Party Content
+ The Content includes items that have been sourced from third parties as set out below. If you
+ did not receive this Content directly from the Eclipse Foundation, the following is provided
+ for informational purposes only, and you should look to the Redistributor's license for
+ terms and conditions of use.
+
+ None
+
+
+
+
+
+
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/client.go b/vendor/github.com/eclipse/paho.mqtt.golang/client.go
new file mode 100644
index 0000000..24d56c1
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/client.go
@@ -0,0 +1,759 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Seth Hoenig
+ * Allan Stockdill-Mander
+ * Mike Robertson
+ */
+
+// Portions copyright © 2018 TIBCO Software Inc.
+
+// Package mqtt provides an MQTT v3.1.1 client library.
+package mqtt
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/eclipse/paho.mqtt.golang/packets"
+)
+
+const (
+ disconnected uint32 = iota
+ connecting
+ reconnecting
+ connected
+)
+
+// Client is the interface definition for a Client as used by this
+// library, the interface is primarily to allow mocking tests.
+//
+// It is an MQTT v3.1.1 client for communicating
+// with an MQTT server using non-blocking methods that allow work
+// to be done in the background.
+// An application may connect to an MQTT server using:
+// A plain TCP socket
+// A secure SSL/TLS socket
+// A websocket
+// To enable ensured message delivery at Quality of Service (QoS) levels
+// described in the MQTT spec, a message persistence mechanism must be
+// used. This is done by providing a type which implements the Store
+// interface. For convenience, FileStore and MemoryStore are provided
+// implementations that should be sufficient for most use cases. More
+// information can be found in their respective documentation.
+// Numerous connection options may be specified by configuring a
+// ClientOptions type and supplying it to NewClient.
+type Client interface {
+ // IsConnected returns a bool signifying whether
+ // the client is connected or not.
+ IsConnected() bool
+	// IsConnectionOpen returns a bool signifying whether the client has an active
+	// connection to the mqtt broker, i.e. it is not in disconnected or reconnect mode
+ IsConnectionOpen() bool
+ // Connect will create a connection to the message broker, by default
+ // it will attempt to connect at v3.1.1 and auto retry at v3.1 if that
+ // fails
+ Connect() Token
+	// Disconnect will end the connection with the server, but not before waiting
+	// the specified number of milliseconds for existing work to be completed.
+ Disconnect(quiesce uint)
+ // Publish will publish a message with the specified QoS and content
+ // to the specified topic.
+ // Returns a token to track delivery of the message to the broker
+ Publish(topic string, qos byte, retained bool, payload interface{}) Token
+ // Subscribe starts a new subscription. Provide a MessageHandler to be executed when
+ // a message is published on the topic provided, or nil for the default handler
+ Subscribe(topic string, qos byte, callback MessageHandler) Token
+ // SubscribeMultiple starts a new subscription for multiple topics. Provide a MessageHandler to
+ // be executed when a message is published on one of the topics provided, or nil for the
+ // default handler
+ SubscribeMultiple(filters map[string]byte, callback MessageHandler) Token
+ // Unsubscribe will end the subscription from each of the topics provided.
+ // Messages published to those topics from other clients will no longer be
+ // received.
+ Unsubscribe(topics ...string) Token
+ // AddRoute allows you to add a handler for messages on a specific topic
+ // without making a subscription. For example having a different handler
+ // for parts of a wildcard subscription
+ AddRoute(topic string, callback MessageHandler)
+ // OptionsReader returns a ClientOptionsReader which is a copy of the clientoptions
+ // in use by the client.
+ OptionsReader() ClientOptionsReader
+}
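+
+// For illustration only, a sketch of combining Subscribe and AddRoute so that
+// one branch of a wildcard subscription gets a dedicated handler (tempHandler
+// stands for any MessageHandler supplied by the application):
+//
+//	c.Subscribe("sensors/#", 0, nil)               // nil uses the default handler
+//	c.AddRoute("sensors/temperature", tempHandler) // dedicated handler for one branch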
+
+// client implements the Client interface
+type client struct {
+ lastSent atomic.Value
+ lastReceived atomic.Value
+ pingOutstanding int32
+ status uint32
+ sync.RWMutex
+ messageIds
+ conn net.Conn
+ ibound chan packets.ControlPacket
+ obound chan *PacketAndToken
+ oboundP chan *PacketAndToken
+ msgRouter *router
+ stopRouter chan bool
+ incomingPubChan chan *packets.PublishPacket
+ errors chan error
+ stop chan struct{}
+ persist Store
+ options ClientOptions
+ workers sync.WaitGroup
+}
+
+// NewClient will create an MQTT v3.1.1 client with all of the options specified
+// in the provided ClientOptions. The client must have the Connect method called
+// on it before it may be used. This is to make sure resources (such as a net
+// connection) are created before the application is actually ready.
+func NewClient(o *ClientOptions) Client {
+ c := &client{}
+ c.options = *o
+
+ if c.options.Store == nil {
+ c.options.Store = NewMemoryStore()
+ }
+ switch c.options.ProtocolVersion {
+ case 3, 4:
+ c.options.protocolVersionExplicit = true
+ case 0x83, 0x84:
+ c.options.protocolVersionExplicit = true
+ default:
+ c.options.ProtocolVersion = 4
+ c.options.protocolVersionExplicit = false
+ }
+ c.persist = c.options.Store
+ c.status = disconnected
+ c.messageIds = messageIds{index: make(map[uint16]tokenCompletor)}
+ c.msgRouter, c.stopRouter = newRouter()
+ c.msgRouter.setDefaultHandler(c.options.DefaultPublishHandler)
+ if !c.options.AutoReconnect {
+ c.options.MessageChannelDepth = 0
+ }
+ return c
+}
+
+// AddRoute allows you to add a handler for messages on a specific topic
+// without making a subscription. For example having a different handler
+// for parts of a wildcard subscription
+func (c *client) AddRoute(topic string, callback MessageHandler) {
+ if callback != nil {
+ c.msgRouter.addRoute(topic, callback)
+ }
+}
+
+// IsConnected returns a bool signifying whether
+// the client is connected or not.
+func (c *client) IsConnected() bool {
+ c.RLock()
+ defer c.RUnlock()
+ status := atomic.LoadUint32(&c.status)
+ switch {
+ case status == connected:
+ return true
+ case c.options.AutoReconnect && status > connecting:
+ return true
+ default:
+ return false
+ }
+}
+
+// IsConnectionOpen returns a bool signifying whether the client has an active
+// connection to the mqtt broker, i.e. it is not in disconnected or reconnect mode
+func (c *client) IsConnectionOpen() bool {
+ c.RLock()
+ defer c.RUnlock()
+ status := atomic.LoadUint32(&c.status)
+ switch {
+ case status == connected:
+ return true
+ default:
+ return false
+ }
+}
+
+func (c *client) connectionStatus() uint32 {
+ c.RLock()
+ defer c.RUnlock()
+ status := atomic.LoadUint32(&c.status)
+ return status
+}
+
+func (c *client) setConnected(status uint32) {
+ c.Lock()
+ defer c.Unlock()
+ atomic.StoreUint32(&c.status, uint32(status))
+}
+
+// ErrNotConnected is the error returned from function calls that are
+// made when the client is not connected to a broker
+var ErrNotConnected = errors.New("Not Connected")
+
+// Connect will create a connection to the message broker, by default
+// it will attempt to connect at v3.1.1 and auto retry at v3.1 if that
+// fails
+func (c *client) Connect() Token {
+ var err error
+ t := newToken(packets.Connect).(*ConnectToken)
+ DEBUG.Println(CLI, "Connect()")
+
+ c.obound = make(chan *PacketAndToken, c.options.MessageChannelDepth)
+ c.oboundP = make(chan *PacketAndToken, c.options.MessageChannelDepth)
+ c.ibound = make(chan packets.ControlPacket)
+
+ go func() {
+ c.persist.Open()
+
+ c.setConnected(connecting)
+ c.errors = make(chan error, 1)
+ c.stop = make(chan struct{})
+
+ var rc byte
+ protocolVersion := c.options.ProtocolVersion
+
+ if len(c.options.Servers) == 0 {
+ t.setError(fmt.Errorf("No servers defined to connect to"))
+ return
+ }
+
+ for _, broker := range c.options.Servers {
+ cm := newConnectMsgFromOptions(&c.options, broker)
+ c.options.ProtocolVersion = protocolVersion
+ CONN:
+ DEBUG.Println(CLI, "about to write new connect msg")
+ c.conn, err = openConnection(broker, c.options.TLSConfig, c.options.ConnectTimeout, c.options.HTTPHeaders)
+ if err == nil {
+ DEBUG.Println(CLI, "socket connected to broker")
+ switch c.options.ProtocolVersion {
+ case 3:
+ DEBUG.Println(CLI, "Using MQTT 3.1 protocol")
+ cm.ProtocolName = "MQIsdp"
+ cm.ProtocolVersion = 3
+ case 0x83:
+ DEBUG.Println(CLI, "Using MQTT 3.1b protocol")
+ cm.ProtocolName = "MQIsdp"
+ cm.ProtocolVersion = 0x83
+ case 0x84:
+ DEBUG.Println(CLI, "Using MQTT 3.1.1b protocol")
+ cm.ProtocolName = "MQTT"
+ cm.ProtocolVersion = 0x84
+ default:
+ DEBUG.Println(CLI, "Using MQTT 3.1.1 protocol")
+ c.options.ProtocolVersion = 4
+ cm.ProtocolName = "MQTT"
+ cm.ProtocolVersion = 4
+ }
+ cm.Write(c.conn)
+
+ rc, t.sessionPresent = c.connect()
+ if rc != packets.Accepted {
+ if c.conn != nil {
+ c.conn.Close()
+ c.conn = nil
+ }
+ //if the protocol version was explicitly set don't do any fallback
+ if c.options.protocolVersionExplicit {
+ ERROR.Println(CLI, "Connecting to", broker, "CONNACK was not CONN_ACCEPTED, but rather", packets.ConnackReturnCodes[rc])
+ continue
+ }
+ if c.options.ProtocolVersion == 4 {
+ DEBUG.Println(CLI, "Trying reconnect using MQTT 3.1 protocol")
+ c.options.ProtocolVersion = 3
+ goto CONN
+ }
+ }
+ break
+ } else {
+ ERROR.Println(CLI, err.Error())
+ WARN.Println(CLI, "failed to connect to broker, trying next")
+ rc = packets.ErrNetworkError
+ }
+ }
+
+ if c.conn == nil {
+ ERROR.Println(CLI, "Failed to connect to a broker")
+ c.setConnected(disconnected)
+ c.persist.Close()
+ t.returnCode = rc
+ if rc != packets.ErrNetworkError {
+ t.setError(packets.ConnErrors[rc])
+ } else {
+ t.setError(fmt.Errorf("%s : %s", packets.ConnErrors[rc], err))
+ }
+ return
+ }
+
+ c.options.protocolVersionExplicit = true
+
+ if c.options.KeepAlive != 0 {
+ atomic.StoreInt32(&c.pingOutstanding, 0)
+ c.lastReceived.Store(time.Now())
+ c.lastSent.Store(time.Now())
+ c.workers.Add(1)
+ go keepalive(c)
+ }
+
+ c.incomingPubChan = make(chan *packets.PublishPacket, c.options.MessageChannelDepth)
+ c.msgRouter.matchAndDispatch(c.incomingPubChan, c.options.Order, c)
+
+ c.setConnected(connected)
+ DEBUG.Println(CLI, "client is connected")
+ if c.options.OnConnect != nil {
+ go c.options.OnConnect(c)
+ }
+
+ c.workers.Add(4)
+ go errorWatch(c)
+ go alllogic(c)
+ go outgoing(c)
+ go incoming(c)
+
+ // Take care of any messages in the store
+		if !c.options.CleanSession {
+ c.resume(c.options.ResumeSubs)
+ } else {
+ c.persist.Reset()
+ }
+
+ DEBUG.Println(CLI, "exit startClient")
+ t.flowComplete()
+ }()
+ return t
+}
+
+// internal function used to reconnect the client when it loses its connection
+func (c *client) reconnect() {
+ DEBUG.Println(CLI, "enter reconnect")
+ var (
+ err error
+
+ rc = byte(1)
+ sleep = time.Duration(1 * time.Second)
+ )
+
+ for rc != 0 && atomic.LoadUint32(&c.status) != disconnected {
+ for _, broker := range c.options.Servers {
+ cm := newConnectMsgFromOptions(&c.options, broker)
+ DEBUG.Println(CLI, "about to write new connect msg")
+ c.Lock()
+ c.conn, err = openConnection(broker, c.options.TLSConfig, c.options.ConnectTimeout, c.options.HTTPHeaders)
+ c.Unlock()
+ if err == nil {
+ DEBUG.Println(CLI, "socket connected to broker")
+ switch c.options.ProtocolVersion {
+ case 0x83:
+ DEBUG.Println(CLI, "Using MQTT 3.1b protocol")
+ cm.ProtocolName = "MQIsdp"
+ cm.ProtocolVersion = 0x83
+ case 0x84:
+ DEBUG.Println(CLI, "Using MQTT 3.1.1b protocol")
+ cm.ProtocolName = "MQTT"
+ cm.ProtocolVersion = 0x84
+ case 3:
+ DEBUG.Println(CLI, "Using MQTT 3.1 protocol")
+ cm.ProtocolName = "MQIsdp"
+ cm.ProtocolVersion = 3
+ default:
+ DEBUG.Println(CLI, "Using MQTT 3.1.1 protocol")
+ cm.ProtocolName = "MQTT"
+ cm.ProtocolVersion = 4
+ }
+ cm.Write(c.conn)
+
+ rc, _ = c.connect()
+ if rc != packets.Accepted {
+ c.conn.Close()
+ c.conn = nil
+ //if the protocol version was explicitly set don't do any fallback
+ if c.options.protocolVersionExplicit {
+ ERROR.Println(CLI, "Connecting to", broker, "CONNACK was not Accepted, but rather", packets.ConnackReturnCodes[rc])
+ continue
+ }
+ }
+ break
+ } else {
+ ERROR.Println(CLI, err.Error())
+ WARN.Println(CLI, "failed to connect to broker, trying next")
+ rc = packets.ErrNetworkError
+ }
+ }
+ if rc != 0 {
+ DEBUG.Println(CLI, "Reconnect failed, sleeping for", int(sleep.Seconds()), "seconds")
+ time.Sleep(sleep)
+ if sleep < c.options.MaxReconnectInterval {
+ sleep *= 2
+ }
+
+ if sleep > c.options.MaxReconnectInterval {
+ sleep = c.options.MaxReconnectInterval
+ }
+ }
+ }
+ // Disconnect() must have been called while we were trying to reconnect.
+ if c.connectionStatus() == disconnected {
+ DEBUG.Println(CLI, "Client moved to disconnected state while reconnecting, abandoning reconnect")
+ return
+ }
+
+ c.stop = make(chan struct{})
+
+ if c.options.KeepAlive != 0 {
+ atomic.StoreInt32(&c.pingOutstanding, 0)
+ c.lastReceived.Store(time.Now())
+ c.lastSent.Store(time.Now())
+ c.workers.Add(1)
+ go keepalive(c)
+ }
+
+ c.setConnected(connected)
+ DEBUG.Println(CLI, "client is reconnected")
+ if c.options.OnConnect != nil {
+ go c.options.OnConnect(c)
+ }
+
+ c.workers.Add(4)
+ go errorWatch(c)
+ go alllogic(c)
+ go outgoing(c)
+ go incoming(c)
+
+ c.resume(false)
+}
+
+// This function is only used for receiving a connack
+// when the connection is first started.
+// This prevents receiving incoming data while resume
+// is in progress if clean session is false.
+func (c *client) connect() (byte, bool) {
+ DEBUG.Println(NET, "connect started")
+
+ ca, err := packets.ReadPacket(c.conn)
+ if err != nil {
+ ERROR.Println(NET, "connect got error", err)
+ return packets.ErrNetworkError, false
+ }
+ if ca == nil {
+ ERROR.Println(NET, "received nil packet")
+ return packets.ErrNetworkError, false
+ }
+
+ msg, ok := ca.(*packets.ConnackPacket)
+ if !ok {
+ ERROR.Println(NET, "received msg that was not CONNACK")
+ return packets.ErrNetworkError, false
+ }
+
+ DEBUG.Println(NET, "received connack")
+ return msg.ReturnCode, msg.SessionPresent
+}
+
+// Disconnect will end the connection with the server, but not before waiting
+// the specified number of milliseconds for existing work to be completed.
+func (c *client) Disconnect(quiesce uint) {
+ status := atomic.LoadUint32(&c.status)
+ if status == connected {
+ DEBUG.Println(CLI, "disconnecting")
+ c.setConnected(disconnected)
+
+ dm := packets.NewControlPacket(packets.Disconnect).(*packets.DisconnectPacket)
+ dt := newToken(packets.Disconnect)
+ c.oboundP <- &PacketAndToken{p: dm, t: dt}
+
+ // wait for work to finish, or quiesce time consumed
+ dt.WaitTimeout(time.Duration(quiesce) * time.Millisecond)
+ } else {
+ WARN.Println(CLI, "Disconnect() called but not connected (disconnected/reconnecting)")
+ c.setConnected(disconnected)
+ }
+
+ c.disconnect()
+}
+
+// ForceDisconnect will end the connection with the mqtt broker immediately.
+func (c *client) forceDisconnect() {
+ if !c.IsConnected() {
+ WARN.Println(CLI, "already disconnected")
+ return
+ }
+ c.setConnected(disconnected)
+ c.conn.Close()
+ DEBUG.Println(CLI, "forcefully disconnecting")
+ c.disconnect()
+}
+
+func (c *client) internalConnLost(err error) {
+ // Only do anything if this was called and we are still "connected"
+ // forceDisconnect can cause incoming/outgoing/alllogic to end with
+ // error from closing the socket but state will be "disconnected"
+ if c.IsConnected() {
+ c.closeStop()
+ c.conn.Close()
+ c.workers.Wait()
+ if c.options.CleanSession && !c.options.AutoReconnect {
+ c.messageIds.cleanUp()
+ }
+ if c.options.AutoReconnect {
+ c.setConnected(reconnecting)
+ go c.reconnect()
+ } else {
+ c.setConnected(disconnected)
+ }
+ if c.options.OnConnectionLost != nil {
+ go c.options.OnConnectionLost(c, err)
+ }
+ }
+}
+
+func (c *client) closeStop() {
+ c.Lock()
+ defer c.Unlock()
+ select {
+ case <-c.stop:
+ DEBUG.Println("In disconnect and stop channel is already closed")
+ default:
+ if c.stop != nil {
+ close(c.stop)
+ }
+ }
+}
+
+func (c *client) closeStopRouter() {
+ c.Lock()
+ defer c.Unlock()
+ select {
+ case <-c.stopRouter:
+ DEBUG.Println("In disconnect and stop channel is already closed")
+ default:
+ if c.stopRouter != nil {
+ close(c.stopRouter)
+ }
+ }
+}
+
+func (c *client) closeConn() {
+ c.Lock()
+ defer c.Unlock()
+ if c.conn != nil {
+ c.conn.Close()
+ }
+}
+
+func (c *client) disconnect() {
+ c.closeStop()
+ c.closeConn()
+ c.workers.Wait()
+ c.messageIds.cleanUp()
+ c.closeStopRouter()
+ DEBUG.Println(CLI, "disconnected")
+ c.persist.Close()
+}
+
+// Publish will publish a message with the specified QoS and content
+// to the specified topic.
+// Returns a token to track delivery of the message to the broker
+func (c *client) Publish(topic string, qos byte, retained bool, payload interface{}) Token {
+ token := newToken(packets.Publish).(*PublishToken)
+ DEBUG.Println(CLI, "enter Publish")
+ switch {
+ case !c.IsConnected():
+ token.setError(ErrNotConnected)
+ return token
+ case c.connectionStatus() == reconnecting && qos == 0:
+ token.flowComplete()
+ return token
+ }
+ pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
+ pub.Qos = qos
+ pub.TopicName = topic
+ pub.Retain = retained
+ switch payload.(type) {
+ case string:
+ pub.Payload = []byte(payload.(string))
+ case []byte:
+ pub.Payload = payload.([]byte)
+ default:
+ token.setError(fmt.Errorf("Unknown payload type"))
+ return token
+ }
+
+ if pub.Qos != 0 && pub.MessageID == 0 {
+ pub.MessageID = c.getID(token)
+ token.messageID = pub.MessageID
+ }
+ persistOutbound(c.persist, pub)
+ if c.connectionStatus() == reconnecting {
+ DEBUG.Println(CLI, "storing publish message (reconnecting), topic:", topic)
+ } else {
+ DEBUG.Println(CLI, "sending publish message, topic:", topic)
+ c.obound <- &PacketAndToken{p: pub, t: token}
+ }
+ return token
+}
+
+// Subscribe starts a new subscription. Provide a MessageHandler to be executed when
+// a message is published on the topic provided.
+func (c *client) Subscribe(topic string, qos byte, callback MessageHandler) Token {
+ token := newToken(packets.Subscribe).(*SubscribeToken)
+ DEBUG.Println(CLI, "enter Subscribe")
+ if !c.IsConnected() {
+ token.setError(ErrNotConnected)
+ return token
+ }
+ sub := packets.NewControlPacket(packets.Subscribe).(*packets.SubscribePacket)
+ if err := validateTopicAndQos(topic, qos); err != nil {
+ token.setError(err)
+ return token
+ }
+ sub.Topics = append(sub.Topics, topic)
+ sub.Qoss = append(sub.Qoss, qos)
+ DEBUG.Println(CLI, sub.String())
+
+ if strings.HasPrefix(topic, "$share") {
+ topic = strings.Join(strings.Split(topic, "/")[2:], "/")
+ }
+
+ if callback != nil {
+ c.msgRouter.addRoute(topic, callback)
+ }
+
+ token.subs = append(token.subs, topic)
+ c.oboundP <- &PacketAndToken{p: sub, t: token}
+ DEBUG.Println(CLI, "exit Subscribe")
+ return token
+}
+
+// SubscribeMultiple starts a new subscription for multiple topics. Provide a MessageHandler to
+// be executed when a message is published on one of the topics provided.
+func (c *client) SubscribeMultiple(filters map[string]byte, callback MessageHandler) Token {
+ var err error
+ token := newToken(packets.Subscribe).(*SubscribeToken)
+ DEBUG.Println(CLI, "enter SubscribeMultiple")
+ if !c.IsConnected() {
+ token.setError(ErrNotConnected)
+ return token
+ }
+ sub := packets.NewControlPacket(packets.Subscribe).(*packets.SubscribePacket)
+ if sub.Topics, sub.Qoss, err = validateSubscribeMap(filters); err != nil {
+ token.setError(err)
+ return token
+ }
+
+ if callback != nil {
+ for topic := range filters {
+ c.msgRouter.addRoute(topic, callback)
+ }
+ }
+ token.subs = make([]string, len(sub.Topics))
+ copy(token.subs, sub.Topics)
+ c.oboundP <- &PacketAndToken{p: sub, t: token}
+ DEBUG.Println(CLI, "exit SubscribeMultiple")
+ return token
+}
+
+// Load all stored messages and resend them
+// Call this to honour the QoS 1 and 2 delivery guarantees even after an application crash
+func (c *client) resume(subscription bool) {
+
+ storedKeys := c.persist.All()
+ for _, key := range storedKeys {
+ packet := c.persist.Get(key)
+ if packet == nil {
+ continue
+ }
+ details := packet.Details()
+ if isKeyOutbound(key) {
+ switch packet.(type) {
+ case *packets.SubscribePacket:
+ if subscription {
+ DEBUG.Println(STR, fmt.Sprintf("loaded pending subscribe (%d)", details.MessageID))
+ token := newToken(packets.Subscribe).(*SubscribeToken)
+ c.oboundP <- &PacketAndToken{p: packet, t: token}
+ }
+ case *packets.UnsubscribePacket:
+ if subscription {
+ DEBUG.Println(STR, fmt.Sprintf("loaded pending unsubscribe (%d)", details.MessageID))
+ token := newToken(packets.Unsubscribe).(*UnsubscribeToken)
+ c.oboundP <- &PacketAndToken{p: packet, t: token}
+ }
+ case *packets.PubrelPacket:
+ DEBUG.Println(STR, fmt.Sprintf("loaded pending pubrel (%d)", details.MessageID))
+ select {
+ case c.oboundP <- &PacketAndToken{p: packet, t: nil}:
+ case <-c.stop:
+ }
+ case *packets.PublishPacket:
+ token := newToken(packets.Publish).(*PublishToken)
+ token.messageID = details.MessageID
+ c.claimID(token, details.MessageID)
+ DEBUG.Println(STR, fmt.Sprintf("loaded pending publish (%d)", details.MessageID))
+ DEBUG.Println(STR, details)
+ c.obound <- &PacketAndToken{p: packet, t: token}
+ default:
+ ERROR.Println(STR, "invalid message type in store (discarded)")
+ c.persist.Del(key)
+ }
+ } else {
+ switch packet.(type) {
+ case *packets.PubrelPacket, *packets.PublishPacket:
+				DEBUG.Println(STR, fmt.Sprintf("loaded pending incoming (%d)", details.MessageID))
+ select {
+ case c.ibound <- packet:
+ case <-c.stop:
+ }
+ default:
+ ERROR.Println(STR, "invalid message type in store (discarded)")
+ c.persist.Del(key)
+ }
+ }
+ }
+}
+
+// Unsubscribe will end the subscription from each of the topics provided.
+// Messages published to those topics from other clients will no longer be
+// received.
+func (c *client) Unsubscribe(topics ...string) Token {
+ token := newToken(packets.Unsubscribe).(*UnsubscribeToken)
+ DEBUG.Println(CLI, "enter Unsubscribe")
+ if !c.IsConnected() {
+ token.setError(ErrNotConnected)
+ return token
+ }
+ unsub := packets.NewControlPacket(packets.Unsubscribe).(*packets.UnsubscribePacket)
+ unsub.Topics = make([]string, len(topics))
+ copy(unsub.Topics, topics)
+
+ c.oboundP <- &PacketAndToken{p: unsub, t: token}
+ for _, topic := range topics {
+ c.msgRouter.deleteRoute(topic)
+ }
+
+ DEBUG.Println(CLI, "exit Unsubscribe")
+ return token
+}
+
+// OptionsReader returns a ClientOptionsReader which is a copy of the clientoptions
+// in use by the client.
+func (c *client) OptionsReader() ClientOptionsReader {
+ r := ClientOptionsReader{options: &c.options}
+ return r
+}
+
+// DefaultConnectionLostHandler is a definition of a function that simply
+// reports to the DEBUG log the reason for the client losing a connection.
+func DefaultConnectionLostHandler(client Client, reason error) {
+ DEBUG.Println("Connection lost:", reason.Error())
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/components.go b/vendor/github.com/eclipse/paho.mqtt.golang/components.go
new file mode 100644
index 0000000..01f5faf
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/components.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Seth Hoenig
+ * Allan Stockdill-Mander
+ * Mike Robertson
+ */
+
+package mqtt
+
+type component string
+
+// Component names for debug output
+const (
+ NET component = "[net] "
+ PNG component = "[pinger] "
+ CLI component = "[client] "
+ DEC component = "[decode] "
+ MES component = "[message] "
+ STR component = "[store] "
+ MID component = "[msgids] "
+ TST component = "[test] "
+ STA component = "[state] "
+ ERR component = "[error] "
+)
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/edl-v10 b/vendor/github.com/eclipse/paho.mqtt.golang/edl-v10
new file mode 100644
index 0000000..cf989f1
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/edl-v10
@@ -0,0 +1,15 @@
+
+Eclipse Distribution License - v 1.0
+
+Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors.
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+ Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+ Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+ Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/epl-v10 b/vendor/github.com/eclipse/paho.mqtt.golang/epl-v10
new file mode 100644
index 0000000..79e486c
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/epl-v10
@@ -0,0 +1,70 @@
+Eclipse Public License - v 1.0
+
+THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
+
+1. DEFINITIONS
+
+"Contribution" means:
+
+a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and
+b) in the case of each subsequent Contributor:
+i) changes to the Program, and
+ii) additions to the Program;
+where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program.
+"Contributor" means any person or entity that distributes the Program.
+
+"Licensed Patents" mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program.
+
+"Program" means the Contributions distributed in accordance with this Agreement.
+
+"Recipient" means anyone who receives the Program under this Agreement, including all Contributors.
+
+2. GRANT OF RIGHTS
+
+a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form.
+b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder.
+c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program.
+d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement.
+3. REQUIREMENTS
+
+A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that:
+
+a) it complies with the terms and conditions of this Agreement; and
+b) its license agreement:
+i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose;
+ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits;
+iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and
+iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange.
+When the Program is made available in source code form:
+
+a) it must be made available under this Agreement; and
+b) a copy of this Agreement must be included with each copy of the Program.
+Contributors may not remove or alter any copyright notices contained within the Program.
+
+Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution.
+
+4. COMMERCIAL DISTRIBUTION
+
+Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense.
+
+For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages.
+
+5. NO WARRANTY
+
+EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement , including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations.
+
+6. DISCLAIMER OF LIABILITY
+
+EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+
+7. GENERAL
+
+If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable.
+
+If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed.
+
+All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive.
+
+Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved.
+
+This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation.
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/filestore.go b/vendor/github.com/eclipse/paho.mqtt.golang/filestore.go
new file mode 100644
index 0000000..c4a0d36
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/filestore.go
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Seth Hoenig
+ * Allan Stockdill-Mander
+ * Mike Robertson
+ */
+
+package mqtt
+
+import (
+ "io/ioutil"
+ "os"
+ "path"
+ "sort"
+ "sync"
+
+ "github.com/eclipse/paho.mqtt.golang/packets"
+)
+
+const (
+ msgExt = ".msg"
+ tmpExt = ".tmp"
+ corruptExt = ".CORRUPT"
+)
+
+// FileStore implements the store interface using the filesystem to provide
+// true persistence, even across client failure. This is designed to use a
+// single directory per running client. If you are running multiple clients
+// on the same filesystem, you will need to be careful to specify unique
+// store directories for each.
+type FileStore struct {
+ sync.RWMutex
+ directory string
+ opened bool
+}
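+
+// A sketch of wiring a FileStore into a client through ClientOptions; the
+// broker address and directory name are placeholders, and each running client
+// should be given its own directory:
+//
+//	opts := mqtt.NewClientOptions().AddBroker("tcp://localhost:1883")
+//	opts.SetStore(mqtt.NewFileStore("/var/lib/myapp/mqtt/client-1"))
+//	c := mqtt.NewClient(opts)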
+
+// NewFileStore will create a new FileStore which stores its messages in the
+// directory provided.
+func NewFileStore(directory string) *FileStore {
+ store := &FileStore{
+ directory: directory,
+ opened: false,
+ }
+ return store
+}
+
+// Open will allow the FileStore to be used.
+func (store *FileStore) Open() {
+ store.Lock()
+ defer store.Unlock()
+ // if no store directory was specified in ClientOpts, by default use the
+ // current working directory
+ if store.directory == "" {
+ store.directory, _ = os.Getwd()
+ }
+
+ // if store dir exists, great, otherwise, create it
+ if !exists(store.directory) {
+ perms := os.FileMode(0770)
+ merr := os.MkdirAll(store.directory, perms)
+ chkerr(merr)
+ }
+ store.opened = true
+ DEBUG.Println(STR, "store is opened at", store.directory)
+}
+
+// Close will disallow the FileStore from being used.
+func (store *FileStore) Close() {
+ store.Lock()
+ defer store.Unlock()
+ store.opened = false
+ DEBUG.Println(STR, "store is closed")
+}
+
+// Put will put a message into the store, associated with the provided
+// key value.
+func (store *FileStore) Put(key string, m packets.ControlPacket) {
+ store.Lock()
+ defer store.Unlock()
+ if !store.opened {
+ ERROR.Println(STR, "Trying to use file store, but not open")
+ return
+ }
+ full := fullpath(store.directory, key)
+ write(store.directory, key, m)
+ if !exists(full) {
+ ERROR.Println(STR, "file not created:", full)
+ }
+}
+
+// Get will retrieve a message from the store, the one associated with
+// the provided key value.
+func (store *FileStore) Get(key string) packets.ControlPacket {
+ store.RLock()
+ defer store.RUnlock()
+ if !store.opened {
+ ERROR.Println(STR, "Trying to use file store, but not open")
+ return nil
+ }
+ filepath := fullpath(store.directory, key)
+ if !exists(filepath) {
+ return nil
+ }
+ mfile, oerr := os.Open(filepath)
+ chkerr(oerr)
+ msg, rerr := packets.ReadPacket(mfile)
+ chkerr(mfile.Close())
+
+ // Message was unreadable, return nil
+ if rerr != nil {
+ newpath := corruptpath(store.directory, key)
+ WARN.Println(STR, "corrupted file detected:", rerr.Error(), "archived at:", newpath)
+ os.Rename(filepath, newpath)
+ return nil
+ }
+ return msg
+}
+
+// All will provide a list of all of the keys associated with messages
+// currently residing in the FileStore.
+func (store *FileStore) All() []string {
+ store.RLock()
+ defer store.RUnlock()
+ return store.all()
+}
+
+// Del will remove the persisted message associated with the provided
+// key from the FileStore.
+func (store *FileStore) Del(key string) {
+ store.Lock()
+ defer store.Unlock()
+ store.del(key)
+}
+
+// Reset will remove all persisted messages from the FileStore.
+func (store *FileStore) Reset() {
+ store.Lock()
+ defer store.Unlock()
+ WARN.Println(STR, "FileStore Reset")
+ for _, key := range store.all() {
+ store.del(key)
+ }
+}
+
+// lockless
+func (store *FileStore) all() []string {
+ var err error
+ var keys []string
+ var files fileInfos
+
+ if !store.opened {
+ ERROR.Println(STR, "Trying to use file store, but not open")
+ return nil
+ }
+
+ files, err = ioutil.ReadDir(store.directory)
+ chkerr(err)
+ sort.Sort(files)
+ for _, f := range files {
+ DEBUG.Println(STR, "file in All():", f.Name())
+ name := f.Name()
+ if name[len(name)-4:len(name)] != msgExt {
+ DEBUG.Println(STR, "skipping file, doesn't have right extension: ", name)
+ continue
+ }
+ key := name[0 : len(name)-4] // remove file extension
+ keys = append(keys, key)
+ }
+ return keys
+}
+
+// lockless
+func (store *FileStore) del(key string) {
+ if !store.opened {
+ ERROR.Println(STR, "Trying to use file store, but not open")
+ return
+ }
+ DEBUG.Println(STR, "store del filepath:", store.directory)
+ DEBUG.Println(STR, "store delete key:", key)
+ filepath := fullpath(store.directory, key)
+ DEBUG.Println(STR, "path of deletion:", filepath)
+ if !exists(filepath) {
+ WARN.Println(STR, "store could not delete key:", key)
+ return
+ }
+ rerr := os.Remove(filepath)
+ chkerr(rerr)
+ DEBUG.Println(STR, "del msg:", key)
+ if exists(filepath) {
+ ERROR.Println(STR, "file not deleted:", filepath)
+ }
+}
+
+func fullpath(store string, key string) string {
+ p := path.Join(store, key+msgExt)
+ return p
+}
+
+func tmppath(store string, key string) string {
+ p := path.Join(store, key+tmpExt)
+ return p
+}
+
+func corruptpath(store string, key string) string {
+ p := path.Join(store, key+corruptExt)
+ return p
+}
+
+// write creates a file called "X.[messageid].tmp" in the store directory,
+// writes the bytes of the message as its contents, then renames it to
+// "X.[messageid].msg", overwriting any existing message with the same id.
+// X will be 'i' for inbound messages and 'o' for outbound messages.
+func write(store, key string, m packets.ControlPacket) {
+ temppath := tmppath(store, key)
+ f, err := os.Create(temppath)
+ chkerr(err)
+ werr := m.Write(f)
+ chkerr(werr)
+ cerr := f.Close()
+ chkerr(cerr)
+ rerr := os.Rename(temppath, fullpath(store, key))
+ chkerr(rerr)
+}
+
+func exists(file string) bool {
+ if _, err := os.Stat(file); err != nil {
+ if os.IsNotExist(err) {
+ return false
+ }
+ chkerr(err)
+ }
+ return true
+}
+
+type fileInfos []os.FileInfo
+
+func (f fileInfos) Len() int {
+ return len(f)
+}
+
+func (f fileInfos) Swap(i, j int) {
+ f[i], f[j] = f[j], f[i]
+}
+
+func (f fileInfos) Less(i, j int) bool {
+ return f[i].ModTime().Before(f[j].ModTime())
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/memstore.go b/vendor/github.com/eclipse/paho.mqtt.golang/memstore.go
new file mode 100644
index 0000000..499c490
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/memstore.go
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Seth Hoenig
+ * Allan Stockdill-Mander
+ * Mike Robertson
+ */
+
+package mqtt
+
+import (
+ "sync"
+
+ "github.com/eclipse/paho.mqtt.golang/packets"
+)
+
+// MemoryStore implements the store interface to provide a "persistence"
+// mechanism wholly stored in memory. This is only useful for
+// as long as the client instance exists.
+type MemoryStore struct {
+ sync.RWMutex
+ messages map[string]packets.ControlPacket
+ opened bool
+}
+
+// NewMemoryStore returns a pointer to a new instance of
+// MemoryStore; the instance is not initialized and ready to
+// use until Open() has been called on it.
+func NewMemoryStore() *MemoryStore {
+ store := &MemoryStore{
+ messages: make(map[string]packets.ControlPacket),
+ opened: false,
+ }
+ return store
+}
+
+// Open initializes a MemoryStore instance.
+func (store *MemoryStore) Open() {
+ store.Lock()
+ defer store.Unlock()
+ store.opened = true
+ DEBUG.Println(STR, "memorystore initialized")
+}
+
+// Put takes a key and a pointer to a Message and stores the
+// message.
+func (store *MemoryStore) Put(key string, message packets.ControlPacket) {
+ store.Lock()
+ defer store.Unlock()
+ if !store.opened {
+ ERROR.Println(STR, "Trying to use memory store, but not open")
+ return
+ }
+ store.messages[key] = message
+}
+
+// Get takes a key and looks in the store for a matching Message
+// returning either the Message pointer or nil.
+func (store *MemoryStore) Get(key string) packets.ControlPacket {
+ store.RLock()
+ defer store.RUnlock()
+ if !store.opened {
+ ERROR.Println(STR, "Trying to use memory store, but not open")
+ return nil
+ }
+ mid := mIDFromKey(key)
+ m := store.messages[key]
+ if m == nil {
+ CRITICAL.Println(STR, "memorystore get: message", mid, "not found")
+ } else {
+ DEBUG.Println(STR, "memorystore get: message", mid, "found")
+ }
+ return m
+}
+
+// All returns a slice of strings containing all the keys currently
+// in the MemoryStore.
+func (store *MemoryStore) All() []string {
+ store.RLock()
+ defer store.RUnlock()
+ if !store.opened {
+ ERROR.Println(STR, "Trying to use memory store, but not open")
+ return nil
+ }
+ keys := []string{}
+ for k := range store.messages {
+ keys = append(keys, k)
+ }
+ return keys
+}
+
+// Del takes a key, searches the MemoryStore and if the key is found
+// deletes the Message pointer associated with it.
+func (store *MemoryStore) Del(key string) {
+ store.Lock()
+ defer store.Unlock()
+ if !store.opened {
+ ERROR.Println(STR, "Trying to use memory store, but not open")
+ return
+ }
+ mid := mIDFromKey(key)
+ m := store.messages[key]
+ if m == nil {
+ WARN.Println(STR, "memorystore del: message", mid, "not found")
+ } else {
+ delete(store.messages, key)
+ DEBUG.Println(STR, "memorystore del: message", mid, "was deleted")
+ }
+}
+
+// Close will disallow modifications to the state of the store.
+func (store *MemoryStore) Close() {
+ store.Lock()
+ defer store.Unlock()
+ if !store.opened {
+ ERROR.Println(STR, "Trying to close memory store, but not open")
+ return
+ }
+ store.opened = false
+ DEBUG.Println(STR, "memorystore closed")
+}
+
+// Reset eliminates all persisted message data in the store.
+func (store *MemoryStore) Reset() {
+ store.Lock()
+ defer store.Unlock()
+ if !store.opened {
+ ERROR.Println(STR, "Trying to reset memory store, but not open")
+ }
+ store.messages = make(map[string]packets.ControlPacket)
+ WARN.Println(STR, "memorystore wiped")
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/message.go b/vendor/github.com/eclipse/paho.mqtt.golang/message.go
new file mode 100644
index 0000000..903e5dc
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/message.go
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Seth Hoenig
+ * Allan Stockdill-Mander
+ * Mike Robertson
+ */
+
+package mqtt
+
+import (
+	"net/url"
+	"sync"
+
+	"github.com/eclipse/paho.mqtt.golang/packets"
+)
+
+// Message defines the externals that a message implementation must support
+// these are received messages that are passed to the callbacks, not internal
+// messages
+type Message interface {
+ Duplicate() bool
+ Qos() byte
+ Retained() bool
+ Topic() string
+ MessageID() uint16
+ Payload() []byte
+ Ack()
+}
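+
+// A sketch of a MessageHandler callback consuming a received Message; the
+// formatting of the payload is an application choice:
+//
+//	var handler mqtt.MessageHandler = func(client mqtt.Client, msg mqtt.Message) {
+//		fmt.Printf("qos %d message on %s: %s\n", msg.Qos(), msg.Topic(), msg.Payload())
+//	}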
+
+type message struct {
+ duplicate bool
+ qos byte
+ retained bool
+ topic string
+ messageID uint16
+ payload []byte
+ once sync.Once
+ ack func()
+}
+
+func (m *message) Duplicate() bool {
+ return m.duplicate
+}
+
+func (m *message) Qos() byte {
+ return m.qos
+}
+
+func (m *message) Retained() bool {
+ return m.retained
+}
+
+func (m *message) Topic() string {
+ return m.topic
+}
+
+func (m *message) MessageID() uint16 {
+ return m.messageID
+}
+
+func (m *message) Payload() []byte {
+ return m.payload
+}
+
+func (m *message) Ack() {
+ m.once.Do(m.ack)
+}
+
+func messageFromPublish(p *packets.PublishPacket, ack func()) Message {
+ return &message{
+ duplicate: p.Dup,
+ qos: p.Qos,
+ retained: p.Retain,
+ topic: p.TopicName,
+ messageID: p.MessageID,
+ payload: p.Payload,
+ ack: ack,
+ }
+}
+
+func newConnectMsgFromOptions(options *ClientOptions, broker *url.URL) *packets.ConnectPacket {
+ m := packets.NewControlPacket(packets.Connect).(*packets.ConnectPacket)
+
+ m.CleanSession = options.CleanSession
+ m.WillFlag = options.WillEnabled
+ m.WillRetain = options.WillRetained
+ m.ClientIdentifier = options.ClientID
+
+ if options.WillEnabled {
+ m.WillQos = options.WillQos
+ m.WillTopic = options.WillTopic
+ m.WillMessage = options.WillPayload
+ }
+
+ username := options.Username
+ password := options.Password
+ if broker.User != nil {
+ username = broker.User.Username()
+ if pwd, ok := broker.User.Password(); ok {
+ password = pwd
+ }
+ }
+ if options.CredentialsProvider != nil {
+ username, password = options.CredentialsProvider()
+ }
+
+ if username != "" {
+ m.UsernameFlag = true
+ m.Username = username
+ //mustn't have password without user as well
+ if password != "" {
+ m.PasswordFlag = true
+ m.Password = []byte(password)
+ }
+ }
+
+ m.Keepalive = uint16(options.KeepAlive)
+
+ return m
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/messageids.go b/vendor/github.com/eclipse/paho.mqtt.golang/messageids.go
new file mode 100644
index 0000000..9a5fa9f
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/messageids.go
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Seth Hoenig
+ * Allan Stockdill-Mander
+ * Mike Robertson
+ */
+
+package mqtt
+
+import (
+ "fmt"
+ "sync"
+ "time"
+)
+
+// MId is a 16 bit message id as specified by the MQTT spec.
+// In general, these values should not be depended upon by
+// the client application.
+type MId uint16
+
+type messageIds struct {
+ sync.RWMutex
+ index map[uint16]tokenCompletor
+}
+
+const (
+ midMin uint16 = 1
+ midMax uint16 = 65535
+)
+
+func (mids *messageIds) cleanUp() {
+ mids.Lock()
+ for _, token := range mids.index {
+ switch token.(type) {
+ case *PublishToken:
+ token.setError(fmt.Errorf("Connection lost before Publish completed"))
+ case *SubscribeToken:
+ token.setError(fmt.Errorf("Connection lost before Subscribe completed"))
+ case *UnsubscribeToken:
+ token.setError(fmt.Errorf("Connection lost before Unsubscribe completed"))
+ case nil:
+ continue
+ }
+ token.flowComplete()
+ }
+ mids.index = make(map[uint16]tokenCompletor)
+ mids.Unlock()
+ DEBUG.Println(MID, "cleaned up")
+}
+
+func (mids *messageIds) freeID(id uint16) {
+ mids.Lock()
+ delete(mids.index, id)
+ mids.Unlock()
+}
+
+func (mids *messageIds) claimID(token tokenCompletor, id uint16) {
+ mids.Lock()
+ defer mids.Unlock()
+ if _, ok := mids.index[id]; !ok {
+ mids.index[id] = token
+ } else {
+ old := mids.index[id]
+ old.flowComplete()
+ mids.index[id] = token
+ }
+}
+
+func (mids *messageIds) getID(t tokenCompletor) uint16 {
+ mids.Lock()
+ defer mids.Unlock()
+ for i := midMin; i < midMax; i++ {
+ if _, ok := mids.index[i]; !ok {
+ mids.index[i] = t
+ return i
+ }
+ }
+ return 0
+}
+
+func (mids *messageIds) getToken(id uint16) tokenCompletor {
+ mids.RLock()
+ defer mids.RUnlock()
+ if token, ok := mids.index[id]; ok {
+ return token
+ }
+ return &DummyToken{id: id}
+}
+
+type DummyToken struct {
+ id uint16
+}
+
+func (d *DummyToken) Wait() bool {
+ return true
+}
+
+func (d *DummyToken) WaitTimeout(t time.Duration) bool {
+ return true
+}
+
+func (d *DummyToken) flowComplete() {
+ ERROR.Printf("A lookup for token %d returned nil\n", d.id)
+}
+
+func (d *DummyToken) Error() error {
+ return nil
+}
+
+func (d *DummyToken) setError(e error) {}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/net.go b/vendor/github.com/eclipse/paho.mqtt.golang/net.go
new file mode 100644
index 0000000..3e6366b
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/net.go
@@ -0,0 +1,355 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Seth Hoenig
+ * Allan Stockdill-Mander
+ * Mike Robertson
+ */
+
+package mqtt
+
+import (
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "reflect"
+ "sync/atomic"
+ "time"
+
+ "github.com/eclipse/paho.mqtt.golang/packets"
+ "golang.org/x/net/proxy"
+ "golang.org/x/net/websocket"
+)
+
+func signalError(c chan<- error, err error) {
+ select {
+ case c <- err:
+ default:
+ }
+}
+
+func openConnection(uri *url.URL, tlsc *tls.Config, timeout time.Duration, headers http.Header) (net.Conn, error) {
+ switch uri.Scheme {
+ case "ws":
+ config, _ := websocket.NewConfig(uri.String(), fmt.Sprintf("http://%s", uri.Host))
+ config.Protocol = []string{"mqtt"}
+ config.Header = headers
+ config.Dialer = &net.Dialer{Timeout: timeout}
+ conn, err := websocket.DialConfig(config)
+ if err != nil {
+ return nil, err
+ }
+ conn.PayloadType = websocket.BinaryFrame
+ return conn, err
+ case "wss":
+ config, _ := websocket.NewConfig(uri.String(), fmt.Sprintf("https://%s", uri.Host))
+ config.Protocol = []string{"mqtt"}
+ config.TlsConfig = tlsc
+ config.Header = headers
+ config.Dialer = &net.Dialer{Timeout: timeout}
+ conn, err := websocket.DialConfig(config)
+ if err != nil {
+ return nil, err
+ }
+ conn.PayloadType = websocket.BinaryFrame
+ return conn, err
+ case "tcp":
+ allProxy := os.Getenv("all_proxy")
+ if len(allProxy) == 0 {
+ conn, err := net.DialTimeout("tcp", uri.Host, timeout)
+ if err != nil {
+ return nil, err
+ }
+ return conn, nil
+ }
+ proxyDialer := proxy.FromEnvironment()
+
+ conn, err := proxyDialer.Dial("tcp", uri.Host)
+ if err != nil {
+ return nil, err
+ }
+ return conn, nil
+ case "unix":
+ conn, err := net.DialTimeout("unix", uri.Host, timeout)
+ if err != nil {
+ return nil, err
+ }
+ return conn, nil
+ case "ssl":
+ fallthrough
+ case "tls":
+ fallthrough
+ case "tcps":
+ allProxy := os.Getenv("all_proxy")
+ if len(allProxy) == 0 {
+ conn, err := tls.DialWithDialer(&net.Dialer{Timeout: timeout}, "tcp", uri.Host, tlsc)
+ if err != nil {
+ return nil, err
+ }
+ return conn, nil
+ }
+ proxyDialer := proxy.FromEnvironment()
+
+ conn, err := proxyDialer.Dial("tcp", uri.Host)
+ if err != nil {
+ return nil, err
+ }
+
+ tlsConn := tls.Client(conn, tlsc)
+
+ err = tlsConn.Handshake()
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+ return tlsConn, nil
+ }
+ return nil, errors.New("Unknown protocol")
+}
+
+// incoming reads MQTT packets off the wire and
+// sends each decoded packet into the ibound channel
+func incoming(c *client) {
+ var err error
+ var cp packets.ControlPacket
+
+ defer c.workers.Done()
+
+ DEBUG.Println(NET, "incoming started")
+
+ for {
+ if cp, err = packets.ReadPacket(c.conn); err != nil {
+ break
+ }
+ DEBUG.Println(NET, "Received Message")
+ select {
+ case c.ibound <- cp:
+ // Notify keepalive logic that we recently received a packet
+ if c.options.KeepAlive != 0 {
+ c.lastReceived.Store(time.Now())
+ }
+ case <-c.stop:
+ // This avoids a deadlock should a message arrive while shutting down.
+ // In that case the "reader" of c.ibound might already be gone
+ WARN.Println(NET, "incoming dropped a received message during shutdown")
+ break
+ }
+ }
+ // We received an error on read.
+ // If disconnect is in progress, swallow error and return
+ select {
+ case <-c.stop:
+ DEBUG.Println(NET, "incoming stopped")
+ return
+ // Not trying to disconnect, send the error to the errors channel
+ default:
+ ERROR.Println(NET, "incoming stopped with error", err)
+ signalError(c.errors, err)
+ return
+ }
+}
+
+// receive a Message object on obound, and then
+// actually write the outgoing message to the wire
+func outgoing(c *client) {
+ defer c.workers.Done()
+ DEBUG.Println(NET, "outgoing started")
+
+ for {
+ DEBUG.Println(NET, "outgoing waiting for an outbound message")
+ select {
+ case <-c.stop:
+ DEBUG.Println(NET, "outgoing stopped")
+ return
+ case pub := <-c.obound:
+ msg := pub.p.(*packets.PublishPacket)
+
+ if c.options.WriteTimeout > 0 {
+ c.conn.SetWriteDeadline(time.Now().Add(c.options.WriteTimeout))
+ }
+
+ if err := msg.Write(c.conn); err != nil {
+ ERROR.Println(NET, "outgoing stopped with error", err)
+ pub.t.setError(err)
+ signalError(c.errors, err)
+ return
+ }
+
+ if c.options.WriteTimeout > 0 {
+ // If we successfully wrote, we don't want the timeout to happen during an idle period
+ // so we reset it to infinite.
+ c.conn.SetWriteDeadline(time.Time{})
+ }
+
+ if msg.Qos == 0 {
+ pub.t.flowComplete()
+ }
+ DEBUG.Println(NET, "obound wrote msg, id:", msg.MessageID)
+ case msg := <-c.oboundP:
+ switch msg.p.(type) {
+ case *packets.SubscribePacket:
+ msg.p.(*packets.SubscribePacket).MessageID = c.getID(msg.t)
+ case *packets.UnsubscribePacket:
+ msg.p.(*packets.UnsubscribePacket).MessageID = c.getID(msg.t)
+ }
+ DEBUG.Println(NET, "obound priority msg to write, type", reflect.TypeOf(msg.p))
+ if err := msg.p.Write(c.conn); err != nil {
+ ERROR.Println(NET, "outgoing stopped with error", err)
+ if msg.t != nil {
+ msg.t.setError(err)
+ }
+ signalError(c.errors, err)
+ return
+ }
+ switch msg.p.(type) {
+ case *packets.DisconnectPacket:
+ msg.t.(*DisconnectToken).flowComplete()
+ DEBUG.Println(NET, "outbound wrote disconnect, stopping")
+ return
+ }
+ }
+ // Reset ping timer after sending control packet.
+ if c.options.KeepAlive != 0 {
+ c.lastSent.Store(time.Now())
+ }
+ }
+}
+
+// receive Message objects on ibound
+// store messages if necessary
+// send replies on obound
+// delete messages from store if necessary
+func alllogic(c *client) {
+ defer c.workers.Done()
+ DEBUG.Println(NET, "logic started")
+
+ for {
+ DEBUG.Println(NET, "logic waiting for msg on ibound")
+
+ select {
+ case msg := <-c.ibound:
+ DEBUG.Println(NET, "logic got msg on ibound")
+ persistInbound(c.persist, msg)
+ switch m := msg.(type) {
+ case *packets.PingrespPacket:
+ DEBUG.Println(NET, "received pingresp")
+ atomic.StoreInt32(&c.pingOutstanding, 0)
+ case *packets.SubackPacket:
+ DEBUG.Println(NET, "received suback, id:", m.MessageID)
+ token := c.getToken(m.MessageID)
+ switch t := token.(type) {
+ case *SubscribeToken:
+ DEBUG.Println(NET, "granted qoss", m.ReturnCodes)
+ for i, qos := range m.ReturnCodes {
+ t.subResult[t.subs[i]] = qos
+ }
+ }
+ token.flowComplete()
+ c.freeID(m.MessageID)
+ case *packets.UnsubackPacket:
+ DEBUG.Println(NET, "received unsuback, id:", m.MessageID)
+ c.getToken(m.MessageID).flowComplete()
+ c.freeID(m.MessageID)
+ case *packets.PublishPacket:
+ DEBUG.Println(NET, "received publish, msgId:", m.MessageID)
+ DEBUG.Println(NET, "putting msg on onPubChan")
+ switch m.Qos {
+ case 2:
+ c.incomingPubChan <- m
+ DEBUG.Println(NET, "done putting msg on incomingPubChan")
+ case 1:
+ c.incomingPubChan <- m
+ DEBUG.Println(NET, "done putting msg on incomingPubChan")
+ case 0:
+ select {
+ case c.incomingPubChan <- m:
+ case <-c.stop:
+ }
+ DEBUG.Println(NET, "done putting msg on incomingPubChan")
+ }
+ case *packets.PubackPacket:
+ DEBUG.Println(NET, "received puback, id:", m.MessageID)
+ // c.receipts.get(msg.MsgId()) <- Receipt{}
+ // c.receipts.end(msg.MsgId())
+ c.getToken(m.MessageID).flowComplete()
+ c.freeID(m.MessageID)
+ case *packets.PubrecPacket:
+ DEBUG.Println(NET, "received pubrec, id:", m.MessageID)
+ prel := packets.NewControlPacket(packets.Pubrel).(*packets.PubrelPacket)
+ prel.MessageID = m.MessageID
+ select {
+ case c.oboundP <- &PacketAndToken{p: prel, t: nil}:
+ case <-c.stop:
+ }
+ case *packets.PubrelPacket:
+ DEBUG.Println(NET, "received pubrel, id:", m.MessageID)
+ pc := packets.NewControlPacket(packets.Pubcomp).(*packets.PubcompPacket)
+ pc.MessageID = m.MessageID
+ persistOutbound(c.persist, pc)
+ select {
+ case c.oboundP <- &PacketAndToken{p: pc, t: nil}:
+ case <-c.stop:
+ }
+ case *packets.PubcompPacket:
+ DEBUG.Println(NET, "received pubcomp, id:", m.MessageID)
+ c.getToken(m.MessageID).flowComplete()
+ c.freeID(m.MessageID)
+ }
+ case <-c.stop:
+ WARN.Println(NET, "logic stopped")
+ return
+ }
+ }
+}
+
+func (c *client) ackFunc(packet *packets.PublishPacket) func() {
+ return func() {
+ switch packet.Qos {
+ case 2:
+ pr := packets.NewControlPacket(packets.Pubrec).(*packets.PubrecPacket)
+ pr.MessageID = packet.MessageID
+ DEBUG.Println(NET, "putting pubrec msg on obound")
+ select {
+ case c.oboundP <- &PacketAndToken{p: pr, t: nil}:
+ case <-c.stop:
+ }
+ DEBUG.Println(NET, "done putting pubrec msg on obound")
+ case 1:
+ pa := packets.NewControlPacket(packets.Puback).(*packets.PubackPacket)
+ pa.MessageID = packet.MessageID
+ DEBUG.Println(NET, "putting puback msg on obound")
+ persistOutbound(c.persist, pa)
+ select {
+ case c.oboundP <- &PacketAndToken{p: pa, t: nil}:
+ case <-c.stop:
+ }
+ DEBUG.Println(NET, "done putting puback msg on obound")
+ case 0:
+ // do nothing, since there is no need to send an ack packet back
+ }
+ }
+}
+
+func errorWatch(c *client) {
+ defer c.workers.Done()
+ select {
+ case <-c.stop:
+ WARN.Println(NET, "errorWatch stopped")
+ return
+ case err := <-c.errors:
+ ERROR.Println(NET, "error triggered, stopping")
+ go c.internalConnLost(err)
+ return
+ }
+}
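
signalError at the top of this file uses a select with a default case so that any worker goroutine can report a failure without blocking once one error has already been queued on the buffered errors channel. A self-contained sketch of the same idiom:

```go
package main

import (
	"errors"
	"fmt"
)

// signal performs a non-blocking send: the first error wins, and later
// callers fall through to the default case and return immediately.
func signal(c chan<- error, err error) {
	select {
	case c <- err:
	default:
	}
}

func main() {
	errs := make(chan error, 1)
	signal(errs, errors.New("read failed"))
	signal(errs, errors.New("write failed")) // dropped, buffer already full
	fmt.Println(<-errs)                      // read failed
}
```
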
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/notice.html b/vendor/github.com/eclipse/paho.mqtt.golang/notice.html
new file mode 100644
index 0000000..f19c483
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/notice.html
@@ -0,0 +1,108 @@
+
+
+
+
+
+Eclipse Foundation Software User Agreement
+
+
+
+Eclipse Foundation Software User Agreement
+February 1, 2011
+
+Usage Of Content
+
+THE ECLIPSE FOUNDATION MAKES AVAILABLE SOFTWARE, DOCUMENTATION, INFORMATION AND/OR OTHER MATERIALS FOR OPEN SOURCE PROJECTS
+ (COLLECTIVELY "CONTENT"). USE OF THE CONTENT IS GOVERNED BY THE TERMS AND CONDITIONS OF THIS AGREEMENT AND/OR THE TERMS AND
+ CONDITIONS OF LICENSE AGREEMENTS OR NOTICES INDICATED OR REFERENCED BELOW. BY USING THE CONTENT, YOU AGREE THAT YOUR USE
+ OF THE CONTENT IS GOVERNED BY THIS AGREEMENT AND/OR THE TERMS AND CONDITIONS OF ANY APPLICABLE LICENSE AGREEMENTS OR
+ NOTICES INDICATED OR REFERENCED BELOW. IF YOU DO NOT AGREE TO THE TERMS AND CONDITIONS OF THIS AGREEMENT AND THE TERMS AND
+ CONDITIONS OF ANY APPLICABLE LICENSE AGREEMENTS OR NOTICES INDICATED OR REFERENCED BELOW, THEN YOU MAY NOT USE THE CONTENT.
+
+Applicable Licenses
+
+Unless otherwise indicated, all Content made available by the Eclipse Foundation is provided to you under the terms and conditions of the Eclipse Public License Version 1.0
+ ("EPL"). A copy of the EPL is provided with this Content and is also available at http://www.eclipse.org/legal/epl-v10.html.
+ For purposes of the EPL, "Program" will mean the Content.
+
+Content includes, but is not limited to, source code, object code, documentation and other files maintained in the Eclipse Foundation source code
+ repository ("Repository") in software modules ("Modules") and made available as downloadable archives ("Downloads").
+
+
+ - Content may be structured and packaged into modules to facilitate delivering, extending, and upgrading the Content. Typical modules may include plug-ins ("Plug-ins"), plug-in fragments ("Fragments"), and features ("Features").
+ - Each Plug-in or Fragment may be packaged as a sub-directory or JAR (Java™ ARchive) in a directory named "plugins".
+ - A Feature is a bundle of one or more Plug-ins and/or Fragments and associated material. Each Feature may be packaged as a sub-directory in a directory named "features". Within a Feature, files named "feature.xml" may contain a list of the names and version numbers of the Plug-ins
+ and/or Fragments associated with that Feature.
+ - Features may also include other Features ("Included Features"). Within a Feature, files named "feature.xml" may contain a list of the names and version numbers of Included Features.
+
+
+The terms and conditions governing Plug-ins and Fragments should be contained in files named "about.html" ("Abouts"). The terms and conditions governing Features and
+Included Features should be contained in files named "license.html" ("Feature Licenses"). Abouts and Feature Licenses may be located in any directory of a Download or Module
+including, but not limited to the following locations:
+
+
+ - The top-level (root) directory
+ - Plug-in and Fragment directories
+ - Inside Plug-ins and Fragments packaged as JARs
+ - Sub-directories of the directory named "src" of certain Plug-ins
+ - Feature directories
+
+
+Note: if a Feature made available by the Eclipse Foundation is installed using the Provisioning Technology (as defined below), you must agree to a license ("Feature Update License") during the
+installation process. If the Feature contains Included Features, the Feature Update License should either provide you with the terms and conditions governing the Included Features or
+inform you where you can locate them. Feature Update Licenses may be found in the "license" property of files named "feature.properties" found within a Feature.
+Such Abouts, Feature Licenses, and Feature Update Licenses contain the terms and conditions (or references to such terms and conditions) that govern your use of the associated Content in
+that directory.
+
+THE ABOUTS, FEATURE LICENSES, AND FEATURE UPDATE LICENSES MAY REFER TO THE EPL OR OTHER LICENSE AGREEMENTS, NOTICES OR TERMS AND CONDITIONS. SOME OF THESE
+OTHER LICENSE AGREEMENTS MAY INCLUDE (BUT ARE NOT LIMITED TO):
+
+
+
+IT IS YOUR OBLIGATION TO READ AND ACCEPT ALL SUCH TERMS AND CONDITIONS PRIOR TO USE OF THE CONTENT. If no About, Feature License, or Feature Update License is provided, please
+contact the Eclipse Foundation to determine what terms and conditions govern that particular Content.
+
+
+Use of Provisioning Technology
+
+The Eclipse Foundation makes available provisioning software, examples of which include, but are not limited to, p2 and the Eclipse
+ Update Manager ("Provisioning Technology") for the purpose of allowing users to install software, documentation, information and/or
+ other materials (collectively "Installable Software"). This capability is provided with the intent of allowing such users to
+ install, extend and update Eclipse-based products. Information about packaging Installable Software is available at http://eclipse.org/equinox/p2/repository_packaging.html
+ ("Specification").
+
+You may use Provisioning Technology to allow other parties to install Installable Software. You shall be responsible for enabling the
+ applicable license agreements relating to the Installable Software to be presented to, and accepted by, the users of the Provisioning Technology
+ in accordance with the Specification. By using Provisioning Technology in such a manner and making it available in accordance with the
+ Specification, you further acknowledge your agreement to, and the acquisition of all necessary rights to permit the following:
+
+
+ - A series of actions may occur ("Provisioning Process") in which a user may execute the Provisioning Technology
+ on a machine ("Target Machine") with the intent of installing, extending or updating the functionality of an Eclipse-based
+ product.
+ - During the Provisioning Process, the Provisioning Technology may cause third party Installable Software or a portion thereof to be
+ accessed and copied to the Target Machine.
+ - Pursuant to the Specification, you will provide to the user the terms and conditions that govern the use of the Installable
+ Software ("Installable Software Agreement") and such Installable Software Agreement shall be accessed from the Target
+ Machine in accordance with the Specification. Such Installable Software Agreement must inform the user of the terms and conditions that govern
+ the Installable Software and must solicit acceptance by the end user in the manner prescribed in such Installable Software Agreement. Upon such
+ indication of agreement by the user, the provisioning Technology will complete installation of the Installable Software.
+
+
+Cryptography
+
+Content may contain encryption software. The country in which you are currently may have restrictions on the import, possession, and use, and/or re-export to
+ another country, of encryption software. BEFORE using any encryption software, please check the country's laws, regulations and policies concerning the import,
+ possession, or use, and re-export of encryption software, to see if this is permitted.
+
+Java and all Java-based trademarks are trademarks of Oracle Corporation in the United States, other countries, or both.
+
+
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/oops.go b/vendor/github.com/eclipse/paho.mqtt.golang/oops.go
new file mode 100644
index 0000000..39630d7
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/oops.go
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Seth Hoenig
+ * Allan Stockdill-Mander
+ * Mike Robertson
+ */
+
+package mqtt
+
+func chkerr(e error) {
+ if e != nil {
+ panic(e)
+ }
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/options.go b/vendor/github.com/eclipse/paho.mqtt.golang/options.go
new file mode 100644
index 0000000..e96e9ed
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/options.go
@@ -0,0 +1,340 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Seth Hoenig
+ * Allan Stockdill-Mander
+ * Mike Robertson
+ */
+
+// Portions copyright © 2018 TIBCO Software Inc.
+
+package mqtt
+
+import (
+ "crypto/tls"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+)
+
+// CredentialsProvider allows the username and password to be updated
+// before reconnecting. It should return the current username and password.
+type CredentialsProvider func() (username string, password string)
+
+// MessageHandler is a callback type which can be set to be
+// executed upon the arrival of messages published to topics
+// to which the client is subscribed.
+type MessageHandler func(Client, Message)
+
+// ConnectionLostHandler is a callback type which can be set to be
+// executed upon an unintended disconnection from the MQTT broker.
+// Disconnects caused by calling Disconnect or ForceDisconnect will
+// not cause an OnConnectionLost callback to execute.
+type ConnectionLostHandler func(Client, error)
+
+// OnConnectHandler is a callback that is called when the client
+// state changes from unconnected/disconnected to connected. Both
+// at initial connection and on reconnection
+type OnConnectHandler func(Client)
+
+// ClientOptions contains configurable options for a Client.
+type ClientOptions struct {
+ Servers []*url.URL
+ ClientID string
+ Username string
+ Password string
+ CredentialsProvider CredentialsProvider
+ CleanSession bool
+ Order bool
+ WillEnabled bool
+ WillTopic string
+ WillPayload []byte
+ WillQos byte
+ WillRetained bool
+ ProtocolVersion uint
+ protocolVersionExplicit bool
+ TLSConfig *tls.Config
+ KeepAlive int64
+ PingTimeout time.Duration
+ ConnectTimeout time.Duration
+ MaxReconnectInterval time.Duration
+ AutoReconnect bool
+ Store Store
+ DefaultPublishHandler MessageHandler
+ OnConnect OnConnectHandler
+ OnConnectionLost ConnectionLostHandler
+ WriteTimeout time.Duration
+ MessageChannelDepth uint
+ ResumeSubs bool
+ HTTPHeaders http.Header
+}
+
+// NewClientOptions will create a new ClientOptions type with some
+// default values.
+// Port: 1883
+// CleanSession: True
+// Order: True
+// KeepAlive: 30 (seconds)
+// ConnectTimeout: 30 (seconds)
+// MaxReconnectInterval 10 (minutes)
+// AutoReconnect: True
+func NewClientOptions() *ClientOptions {
+ o := &ClientOptions{
+ Servers: nil,
+ ClientID: "",
+ Username: "",
+ Password: "",
+ CleanSession: true,
+ Order: true,
+ WillEnabled: false,
+ WillTopic: "",
+ WillPayload: nil,
+ WillQos: 0,
+ WillRetained: false,
+ ProtocolVersion: 0,
+ protocolVersionExplicit: false,
+ KeepAlive: 30,
+ PingTimeout: 10 * time.Second,
+ ConnectTimeout: 30 * time.Second,
+ MaxReconnectInterval: 10 * time.Minute,
+ AutoReconnect: true,
+ Store: nil,
+ OnConnect: nil,
+ OnConnectionLost: DefaultConnectionLostHandler,
+ WriteTimeout: 0, // 0 represents timeout disabled
+ MessageChannelDepth: 100,
+ ResumeSubs: false,
+ HTTPHeaders: make(map[string][]string),
+ }
+ return o
+}
+
+// AddBroker adds a broker URI to the list of brokers to be used. The format should be
+// scheme://host:port
+// Where "scheme" is one of "tcp", "ssl", or "ws", "host" is the ip-address (or hostname)
+// and "port" is the port on which the broker is accepting connections.
+//
+// The default hostname is "127.0.0.1" and the default scheme is "tcp://".
+//
+// An example broker URI would look like: tcp://foobar.com:1883
+func (o *ClientOptions) AddBroker(server string) *ClientOptions {
+ if len(server) > 0 && server[0] == ':' {
+ server = "127.0.0.1" + server
+ }
+ if !strings.Contains(server, "://") {
+ server = "tcp://" + server
+ }
+ brokerURI, err := url.Parse(server)
+ if err != nil {
+ ERROR.Println(CLI, "Failed to parse %q broker address: %s", server, err)
+ return o
+ }
+ o.Servers = append(o.Servers, brokerURI)
+ return o
+}
+
+// SetResumeSubs will enable resuming of stored (un)subscribe messages when connecting
+// (but not when reconnecting) if CleanSession is false. Otherwise these messages are discarded.
+func (o *ClientOptions) SetResumeSubs(resume bool) *ClientOptions {
+ o.ResumeSubs = resume
+ return o
+}
+
+// SetClientID will set the client id to be used by this client when
+// connecting to the MQTT broker. According to the MQTT v3.1 specification,
+// a client id must be no longer than 23 characters.
+func (o *ClientOptions) SetClientID(id string) *ClientOptions {
+ o.ClientID = id
+ return o
+}
+
+// SetUsername will set the username to be used by this client when connecting
+// to the MQTT broker. Note: without the use of SSL/TLS, this information will
+// be sent in plaintext across the wire.
+func (o *ClientOptions) SetUsername(u string) *ClientOptions {
+ o.Username = u
+ return o
+}
+
+// SetPassword will set the password to be used by this client when connecting
+// to the MQTT broker. Note: without the use of SSL/TLS, this information will
+// be sent in plaintext across the wire.
+func (o *ClientOptions) SetPassword(p string) *ClientOptions {
+ o.Password = p
+ return o
+}
+
+// SetCredentialsProvider will set a method to be called by this client when
+// connecting to the MQTT broker that provides the current username and password.
+// Note: without the use of SSL/TLS, this information will be sent
+// in plaintext across the wire.
+func (o *ClientOptions) SetCredentialsProvider(p CredentialsProvider) *ClientOptions {
+ o.CredentialsProvider = p
+ return o
+}
+
+// SetCleanSession will set the "clean session" flag in the connect message
+// when this client connects to an MQTT broker. By setting this flag, you are
+// indicating that no messages saved by the broker for this client should be
+// delivered. Any messages that this client was going to send before it previously
+// disconnected, but did not, will likewise not be sent upon connecting to the
+// broker.
+func (o *ClientOptions) SetCleanSession(clean bool) *ClientOptions {
+ o.CleanSession = clean
+ return o
+}
+
+// SetOrderMatters will set the message routing to guarantee order within
+// each QoS level. By default, this value is true. If set to false,
+// this flag indicates that messages can be delivered asynchronously
+// from the client to the application and possibly arrive out of order.
+func (o *ClientOptions) SetOrderMatters(order bool) *ClientOptions {
+ o.Order = order
+ return o
+}
+
+// SetTLSConfig will set an SSL/TLS configuration to be used when connecting
+// to an MQTT broker. Please read the official Go documentation for more
+// information.
+func (o *ClientOptions) SetTLSConfig(t *tls.Config) *ClientOptions {
+ o.TLSConfig = t
+ return o
+}
+
+// SetStore will set the implementation of the Store interface
+// used to provide message persistence in cases where QoS levels
+// QoS_ONE or QoS_TWO are used. If no store is provided, then the
+// client will use MemoryStore by default.
+func (o *ClientOptions) SetStore(s Store) *ClientOptions {
+ o.Store = s
+ return o
+}
+
+// SetKeepAlive will set the amount of time that the client
+// should wait before sending a PING request to the broker. This will
+// allow the client to know that a connection has not been lost with the
+// server.
+func (o *ClientOptions) SetKeepAlive(k time.Duration) *ClientOptions {
+ o.KeepAlive = int64(k / time.Second)
+ return o
+}
+
+// SetPingTimeout will set the amount of time that the client
+// will wait after sending a PING request to the broker, before deciding
+// that the connection has been lost. Default is 10 seconds.
+func (o *ClientOptions) SetPingTimeout(k time.Duration) *ClientOptions {
+ o.PingTimeout = k
+ return o
+}
+
+// SetProtocolVersion sets the MQTT version to be used to connect to the
+// broker. Legitimate values are currently 3 - MQTT 3.1 or 4 - MQTT 3.1.1
+func (o *ClientOptions) SetProtocolVersion(pv uint) *ClientOptions {
+ if (pv >= 3 && pv <= 4) || (pv > 0x80) {
+ o.ProtocolVersion = pv
+ o.protocolVersionExplicit = true
+ }
+ return o
+}
+
+// UnsetWill will cause any set will message to be disregarded.
+func (o *ClientOptions) UnsetWill() *ClientOptions {
+ o.WillEnabled = false
+ return o
+}
+
+// SetWill accepts a string will message to be set. When the client connects,
+// it will give this will message to the broker, which will then publish the
+// provided payload (the will) to any clients that are subscribed to the provided
+// topic.
+func (o *ClientOptions) SetWill(topic string, payload string, qos byte, retained bool) *ClientOptions {
+ o.SetBinaryWill(topic, []byte(payload), qos, retained)
+ return o
+}
+
+// SetBinaryWill accepts a []byte will message to be set. When the client connects,
+// it will give this will message to the broker, which will then publish the
+// provided payload (the will) to any clients that are subscribed to the provided
+// topic.
+func (o *ClientOptions) SetBinaryWill(topic string, payload []byte, qos byte, retained bool) *ClientOptions {
+ o.WillEnabled = true
+ o.WillTopic = topic
+ o.WillPayload = payload
+ o.WillQos = qos
+ o.WillRetained = retained
+ return o
+}
+
+// SetDefaultPublishHandler sets the MessageHandler that will be called when a message
+// is received that does not match any known subscriptions.
+func (o *ClientOptions) SetDefaultPublishHandler(defaultHandler MessageHandler) *ClientOptions {
+ o.DefaultPublishHandler = defaultHandler
+ return o
+}
+
+// SetOnConnectHandler sets the function to be called when the client is connected. Both
+// at initial connection time and upon automatic reconnect.
+func (o *ClientOptions) SetOnConnectHandler(onConn OnConnectHandler) *ClientOptions {
+ o.OnConnect = onConn
+ return o
+}
+
+// SetConnectionLostHandler will set the OnConnectionLost callback to be executed
+// in the case where the client unexpectedly loses connection with the MQTT broker.
+func (o *ClientOptions) SetConnectionLostHandler(onLost ConnectionLostHandler) *ClientOptions {
+ o.OnConnectionLost = onLost
+ return o
+}
+
+// SetWriteTimeout puts a limit on how long an MQTT publish should block until it unblocks with a
+// timeout error. A duration of 0 never times out. The default is 0 (timeout disabled).
+func (o *ClientOptions) SetWriteTimeout(t time.Duration) *ClientOptions {
+ o.WriteTimeout = t
+ return o
+}
+
+// SetConnectTimeout limits how long the client will wait when trying to open a connection
+// to an MQTT server before timing out and erroring the attempt. A duration of 0 never times out.
+// The default is 30 seconds. Currently only operational on TCP/TLS connections.
+func (o *ClientOptions) SetConnectTimeout(t time.Duration) *ClientOptions {
+ o.ConnectTimeout = t
+ return o
+}
+
+// SetMaxReconnectInterval sets the maximum time that will be waited between reconnection attempts
+// when the connection is lost.
+func (o *ClientOptions) SetMaxReconnectInterval(t time.Duration) *ClientOptions {
+ o.MaxReconnectInterval = t
+ return o
+}
+
+// SetAutoReconnect sets whether the automatic reconnection logic should be used
+// when the connection is lost. Even if it is disabled, the ConnectionLostHandler is still
+// called.
+func (o *ClientOptions) SetAutoReconnect(a bool) *ClientOptions {
+ o.AutoReconnect = a
+ return o
+}
+
+// SetMessageChannelDepth sets the size of the internal queue that holds messages while the
+// client is temporarily offline, allowing the application to publish when the client is
+// reconnecting. This setting is only valid if AutoReconnect is set to true; it is otherwise
+// ignored.
+func (o *ClientOptions) SetMessageChannelDepth(s uint) *ClientOptions {
+ o.MessageChannelDepth = s
+ return o
+}
+
+// SetHTTPHeaders sets the additional HTTP headers that will be sent in the WebSocket
+// opening handshake.
+func (o *ClientOptions) SetHTTPHeaders(h http.Header) *ClientOptions {
+ o.HTTPHeaders = h
+ return o
+}
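
As a usage sketch, the fluent setters above are normally chained before the options are handed to the client constructor. The broker address and client ID below are placeholders, and mqtt.NewClient, Connect, and Disconnect come from the client implementation elsewhere in this vendored package:

```go
package main

import (
	"log"
	"time"

	mqtt "github.com/eclipse/paho.mqtt.golang"
)

func main() {
	opts := mqtt.NewClientOptions().
		AddBroker("tcp://broker.example.com:1883"). // placeholder broker
		SetClientID("rc-road-example").             // placeholder client id
		SetKeepAlive(30 * time.Second).
		SetPingTimeout(10 * time.Second).
		SetAutoReconnect(true).
		SetConnectionLostHandler(func(_ mqtt.Client, err error) {
			log.Println("connection lost:", err)
		})

	c := mqtt.NewClient(opts)
	if token := c.Connect(); token.Wait() && token.Error() != nil {
		log.Fatal(token.Error())
	}
	defer c.Disconnect(250)
}
```
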
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/options_reader.go b/vendor/github.com/eclipse/paho.mqtt.golang/options_reader.go
new file mode 100644
index 0000000..60144b9
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/options_reader.go
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Seth Hoenig
+ * Allan Stockdill-Mander
+ * Mike Robertson
+ */
+
+package mqtt
+
+import (
+ "crypto/tls"
+ "net/http"
+ "net/url"
+ "time"
+)
+
+// ClientOptionsReader provides an interface for reading ClientOptions after the client has been initialized.
+type ClientOptionsReader struct {
+ options *ClientOptions
+}
+
+//Servers returns a slice of the servers defined in the ClientOptions
+func (r *ClientOptionsReader) Servers() []*url.URL {
+ s := make([]*url.URL, len(r.options.Servers))
+
+ for i, u := range r.options.Servers {
+ nu := *u
+ s[i] = &nu
+ }
+
+ return s
+}
+
+//ResumeSubs returns true if resuming stored (un)sub is enabled
+func (r *ClientOptionsReader) ResumeSubs() bool {
+ s := r.options.ResumeSubs
+ return s
+}
+
+//ClientID returns the set client id
+func (r *ClientOptionsReader) ClientID() string {
+ s := r.options.ClientID
+ return s
+}
+
+//Username returns the set username
+func (r *ClientOptionsReader) Username() string {
+ s := r.options.Username
+ return s
+}
+
+//Password returns the set password
+func (r *ClientOptionsReader) Password() string {
+ s := r.options.Password
+ return s
+}
+
+//CleanSession returns whether CleanSession is set
+func (r *ClientOptionsReader) CleanSession() bool {
+ s := r.options.CleanSession
+ return s
+}
+
+func (r *ClientOptionsReader) Order() bool {
+ s := r.options.Order
+ return s
+}
+
+func (r *ClientOptionsReader) WillEnabled() bool {
+ s := r.options.WillEnabled
+ return s
+}
+
+func (r *ClientOptionsReader) WillTopic() string {
+ s := r.options.WillTopic
+ return s
+}
+
+func (r *ClientOptionsReader) WillPayload() []byte {
+ s := r.options.WillPayload
+ return s
+}
+
+func (r *ClientOptionsReader) WillQos() byte {
+ s := r.options.WillQos
+ return s
+}
+
+func (r *ClientOptionsReader) WillRetained() bool {
+ s := r.options.WillRetained
+ return s
+}
+
+func (r *ClientOptionsReader) ProtocolVersion() uint {
+ s := r.options.ProtocolVersion
+ return s
+}
+
+func (r *ClientOptionsReader) TLSConfig() *tls.Config {
+ s := r.options.TLSConfig
+ return s
+}
+
+func (r *ClientOptionsReader) KeepAlive() time.Duration {
+ s := time.Duration(r.options.KeepAlive * int64(time.Second))
+ return s
+}
+
+func (r *ClientOptionsReader) PingTimeout() time.Duration {
+ s := r.options.PingTimeout
+ return s
+}
+
+func (r *ClientOptionsReader) ConnectTimeout() time.Duration {
+ s := r.options.ConnectTimeout
+ return s
+}
+
+func (r *ClientOptionsReader) MaxReconnectInterval() time.Duration {
+ s := r.options.MaxReconnectInterval
+ return s
+}
+
+func (r *ClientOptionsReader) AutoReconnect() bool {
+ s := r.options.AutoReconnect
+ return s
+}
+
+func (r *ClientOptionsReader) WriteTimeout() time.Duration {
+ s := r.options.WriteTimeout
+ return s
+}
+
+func (r *ClientOptionsReader) MessageChannelDepth() uint {
+ s := r.options.MessageChannelDepth
+ return s
+}
+
+func (r *ClientOptionsReader) HTTPHeaders() http.Header {
+ h := r.options.HTTPHeaders
+ return h
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/connack.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/connack.go
new file mode 100644
index 0000000..25cf30f
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/connack.go
@@ -0,0 +1,55 @@
+package packets
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+)
+
+//ConnackPacket is an internal representation of the fields of the
+//Connack MQTT packet
+type ConnackPacket struct {
+ FixedHeader
+ SessionPresent bool
+ ReturnCode byte
+}
+
+func (ca *ConnackPacket) String() string {
+ str := fmt.Sprintf("%s", ca.FixedHeader)
+ str += " "
+ str += fmt.Sprintf("sessionpresent: %t returncode: %d", ca.SessionPresent, ca.ReturnCode)
+ return str
+}
+
+func (ca *ConnackPacket) Write(w io.Writer) error {
+ var body bytes.Buffer
+ var err error
+
+ body.WriteByte(boolToByte(ca.SessionPresent))
+ body.WriteByte(ca.ReturnCode)
+ ca.FixedHeader.RemainingLength = 2
+ packet := ca.FixedHeader.pack()
+ packet.Write(body.Bytes())
+ _, err = packet.WriteTo(w)
+
+ return err
+}
+
+//Unpack decodes the details of a ControlPacket after the fixed
+//header has been read
+func (ca *ConnackPacket) Unpack(b io.Reader) error {
+ flags, err := decodeByte(b)
+ if err != nil {
+ return err
+ }
+ ca.SessionPresent = 1&flags > 0
+ ca.ReturnCode, err = decodeByte(b)
+
+ return err
+}
+
+//Details returns a Details struct containing the Qos and
+//MessageID of this ControlPacket
+func (ca *ConnackPacket) Details() Details {
+ return Details{Qos: 0, MessageID: 0}
+}
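
The ReturnCode carried by a CONNACK maps onto the ConnackReturnCodes and ConnErrors tables defined in packets.go further down in this diff. A small sketch that decodes a CONNACK from raw bytes and translates its code:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/eclipse/paho.mqtt.golang/packets"
)

func main() {
	// A wire-level CONNACK: type byte 0x20, remaining length 2,
	// session-present flag set, return code 0 (accepted).
	raw := []byte{0x20, 0x02, 0x01, 0x00}

	cp, err := packets.ReadPacket(bytes.NewReader(raw))
	if err != nil {
		log.Fatal(err)
	}
	ca := cp.(*packets.ConnackPacket)

	fmt.Println("session present:", ca.SessionPresent)                    // true
	fmt.Println("broker said:", packets.ConnackReturnCodes[ca.ReturnCode]) // Connection Accepted
	if err := packets.ConnErrors[ca.ReturnCode]; err != nil {
		log.Fatal(err)
	}
}
```
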
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/connect.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/connect.go
new file mode 100644
index 0000000..cb03ebc
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/connect.go
@@ -0,0 +1,154 @@
+package packets
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+)
+
+//ConnectPacket is an internal representation of the fields of the
+//Connect MQTT packet
+type ConnectPacket struct {
+ FixedHeader
+ ProtocolName string
+ ProtocolVersion byte
+ CleanSession bool
+ WillFlag bool
+ WillQos byte
+ WillRetain bool
+ UsernameFlag bool
+ PasswordFlag bool
+ ReservedBit byte
+ Keepalive uint16
+
+ ClientIdentifier string
+ WillTopic string
+ WillMessage []byte
+ Username string
+ Password []byte
+}
+
+func (c *ConnectPacket) String() string {
+ str := fmt.Sprintf("%s", c.FixedHeader)
+ str += " "
+ str += fmt.Sprintf("protocolversion: %d protocolname: %s cleansession: %t willflag: %t WillQos: %d WillRetain: %t Usernameflag: %t Passwordflag: %t keepalive: %d clientId: %s willtopic: %s willmessage: %s Username: %s Password: %s", c.ProtocolVersion, c.ProtocolName, c.CleanSession, c.WillFlag, c.WillQos, c.WillRetain, c.UsernameFlag, c.PasswordFlag, c.Keepalive, c.ClientIdentifier, c.WillTopic, c.WillMessage, c.Username, c.Password)
+ return str
+}
+
+func (c *ConnectPacket) Write(w io.Writer) error {
+ var body bytes.Buffer
+ var err error
+
+ body.Write(encodeString(c.ProtocolName))
+ body.WriteByte(c.ProtocolVersion)
+ body.WriteByte(boolToByte(c.CleanSession)<<1 | boolToByte(c.WillFlag)<<2 | c.WillQos<<3 | boolToByte(c.WillRetain)<<5 | boolToByte(c.PasswordFlag)<<6 | boolToByte(c.UsernameFlag)<<7)
+ body.Write(encodeUint16(c.Keepalive))
+ body.Write(encodeString(c.ClientIdentifier))
+ if c.WillFlag {
+ body.Write(encodeString(c.WillTopic))
+ body.Write(encodeBytes(c.WillMessage))
+ }
+ if c.UsernameFlag {
+ body.Write(encodeString(c.Username))
+ }
+ if c.PasswordFlag {
+ body.Write(encodeBytes(c.Password))
+ }
+ c.FixedHeader.RemainingLength = body.Len()
+ packet := c.FixedHeader.pack()
+ packet.Write(body.Bytes())
+ _, err = packet.WriteTo(w)
+
+ return err
+}
+
+//Unpack decodes the details of a ControlPacket after the fixed
+//header has been read
+func (c *ConnectPacket) Unpack(b io.Reader) error {
+ var err error
+ c.ProtocolName, err = decodeString(b)
+ if err != nil {
+ return err
+ }
+ c.ProtocolVersion, err = decodeByte(b)
+ if err != nil {
+ return err
+ }
+ options, err := decodeByte(b)
+ if err != nil {
+ return err
+ }
+ c.ReservedBit = 1 & options
+ c.CleanSession = 1&(options>>1) > 0
+ c.WillFlag = 1&(options>>2) > 0
+ c.WillQos = 3 & (options >> 3)
+ c.WillRetain = 1&(options>>5) > 0
+ c.PasswordFlag = 1&(options>>6) > 0
+ c.UsernameFlag = 1&(options>>7) > 0
+ c.Keepalive, err = decodeUint16(b)
+ if err != nil {
+ return err
+ }
+ c.ClientIdentifier, err = decodeString(b)
+ if err != nil {
+ return err
+ }
+ if c.WillFlag {
+ c.WillTopic, err = decodeString(b)
+ if err != nil {
+ return err
+ }
+ c.WillMessage, err = decodeBytes(b)
+ if err != nil {
+ return err
+ }
+ }
+ if c.UsernameFlag {
+ c.Username, err = decodeString(b)
+ if err != nil {
+ return err
+ }
+ }
+ if c.PasswordFlag {
+ c.Password, err = decodeBytes(b)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+//Validate performs validation of the fields of a Connect packet
+func (c *ConnectPacket) Validate() byte {
+ if c.PasswordFlag && !c.UsernameFlag {
+ return ErrRefusedBadUsernameOrPassword
+ }
+ if c.ReservedBit != 0 {
+ //Bad reserved bit
+ return ErrProtocolViolation
+ }
+ if (c.ProtocolName == "MQIsdp" && c.ProtocolVersion != 3) || (c.ProtocolName == "MQTT" && c.ProtocolVersion != 4) {
+ //Mismatched or unsupported protocol version
+ return ErrRefusedBadProtocolVersion
+ }
+ if c.ProtocolName != "MQIsdp" && c.ProtocolName != "MQTT" {
+ //Bad protocol name
+ return ErrProtocolViolation
+ }
+ if len(c.ClientIdentifier) > 65535 || len(c.Username) > 65535 || len(c.Password) > 65535 {
+ //Bad size field
+ return ErrProtocolViolation
+ }
+ if len(c.ClientIdentifier) == 0 && !c.CleanSession {
+ //Bad client identifier
+ return ErrRefusedIDRejected
+ }
+ return Accepted
+}
+
+//Details returns a Details struct containing the Qos and
+//MessageID of this ControlPacket
+func (c *ConnectPacket) Details() Details {
+ return Details{Qos: 0, MessageID: 0}
+}
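
Validate encodes the MQTT 3.1/3.1.1 handshake rules shown above: the protocol name must match the version, a password requires a username, identifier and credential lengths are bounded, and an empty client ID is only allowed together with a clean session. A small sketch exercising it with the constants from packets.go:

```go
package main

import (
	"fmt"

	"github.com/eclipse/paho.mqtt.golang/packets"
)

func main() {
	cp := packets.NewControlPacket(packets.Connect).(*packets.ConnectPacket)
	cp.ProtocolName = "MQTT"
	cp.ProtocolVersion = 4 // MQTT 3.1.1
	cp.CleanSession = true
	cp.ClientIdentifier = "rc-road-example" // placeholder id
	cp.Keepalive = 30

	fmt.Println(cp.Validate() == packets.Accepted) // true

	// An empty client id is only acceptable together with a clean session.
	cp.ClientIdentifier = ""
	cp.CleanSession = false
	fmt.Println(cp.Validate() == packets.ErrRefusedIDRejected) // true
}
```
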
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/disconnect.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/disconnect.go
new file mode 100644
index 0000000..e5c1869
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/disconnect.go
@@ -0,0 +1,36 @@
+package packets
+
+import (
+ "fmt"
+ "io"
+)
+
+//DisconnectPacket is an internal representation of the fields of the
+//Disconnect MQTT packet
+type DisconnectPacket struct {
+ FixedHeader
+}
+
+func (d *DisconnectPacket) String() string {
+ str := fmt.Sprintf("%s", d.FixedHeader)
+ return str
+}
+
+func (d *DisconnectPacket) Write(w io.Writer) error {
+ packet := d.FixedHeader.pack()
+ _, err := packet.WriteTo(w)
+
+ return err
+}
+
+//Unpack decodes the details of a ControlPacket after the fixed
+//header has been read
+func (d *DisconnectPacket) Unpack(b io.Reader) error {
+ return nil
+}
+
+//Details returns a Details struct containing the Qos and
+//MessageID of this ControlPacket
+func (d *DisconnectPacket) Details() Details {
+ return Details{Qos: 0, MessageID: 0}
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/packets.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/packets.go
new file mode 100644
index 0000000..42eeb46
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/packets.go
@@ -0,0 +1,346 @@
+package packets
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+)
+
+//ControlPacket defines the interface for structs intended to hold
+//decoded MQTT packets, either from being read or before being
+//written
+type ControlPacket interface {
+ Write(io.Writer) error
+ Unpack(io.Reader) error
+ String() string
+ Details() Details
+}
+
+//PacketNames maps the constants for each of the MQTT packet types
+//to a string representation of their name.
+var PacketNames = map[uint8]string{
+ 1: "CONNECT",
+ 2: "CONNACK",
+ 3: "PUBLISH",
+ 4: "PUBACK",
+ 5: "PUBREC",
+ 6: "PUBREL",
+ 7: "PUBCOMP",
+ 8: "SUBSCRIBE",
+ 9: "SUBACK",
+ 10: "UNSUBSCRIBE",
+ 11: "UNSUBACK",
+ 12: "PINGREQ",
+ 13: "PINGRESP",
+ 14: "DISCONNECT",
+}
+
+//Below are the constants assigned to each of the MQTT packet types
+const (
+ Connect = 1
+ Connack = 2
+ Publish = 3
+ Puback = 4
+ Pubrec = 5
+ Pubrel = 6
+ Pubcomp = 7
+ Subscribe = 8
+ Suback = 9
+ Unsubscribe = 10
+ Unsuback = 11
+ Pingreq = 12
+ Pingresp = 13
+ Disconnect = 14
+)
+
+//Below are the const definitions for error codes returned by
+//Connect()
+const (
+ Accepted = 0x00
+ ErrRefusedBadProtocolVersion = 0x01
+ ErrRefusedIDRejected = 0x02
+ ErrRefusedServerUnavailable = 0x03
+ ErrRefusedBadUsernameOrPassword = 0x04
+ ErrRefusedNotAuthorised = 0x05
+ ErrNetworkError = 0xFE
+ ErrProtocolViolation = 0xFF
+)
+
+//ConnackReturnCodes is a map of the error code constants for Connect()
+//to a string representation of the error
+var ConnackReturnCodes = map[uint8]string{
+ 0: "Connection Accepted",
+ 1: "Connection Refused: Bad Protocol Version",
+ 2: "Connection Refused: Client Identifier Rejected",
+ 3: "Connection Refused: Server Unavailable",
+ 4: "Connection Refused: Username or Password in unknown format",
+ 5: "Connection Refused: Not Authorised",
+ 254: "Connection Error",
+ 255: "Connection Refused: Protocol Violation",
+}
+
+//ConnErrors is a map of the error code constants for Connect()
+//to a Go error
+var ConnErrors = map[byte]error{
+ Accepted: nil,
+ ErrRefusedBadProtocolVersion: errors.New("Unnacceptable protocol version"),
+ ErrRefusedIDRejected: errors.New("Identifier rejected"),
+ ErrRefusedServerUnavailable: errors.New("Server Unavailable"),
+ ErrRefusedBadUsernameOrPassword: errors.New("Bad user name or password"),
+ ErrRefusedNotAuthorised: errors.New("Not Authorized"),
+ ErrNetworkError: errors.New("Network Error"),
+ ErrProtocolViolation: errors.New("Protocol Violation"),
+}
+
+//ReadPacket takes an instance of an io.Reader (such as net.Conn) and attempts
+//to read an MQTT packet from the stream. It returns a ControlPacket
+//representing the decoded MQTT packet and an error. One of these returns will
+//always be nil, a nil ControlPacket indicating an error occurred.
+func ReadPacket(r io.Reader) (ControlPacket, error) {
+ var fh FixedHeader
+ b := make([]byte, 1)
+
+ _, err := io.ReadFull(r, b)
+ if err != nil {
+ return nil, err
+ }
+
+ err = fh.unpack(b[0], r)
+ if err != nil {
+ return nil, err
+ }
+
+ cp, err := NewControlPacketWithHeader(fh)
+ if err != nil {
+ return nil, err
+ }
+
+ packetBytes := make([]byte, fh.RemainingLength)
+ n, err := io.ReadFull(r, packetBytes)
+ if err != nil {
+ return nil, err
+ }
+ if n != fh.RemainingLength {
+ return nil, errors.New("Failed to read expected data")
+ }
+
+ err = cp.Unpack(bytes.NewBuffer(packetBytes))
+ return cp, err
+}
+
+//NewControlPacket is used to create a new ControlPacket of the type specified
+//by packetType; this is usually done by reference to the packet type constants
+//defined in packets.go. The newly created ControlPacket is empty and a pointer
+//is returned.
+func NewControlPacket(packetType byte) ControlPacket {
+ switch packetType {
+ case Connect:
+ return &ConnectPacket{FixedHeader: FixedHeader{MessageType: Connect}}
+ case Connack:
+ return &ConnackPacket{FixedHeader: FixedHeader{MessageType: Connack}}
+ case Disconnect:
+ return &DisconnectPacket{FixedHeader: FixedHeader{MessageType: Disconnect}}
+ case Publish:
+ return &PublishPacket{FixedHeader: FixedHeader{MessageType: Publish}}
+ case Puback:
+ return &PubackPacket{FixedHeader: FixedHeader{MessageType: Puback}}
+ case Pubrec:
+ return &PubrecPacket{FixedHeader: FixedHeader{MessageType: Pubrec}}
+ case Pubrel:
+ return &PubrelPacket{FixedHeader: FixedHeader{MessageType: Pubrel, Qos: 1}}
+ case Pubcomp:
+ return &PubcompPacket{FixedHeader: FixedHeader{MessageType: Pubcomp}}
+ case Subscribe:
+ return &SubscribePacket{FixedHeader: FixedHeader{MessageType: Subscribe, Qos: 1}}
+ case Suback:
+ return &SubackPacket{FixedHeader: FixedHeader{MessageType: Suback}}
+ case Unsubscribe:
+ return &UnsubscribePacket{FixedHeader: FixedHeader{MessageType: Unsubscribe, Qos: 1}}
+ case Unsuback:
+ return &UnsubackPacket{FixedHeader: FixedHeader{MessageType: Unsuback}}
+ case Pingreq:
+ return &PingreqPacket{FixedHeader: FixedHeader{MessageType: Pingreq}}
+ case Pingresp:
+ return &PingrespPacket{FixedHeader: FixedHeader{MessageType: Pingresp}}
+ }
+ return nil
+}
+
+//NewControlPacketWithHeader is used to create a new ControlPacket of the type
+//specified within the FixedHeader that is passed to the function.
+//The newly created ControlPacket is empty and a pointer is returned.
+func NewControlPacketWithHeader(fh FixedHeader) (ControlPacket, error) {
+ switch fh.MessageType {
+ case Connect:
+ return &ConnectPacket{FixedHeader: fh}, nil
+ case Connack:
+ return &ConnackPacket{FixedHeader: fh}, nil
+ case Disconnect:
+ return &DisconnectPacket{FixedHeader: fh}, nil
+ case Publish:
+ return &PublishPacket{FixedHeader: fh}, nil
+ case Puback:
+ return &PubackPacket{FixedHeader: fh}, nil
+ case Pubrec:
+ return &PubrecPacket{FixedHeader: fh}, nil
+ case Pubrel:
+ return &PubrelPacket{FixedHeader: fh}, nil
+ case Pubcomp:
+ return &PubcompPacket{FixedHeader: fh}, nil
+ case Subscribe:
+ return &SubscribePacket{FixedHeader: fh}, nil
+ case Suback:
+ return &SubackPacket{FixedHeader: fh}, nil
+ case Unsubscribe:
+ return &UnsubscribePacket{FixedHeader: fh}, nil
+ case Unsuback:
+ return &UnsubackPacket{FixedHeader: fh}, nil
+ case Pingreq:
+ return &PingreqPacket{FixedHeader: fh}, nil
+ case Pingresp:
+ return &PingrespPacket{FixedHeader: fh}, nil
+ }
+ return nil, fmt.Errorf("unsupported packet type 0x%x", fh.MessageType)
+}
+
+//Details struct returned by the Details() function called on
+//ControlPackets to present details of the Qos and MessageID
+//of the ControlPacket
+type Details struct {
+ Qos byte
+ MessageID uint16
+}
+
+//FixedHeader is a struct to hold the decoded information from
+//the fixed header of an MQTT ControlPacket
+type FixedHeader struct {
+ MessageType byte
+ Dup bool
+ Qos byte
+ Retain bool
+ RemainingLength int
+}
+
+func (fh FixedHeader) String() string {
+ return fmt.Sprintf("%s: dup: %t qos: %d retain: %t rLength: %d", PacketNames[fh.MessageType], fh.Dup, fh.Qos, fh.Retain, fh.RemainingLength)
+}
+
+func boolToByte(b bool) byte {
+ switch b {
+ case true:
+ return 1
+ default:
+ return 0
+ }
+}
+
+func (fh *FixedHeader) pack() bytes.Buffer {
+ var header bytes.Buffer
+ header.WriteByte(fh.MessageType<<4 | boolToByte(fh.Dup)<<3 | fh.Qos<<1 | boolToByte(fh.Retain))
+ header.Write(encodeLength(fh.RemainingLength))
+ return header
+}
+
+func (fh *FixedHeader) unpack(typeAndFlags byte, r io.Reader) error {
+ fh.MessageType = typeAndFlags >> 4
+ fh.Dup = (typeAndFlags>>3)&0x01 > 0
+ fh.Qos = (typeAndFlags >> 1) & 0x03
+ fh.Retain = typeAndFlags&0x01 > 0
+
+ var err error
+ fh.RemainingLength, err = decodeLength(r)
+ return err
+}
+
+func decodeByte(b io.Reader) (byte, error) {
+ num := make([]byte, 1)
+ _, err := b.Read(num)
+ if err != nil {
+ return 0, err
+ }
+
+ return num[0], nil
+}
+
+func decodeUint16(b io.Reader) (uint16, error) {
+ num := make([]byte, 2)
+ _, err := b.Read(num)
+ if err != nil {
+ return 0, err
+ }
+ return binary.BigEndian.Uint16(num), nil
+}
+
+func encodeUint16(num uint16) []byte {
+ bytes := make([]byte, 2)
+ binary.BigEndian.PutUint16(bytes, num)
+ return bytes
+}
+
+func encodeString(field string) []byte {
+ return encodeBytes([]byte(field))
+}
+
+func decodeString(b io.Reader) (string, error) {
+ buf, err := decodeBytes(b)
+ return string(buf), err
+}
+
+func decodeBytes(b io.Reader) ([]byte, error) {
+ fieldLength, err := decodeUint16(b)
+ if err != nil {
+ return nil, err
+ }
+
+ field := make([]byte, fieldLength)
+ _, err = b.Read(field)
+ if err != nil {
+ return nil, err
+ }
+
+ return field, nil
+}
+
+func encodeBytes(field []byte) []byte {
+ fieldLength := make([]byte, 2)
+ binary.BigEndian.PutUint16(fieldLength, uint16(len(field)))
+ return append(fieldLength, field...)
+}
+
+func encodeLength(length int) []byte {
+ var encLength []byte
+ for {
+ digit := byte(length % 128)
+ length /= 128
+ if length > 0 {
+ digit |= 0x80
+ }
+ encLength = append(encLength, digit)
+ if length == 0 {
+ break
+ }
+ }
+ return encLength
+}
+
+func decodeLength(r io.Reader) (int, error) {
+ var rLength uint32
+ var multiplier uint32
+ b := make([]byte, 1)
+ for multiplier < 27 { // MQTT allows at most 4 length bytes; bounding the loop avoids spinning forever on malformed input
+ _, err := io.ReadFull(r, b)
+ if err != nil {
+ return 0, err
+ }
+
+ digit := b[0]
+ rLength |= uint32(digit&127) << multiplier
+ if (digit & 128) == 0 {
+ break
+ }
+ multiplier += 7
+ }
+ return int(rLength), nil
+}
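
encodeLength and decodeLength implement MQTT's variable-length "remaining length" field: seven payload bits per byte, with the high bit as a continuation flag, so values up to 127 fit in one byte and 321 becomes 0xC1 0x02. Because those helpers are unexported, the sketch below is an illustrative reimplementation of the same scheme rather than a call into the vendored package:

```go
package main

import "fmt"

// encodeRemainingLength mirrors the vendored encodeLength: 7 bits per byte,
// most-significant bit set while more bytes follow.
func encodeRemainingLength(length int) []byte {
	var enc []byte
	for {
		digit := byte(length % 128)
		length /= 128
		if length > 0 {
			digit |= 0x80
		}
		enc = append(enc, digit)
		if length == 0 {
			return enc
		}
	}
}

// decodeRemainingLength reverses the encoding (at most 4 bytes per the MQTT spec).
func decodeRemainingLength(data []byte) int {
	var value, shift uint32
	for _, b := range data {
		value |= uint32(b&0x7F) << shift
		if b&0x80 == 0 {
			break
		}
		shift += 7
	}
	return int(value)
}

func main() {
	enc := encodeRemainingLength(321)
	fmt.Printf("% X\n", enc)                // C1 02
	fmt.Println(decodeRemainingLength(enc)) // 321
}
```
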
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingreq.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingreq.go
new file mode 100644
index 0000000..5c3e88f
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingreq.go
@@ -0,0 +1,36 @@
+package packets
+
+import (
+ "fmt"
+ "io"
+)
+
+//PingreqPacket is an internal representation of the fields of the
+//Pingreq MQTT packet
+type PingreqPacket struct {
+ FixedHeader
+}
+
+func (pr *PingreqPacket) String() string {
+ str := fmt.Sprintf("%s", pr.FixedHeader)
+ return str
+}
+
+func (pr *PingreqPacket) Write(w io.Writer) error {
+ packet := pr.FixedHeader.pack()
+ _, err := packet.WriteTo(w)
+
+ return err
+}
+
+//Unpack decodes the details of a ControlPacket after the fixed
+//header has been read
+func (pr *PingreqPacket) Unpack(b io.Reader) error {
+ return nil
+}
+
+//Details returns a Details struct containing the Qos and
+//MessageID of this ControlPacket
+func (pr *PingreqPacket) Details() Details {
+ return Details{Qos: 0, MessageID: 0}
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingresp.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingresp.go
new file mode 100644
index 0000000..39ebc00
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingresp.go
@@ -0,0 +1,36 @@
+package packets
+
+import (
+ "fmt"
+ "io"
+)
+
+//PingrespPacket is an internal representation of the fields of the
+//Pingresp MQTT packet
+type PingrespPacket struct {
+ FixedHeader
+}
+
+func (pr *PingrespPacket) String() string {
+ str := fmt.Sprintf("%s", pr.FixedHeader)
+ return str
+}
+
+func (pr *PingrespPacket) Write(w io.Writer) error {
+ packet := pr.FixedHeader.pack()
+ _, err := packet.WriteTo(w)
+
+ return err
+}
+
+//Unpack decodes the details of a ControlPacket after the fixed
+//header has been read
+func (pr *PingrespPacket) Unpack(b io.Reader) error {
+ return nil
+}
+
+//Details returns a Details struct containing the Qos and
+//MessageID of this ControlPacket
+func (pr *PingrespPacket) Details() Details {
+ return Details{Qos: 0, MessageID: 0}
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/puback.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/puback.go
new file mode 100644
index 0000000..7c0cd7e
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/puback.go
@@ -0,0 +1,45 @@
+package packets
+
+import (
+ "fmt"
+ "io"
+)
+
+//PubackPacket is an internal representation of the fields of the
+//Puback MQTT packet
+type PubackPacket struct {
+ FixedHeader
+ MessageID uint16
+}
+
+func (pa *PubackPacket) String() string {
+ str := fmt.Sprintf("%s", pa.FixedHeader)
+ str += " "
+ str += fmt.Sprintf("MessageID: %d", pa.MessageID)
+ return str
+}
+
+func (pa *PubackPacket) Write(w io.Writer) error {
+ var err error
+ pa.FixedHeader.RemainingLength = 2
+ packet := pa.FixedHeader.pack()
+ packet.Write(encodeUint16(pa.MessageID))
+ _, err = packet.WriteTo(w)
+
+ return err
+}
+
+//Unpack decodes the details of a ControlPacket after the fixed
+//header has been read
+func (pa *PubackPacket) Unpack(b io.Reader) error {
+ var err error
+ pa.MessageID, err = decodeUint16(b)
+
+ return err
+}
+
+//Details returns a Details struct containing the Qos and
+//MessageID of this ControlPacket
+func (pa *PubackPacket) Details() Details {
+ return Details{Qos: pa.Qos, MessageID: pa.MessageID}
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubcomp.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubcomp.go
new file mode 100644
index 0000000..4f6f6e2
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubcomp.go
@@ -0,0 +1,45 @@
+package packets
+
+import (
+ "fmt"
+ "io"
+)
+
+//PubcompPacket is an internal representation of the fields of the
+//Pubcomp MQTT packet
+type PubcompPacket struct {
+ FixedHeader
+ MessageID uint16
+}
+
+func (pc *PubcompPacket) String() string {
+ str := fmt.Sprintf("%s", pc.FixedHeader)
+ str += " "
+ str += fmt.Sprintf("MessageID: %d", pc.MessageID)
+ return str
+}
+
+func (pc *PubcompPacket) Write(w io.Writer) error {
+ var err error
+ pc.FixedHeader.RemainingLength = 2
+ packet := pc.FixedHeader.pack()
+ packet.Write(encodeUint16(pc.MessageID))
+ _, err = packet.WriteTo(w)
+
+ return err
+}
+
+//Unpack decodes the details of a ControlPacket after the fixed
+//header has been read
+func (pc *PubcompPacket) Unpack(b io.Reader) error {
+ var err error
+ pc.MessageID, err = decodeUint16(b)
+
+ return err
+}
+
+//Details returns a Details struct containing the Qos and
+//MessageID of this ControlPacket
+func (pc *PubcompPacket) Details() Details {
+ return Details{Qos: pc.Qos, MessageID: pc.MessageID}
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/publish.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/publish.go
new file mode 100644
index 0000000..adc9adb
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/publish.go
@@ -0,0 +1,88 @@
+package packets
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+)
+
+//PublishPacket is an internal representation of the fields of the
+//Publish MQTT packet
+type PublishPacket struct {
+ FixedHeader
+ TopicName string
+ MessageID uint16
+ Payload []byte
+}
+
+func (p *PublishPacket) String() string {
+ str := fmt.Sprintf("%s", p.FixedHeader)
+ str += " "
+ str += fmt.Sprintf("topicName: %s MessageID: %d", p.TopicName, p.MessageID)
+ str += " "
+ str += fmt.Sprintf("payload: %s", string(p.Payload))
+ return str
+}
+
+func (p *PublishPacket) Write(w io.Writer) error {
+ var body bytes.Buffer
+ var err error
+
+ body.Write(encodeString(p.TopicName))
+ if p.Qos > 0 {
+ body.Write(encodeUint16(p.MessageID))
+ }
+ p.FixedHeader.RemainingLength = body.Len() + len(p.Payload)
+ packet := p.FixedHeader.pack()
+ packet.Write(body.Bytes())
+ packet.Write(p.Payload)
+ _, err = w.Write(packet.Bytes())
+
+ return err
+}
+
+//Unpack decodes the details of a ControlPacket after the fixed
+//header has been read
+func (p *PublishPacket) Unpack(b io.Reader) error {
+ var payloadLength = p.FixedHeader.RemainingLength
+ var err error
+ p.TopicName, err = decodeString(b)
+ if err != nil {
+ return err
+ }
+
+ if p.Qos > 0 {
+ p.MessageID, err = decodeUint16(b)
+ if err != nil {
+ return err
+ }
+ payloadLength -= len(p.TopicName) + 4
+ } else {
+ payloadLength -= len(p.TopicName) + 2
+ }
+ if payloadLength < 0 {
+ return fmt.Errorf("Error unpacking publish, payload length < 0")
+ }
+ p.Payload = make([]byte, payloadLength)
+ _, err = b.Read(p.Payload)
+
+ return err
+}
+
+//Copy creates a new PublishPacket with the same topic and payload
+//but an empty fixed header, useful for when you want to deliver
+//a message with different properties such as Qos but the same
+//content
+func (p *PublishPacket) Copy() *PublishPacket {
+ newP := NewControlPacket(Publish).(*PublishPacket)
+ newP.TopicName = p.TopicName
+ newP.Payload = p.Payload
+
+ return newP
+}
+
+//Details returns a Details struct containing the Qos and
+//MessageID of this ControlPacket
+func (p *PublishPacket) Details() Details {
+ return Details{Qos: p.Qos, MessageID: p.MessageID}
+}
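
Putting the pieces together, a PUBLISH packet written by Write can be read straight back with packets.ReadPacket; the topic and payload below are placeholders:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/eclipse/paho.mqtt.golang/packets"
)

func main() {
	pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
	pub.TopicName = "road/telemetry" // placeholder topic
	pub.Qos = 1
	pub.MessageID = 42
	pub.Payload = []byte(`{"speed": 3}`) // placeholder payload

	var buf bytes.Buffer
	if err := pub.Write(&buf); err != nil {
		log.Fatal(err)
	}

	cp, err := packets.ReadPacket(&buf)
	if err != nil {
		log.Fatal(err)
	}
	got := cp.(*packets.PublishPacket)
	fmt.Println(got.TopicName, got.Details().MessageID, string(got.Payload))
	// road/telemetry 42 {"speed": 3}
}
```
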
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrec.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrec.go
new file mode 100644
index 0000000..483372b
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrec.go
@@ -0,0 +1,45 @@
+package packets
+
+import (
+ "fmt"
+ "io"
+)
+
+//PubrecPacket is an internal representation of the fields of the
+//Pubrec MQTT packet
+type PubrecPacket struct {
+ FixedHeader
+ MessageID uint16
+}
+
+func (pr *PubrecPacket) String() string {
+ str := fmt.Sprintf("%s", pr.FixedHeader)
+ str += " "
+ str += fmt.Sprintf("MessageID: %d", pr.MessageID)
+ return str
+}
+
+func (pr *PubrecPacket) Write(w io.Writer) error {
+ var err error
+ pr.FixedHeader.RemainingLength = 2
+ packet := pr.FixedHeader.pack()
+ packet.Write(encodeUint16(pr.MessageID))
+ _, err = packet.WriteTo(w)
+
+ return err
+}
+
+//Unpack decodes the details of a ControlPacket after the fixed
+//header has been read
+func (pr *PubrecPacket) Unpack(b io.Reader) error {
+ var err error
+ pr.MessageID, err = decodeUint16(b)
+
+ return err
+}
+
+//Details returns a Details struct containing the Qos and
+//MessageID of this ControlPacket
+func (pr *PubrecPacket) Details() Details {
+ return Details{Qos: pr.Qos, MessageID: pr.MessageID}
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrel.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrel.go
new file mode 100644
index 0000000..8590fd9
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrel.go
@@ -0,0 +1,45 @@
+package packets
+
+import (
+ "fmt"
+ "io"
+)
+
+//PubrelPacket is an internal representation of the fields of the
+//Pubrel MQTT packet
+type PubrelPacket struct {
+ FixedHeader
+ MessageID uint16
+}
+
+func (pr *PubrelPacket) String() string {
+ str := fmt.Sprintf("%s", pr.FixedHeader)
+ str += " "
+ str += fmt.Sprintf("MessageID: %d", pr.MessageID)
+ return str
+}
+
+func (pr *PubrelPacket) Write(w io.Writer) error {
+ var err error
+ pr.FixedHeader.RemainingLength = 2
+ packet := pr.FixedHeader.pack()
+ packet.Write(encodeUint16(pr.MessageID))
+ _, err = packet.WriteTo(w)
+
+ return err
+}
+
+//Unpack decodes the details of a ControlPacket after the fixed
+//header has been read
+func (pr *PubrelPacket) Unpack(b io.Reader) error {
+ var err error
+ pr.MessageID, err = decodeUint16(b)
+
+ return err
+}
+
+//Details returns a Details struct containing the Qos and
+//MessageID of this ControlPacket
+func (pr *PubrelPacket) Details() Details {
+ return Details{Qos: pr.Qos, MessageID: pr.MessageID}
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/suback.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/suback.go
new file mode 100644
index 0000000..fc05724
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/suback.go
@@ -0,0 +1,60 @@
+package packets
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+)
+
+//SubackPacket is an internal representation of the fields of the
+//Suback MQTT packet
+type SubackPacket struct {
+ FixedHeader
+ MessageID uint16
+ ReturnCodes []byte
+}
+
+func (sa *SubackPacket) String() string {
+ str := fmt.Sprintf("%s", sa.FixedHeader)
+ str += " "
+ str += fmt.Sprintf("MessageID: %d", sa.MessageID)
+ return str
+}
+
+func (sa *SubackPacket) Write(w io.Writer) error {
+ var body bytes.Buffer
+ var err error
+ body.Write(encodeUint16(sa.MessageID))
+ body.Write(sa.ReturnCodes)
+ sa.FixedHeader.RemainingLength = body.Len()
+ packet := sa.FixedHeader.pack()
+ packet.Write(body.Bytes())
+ _, err = packet.WriteTo(w)
+
+ return err
+}
+
+//Unpack decodes the details of a ControlPacket after the fixed
+//header has been read
+func (sa *SubackPacket) Unpack(b io.Reader) error {
+ var qosBuffer bytes.Buffer
+ var err error
+ sa.MessageID, err = decodeUint16(b)
+ if err != nil {
+ return err
+ }
+
+ _, err = qosBuffer.ReadFrom(b)
+ if err != nil {
+ return err
+ }
+ sa.ReturnCodes = qosBuffer.Bytes()
+
+ return nil
+}
+
+//Details returns a Details struct containing the Qos and
+//MessageID of this ControlPacket
+func (sa *SubackPacket) Details() Details {
+ return Details{Qos: 0, MessageID: sa.MessageID}
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/subscribe.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/subscribe.go
new file mode 100644
index 0000000..0787ce0
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/subscribe.go
@@ -0,0 +1,72 @@
+package packets
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+)
+
+//SubscribePacket is an internal representation of the fields of the
+//Subscribe MQTT packet
+type SubscribePacket struct {
+ FixedHeader
+ MessageID uint16
+ Topics []string
+ Qoss []byte
+}
+
+func (s *SubscribePacket) String() string {
+ str := fmt.Sprintf("%s", s.FixedHeader)
+ str += " "
+ str += fmt.Sprintf("MessageID: %d topics: %s", s.MessageID, s.Topics)
+ return str
+}
+
+func (s *SubscribePacket) Write(w io.Writer) error {
+ var body bytes.Buffer
+ var err error
+
+ body.Write(encodeUint16(s.MessageID))
+ for i, topic := range s.Topics {
+ body.Write(encodeString(topic))
+ body.WriteByte(s.Qoss[i])
+ }
+ s.FixedHeader.RemainingLength = body.Len()
+ packet := s.FixedHeader.pack()
+ packet.Write(body.Bytes())
+ _, err = packet.WriteTo(w)
+
+ return err
+}
+
+//Unpack decodes the details of a ControlPacket after the fixed
+//header has been read
+func (s *SubscribePacket) Unpack(b io.Reader) error {
+ var err error
+ s.MessageID, err = decodeUint16(b)
+ if err != nil {
+ return err
+ }
+ payloadLength := s.FixedHeader.RemainingLength - 2
+ for payloadLength > 0 {
+ topic, err := decodeString(b)
+ if err != nil {
+ return err
+ }
+ s.Topics = append(s.Topics, topic)
+ qos, err := decodeByte(b)
+ if err != nil {
+ return err
+ }
+ s.Qoss = append(s.Qoss, qos)
+ payloadLength -= 2 + len(topic) + 1 //2 bytes of string length, plus string, plus 1 byte for Qos
+ }
+
+ return nil
+}
+
+//Details returns a Details struct containing the Qos and
+//MessageID of this ControlPacket
+func (s *SubscribePacket) Details() Details {
+ return Details{Qos: 1, MessageID: s.MessageID}
+}
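+
+// Illustrative sketch (not part of the upstream library): constructing a
+// SubscribePacket for two topic filters and serialising it with Write. The
+// Topics and Qoss slices are expected to be the same length, one requested
+// Qos per filter.
+func exampleSubscribe() ([]byte, error) {
+	sub := NewControlPacket(Subscribe).(*SubscribePacket)
+	sub.MessageID = 1
+	sub.Topics = []string{"devices/+/status", "alerts/#"}
+	sub.Qoss = []byte{1, 2} // one requested Qos per topic filter
+
+	var buf bytes.Buffer
+	if err := sub.Write(&buf); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}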
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsuback.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsuback.go
new file mode 100644
index 0000000..4b40c27
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsuback.go
@@ -0,0 +1,45 @@
+package packets
+
+import (
+ "fmt"
+ "io"
+)
+
+//UnsubackPacket is an internal representation of the fields of the
+//Unsuback MQTT packet
+type UnsubackPacket struct {
+ FixedHeader
+ MessageID uint16
+}
+
+func (ua *UnsubackPacket) String() string {
+ str := fmt.Sprintf("%s", ua.FixedHeader)
+ str += " "
+ str += fmt.Sprintf("MessageID: %d", ua.MessageID)
+ return str
+}
+
+func (ua *UnsubackPacket) Write(w io.Writer) error {
+ var err error
+ ua.FixedHeader.RemainingLength = 2
+ packet := ua.FixedHeader.pack()
+ packet.Write(encodeUint16(ua.MessageID))
+ _, err = packet.WriteTo(w)
+
+ return err
+}
+
+//Unpack decodes the details of a ControlPacket after the fixed
+//header has been read
+func (ua *UnsubackPacket) Unpack(b io.Reader) error {
+ var err error
+ ua.MessageID, err = decodeUint16(b)
+
+ return err
+}
+
+//Details returns a Details struct containing the Qos and
+//MessageID of this ControlPacket
+func (ua *UnsubackPacket) Details() Details {
+ return Details{Qos: 0, MessageID: ua.MessageID}
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsubscribe.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsubscribe.go
new file mode 100644
index 0000000..2012c31
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsubscribe.go
@@ -0,0 +1,59 @@
+package packets
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+)
+
+//UnsubscribePacket is an internal representation of the fields of the
+//Unsubscribe MQTT packet
+type UnsubscribePacket struct {
+ FixedHeader
+ MessageID uint16
+ Topics []string
+}
+
+func (u *UnsubscribePacket) String() string {
+ str := fmt.Sprintf("%s", u.FixedHeader)
+ str += " "
+ str += fmt.Sprintf("MessageID: %d", u.MessageID)
+ return str
+}
+
+func (u *UnsubscribePacket) Write(w io.Writer) error {
+ var body bytes.Buffer
+ var err error
+ body.Write(encodeUint16(u.MessageID))
+ for _, topic := range u.Topics {
+ body.Write(encodeString(topic))
+ }
+ u.FixedHeader.RemainingLength = body.Len()
+ packet := u.FixedHeader.pack()
+ packet.Write(body.Bytes())
+ _, err = packet.WriteTo(w)
+
+ return err
+}
+
+//Unpack decodes the details of a ControlPacket after the fixed
+//header has been read
+func (u *UnsubscribePacket) Unpack(b io.Reader) error {
+ var err error
+ u.MessageID, err = decodeUint16(b)
+ if err != nil {
+ return err
+ }
+
+ for topic, err := decodeString(b); err == nil && topic != ""; topic, err = decodeString(b) {
+ u.Topics = append(u.Topics, topic)
+ }
+
+ return err
+}
+
+//Details returns a Details struct containing the Qos and
+//MessageID of this ControlPacket
+func (u *UnsubscribePacket) Details() Details {
+ return Details{Qos: 1, MessageID: u.MessageID}
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/ping.go b/vendor/github.com/eclipse/paho.mqtt.golang/ping.go
new file mode 100644
index 0000000..dcbcb1d
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/ping.go
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Seth Hoenig
+ * Allan Stockdill-Mander
+ * Mike Robertson
+ */
+
+package mqtt
+
+import (
+ "errors"
+ "sync/atomic"
+ "time"
+
+ "github.com/eclipse/paho.mqtt.golang/packets"
+)
+
+func keepalive(c *client) {
+ defer c.workers.Done()
+ DEBUG.Println(PNG, "keepalive starting")
+ var checkInterval int64
+ var pingSent time.Time
+
+ if c.options.KeepAlive > 10 {
+ checkInterval = 5
+ } else {
+ checkInterval = c.options.KeepAlive / 2
+ }
+
+ intervalTicker := time.NewTicker(time.Duration(checkInterval * int64(time.Second)))
+ defer intervalTicker.Stop()
+
+ for {
+ select {
+ case <-c.stop:
+ DEBUG.Println(PNG, "keepalive stopped")
+ return
+ case <-intervalTicker.C:
+ lastSent := c.lastSent.Load().(time.Time)
+ lastReceived := c.lastReceived.Load().(time.Time)
+
+ DEBUG.Println(PNG, "ping check", time.Since(lastSent).Seconds())
+ if time.Since(lastSent) >= time.Duration(c.options.KeepAlive*int64(time.Second)) || time.Since(lastReceived) >= time.Duration(c.options.KeepAlive*int64(time.Second)) {
+ if atomic.LoadInt32(&c.pingOutstanding) == 0 {
+ DEBUG.Println(PNG, "keepalive sending ping")
+ ping := packets.NewControlPacket(packets.Pingreq).(*packets.PingreqPacket)
+					//We don't want to wait behind large messages being sent; the Write call
+					//will block until it is able to send the packet.
+ atomic.StoreInt32(&c.pingOutstanding, 1)
+ ping.Write(c.conn)
+ c.lastSent.Store(time.Now())
+ pingSent = time.Now()
+ }
+ }
+ if atomic.LoadInt32(&c.pingOutstanding) > 0 && time.Now().Sub(pingSent) >= c.options.PingTimeout {
+ CRITICAL.Println(PNG, "pingresp not received, disconnecting")
+ c.errors <- errors.New("pingresp not received, disconnecting")
+ return
+ }
+ }
+ }
+}
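+
+// Illustrative sketch (not part of the upstream library): mirrors the
+// staleness check the keepalive loop above performs on each tick, to make the
+// relationship between the configured KeepAlive (in seconds) and the last
+// send/receive times explicit. A ping is due once either direction has been
+// quiet for at least the keepalive interval.
+func shouldPing(keepAliveSeconds int64, lastSent, lastReceived time.Time) bool {
+	interval := time.Duration(keepAliveSeconds) * time.Second
+	return time.Since(lastSent) >= interval || time.Since(lastReceived) >= interval
+}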
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/router.go b/vendor/github.com/eclipse/paho.mqtt.golang/router.go
new file mode 100644
index 0000000..7b4e8f8
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/router.go
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Seth Hoenig
+ * Allan Stockdill-Mander
+ * Mike Robertson
+ */
+
+package mqtt
+
+import (
+ "container/list"
+ "strings"
+ "sync"
+
+ "github.com/eclipse/paho.mqtt.golang/packets"
+)
+
+// route is a type which associates MQTT Topic strings with a
+// callback to be executed upon the arrival of a message associated
+// with a subscription to that topic.
+type route struct {
+ topic string
+ callback MessageHandler
+}
+
+// match takes a slice of strings which represent the route being tested having been split on '/'
+// separators, and a slice of strings representing the topic string in the published message, similarly
+// split.
+// The function determines whether the topic string matches the route according to the MQTT topic
+// matching rules and returns true if it does.
+func match(route []string, topic []string) bool {
+ if len(route) == 0 {
+ if len(topic) == 0 {
+ return true
+ }
+ return false
+ }
+
+ if len(topic) == 0 {
+ if route[0] == "#" {
+ return true
+ }
+ return false
+ }
+
+ if route[0] == "#" {
+ return true
+ }
+
+ if (route[0] == "+") || (route[0] == topic[0]) {
+ return match(route[1:], topic[1:])
+ }
+ return false
+}
+
+func routeIncludesTopic(route, topic string) bool {
+ return match(routeSplit(route), strings.Split(topic, "/"))
+}
+
+// removes $share and sharename when splitting the route to allow
+// shared subscription routes to correctly match the topic
+func routeSplit(route string) []string {
+ var result []string
+ if strings.HasPrefix(route, "$share") {
+ result = strings.Split(route, "/")[2:]
+ } else {
+ result = strings.Split(route, "/")
+ }
+ return result
+}
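+
+// Illustrative sketch (not part of the upstream library): expected behaviour
+// of routeIncludesTopic for the wildcard and shared-subscription cases the
+// comments above describe. The results follow from match and routeSplit.
+func exampleRouteMatching() []bool {
+	return []bool{
+		routeIncludesTopic("sensors/+/temp", "sensors/kitchen/temp"),              // true: '+' matches one level
+		routeIncludesTopic("sensors/#", "sensors/kitchen/temp"),                   // true: '#' matches the remaining levels
+		routeIncludesTopic("sensors/#", "sensors"),                                // true: '#' also matches the parent level
+		routeIncludesTopic("$share/group1/sensors/+/temp", "sensors/kitchen/temp"), // true: share prefix is stripped
+		routeIncludesTopic("sensors/+/temp", "sensors/kitchen/humidity"),          // false
+	}
+}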
+
+// match takes the topic string of the published message and does a basic compare to the
+// string of the current Route; if they match it returns true
+func (r *route) match(topic string) bool {
+ return r.topic == topic || routeIncludesTopic(r.topic, topic)
+}
+
+type router struct {
+ sync.RWMutex
+ routes *list.List
+ defaultHandler MessageHandler
+ messages chan *packets.PublishPacket
+ stop chan bool
+}
+
+// newRouter returns a new instance of a Router and channel which can be used to tell the Router
+// to stop
+func newRouter() (*router, chan bool) {
+ router := &router{routes: list.New(), messages: make(chan *packets.PublishPacket), stop: make(chan bool)}
+ stop := router.stop
+ return router, stop
+}
+
+// addRoute takes a topic string and MessageHandler callback. It looks in the current list of
+// routes to see if there is already a matching Route. If there is, it replaces the current
+// callback with the new one. If not, it adds a new entry to the list of Routes.
+func (r *router) addRoute(topic string, callback MessageHandler) {
+ r.Lock()
+ defer r.Unlock()
+ for e := r.routes.Front(); e != nil; e = e.Next() {
+ if e.Value.(*route).match(topic) {
+ r := e.Value.(*route)
+ r.callback = callback
+ return
+ }
+ }
+ r.routes.PushBack(&route{topic: topic, callback: callback})
+}
+
+// deleteRoute takes a route string and looks for a matching Route in the list of Routes. If
+// one is found it removes the Route from the list.
+func (r *router) deleteRoute(topic string) {
+ r.Lock()
+ defer r.Unlock()
+ for e := r.routes.Front(); e != nil; e = e.Next() {
+ if e.Value.(*route).match(topic) {
+ r.routes.Remove(e)
+ return
+ }
+ }
+}
+
+// setDefaultHandler assigns a default callback that will be called if no matching Route
+// is found for an incoming Publish.
+func (r *router) setDefaultHandler(handler MessageHandler) {
+ r.Lock()
+ defer r.Unlock()
+ r.defaultHandler = handler
+}
+
+// matchAndDispatch takes a channel of Message pointers as input and starts a goroutine that
+// takes messages off the channel, matches them against the internal route list and calls the
+// associated callback (or the defaultHandler, if one exists and no other route matched). If
+// anything is sent down the stop channel the function will end.
+func (r *router) matchAndDispatch(messages <-chan *packets.PublishPacket, order bool, client *client) {
+ go func() {
+ for {
+ select {
+ case message := <-messages:
+ sent := false
+ r.RLock()
+ m := messageFromPublish(message, client.ackFunc(message))
+ handlers := []MessageHandler{}
+ for e := r.routes.Front(); e != nil; e = e.Next() {
+ if e.Value.(*route).match(message.TopicName) {
+ if order {
+ handlers = append(handlers, e.Value.(*route).callback)
+ } else {
+ hd := e.Value.(*route).callback
+ go func() {
+ hd(client, m)
+ m.Ack()
+ }()
+ }
+ sent = true
+ }
+ }
+ if !sent && r.defaultHandler != nil {
+ if order {
+ handlers = append(handlers, r.defaultHandler)
+ } else {
+ go func() {
+ r.defaultHandler(client, m)
+ m.Ack()
+ }()
+ }
+ }
+ r.RUnlock()
+ for _, handler := range handlers {
+ func() {
+ handler(client, m)
+ m.Ack()
+ }()
+ }
+ case <-r.stop:
+ return
+ }
+ }
+ }()
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/store.go b/vendor/github.com/eclipse/paho.mqtt.golang/store.go
new file mode 100644
index 0000000..24a76b7
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/store.go
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Seth Hoenig
+ * Allan Stockdill-Mander
+ * Mike Robertson
+ */
+
+package mqtt
+
+import (
+ "fmt"
+ "strconv"
+
+ "github.com/eclipse/paho.mqtt.golang/packets"
+)
+
+const (
+ inboundPrefix = "i."
+ outboundPrefix = "o."
+)
+
+// Store is an interface which can be used to provide implementations
+// for message persistence.
+// Because we may have to store distinct messages with the same
+// message ID, we need a unique key for each message. This is
+// possible by prepending "i." or "o." to each message id
+type Store interface {
+ Open()
+ Put(key string, message packets.ControlPacket)
+ Get(key string) packets.ControlPacket
+ All() []string
+ Del(key string)
+ Close()
+ Reset()
+}
+
+// A key MUST have the form "X.[messageid]"
+// where X is 'i' or 'o'
+func mIDFromKey(key string) uint16 {
+ s := key[2:]
+ i, err := strconv.Atoi(s)
+ chkerr(err)
+ return uint16(i)
+}
+
+// Return true if key prefix is outbound
+func isKeyOutbound(key string) bool {
+ return key[:2] == outboundPrefix
+}
+
+// Return true if key prefix is inbound
+func isKeyInbound(key string) bool {
+ return key[:2] == inboundPrefix
+}
+
+// Return a string of the form "i.[id]"
+func inboundKeyFromMID(id uint16) string {
+ return fmt.Sprintf("%s%d", inboundPrefix, id)
+}
+
+// Return a string of the form "o.[id]"
+func outboundKeyFromMID(id uint16) string {
+ return fmt.Sprintf("%s%d", outboundPrefix, id)
+}
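+
+// Illustrative sketch (not part of the upstream library): the round trip
+// between message IDs and store keys using the helpers above. For message ID
+// 10 the outbound key is "o.10" and the inbound key is "i.10".
+func exampleStoreKeys() bool {
+	out := outboundKeyFromMID(10) // "o.10"
+	in := inboundKeyFromMID(10)   // "i.10"
+	return isKeyOutbound(out) && isKeyInbound(in) &&
+		mIDFromKey(out) == 10 && mIDFromKey(in) == 10
+}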
+
+// govern which outgoing messages are persisted
+func persistOutbound(s Store, m packets.ControlPacket) {
+ switch m.Details().Qos {
+ case 0:
+ switch m.(type) {
+ case *packets.PubackPacket, *packets.PubcompPacket:
+ // Sending puback. delete matching publish
+ // from ibound
+ s.Del(inboundKeyFromMID(m.Details().MessageID))
+ }
+ case 1:
+ switch m.(type) {
+ case *packets.PublishPacket, *packets.PubrelPacket, *packets.SubscribePacket, *packets.UnsubscribePacket:
+ // Sending publish. store in obound
+ // until puback received
+ s.Put(outboundKeyFromMID(m.Details().MessageID), m)
+ default:
+ ERROR.Println(STR, "Asked to persist an invalid message type")
+ }
+ case 2:
+ switch m.(type) {
+ case *packets.PublishPacket:
+ // Sending publish. store in obound
+ // until pubrel received
+ s.Put(outboundKeyFromMID(m.Details().MessageID), m)
+ default:
+ ERROR.Println(STR, "Asked to persist an invalid message type")
+ }
+ }
+}
+
+// govern which incoming messages are persisted
+func persistInbound(s Store, m packets.ControlPacket) {
+ switch m.Details().Qos {
+ case 0:
+ switch m.(type) {
+ case *packets.PubackPacket, *packets.SubackPacket, *packets.UnsubackPacket, *packets.PubcompPacket:
+ // Received a puback. delete matching publish
+ // from obound
+ s.Del(outboundKeyFromMID(m.Details().MessageID))
+ case *packets.PublishPacket, *packets.PubrecPacket, *packets.PingrespPacket, *packets.ConnackPacket:
+ default:
+			ERROR.Println(STR, "Asked to persist an invalid message type")
+ }
+ case 1:
+ switch m.(type) {
+ case *packets.PublishPacket, *packets.PubrelPacket:
+ // Received a publish. store it in ibound
+ // until puback sent
+ s.Put(inboundKeyFromMID(m.Details().MessageID), m)
+ default:
+			ERROR.Println(STR, "Asked to persist an invalid message type")
+ }
+ case 2:
+ switch m.(type) {
+ case *packets.PublishPacket:
+ // Received a publish. store it in ibound
+ // until pubrel received
+ s.Put(inboundKeyFromMID(m.Details().MessageID), m)
+ default:
+			ERROR.Println(STR, "Asked to persist an invalid message type")
+ }
+ }
+}
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/token.go b/vendor/github.com/eclipse/paho.mqtt.golang/token.go
new file mode 100644
index 0000000..0818553
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/token.go
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2014 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Allan Stockdill-Mander
+ */
+
+package mqtt
+
+import (
+ "sync"
+ "time"
+
+ "github.com/eclipse/paho.mqtt.golang/packets"
+)
+
+// PacketAndToken is a struct that contains both a ControlPacket and a
+// Token. This struct is passed via channels between the client interface
+// code and the underlying code responsible for sending and receiving
+// MQTT messages.
+type PacketAndToken struct {
+ p packets.ControlPacket
+ t tokenCompletor
+}
+
+// Token defines the interface for the tokens used to indicate when
+// actions have completed.
+type Token interface {
+ Wait() bool
+ WaitTimeout(time.Duration) bool
+ Error() error
+}
+
+type TokenErrorSetter interface {
+ setError(error)
+}
+
+type tokenCompletor interface {
+ Token
+ TokenErrorSetter
+ flowComplete()
+}
+
+type baseToken struct {
+ m sync.RWMutex
+ complete chan struct{}
+ err error
+}
+
+// Wait will wait indefinitely for the Token to complete, i.e. for the Publish
+// to be sent and its receipt confirmed by the broker
+func (b *baseToken) Wait() bool {
+ <-b.complete
+ return true
+}
+
+// WaitTimeout takes a time.Duration to wait for the flow associated with the
+// Token to complete, returns true if it returned before the timeout or
+// returns false if the timeout occurred. In the case of a timeout the Token
+// does not have an error set in case the caller wishes to wait again
+func (b *baseToken) WaitTimeout(d time.Duration) bool {
+ b.m.Lock()
+ defer b.m.Unlock()
+
+ timer := time.NewTimer(d)
+ select {
+ case <-b.complete:
+ if !timer.Stop() {
+ <-timer.C
+ }
+ return true
+ case <-timer.C:
+ }
+
+ return false
+}
+
+func (b *baseToken) flowComplete() {
+ select {
+ case <-b.complete:
+ default:
+ close(b.complete)
+ }
+}
+
+func (b *baseToken) Error() error {
+ b.m.RLock()
+ defer b.m.RUnlock()
+ return b.err
+}
+
+func (b *baseToken) setError(e error) {
+ b.m.Lock()
+ b.err = e
+ b.flowComplete()
+ b.m.Unlock()
+}
+
+func newToken(tType byte) tokenCompletor {
+ switch tType {
+ case packets.Connect:
+ return &ConnectToken{baseToken: baseToken{complete: make(chan struct{})}}
+ case packets.Subscribe:
+ return &SubscribeToken{baseToken: baseToken{complete: make(chan struct{})}, subResult: make(map[string]byte)}
+ case packets.Publish:
+ return &PublishToken{baseToken: baseToken{complete: make(chan struct{})}}
+ case packets.Unsubscribe:
+ return &UnsubscribeToken{baseToken: baseToken{complete: make(chan struct{})}}
+ case packets.Disconnect:
+ return &DisconnectToken{baseToken: baseToken{complete: make(chan struct{})}}
+ }
+ return nil
+}
+
+// ConnectToken is an extension of Token containing the extra fields
+// required to provide information about calls to Connect()
+type ConnectToken struct {
+ baseToken
+ returnCode byte
+ sessionPresent bool
+}
+
+// ReturnCode returns the acknowledgement code in the connack sent
+// in response to a Connect()
+func (c *ConnectToken) ReturnCode() byte {
+ c.m.RLock()
+ defer c.m.RUnlock()
+ return c.returnCode
+}
+
+// SessionPresent returns a bool representing the value of the
+// session present field in the connack sent in response to a Connect()
+func (c *ConnectToken) SessionPresent() bool {
+ c.m.RLock()
+ defer c.m.RUnlock()
+ return c.sessionPresent
+}
+
+// PublishToken is an extension of Token containing the extra fields
+// required to provide information about calls to Publish()
+type PublishToken struct {
+ baseToken
+ messageID uint16
+}
+
+// MessageID returns the MQTT message ID that was assigned to the
+// Publish packet when it was sent to the broker
+func (p *PublishToken) MessageID() uint16 {
+ return p.messageID
+}
+
+// SubscribeToken is an extension of Token containing the extra fields
+// required to provide information about calls to Subscribe()
+type SubscribeToken struct {
+ baseToken
+ subs []string
+ subResult map[string]byte
+}
+
+// Result returns a map of topics that were subscribed to along with
+// the matching return code from the broker. This is either the Qos
+// value of the subscription or an error code.
+func (s *SubscribeToken) Result() map[string]byte {
+ s.m.RLock()
+ defer s.m.RUnlock()
+ return s.subResult
+}
+
+// UnsubscribeToken is an extension of Token containing the extra fields
+// required to provide information about calls to Unsubscribe()
+type UnsubscribeToken struct {
+ baseToken
+}
+
+// DisconnectToken is an extension of Token containing the extra fields
+// required to provide information about calls to Disconnect()
+type DisconnectToken struct {
+ baseToken
+}
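+
+// Illustrative sketch (not part of the upstream library): a typical pattern
+// for consuming a Token with a bounded wait. WaitTimeout returning false only
+// means the flow has not completed yet; the token carries no error in that
+// case and may be waited on again later.
+func waitTokenWithTimeout(t Token, d time.Duration) (completed bool, err error) {
+	if !t.WaitTimeout(d) {
+		return false, nil // timed out; the operation may still complete later
+	}
+	return true, t.Error()
+}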
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/topic.go b/vendor/github.com/eclipse/paho.mqtt.golang/topic.go
new file mode 100644
index 0000000..6fa3ad2
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/topic.go
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2014 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Seth Hoenig
+ * Allan Stockdill-Mander
+ * Mike Robertson
+ */
+
+package mqtt
+
+import (
+ "errors"
+ "strings"
+)
+
+//ErrInvalidQos is the error returned when a packet is to be sent
+//with an invalid Qos value
+var ErrInvalidQos = errors.New("Invalid QoS")
+
+//ErrInvalidTopicEmptyString is the error returned when a topic string
+//is passed in that has zero length
+var ErrInvalidTopicEmptyString = errors.New("Invalid Topic; empty string")
+
+//ErrInvalidTopicMultilevel is the error returned when a topic string
+//is passed in that has the multi level wildcard in any position but
+//the last
+var ErrInvalidTopicMultilevel = errors.New("Invalid Topic; multi-level wildcard must be last level")
+
+// Topic Names and Topic Filters
+// The MQTT v3.1.1 spec clarifies a number of ambiguities with regard
+// to the validity of Topic strings.
+// - A Topic must be between 1 and 65535 bytes.
+// - A Topic is case sensitive.
+// - A Topic may contain whitespace.
+// - A Topic containing a leading forward slash is different from a Topic without.
+// - A Topic may be "/" (two levels, both empty string).
+// - A Topic must be UTF-8 encoded.
+// - A Topic may contain any number of levels.
+// - A Topic may contain an empty level (two forward slashes in a row).
+// - A TopicName may not contain a wildcard.
+// - A TopicFilter may only have a # (multi-level) wildcard as the last level.
+// - A TopicFilter may contain any number of + (single-level) wildcards.
+// - A TopicFilter with a # will match the absence of a level
+// Example: a subscription to "foo/#" will match messages published to "foo".
+
+func validateSubscribeMap(subs map[string]byte) ([]string, []byte, error) {
+ var topics []string
+ var qoss []byte
+ for topic, qos := range subs {
+ if err := validateTopicAndQos(topic, qos); err != nil {
+ return nil, nil, err
+ }
+ topics = append(topics, topic)
+ qoss = append(qoss, qos)
+ }
+
+ return topics, qoss, nil
+}
+
+func validateTopicAndQos(topic string, qos byte) error {
+ if len(topic) == 0 {
+ return ErrInvalidTopicEmptyString
+ }
+
+ levels := strings.Split(topic, "/")
+ for i, level := range levels {
+ if level == "#" && i != len(levels)-1 {
+ return ErrInvalidTopicMultilevel
+ }
+ }
+
+	if qos > 2 {
+ return ErrInvalidQos
+ }
+ return nil
+}
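+
+// Illustrative sketch (not part of the upstream library): how the validation
+// helper above treats a few representative topic filters and Qos values.
+func exampleTopicValidation() []error {
+	return []error{
+		validateTopicAndQos("foo/bar", 1),   // nil: valid filter and Qos
+		validateTopicAndQos("foo/#", 2),     // nil: '#' is allowed as the last level
+		validateTopicAndQos("foo/#/bar", 0), // ErrInvalidTopicMultilevel: '#' is not the last level
+		validateTopicAndQos("", 0),          // ErrInvalidTopicEmptyString
+		validateTopicAndQos("foo", 3),       // ErrInvalidQos
+	}
+}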
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/trace.go b/vendor/github.com/eclipse/paho.mqtt.golang/trace.go
new file mode 100644
index 0000000..195c817
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/trace.go
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ * Seth Hoenig
+ * Allan Stockdill-Mander
+ * Mike Robertson
+ */
+
+package mqtt
+
+type (
+ // Logger interface allows implementations to provide to this package any
+ // object that implements the methods defined in it.
+ Logger interface {
+ Println(v ...interface{})
+ Printf(format string, v ...interface{})
+ }
+
+	// NOOPLogger implements the Logger interface with methods that do
+	// nothing, allowing unwanted log output to be discarded efficiently.
+ NOOPLogger struct{}
+)
+
+func (NOOPLogger) Println(v ...interface{}) {}
+func (NOOPLogger) Printf(format string, v ...interface{}) {}
+
+// Internal levels of library output that are initialised to not print
+// anything but can be overridden by the programmer
+var (
+ ERROR Logger = NOOPLogger{}
+ CRITICAL Logger = NOOPLogger{}
+ WARN Logger = NOOPLogger{}
+ DEBUG Logger = NOOPLogger{}
+)
diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS
new file mode 100644
index 0000000..15167cd
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS
new file mode 100644
index 0000000..1c4577e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
new file mode 100644
index 0000000..0f64693
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/LICENSE
@@ -0,0 +1,28 @@
+Copyright 2010 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go
new file mode 100644
index 0000000..3cd3249
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/clone.go
@@ -0,0 +1,253 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+ "fmt"
+ "log"
+ "reflect"
+ "strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(src Message) Message {
+ in := reflect.ValueOf(src)
+ if in.IsNil() {
+ return src
+ }
+ out := reflect.New(in.Type().Elem())
+ dst := out.Interface().(Message)
+ Merge(dst, src)
+ return dst
+}
+
+// Merger is the interface representing objects that can merge messages of the same type.
+type Merger interface {
+ // Merge merges src into this message.
+ // Required and optional fields that are set in src will be set to that value in dst.
+ // Elements of repeated fields will be appended.
+ //
+ // Merge may panic if called with a different argument type than the receiver.
+ Merge(src Message)
+}
+
+// generatedMerger is the custom merge method that generated protos will have.
+// We must add this method since a generated Merge method will conflict with
+// many existing protos that have a Merge data field already defined.
+type generatedMerger interface {
+ XXX_Merge(src Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+ if m, ok := dst.(Merger); ok {
+ m.Merge(src)
+ return
+ }
+
+ in := reflect.ValueOf(src)
+ out := reflect.ValueOf(dst)
+ if out.IsNil() {
+ panic("proto: nil destination")
+ }
+ if in.Type() != out.Type() {
+ panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
+ }
+ if in.IsNil() {
+ return // Merge from nil src is a noop
+ }
+ if m, ok := dst.(generatedMerger); ok {
+ m.XXX_Merge(src)
+ return
+ }
+ mergeStruct(out.Elem(), in.Elem())
+}
+
+func mergeStruct(out, in reflect.Value) {
+ sprop := GetProperties(in.Type())
+ for i := 0; i < in.NumField(); i++ {
+ f := in.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+ }
+
+ if emIn, err := extendable(in.Addr().Interface()); err == nil {
+ emOut, _ := extendable(out.Addr().Interface())
+ mIn, muIn := emIn.extensionsRead()
+ if mIn != nil {
+ mOut := emOut.extensionsWrite()
+ muIn.Lock()
+ mergeExtension(mOut, mIn)
+ muIn.Unlock()
+ }
+ }
+
+ uf := in.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return
+ }
+ uin := uf.Bytes()
+ if len(uin) > 0 {
+ out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+ }
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+ if in.Type() == protoMessageType {
+ if !in.IsNil() {
+ if out.IsNil() {
+ out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+ } else {
+ Merge(out.Interface().(Message), in.Interface().(Message))
+ }
+ }
+ return
+ }
+ switch in.Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ if !viaPtr && isProto3Zero(in) {
+ return
+ }
+ out.Set(in)
+ case reflect.Interface:
+ // Probably a oneof field; copy non-nil values.
+ if in.IsNil() {
+ return
+ }
+ // Allocate destination if it is not set, or set to a different type.
+ // Otherwise we will merge as normal.
+ if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+ out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+ }
+ mergeAny(out.Elem(), in.Elem(), false, nil)
+ case reflect.Map:
+ if in.Len() == 0 {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(in.Type()))
+ }
+ // For maps with value types of *T or []byte we need to deep copy each value.
+ elemKind := in.Type().Elem().Kind()
+ for _, key := range in.MapKeys() {
+ var val reflect.Value
+ switch elemKind {
+ case reflect.Ptr:
+ val = reflect.New(in.Type().Elem().Elem())
+ mergeAny(val, in.MapIndex(key), false, nil)
+ case reflect.Slice:
+ val = in.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ default:
+ val = in.MapIndex(key)
+ }
+ out.SetMapIndex(key, val)
+ }
+ case reflect.Ptr:
+ if in.IsNil() {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.New(in.Elem().Type()))
+ }
+ mergeAny(out.Elem(), in.Elem(), true, nil)
+ case reflect.Slice:
+ if in.IsNil() {
+ return
+ }
+ if in.Type().Elem().Kind() == reflect.Uint8 {
+ // []byte is a scalar bytes field, not a repeated field.
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value, and should not
+ // be merged.
+ if prop != nil && prop.proto3 && in.Len() == 0 {
+ return
+ }
+
+ // Make a deep copy.
+ // Append to []byte{} instead of []byte(nil) so that we never end up
+ // with a nil result.
+ out.SetBytes(append([]byte{}, in.Bytes()...))
+ return
+ }
+ n := in.Len()
+ if out.IsNil() {
+ out.Set(reflect.MakeSlice(in.Type(), 0, n))
+ }
+ switch in.Type().Elem().Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ out.Set(reflect.AppendSlice(out, in))
+ default:
+ for i := 0; i < n; i++ {
+ x := reflect.Indirect(reflect.New(in.Type().Elem()))
+ mergeAny(x, in.Index(i), false, nil)
+ out.Set(reflect.Append(out, x))
+ }
+ }
+ case reflect.Struct:
+ mergeStruct(out, in)
+ default:
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to copy %v", in)
+ }
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+ for extNum, eIn := range in {
+ eOut := Extension{desc: eIn.desc}
+ if eIn.value != nil {
+ v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+ mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+ eOut.value = v.Interface()
+ }
+ if eIn.enc != nil {
+ eOut.enc = make([]byte, len(eIn.enc))
+ copy(eOut.enc, eIn.enc)
+ }
+
+ out[extNum] = eOut
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
new file mode 100644
index 0000000..63b0f08
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/decode.go
@@ -0,0 +1,427 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "io"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero if there is not enough.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+ for shift := uint(0); shift < 64; shift += 7 {
+ if n >= len(buf) {
+ return 0, 0
+ }
+ b := uint64(buf[n])
+ n++
+ x |= (b & 0x7F) << shift
+ if (b & 0x80) == 0 {
+ return x, n
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ return 0, 0
+}
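+
+// Illustrative sketch (not part of the upstream library): the standard varint
+// example. 300 is encoded as 0xAC 0x02; the low seven bits of each byte are
+// accumulated least-significant group first, and the high bit marks
+// continuation.
+func exampleDecodeVarint() bool {
+	x, n := DecodeVarint([]byte{0xAC, 0x02})
+	return x == 300 && n == 2
+}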
+
+func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
+ i := p.index
+ l := len(p.buf)
+
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= l {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ b := p.buf[i]
+ i++
+ x |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ p.index = i
+ return
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ err = errOverflow
+ return
+}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+ i := p.index
+ buf := p.buf
+
+ if i >= len(buf) {
+ return 0, io.ErrUnexpectedEOF
+ } else if buf[i] < 0x80 {
+ p.index++
+ return uint64(buf[i]), nil
+ } else if len(buf)-i < 10 {
+ return p.decodeVarintSlow()
+ }
+
+ var b uint64
+ // we already checked the first byte
+ x = uint64(buf[i]) - 0x80
+ i++
+
+ b = uint64(buf[i])
+ i++
+ x += b << 7
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 7
+
+ b = uint64(buf[i])
+ i++
+ x += b << 14
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 14
+
+ b = uint64(buf[i])
+ i++
+ x += b << 21
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 21
+
+ b = uint64(buf[i])
+ i++
+ x += b << 28
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 28
+
+ b = uint64(buf[i])
+ i++
+ x += b << 35
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 35
+
+ b = uint64(buf[i])
+ i++
+ x += b << 42
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 42
+
+ b = uint64(buf[i])
+ i++
+ x += b << 49
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 49
+
+ b = uint64(buf[i])
+ i++
+ x += b << 56
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 56
+
+ b = uint64(buf[i])
+ i++
+ x += b << 63
+ if b&0x80 == 0 {
+ goto done
+ }
+
+ return 0, errOverflow
+
+done:
+ p.index = i
+ return x, nil
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 8
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-8])
+ x |= uint64(p.buf[i-7]) << 8
+ x |= uint64(p.buf[i-6]) << 16
+ x |= uint64(p.buf[i-5]) << 24
+ x |= uint64(p.buf[i-4]) << 32
+ x |= uint64(p.buf[i-3]) << 40
+ x |= uint64(p.buf[i-2]) << 48
+ x |= uint64(p.buf[i-1]) << 56
+ return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 4
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-4])
+ x |= uint64(p.buf[i-3]) << 8
+ x |= uint64(p.buf[i-2]) << 16
+ x |= uint64(p.buf[i-1]) << 24
+ return
+}
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+ return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+ return
+}
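+
+// Illustrative sketch (not part of the upstream library): zigzag encoding maps
+// signed integers onto unsigned ones as 0, -1, 1, -2, 2, ... -> 0, 1, 2, 3, 4, ...
+// so a varint of 3 decodes to the signed value -2.
+func exampleDecodeZigzag() bool {
+	b := NewBuffer([]byte{0x03})
+	x, err := b.DecodeZigzag64()
+	return err == nil && int64(x) == -2
+}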
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+ n, err := p.DecodeVarint()
+ if err != nil {
+ return nil, err
+ }
+
+ nb := int(n)
+ if nb < 0 {
+ return nil, fmt.Errorf("proto: bad byte length %d", nb)
+ }
+ end := p.index + nb
+ if end < p.index || end > len(p.buf) {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ if !alloc {
+ // todo: check if can get more uses of alloc=false
+ buf = p.buf[p.index:end]
+ p.index += nb
+ return
+ }
+
+ buf = make([]byte, nb)
+ copy(buf, p.buf[p.index:])
+ p.index += nb
+ return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+ buf, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return
+ }
+ return string(buf), nil
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves. The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+// Unmarshal implementations should not clear the receiver.
+// Any unmarshaled data should be merged into the receiver.
+// Callers of Unmarshal that do not want to retain existing data
+// should Reset the receiver before calling Unmarshal.
+type Unmarshaler interface {
+ Unmarshal([]byte) error
+}
+
+// newUnmarshaler is the interface representing objects that can
+// unmarshal themselves. The semantics are identical to Unmarshaler.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newUnmarshaler interface {
+ XXX_Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+ pb.Reset()
+ if u, ok := pb.(newUnmarshaler); ok {
+ return u.XXX_Unmarshal(buf)
+ }
+ if u, ok := pb.(Unmarshaler); ok {
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+ if u, ok := pb.(newUnmarshaler); ok {
+ return u.XXX_Unmarshal(buf)
+ }
+ if u, ok := pb.(Unmarshaler); ok {
+		// NOTE: The history of proto has unfortunately been inconsistent
+		// about whether Unmarshaler should or should not implicitly clear itself.
+ // Some implementations do, most do not.
+ // Thus, calling this here may or may not do what people want.
+ //
+ // See https://github.com/golang/protobuf/issues/424
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
+}
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+ enc, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+// StartGroup tag is already consumed. This function consumes
+// EndGroup tag.
+func (p *Buffer) DecodeGroup(pb Message) error {
+ b := p.buf[p.index:]
+ x, y := findEndGroup(b)
+ if x < 0 {
+ return io.ErrUnexpectedEOF
+ }
+ err := Unmarshal(b[:x], pb)
+ p.index += y
+ return err
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb. If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+//
+// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
+func (p *Buffer) Unmarshal(pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(newUnmarshaler); ok {
+ err := u.XXX_Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
+ if u, ok := pb.(Unmarshaler); ok {
+		// NOTE: The history of proto has unfortunately been inconsistent
+		// about whether Unmarshaler should or should not implicitly clear itself.
+ // Some implementations do, most do not.
+ // Thus, calling this here may or may not do what people want.
+ //
+ // See https://github.com/golang/protobuf/issues/424
+ err := u.Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
+
+ // Slow workaround for messages that aren't Unmarshalers.
+ // This includes some hand-coded .pb.go files and
+ // bootstrap protos.
+ // TODO: fix all of those and then add Unmarshal to
+ // the Message interface. Then:
+ // The cast above and code below can be deleted.
+ // The old unmarshaler can be deleted.
+ // Clients can call Unmarshal directly (can already do that, actually).
+ var info InternalMessageInfo
+ err := info.Unmarshal(pb, p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+}
diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go
new file mode 100644
index 0000000..35b882c
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/deprecated.go
@@ -0,0 +1,63 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2018 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import "errors"
+
+// Deprecated: do not use.
+type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
+
+// Deprecated: do not use.
+func GetStats() Stats { return Stats{} }
+
+// Deprecated: do not use.
+func MarshalMessageSet(interface{}) ([]byte, error) {
+ return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func UnmarshalMessageSet([]byte, interface{}) error {
+ return errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func MarshalMessageSetJSON(interface{}) ([]byte, error) {
+ return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func UnmarshalMessageSetJSON([]byte, interface{}) error {
+ return errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func RegisterMessageSetType(Message, int32, string) {}
diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go
new file mode 100644
index 0000000..dea2617
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/discard.go
@@ -0,0 +1,350 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2017 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "sync/atomic"
+)
+
+type generatedDiscarder interface {
+ XXX_DiscardUnknown()
+}
+
+// DiscardUnknown recursively discards all unknown fields from this message
+// and all embedded messages.
+//
+// When unmarshaling a message with unrecognized fields, the tags and values
+// of such fields are preserved in the Message. This allows a later call to
+// marshal to be able to produce a message that continues to have those
+// unrecognized fields. To avoid this, DiscardUnknown is used to
+// explicitly clear the unknown fields after unmarshaling.
+//
+// For proto2 messages, the unknown fields of message extensions are only
+// discarded from messages that have been accessed via GetExtension.
+func DiscardUnknown(m Message) {
+ if m, ok := m.(generatedDiscarder); ok {
+ m.XXX_DiscardUnknown()
+ return
+ }
+	// TODO: Dynamically populate an InternalMessageInfo for legacy messages,
+ // but the master branch has no implementation for InternalMessageInfo,
+ // so it would be more work to replicate that approach.
+ discardLegacy(m)
+}
+
+// DiscardUnknown recursively discards all unknown fields.
+func (a *InternalMessageInfo) DiscardUnknown(m Message) {
+ di := atomicLoadDiscardInfo(&a.discard)
+ if di == nil {
+ di = getDiscardInfo(reflect.TypeOf(m).Elem())
+ atomicStoreDiscardInfo(&a.discard, di)
+ }
+ di.discard(toPointer(&m))
+}
+
+type discardInfo struct {
+ typ reflect.Type
+
+ initialized int32 // 0: only typ is valid, 1: everything is valid
+ lock sync.Mutex
+
+ fields []discardFieldInfo
+ unrecognized field
+}
+
+type discardFieldInfo struct {
+ field field // Offset of field, guaranteed to be valid
+ discard func(src pointer)
+}
+
+var (
+ discardInfoMap = map[reflect.Type]*discardInfo{}
+ discardInfoLock sync.Mutex
+)
+
+func getDiscardInfo(t reflect.Type) *discardInfo {
+ discardInfoLock.Lock()
+ defer discardInfoLock.Unlock()
+ di := discardInfoMap[t]
+ if di == nil {
+ di = &discardInfo{typ: t}
+ discardInfoMap[t] = di
+ }
+ return di
+}
+
+func (di *discardInfo) discard(src pointer) {
+ if src.isNil() {
+ return // Nothing to do.
+ }
+
+ if atomic.LoadInt32(&di.initialized) == 0 {
+ di.computeDiscardInfo()
+ }
+
+ for _, fi := range di.fields {
+ sfp := src.offset(fi.field)
+ fi.discard(sfp)
+ }
+
+ // For proto2 messages, only discard unknown fields in message extensions
+ // that have been accessed via GetExtension.
+ if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
+ // Ignore lock since DiscardUnknown is not concurrency safe.
+ emm, _ := em.extensionsRead()
+ for _, mx := range emm {
+ if m, ok := mx.value.(Message); ok {
+ DiscardUnknown(m)
+ }
+ }
+ }
+
+ if di.unrecognized.IsValid() {
+ *src.offset(di.unrecognized).toBytes() = nil
+ }
+}
+
+func (di *discardInfo) computeDiscardInfo() {
+ di.lock.Lock()
+ defer di.lock.Unlock()
+ if di.initialized != 0 {
+ return
+ }
+ t := di.typ
+ n := t.NumField()
+
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+
+ dfi := discardFieldInfo{field: toField(&f)}
+ tf := f.Type
+
+ // Unwrap tf to get its most basic type.
+ var isPointer, isSlice bool
+ if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+ isSlice = true
+ tf = tf.Elem()
+ }
+ if tf.Kind() == reflect.Ptr {
+ isPointer = true
+ tf = tf.Elem()
+ }
+ if isPointer && isSlice && tf.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
+ }
+
+ switch tf.Kind() {
+ case reflect.Struct:
+ switch {
+ case !isPointer:
+ panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
+ case isSlice: // E.g., []*pb.T
+ di := getDiscardInfo(tf)
+ dfi.discard = func(src pointer) {
+ sps := src.getPointerSlice()
+ for _, sp := range sps {
+ if !sp.isNil() {
+ di.discard(sp)
+ }
+ }
+ }
+ default: // E.g., *pb.T
+ di := getDiscardInfo(tf)
+ dfi.discard = func(src pointer) {
+ sp := src.getPointer()
+ if !sp.isNil() {
+ di.discard(sp)
+ }
+ }
+ }
+ case reflect.Map:
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
+ default: // E.g., map[K]V
+ if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
+ dfi.discard = func(src pointer) {
+ sm := src.asPointerTo(tf).Elem()
+ if sm.Len() == 0 {
+ return
+ }
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ DiscardUnknown(val.Interface().(Message))
+ }
+ }
+ } else {
+ dfi.discard = func(pointer) {} // Noop
+ }
+ }
+ case reflect.Interface:
+ // Must be oneof field.
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
+ default: // E.g., interface{}
+ // TODO: Make this faster?
+ dfi.discard = func(src pointer) {
+ su := src.asPointerTo(tf).Elem()
+ if !su.IsNil() {
+ sv := su.Elem().Elem().Field(0)
+ if sv.Kind() == reflect.Ptr && sv.IsNil() {
+ return
+ }
+ switch sv.Type().Kind() {
+ case reflect.Ptr: // Proto struct (e.g., *T)
+ DiscardUnknown(sv.Interface().(Message))
+ }
+ }
+ }
+ }
+ default:
+ continue
+ }
+ di.fields = append(di.fields, dfi)
+ }
+
+ di.unrecognized = invalidField
+ if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+ if f.Type != reflect.TypeOf([]byte{}) {
+ panic("expected XXX_unrecognized to be of type []byte")
+ }
+ di.unrecognized = toField(&f)
+ }
+
+ atomic.StoreInt32(&di.initialized, 1)
+}
+
+func discardLegacy(m Message) {
+ v := reflect.ValueOf(m)
+ if v.Kind() != reflect.Ptr || v.IsNil() {
+ return
+ }
+ v = v.Elem()
+ if v.Kind() != reflect.Struct {
+ return
+ }
+ t := v.Type()
+
+ for i := 0; i < v.NumField(); i++ {
+ f := t.Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ vf := v.Field(i)
+ tf := f.Type
+
+ // Unwrap tf to get its most basic type.
+ var isPointer, isSlice bool
+ if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+ isSlice = true
+ tf = tf.Elem()
+ }
+ if tf.Kind() == reflect.Ptr {
+ isPointer = true
+ tf = tf.Elem()
+ }
+ if isPointer && isSlice && tf.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
+ }
+
+ switch tf.Kind() {
+ case reflect.Struct:
+ switch {
+ case !isPointer:
+ panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
+ case isSlice: // E.g., []*pb.T
+ for j := 0; j < vf.Len(); j++ {
+ discardLegacy(vf.Index(j).Interface().(Message))
+ }
+ default: // E.g., *pb.T
+ discardLegacy(vf.Interface().(Message))
+ }
+ case reflect.Map:
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
+ default: // E.g., map[K]V
+ tv := vf.Type().Elem()
+ if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
+ for _, key := range vf.MapKeys() {
+ val := vf.MapIndex(key)
+ discardLegacy(val.Interface().(Message))
+ }
+ }
+ }
+ case reflect.Interface:
+ // Must be oneof field.
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
+ default: // E.g., test_proto.isCommunique_Union interface
+ if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
+ vf = vf.Elem() // E.g., *test_proto.Communique_Msg
+ if !vf.IsNil() {
+ vf = vf.Elem() // E.g., test_proto.Communique_Msg
+ vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
+ if vf.Kind() == reflect.Ptr {
+ discardLegacy(vf.Interface().(Message))
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
+ if vf.Type() != reflect.TypeOf([]byte{}) {
+ panic("expected XXX_unrecognized to be of type []byte")
+ }
+ vf.Set(reflect.ValueOf([]byte(nil)))
+ }
+
+ // For proto2 messages, only discard unknown fields in message extensions
+ // that have been accessed via GetExtension.
+ if em, err := extendable(m); err == nil {
+ // Ignore lock since discardLegacy is not concurrency safe.
+ emm, _ := em.extensionsRead()
+ for _, mx := range emm {
+ if m, ok := mx.value.(Message); ok {
+ discardLegacy(m)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
new file mode 100644
index 0000000..3abfed2
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/encode.go
@@ -0,0 +1,203 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "errors"
+ "reflect"
+)
+
+var (
+ // errRepeatedHasNil is the error returned if Marshal is called with
+ // a struct with a repeated field containing a nil element.
+ errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+ // errOneofHasNil is the error returned if Marshal is called with
+ // a struct with a oneof field containing a nil element.
+ errOneofHasNil = errors.New("proto: oneof field has nil value")
+
+ // ErrNil is the error returned if Marshal is called with nil.
+ ErrNil = errors.New("proto: Marshal called with nil")
+
+ // ErrTooLarge is the error returned if Marshal is called with a
+ // message that encodes to >2GB.
+ ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+ var buf [maxVarintBytes]byte
+ var n int
+ for n = 0; x > 127; n++ {
+ buf[n] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ buf[n] = uint8(x)
+ n++
+ return buf[0:n]
+}
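+
+// Illustrative sketch (not part of the upstream sources): 300 is 0b100101100,
+// so its varint form is two bytes, low 7 bits first with the continuation bit
+// set on every byte except the last:
+//
+//	proto.EncodeVarint(1)   // []byte{0x01}
+//	proto.EncodeVarint(300) // []byte{0xac, 0x02}
+//	proto.SizeVarint(300)   // 2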
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+ for x >= 1<<7 {
+ p.buf = append(p.buf, uint8(x&0x7f|0x80))
+ x >>= 7
+ }
+ p.buf = append(p.buf, uint8(x))
+ return nil
+}
+
+// SizeVarint returns the varint encoding size of an integer.
+func SizeVarint(x uint64) int {
+ switch {
+ case x < 1<<7:
+ return 1
+ case x < 1<<14:
+ return 2
+ case x < 1<<21:
+ return 3
+ case x < 1<<28:
+ return 4
+ case x < 1<<35:
+ return 5
+ case x < 1<<42:
+ return 6
+ case x < 1<<49:
+ return 7
+ case x < 1<<56:
+ return 8
+ case x < 1<<63:
+ return 9
+ }
+ return 10
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24),
+ uint8(x>>32),
+ uint8(x>>40),
+ uint8(x>>48),
+ uint8(x>>56))
+ return nil
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24))
+ return nil
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
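+
+// Illustrative sketch (not part of the upstream sources): zigzag encoding maps
+// signed integers to unsigned ones so that values close to zero stay small on
+// the wire: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, and so on.
+//
+//	var b proto.Buffer
+//	b.EncodeZigzag64(uint64(int64(2)))  // appends the varint 4
+//	b.EncodeZigzag64(uint64(int64(-2))) // appends the varint 3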
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+ p.EncodeVarint(uint64(len(b)))
+ p.buf = append(p.buf, b...)
+ return nil
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+ p.EncodeVarint(uint64(len(s)))
+ p.buf = append(p.buf, s...)
+ return nil
+}
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+ Marshal() ([]byte, error)
+}
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+ siz := Size(pb)
+ p.EncodeVarint(uint64(siz))
+ return p.Marshal(pb)
+}
+
+// All protocol buffer fields are nillable, but be careful.
+func isNil(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return v.IsNil()
+ }
+ return false
+}
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
new file mode 100644
index 0000000..f9b6e41
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/equal.go
@@ -0,0 +1,301 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+ "bytes"
+ "log"
+ "reflect"
+ "strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+ - Two messages are equal iff they are the same type,
+ corresponding fields are equal, unknown field sets
+ are equal, and extensions sets are equal.
+ - Two set scalar fields are equal iff their values are equal.
+ If the fields are of a floating-point type, remember that
+ NaN != x for all x, including NaN. If the message is defined
+ in a proto3 .proto file, fields are not "set"; specifically,
+ zero length proto3 "bytes" fields are equal (nil == {}).
+ - Two repeated fields are equal iff their lengths are the same,
+ and their corresponding elements are equal. Note a "bytes" field,
+ although represented by []byte, is not a repeated field and the
+ rule for the scalar fields described above applies.
+ - Two unset fields are equal.
+ - Two unknown field sets are equal if their current
+ encoded state is equal.
+ - Two extension sets are equal iff they have corresponding
+ elements that are pairwise equal.
+ - Two map fields are equal iff their lengths are the same,
+ and they contain the same set of elements. Zero-length map
+ fields are equal.
+ - Every other combination of things is not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
+func Equal(a, b Message) bool {
+ if a == nil || b == nil {
+ return a == b
+ }
+ v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ if v1.Kind() == reflect.Ptr {
+ if v1.IsNil() {
+ return v2.IsNil()
+ }
+ if v2.IsNil() {
+ return false
+ }
+ v1, v2 = v1.Elem(), v2.Elem()
+ }
+ if v1.Kind() != reflect.Struct {
+ return false
+ }
+ return equalStruct(v1, v2)
+}
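+
+// Illustrative sketch (not part of the upstream sources): comparing two
+// messages with Equal, assuming a generated proto2 message type pb.Test with
+// optional fields Label and Type.
+//
+//	a := &pb.Test{Label: proto.String("hello")}
+//	b := &pb.Test{Label: proto.String("hello")}
+//	proto.Equal(a, b) // true: same type, equal fields
+//	b.Type = proto.Int32(42)
+//	proto.Equal(a, b) // false: Type is set on b but unset on a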
+
+// v1 and v2 are known to have the same type.
+func equalStruct(v1, v2 reflect.Value) bool {
+ sprop := GetProperties(v1.Type())
+ for i := 0; i < v1.NumField(); i++ {
+ f := v1.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ f1, f2 := v1.Field(i), v2.Field(i)
+ if f.Type.Kind() == reflect.Ptr {
+ if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+ // both unset
+ continue
+ } else if n1 != n2 {
+ // set/unset mismatch
+ return false
+ }
+ f1, f2 = f1.Elem(), f2.Elem()
+ }
+ if !equalAny(f1, f2, sprop.Prop[i]) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_InternalExtensions")
+ if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_extensions")
+ if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+ return false
+ }
+ }
+
+ uf := v1.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return true
+ }
+
+ u1 := uf.Bytes()
+ u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+ return bytes.Equal(u1, u2)
+}
+
+// v1 and v2 are known to have the same type.
+// prop may be nil.
+func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
+ if v1.Type() == protoMessageType {
+ m1, _ := v1.Interface().(Message)
+ m2, _ := v2.Interface().(Message)
+ return Equal(m1, m2)
+ }
+ switch v1.Kind() {
+ case reflect.Bool:
+ return v1.Bool() == v2.Bool()
+ case reflect.Float32, reflect.Float64:
+ return v1.Float() == v2.Float()
+ case reflect.Int32, reflect.Int64:
+ return v1.Int() == v2.Int()
+ case reflect.Interface:
+ // Probably a oneof field; compare the inner values.
+ n1, n2 := v1.IsNil(), v2.IsNil()
+ if n1 || n2 {
+ return n1 == n2
+ }
+ e1, e2 := v1.Elem(), v2.Elem()
+ if e1.Type() != e2.Type() {
+ return false
+ }
+ return equalAny(e1, e2, nil)
+ case reflect.Map:
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for _, key := range v1.MapKeys() {
+ val2 := v2.MapIndex(key)
+ if !val2.IsValid() {
+ // This key was not found in the second map.
+ return false
+ }
+ if !equalAny(v1.MapIndex(key), val2, nil) {
+ return false
+ }
+ }
+ return true
+ case reflect.Ptr:
+ // Maps may have nil values in them, so check for nil.
+ if v1.IsNil() && v2.IsNil() {
+ return true
+ }
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return equalAny(v1.Elem(), v2.Elem(), prop)
+ case reflect.Slice:
+ if v1.Type().Elem().Kind() == reflect.Uint8 {
+ // short circuit: []byte
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value.
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
+ return true
+ }
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+ }
+
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for i := 0; i < v1.Len(); i++ {
+ if !equalAny(v1.Index(i), v2.Index(i), prop) {
+ return false
+ }
+ }
+ return true
+ case reflect.String:
+ return v1.Interface().(string) == v2.Interface().(string)
+ case reflect.Struct:
+ return equalStruct(v1, v2)
+ case reflect.Uint32, reflect.Uint64:
+ return v1.Uint() == v2.Uint()
+ }
+
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to compare %v", v1)
+ return false
+}
+
+// base is the struct type that the extensions are based on.
+// x1 and x2 are InternalExtensions.
+func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
+ em1, _ := x1.extensionsRead()
+ em2, _ := x2.extensionsRead()
+ return equalExtMap(base, em1, em2)
+}
+
+func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
+ if len(em1) != len(em2) {
+ return false
+ }
+
+ for extNum, e1 := range em1 {
+ e2, ok := em2[extNum]
+ if !ok {
+ return false
+ }
+
+ m1 := extensionAsLegacyType(e1.value)
+ m2 := extensionAsLegacyType(e2.value)
+
+ if m1 == nil && m2 == nil {
+ // Both have only encoded form.
+ if bytes.Equal(e1.enc, e2.enc) {
+ continue
+ }
+ // The bytes are different, but the extensions might still be
+ // equal. We need to decode them to compare.
+ }
+
+ if m1 != nil && m2 != nil {
+ // Both are unencoded.
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ continue
+ }
+
+ // At least one is encoded. To do a semantically correct comparison
+ // we need to unmarshal them first.
+ var desc *ExtensionDesc
+ if m := extensionMaps[base]; m != nil {
+ desc = m[extNum]
+ }
+ if desc == nil {
+ // If both have only encoded form and the bytes are the same,
+ // that case is handled above. At this point at least one side is
+ // still encoded and the extension is not registered, so it cannot
+ // be decoded for comparison; report the messages as unequal.
+ log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+ return false
+ }
+ var err error
+ if m1 == nil {
+ m1, err = decodeExtension(e1.enc, desc)
+ }
+ if m2 == nil && err == nil {
+ m2, err = decodeExtension(e2.enc, desc)
+ }
+ if err != nil {
+ // The encoded form is invalid.
+ log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+ return false
+ }
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
new file mode 100644
index 0000000..fa88add
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -0,0 +1,607 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+ Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer generated by the current
+// proto compiler that may be extended.
+type extendableProto interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ extensionsWrite() map[int32]Extension
+ extensionsRead() (map[int32]Extension, sync.Locker)
+}
+
+// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
+// version of the proto compiler that may be extended.
+type extendableProtoV1 interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ ExtensionMap() map[int32]Extension
+}
+
+// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
+type extensionAdapter struct {
+ extendableProtoV1
+}
+
+func (e extensionAdapter) extensionsWrite() map[int32]Extension {
+ return e.ExtensionMap()
+}
+
+func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
+ return e.ExtensionMap(), notLocker{}
+}
+
+// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
+type notLocker struct{}
+
+func (n notLocker) Lock() {}
+func (n notLocker) Unlock() {}
+
+// extendable returns the extendableProto interface for the given generated proto message.
+// If the proto message has the old extension format, it returns a wrapper that implements
+// the extendableProto interface.
+func extendable(p interface{}) (extendableProto, error) {
+ switch p := p.(type) {
+ case extendableProto:
+ if isNilPtr(p) {
+ return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+ }
+ return p, nil
+ case extendableProtoV1:
+ if isNilPtr(p) {
+ return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+ }
+ return extensionAdapter{p}, nil
+ }
+ // Don't allocate a specific error containing %T:
+ // this is the hot path for Clone and MarshalText.
+ return nil, errNotExtendable
+}
+
+var errNotExtendable = errors.New("proto: not an extendable proto.Message")
+
+func isNilPtr(x interface{}) bool {
+ v := reflect.ValueOf(x)
+ return v.Kind() == reflect.Ptr && v.IsNil()
+}
+
+// XXX_InternalExtensions is an internal representation of proto extensions.
+//
+// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
+// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
+//
+// The methods of XXX_InternalExtensions are not concurrency safe in general,
+// but calls to logically read-only methods such as has and get may be executed concurrently.
+type XXX_InternalExtensions struct {
+ // The struct must be indirect so that if a user inadvertently copies a
+ // generated message and its embedded XXX_InternalExtensions, they
+ // avoid the mayhem of a copied mutex.
+ //
+ // The mutex serializes all logically read-only operations to p.extensionMap.
+ // It is up to the client to ensure that write operations to p.extensionMap are
+ // mutually exclusive with other accesses.
+ p *struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ }
+}
+
+// extensionsWrite returns the extension map, creating it on first use.
+func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
+ if e.p == nil {
+ e.p = new(struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ })
+ e.p.extensionMap = make(map[int32]Extension)
+ }
+ return e.p.extensionMap
+}
+
+// extensionsRead returns the extensions map for read-only use. It may be nil.
+// The caller must hold the returned mutex's lock when accessing elements within the map.
+func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
+ if e.p == nil {
+ return nil, nil
+ }
+ return e.p.extensionMap, &e.p.mu
+}
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+ ExtendedType Message // nil pointer to the type that is being extended
+ ExtensionType interface{} // nil pointer to the extension type
+ Field int32 // field number
+ Name string // fully-qualified name of extension, for text formatting
+ Tag string // protobuf tag style
+ Filename string // name of the file in which the extension is defined
+}
+
+func (ed *ExtensionDesc) repeated() bool {
+ t := reflect.TypeOf(ed.ExtensionType)
+ return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+ // When an extension is stored in a message using SetExtension
+ // only desc and value are set. When the message is marshaled
+ // enc will be set to the encoded form of the message.
+ //
+ // When a message is unmarshaled and contains extensions, each
+ // extension will have only enc set. When such an extension is
+ // accessed using GetExtension (or GetExtensions) desc and value
+ // will be set.
+ desc *ExtensionDesc
+
+ // value is a concrete value for the extension field. Let the type of
+ // desc.ExtensionType be the "API type" and the type of Extension.value
+ // be the "storage type". The API type and storage type are the same except:
+ // * For scalars (except []byte), the API type uses *T,
+ // while the storage type uses T.
+ // * For repeated fields, the API type uses []T, while the storage type
+ // uses *[]T.
+ //
+ // The reason for the divergence is that the storage type more naturally
+ // matches what is expected when retrieving the values through the
+ // protobuf reflection APIs.
+ //
+ // The value may only be populated if desc is also populated.
+ value interface{}
+
+ // enc is the raw bytes for the extension field.
+ enc []byte
+}
+
+// SetRawExtension is for testing only.
+func SetRawExtension(base Message, id int32, b []byte) {
+ epb, err := extendable(base)
+ if err != nil {
+ return
+ }
+ extmap := epb.extensionsWrite()
+ extmap[id] = Extension{enc: b}
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extendableProto, field int32) bool {
+ for _, er := range pb.ExtensionRangeArray() {
+ if er.Start <= field && field <= er.End {
+ return true
+ }
+ }
+ return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+ var pbi interface{} = pb
+ // Check the extended type.
+ if ea, ok := pbi.(extensionAdapter); ok {
+ pbi = ea.extendableProtoV1
+ }
+ if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
+ return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
+ }
+ // Check the range.
+ if !isExtensionField(pb, extension.Field) {
+ return errors.New("proto: bad extension number; not in declared ranges")
+ }
+ return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+ base reflect.Type
+ field int32
+}
+
+var extProp = struct {
+ sync.RWMutex
+ m map[extPropKey]*Properties
+}{
+ m: make(map[extPropKey]*Properties),
+}
+
+func extensionProperties(ed *ExtensionDesc) *Properties {
+ key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+ extProp.RLock()
+ if prop, ok := extProp.m[key]; ok {
+ extProp.RUnlock()
+ return prop
+ }
+ extProp.RUnlock()
+
+ extProp.Lock()
+ defer extProp.Unlock()
+ // Check again.
+ if prop, ok := extProp.m[key]; ok {
+ return prop
+ }
+
+ prop := new(Properties)
+ prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+ extProp.m[key] = prop
+ return prop
+}
+
+// HasExtension returns whether the given extension is present in pb.
+func HasExtension(pb Message, extension *ExtensionDesc) bool {
+ // TODO: Check types, field numbers, etc.?
+ epb, err := extendable(pb)
+ if err != nil {
+ return false
+ }
+ extmap, mu := epb.extensionsRead()
+ if extmap == nil {
+ return false
+ }
+ mu.Lock()
+ _, ok := extmap[extension.Field]
+ mu.Unlock()
+ return ok
+}
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb Message, extension *ExtensionDesc) {
+ epb, err := extendable(pb)
+ if err != nil {
+ return
+ }
+ // TODO: Check types, field numbers, etc.?
+ extmap := epb.extensionsWrite()
+ delete(extmap, extension.Field)
+}
+
+// GetExtension retrieves a proto2 extended field from pb.
+//
+// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
+// then GetExtension parses the encoded field and returns a Go value of the specified type.
+// If the field is not present, then the default value is returned (if one is specified),
+// otherwise ErrMissingExtension is reported.
+//
+// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
+// then GetExtension returns the raw encoded bytes of the field extension.
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
+ epb, err := extendable(pb)
+ if err != nil {
+ return nil, err
+ }
+
+ if extension.ExtendedType != nil {
+ // can only check type if this is a complete descriptor
+ if err := checkExtensionTypes(epb, extension); err != nil {
+ return nil, err
+ }
+ }
+
+ emap, mu := epb.extensionsRead()
+ if emap == nil {
+ return defaultExtensionValue(extension)
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ e, ok := emap[extension.Field]
+ if !ok {
+ // defaultExtensionValue returns the default value or
+ // ErrMissingExtension if there is no default.
+ return defaultExtensionValue(extension)
+ }
+
+ if e.value != nil {
+ // Already decoded. Check the descriptor, though.
+ if e.desc != extension {
+ // This shouldn't happen. If it does, it means that
+ // GetExtension was called twice with two different
+ // descriptors with the same field number.
+ return nil, errors.New("proto: descriptor conflict")
+ }
+ return extensionAsLegacyType(e.value), nil
+ }
+
+ if extension.ExtensionType == nil {
+ // incomplete descriptor
+ return e.enc, nil
+ }
+
+ v, err := decodeExtension(e.enc, extension)
+ if err != nil {
+ return nil, err
+ }
+
+ // Remember the decoded version and drop the encoded version.
+ // That way it is safe to mutate what we return.
+ e.value = extensionAsStorageType(v)
+ e.desc = extension
+ e.enc = nil
+ emap[extension.Field] = e
+ return extensionAsLegacyType(e.value), nil
+}
+
+// defaultExtensionValue returns the default value for extension.
+// If no default for an extension is defined ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+ if extension.ExtensionType == nil {
+ // incomplete descriptor, so no default
+ return nil, ErrMissingExtension
+ }
+
+ t := reflect.TypeOf(extension.ExtensionType)
+ props := extensionProperties(extension)
+
+ sf, _, err := fieldDefault(t, props)
+ if err != nil {
+ return nil, err
+ }
+
+ if sf == nil || sf.value == nil {
+ // There is no default value.
+ return nil, ErrMissingExtension
+ }
+
+ if t.Kind() != reflect.Ptr {
+ // We do not need to return a Ptr, we can directly return sf.value.
+ return sf.value, nil
+ }
+
+ // We need to return an interface{} that is a pointer to sf.value.
+ value := reflect.New(t).Elem()
+ value.Set(reflect.New(value.Type().Elem()))
+ if sf.kind == reflect.Int32 {
+ // We may have an int32 or an enum, but the underlying data is int32.
+ // Since we can't set an int32 into a non int32 reflect.value directly
+ // set it as a int32.
+ value.Elem().SetInt(int64(sf.value.(int32)))
+ } else {
+ value.Elem().Set(reflect.ValueOf(sf.value))
+ }
+ return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+ t := reflect.TypeOf(extension.ExtensionType)
+ unmarshal := typeUnmarshaler(t, extension.Tag)
+
+ // t is a pointer to a struct, pointer to basic type or a slice.
+ // Allocate space to store the pointer/slice.
+ value := reflect.New(t).Elem()
+
+ var err error
+ for {
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ wire := int(x) & 7
+
+ b, err = unmarshal(b, valToPointer(value.Addr()), wire)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(b) == 0 {
+ break
+ }
+ }
+ return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+ epb, err := extendable(pb)
+ if err != nil {
+ return nil, err
+ }
+ extensions = make([]interface{}, len(es))
+ for i, e := range es {
+ extensions[i], err = GetExtension(epb, e)
+ if err == ErrMissingExtension {
+ err = nil
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
+// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
+// just the Field field, which defines the extension's field number.
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
+ epb, err := extendable(pb)
+ if err != nil {
+ return nil, err
+ }
+ registeredExtensions := RegisteredExtensions(pb)
+
+ emap, mu := epb.extensionsRead()
+ if emap == nil {
+ return nil, nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ extensions := make([]*ExtensionDesc, 0, len(emap))
+ for extid, e := range emap {
+ desc := e.desc
+ if desc == nil {
+ desc = registeredExtensions[extid]
+ if desc == nil {
+ desc = &ExtensionDesc{Field: extid}
+ }
+ }
+
+ extensions = append(extensions, desc)
+ }
+ return extensions, nil
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
+func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
+ epb, err := extendable(pb)
+ if err != nil {
+ return err
+ }
+ if err := checkExtensionTypes(epb, extension); err != nil {
+ return err
+ }
+ typ := reflect.TypeOf(extension.ExtensionType)
+ if typ != reflect.TypeOf(value) {
+ return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
+ }
+ // nil extension values need to be caught early, because the
+ // encoder can't distinguish an ErrNil due to a nil extension
+ // from an ErrNil due to a missing field. Extensions are
+ // always optional, so the encoder would just swallow the error
+ // and drop all the extensions from the encoded message.
+ if reflect.ValueOf(value).IsNil() {
+ return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+ }
+
+ extmap := epb.extensionsWrite()
+ extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)}
+ return nil
+}
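+
+// Illustrative sketch (not part of the upstream sources): setting and reading
+// back a proto2 extension, assuming a generated base message pb.Base and a
+// generated extension descriptor pb.E_Ext whose ExtensionType is *string.
+//
+//	msg := &pb.Base{}
+//	if err := proto.SetExtension(msg, pb.E_Ext, proto.String("hello")); err != nil {
+//		log.Fatal(err)
+//	}
+//	if proto.HasExtension(msg, pb.E_Ext) {
+//		v, _ := proto.GetExtension(msg, pb.E_Ext) // v is an interface{} holding a *string
+//		_ = *(v.(*string))                        // "hello"
+//	}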
+
+// ClearAllExtensions clears all extensions from pb.
+func ClearAllExtensions(pb Message) {
+ epb, err := extendable(pb)
+ if err != nil {
+ return
+ }
+ m := epb.extensionsWrite()
+ for k := range m {
+ delete(m, k)
+ }
+}
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+func RegisterExtension(desc *ExtensionDesc) {
+ st := reflect.TypeOf(desc.ExtendedType).Elem()
+ m := extensionMaps[st]
+ if m == nil {
+ m = make(map[int32]*ExtensionDesc)
+ extensionMaps[st] = m
+ }
+ if _, ok := m[desc.Field]; ok {
+ panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+ }
+ m[desc.Field] = desc
+}
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+ return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
+
+// extensionAsLegacyType converts a value from the storage type to the API type.
+// See Extension.value.
+func extensionAsLegacyType(v interface{}) interface{} {
+ switch rv := reflect.ValueOf(v); rv.Kind() {
+ case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+ // Represent primitive types as a pointer to the value.
+ rv2 := reflect.New(rv.Type())
+ rv2.Elem().Set(rv)
+ v = rv2.Interface()
+ case reflect.Ptr:
+ // Represent slice types as the value itself.
+ switch rv.Type().Elem().Kind() {
+ case reflect.Slice:
+ if rv.IsNil() {
+ v = reflect.Zero(rv.Type().Elem()).Interface()
+ } else {
+ v = rv.Elem().Interface()
+ }
+ }
+ }
+ return v
+}
+
+// extensionAsStorageType converts a value from the API type to the storage type.
+// See Extension.value.
+func extensionAsStorageType(v interface{}) interface{} {
+ switch rv := reflect.ValueOf(v); rv.Kind() {
+ case reflect.Ptr:
+ // Represent pointer-to-scalar types as the value itself.
+ switch rv.Type().Elem().Kind() {
+ case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+ if rv.IsNil() {
+ v = reflect.Zero(rv.Type().Elem()).Interface()
+ } else {
+ v = rv.Elem().Interface()
+ }
+ }
+ case reflect.Slice:
+ // Represent slice types as a pointer to the value.
+ if rv.Type().Elem().Kind() != reflect.Uint8 {
+ rv2 := reflect.New(rv.Type())
+ rv2.Elem().Set(rv)
+ v = rv2.Interface()
+ }
+ }
+ return v
+}
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
new file mode 100644
index 0000000..fdd328b
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/lib.go
@@ -0,0 +1,965 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers. It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+ - Names are turned from camel_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed by the enclosing message's name, or by the
+ enum's type name if it is a top-level enum. Enum types have a String
+ method, and an Enum method to assist in message construction.
+ - Nested messages, groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any) followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Oneof field sets are given a single field in their message,
+ with distinguished wrapper types for each possible field value.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+ - Non-repeated fields of non-message type are values instead of pointers.
+ - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+ package example;
+
+ enum FOO { X = 17; }
+
+ message Test {
+ required string label = 1;
+ optional int32 type = 2 [default=77];
+ repeated int64 reps = 3;
+ optional group OptionalGroup = 4 {
+ required string RequiredField = 5;
+ }
+ oneof union {
+ int32 number = 6;
+ string name = 7;
+ }
+ }
+
+The resulting file, test.pb.go, is:
+
+ package example
+
+ import proto "github.com/golang/protobuf/proto"
+ import math "math"
+
+ type FOO int32
+ const (
+ FOO_X FOO = 17
+ )
+ var FOO_name = map[int32]string{
+ 17: "X",
+ }
+ var FOO_value = map[string]int32{
+ "X": 17,
+ }
+
+ func (x FOO) Enum() *FOO {
+ p := new(FOO)
+ *p = x
+ return p
+ }
+ func (x FOO) String() string {
+ return proto.EnumName(FOO_name, int32(x))
+ }
+ func (x *FOO) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO")
+ if err != nil {
+ return err
+ }
+ *x = FOO(value)
+ return nil
+ }
+
+ type Test struct {
+ Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+ Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+ Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+ Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+ // Types that are valid to be assigned to Union:
+ // *Test_Number
+ // *Test_Name
+ Union isTest_Union `protobuf_oneof:"union"`
+ XXX_unrecognized []byte `json:"-"`
+ }
+ func (m *Test) Reset() { *m = Test{} }
+ func (m *Test) String() string { return proto.CompactTextString(m) }
+ func (*Test) ProtoMessage() {}
+
+ type isTest_Union interface {
+ isTest_Union()
+ }
+
+ type Test_Number struct {
+ Number int32 `protobuf:"varint,6,opt,name=number"`
+ }
+ type Test_Name struct {
+ Name string `protobuf:"bytes,7,opt,name=name"`
+ }
+
+ func (*Test_Number) isTest_Union() {}
+ func (*Test_Name) isTest_Union() {}
+
+ func (m *Test) GetUnion() isTest_Union {
+ if m != nil {
+ return m.Union
+ }
+ return nil
+ }
+ const Default_Test_Type int32 = 77
+
+ func (m *Test) GetLabel() string {
+ if m != nil && m.Label != nil {
+ return *m.Label
+ }
+ return ""
+ }
+
+ func (m *Test) GetType() int32 {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_Test_Type
+ }
+
+ func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+ if m != nil {
+ return m.Optionalgroup
+ }
+ return nil
+ }
+
+ type Test_OptionalGroup struct {
+ RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+ }
+ func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
+ func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+ func (m *Test_OptionalGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+ }
+
+ func (m *Test) GetNumber() int32 {
+ if x, ok := m.GetUnion().(*Test_Number); ok {
+ return x.Number
+ }
+ return 0
+ }
+
+ func (m *Test) GetName() string {
+ if x, ok := m.GetUnion().(*Test_Name); ok {
+ return x.Name
+ }
+ return ""
+ }
+
+ func init() {
+ proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+ }
+
+To create and play with a Test object:
+
+ package main
+
+ import (
+ "log"
+
+ "github.com/golang/protobuf/proto"
+ pb "./example.pb"
+ )
+
+ func main() {
+ test := &pb.Test{
+ Label: proto.String("hello"),
+ Type: proto.Int32(17),
+ Reps: []int64{1, 2, 3},
+ Optionalgroup: &pb.Test_OptionalGroup{
+ RequiredField: proto.String("good bye"),
+ },
+ Union: &pb.Test_Name{"fred"},
+ }
+ data, err := proto.Marshal(test)
+ if err != nil {
+ log.Fatal("marshaling error: ", err)
+ }
+ newTest := &pb.Test{}
+ err = proto.Unmarshal(data, newTest)
+ if err != nil {
+ log.Fatal("unmarshaling error: ", err)
+ }
+ // Now test and newTest contain the same data.
+ if test.GetLabel() != newTest.GetLabel() {
+ log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+ }
+ // Use a type switch to determine which oneof was set.
+ switch u := test.Union.(type) {
+ case *pb.Test_Number: // u.Number contains the number.
+ case *pb.Test_Name: // u.Name contains the string.
+ }
+ // etc.
+ }
+*/
+package proto
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+)
+
+// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
+// Marshal reports this when a required field is not initialized.
+// Unmarshal reports this when a required field is missing from the wire data.
+type RequiredNotSetError struct{ field string }
+
+func (e *RequiredNotSetError) Error() string {
+ if e.field == "" {
+ return fmt.Sprintf("proto: required field not set")
+ }
+ return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+func (e *RequiredNotSetError) RequiredNotSet() bool {
+ return true
+}
+
+type invalidUTF8Error struct{ field string }
+
+func (e *invalidUTF8Error) Error() string {
+ if e.field == "" {
+ return "proto: invalid UTF-8 detected"
+ }
+ return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
+}
+func (e *invalidUTF8Error) InvalidUTF8() bool {
+ return true
+}
+
+// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
+// This error should not be exposed to the external API as such errors should
+// be recreated with the field information.
+var errInvalidUTF8 = &invalidUTF8Error{}
+
+// isNonFatal reports whether the error is either a RequiredNotSet error
+// or an InvalidUTF8 error.
+func isNonFatal(err error) bool {
+ if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
+ return true
+ }
+ if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
+ return true
+ }
+ return false
+}
+
+type nonFatal struct{ E error }
+
+// Merge merges err into nf and reports whether it was successful.
+// Otherwise it returns false for any fatal non-nil errors.
+func (nf *nonFatal) Merge(err error) (ok bool) {
+ if err == nil {
+ return true // not an error
+ }
+ if !isNonFatal(err) {
+ return false // fatal error
+ }
+ if nf.E == nil {
+ nf.E = err // store first instance of non-fatal error
+ }
+ return true
+}
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+ Reset()
+ String() string
+ ProtoMessage()
+}
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers. It may be reused between invocations to
+// reduce memory usage. It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+ buf []byte // encode/decode byte stream
+ index int // read point
+
+ deterministic bool
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+ return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+ p.buf = p.buf[0:0] // for reading/writing
+ p.index = 0 // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+ p.buf = s
+ p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
+
+// SetDeterministic sets whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+// - Repeated serialization of a message will return the same bytes.
+// - Different processes of the same binary (which may be executing on
+// different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexicographic order. This is an implementation detail and
+// subject to change.
+func (p *Buffer) SetDeterministic(deterministic bool) {
+ p.deterministic = deterministic
+}
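+
+// Illustrative sketch (not part of the upstream sources): requesting
+// deterministic output by marshaling through a Buffer; msg is assumed to be a
+// generated message value containing map fields.
+//
+//	var b proto.Buffer
+//	b.SetDeterministic(true)
+//	if err := b.Marshal(msg); err != nil { // map entries are emitted sorted by key
+//		log.Fatal(err)
+//	}
+//	wire := b.Bytes() // deterministic encoding of msg
+//	_ = wire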
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+ return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+ p := new(int32)
+ *p = int32(v)
+ return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+ return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+ return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+ return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+ return &v
+}
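+
+// Illustrative sketch (not part of the upstream sources): the helpers above
+// make it convenient to populate optional (pointer-typed) fields of a proto2
+// message, here the assumed generated type pb.Test:
+//
+//	msg := &pb.Test{
+//		Label: proto.String("hello"),
+//		Type:  proto.Int32(17),
+//	}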
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name. Given an enum map and a value, it returns a useful string.
+func EnumName(m map[int32]string, v int32) string {
+ s, ok := m[v]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(v))
+}
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+ if data[0] == '"' {
+ // New style: enums are strings.
+ var repr string
+ if err := json.Unmarshal(data, &repr); err != nil {
+ return -1, err
+ }
+ val, ok := m[repr]
+ if !ok {
+ return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+ }
+ return val, nil
+ }
+ // Old style: enums are ints.
+ var val int32
+ if err := json.Unmarshal(data, &val); err != nil {
+ return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+ }
+ return val, nil
+}
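+
+// Illustrative sketch (not part of the upstream sources): UnmarshalJSONEnum
+// accepts either the symbolic or the numeric JSON form. The enum map below is
+// hypothetical; real maps are generated by protoc-gen-go.
+func exampleUnmarshalJSONEnum() (int32, int32, error) {
+ values := map[string]int32{"UNKNOWN": 0, "STARTED": 1}
+ bySymbol, err := UnmarshalJSONEnum(values, []byte(`"STARTED"`), "example.State")
+ if err != nil {
+ return 0, 0, err
+ }
+ byNumber, err := UnmarshalJSONEnum(values, []byte(`1`), "example.State")
+ return bySymbol, byNumber, err
+}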
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+ var u uint64
+
+ obuf := p.buf
+ index := p.index
+ p.buf = b
+ p.index = 0
+ depth := 0
+
+ fmt.Printf("\n--- %s ---\n", s)
+
+out:
+ for {
+ for i := 0; i < depth; i++ {
+ fmt.Print(" ")
+ }
+
+ index := p.index
+ if index == len(p.buf) {
+ break
+ }
+
+ op, err := p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: fetching op err %v\n", index, err)
+ break out
+ }
+ tag := op >> 3
+ wire := op & 7
+
+ switch wire {
+ default:
+ fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+ index, tag, wire)
+ break out
+
+ case WireBytes:
+ var r []byte
+
+ r, err = p.DecodeRawBytes(false)
+ if err != nil {
+ break out
+ }
+ fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+ if len(r) <= 6 {
+ for i := 0; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ } else {
+ for i := 0; i < 3; i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ fmt.Printf(" ..")
+ for i := len(r) - 3; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ }
+ fmt.Printf("\n")
+
+ case WireFixed32:
+ u, err = p.DecodeFixed32()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+ case WireFixed64:
+ u, err = p.DecodeFixed64()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+ case WireVarint:
+ u, err = p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+ case WireStartGroup:
+ fmt.Printf("%3d: t=%3d start\n", index, tag)
+ depth++
+
+ case WireEndGroup:
+ depth--
+ fmt.Printf("%3d: t=%3d end\n", index, tag)
+ }
+ }
+
+ if depth != 0 {
+ fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+ }
+ fmt.Printf("\n")
+
+ p.buf = obuf
+ p.index = index
+}
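+
+// Illustrative sketch (not part of the upstream sources): DebugPrint is most
+// useful on freshly marshaled bytes when inspecting wire-format issues.
+func debugDump(m Message) error {
+ b, err := Marshal(m)
+ if err != nil {
+ return err
+ }
+ new(Buffer).DebugPrint("example message", b)
+ return nil
+}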
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+func SetDefaults(pb Message) {
+ setDefaults(reflect.ValueOf(pb), true, false)
+}
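+
+// Illustrative sketch (not part of the upstream sources): a typical call site
+// runs SetDefaults right after a successful Unmarshal so that unset optional
+// fields carry their proto-declared defaults.
+func unmarshalWithDefaults(b []byte, m Message) error {
+ if err := Unmarshal(b, m); err != nil {
+ return err
+ }
+ SetDefaults(m)
+ return nil
+}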
+
+// v is a pointer to a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+ v = v.Elem()
+
+ defaultMu.RLock()
+ dm, ok := defaults[v.Type()]
+ defaultMu.RUnlock()
+ if !ok {
+ dm = buildDefaultMessage(v.Type())
+ defaultMu.Lock()
+ defaults[v.Type()] = dm
+ defaultMu.Unlock()
+ }
+
+ for _, sf := range dm.scalars {
+ f := v.Field(sf.index)
+ if !f.IsNil() {
+ // field already set
+ continue
+ }
+ dv := sf.value
+ if dv == nil && !zeros {
+ // no explicit default, and don't want to set zeros
+ continue
+ }
+ fptr := f.Addr().Interface() // **T
+ // TODO: Consider batching the allocations we do here.
+ switch sf.kind {
+ case reflect.Bool:
+ b := new(bool)
+ if dv != nil {
+ *b = dv.(bool)
+ }
+ *(fptr.(**bool)) = b
+ case reflect.Float32:
+ f := new(float32)
+ if dv != nil {
+ *f = dv.(float32)
+ }
+ *(fptr.(**float32)) = f
+ case reflect.Float64:
+ f := new(float64)
+ if dv != nil {
+ *f = dv.(float64)
+ }
+ *(fptr.(**float64)) = f
+ case reflect.Int32:
+ // might be an enum
+ if ft := f.Type(); ft != int32PtrType {
+ // enum
+ f.Set(reflect.New(ft.Elem()))
+ if dv != nil {
+ f.Elem().SetInt(int64(dv.(int32)))
+ }
+ } else {
+ // int32 field
+ i := new(int32)
+ if dv != nil {
+ *i = dv.(int32)
+ }
+ *(fptr.(**int32)) = i
+ }
+ case reflect.Int64:
+ i := new(int64)
+ if dv != nil {
+ *i = dv.(int64)
+ }
+ *(fptr.(**int64)) = i
+ case reflect.String:
+ s := new(string)
+ if dv != nil {
+ *s = dv.(string)
+ }
+ *(fptr.(**string)) = s
+ case reflect.Uint8:
+ // exceptional case: []byte
+ var b []byte
+ if dv != nil {
+ db := dv.([]byte)
+ b = make([]byte, len(db))
+ copy(b, db)
+ } else {
+ b = []byte{}
+ }
+ *(fptr.(*[]byte)) = b
+ case reflect.Uint32:
+ u := new(uint32)
+ if dv != nil {
+ *u = dv.(uint32)
+ }
+ *(fptr.(**uint32)) = u
+ case reflect.Uint64:
+ u := new(uint64)
+ if dv != nil {
+ *u = dv.(uint64)
+ }
+ *(fptr.(**uint64)) = u
+ default:
+ log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+ }
+ }
+
+ for _, ni := range dm.nested {
+ f := v.Field(ni)
+ // f is *T or []*T or map[T]*T
+ switch f.Kind() {
+ case reflect.Ptr:
+ if f.IsNil() {
+ continue
+ }
+ setDefaults(f, recur, zeros)
+
+ case reflect.Slice:
+ for i := 0; i < f.Len(); i++ {
+ e := f.Index(i)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+
+ case reflect.Map:
+ for _, k := range f.MapKeys() {
+ e := f.MapIndex(k)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+ }
+ }
+}
+
+var (
+ // defaults maps a protocol buffer struct type to its defaultMessage, which
+ // records the scalar fields that carry proto-declared default values and the
+ // indices of its nested message fields.
+ defaultMu sync.RWMutex
+ defaults = make(map[reflect.Type]defaultMessage)
+
+ int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+ scalars []scalarField
+ nested []int // struct field index of nested messages
+}
+
+type scalarField struct {
+ index int // struct field index
+ kind reflect.Kind // element type (the T in *T or []T)
+ value interface{} // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+ sprop := GetProperties(t)
+ for _, prop := range sprop.Prop {
+ fi, ok := sprop.decoderTags.get(prop.Tag)
+ if !ok {
+ // XXX_unrecognized
+ continue
+ }
+ ft := t.Field(fi).Type
+
+ sf, nested, err := fieldDefault(ft, prop)
+ switch {
+ case err != nil:
+ log.Print(err)
+ case nested:
+ dm.nested = append(dm.nested, fi)
+ case sf != nil:
+ sf.index = fi
+ dm.scalars = append(dm.scalars, *sf)
+ }
+ }
+
+ return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field cannot have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+ var canHaveDefault bool
+ switch ft.Kind() {
+ case reflect.Ptr:
+ if ft.Elem().Kind() == reflect.Struct {
+ nestedMessage = true
+ } else {
+ canHaveDefault = true // proto2 scalar field
+ }
+
+ case reflect.Slice:
+ switch ft.Elem().Kind() {
+ case reflect.Ptr:
+ nestedMessage = true // repeated message
+ case reflect.Uint8:
+ canHaveDefault = true // bytes field
+ }
+
+ case reflect.Map:
+ if ft.Elem().Kind() == reflect.Ptr {
+ nestedMessage = true // map with message values
+ }
+ }
+
+ if !canHaveDefault {
+ if nestedMessage {
+ return nil, true, nil
+ }
+ return nil, false, nil
+ }
+
+ // We now know that ft is a pointer or slice.
+ sf = &scalarField{kind: ft.Elem().Kind()}
+
+ // scalar fields without defaults
+ if !prop.HasDefault {
+ return sf, false, nil
+ }
+
+ // a scalar field: either *T or []byte
+ switch ft.Elem().Kind() {
+ case reflect.Bool:
+ x, err := strconv.ParseBool(prop.Default)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Float32:
+ x, err := strconv.ParseFloat(prop.Default, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+ }
+ sf.value = float32(x)
+ case reflect.Float64:
+ x, err := strconv.ParseFloat(prop.Default, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Int32:
+ x, err := strconv.ParseInt(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+ }
+ sf.value = int32(x)
+ case reflect.Int64:
+ x, err := strconv.ParseInt(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.String:
+ sf.value = prop.Default
+ case reflect.Uint8:
+ // []byte (not *uint8)
+ sf.value = []byte(prop.Default)
+ case reflect.Uint32:
+ x, err := strconv.ParseUint(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+ }
+ sf.value = uint32(x)
+ case reflect.Uint64:
+ x, err := strconv.ParseUint(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ default:
+ return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+ }
+
+ return sf, false, nil
+}
+
+// mapKeys returns a sort.Interface to be used for sorting the map keys.
+// Map fields may have key types of non-float scalars, strings and enums.
+func mapKeys(vs []reflect.Value) sort.Interface {
+ s := mapKeySorter{vs: vs}
+
+ // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
+ if len(vs) == 0 {
+ return s
+ }
+ switch vs[0].Kind() {
+ case reflect.Int32, reflect.Int64:
+ s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+ case reflect.Uint32, reflect.Uint64:
+ s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+ case reflect.Bool:
+ s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
+ case reflect.String:
+ s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
+ default:
+ panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
+ }
+
+ return s
+}
+
+type mapKeySorter struct {
+ vs []reflect.Value
+ less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+ return s.less(s.vs[i], s.vs[j])
+}
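+
+// Illustrative sketch (not part of the upstream sources): mapKeys is how the
+// deterministic encoder orders map entries; sorting the reflected keys of any
+// supported map looks like this.
+func sortedMapKeys(m reflect.Value) []reflect.Value {
+ keys := m.MapKeys()
+ sort.Sort(mapKeys(keys))
+ return keys
+}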
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint32, reflect.Uint64:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.String:
+ return v.String() == ""
+ }
+ return false
+}
+
+const (
+ // ProtoPackageIsVersion3 is referenced from generated protocol buffer files
+ // to assert that the generated code is compatible with this version of the proto package.
+ ProtoPackageIsVersion3 = true
+
+ // ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+ // to assert that the generated code is compatible with this version of the proto package.
+ ProtoPackageIsVersion2 = true
+
+ // ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+ // to assert that the generated code is compatible with this version of the proto package.
+ ProtoPackageIsVersion1 = true
+)
+
+// InternalMessageInfo is a type used internally by generated .pb.go files.
+// This type is not intended to be used by non-generated code.
+// This type is not subject to any compatibility guarantee.
+type InternalMessageInfo struct {
+ marshal *marshalInfo
+ unmarshal *unmarshalInfo
+ merge *mergeInfo
+ discard *discardInfo
+}
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
new file mode 100644
index 0000000..f48a756
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -0,0 +1,181 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+ "errors"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
+
+type _MessageSet_Item struct {
+ TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
+ Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+ Item []*_MessageSet_Item `protobuf:"group,1,rep"`
+ XXX_unrecognized []byte
+ // TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+ MessageTypeId() int32
+}
+
+func (ms *messageSet) find(pb Message) *_MessageSet_Item {
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return nil
+ }
+ id := mti.MessageTypeId()
+ for _, item := range ms.Item {
+ if *item.TypeId == id {
+ return item
+ }
+ }
+ return nil
+}
+
+func (ms *messageSet) Has(pb Message) bool {
+ return ms.find(pb) != nil
+}
+
+func (ms *messageSet) Unmarshal(pb Message) error {
+ if item := ms.find(pb); item != nil {
+ return Unmarshal(item.Message, pb)
+ }
+ if _, ok := pb.(messageTypeIder); !ok {
+ return errNoMessageTypeID
+ }
+ return nil // TODO: return error instead?
+}
+
+func (ms *messageSet) Marshal(pb Message) error {
+ msg, err := Marshal(pb)
+ if err != nil {
+ return err
+ }
+ if item := ms.find(pb); item != nil {
+ // reuse existing item
+ item.Message = msg
+ return nil
+ }
+
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return errNoMessageTypeID
+ }
+
+ mtid := mti.MessageTypeId()
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: &mtid,
+ Message: msg,
+ })
+ return nil
+}
+
+func (ms *messageSet) Reset() { *ms = messageSet{} }
+func (ms *messageSet) String() string { return CompactTextString(ms) }
+func (*messageSet) ProtoMessage() {}
+
+// Support for the message_set_wire_format message option.
+
+func skipVarint(buf []byte) []byte {
+ i := 0
+ for ; buf[i]&0x80 != 0; i++ {
+ }
+ return buf[i+1:]
+}
+
+// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func unmarshalMessageSet(buf []byte, exts interface{}) error {
+ var m map[int32]Extension
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ m = exts.extensionsWrite()
+ case map[int32]Extension:
+ m = exts
+ default:
+ return errors.New("proto: not an extension map")
+ }
+
+ ms := new(messageSet)
+ if err := Unmarshal(buf, ms); err != nil {
+ return err
+ }
+ for _, item := range ms.Item {
+ id := *item.TypeId
+ msg := item.Message
+
+ // Restore wire type and field number varint, plus length varint.
+ // Be careful to preserve duplicate items.
+ b := EncodeVarint(uint64(id)<<3 | WireBytes)
+ if ext, ok := m[id]; ok {
+ // Existing data; rip off the tag and length varint
+ // so we join the new data correctly.
+ // We can assume that ext.enc is set because we are unmarshaling.
+ o := ext.enc[len(b):] // skip wire type and field number
+ _, n := DecodeVarint(o) // calculate length of length varint
+ o = o[n:] // skip length varint
+ msg = append(o, msg...) // join old data and new data
+ }
+ b = append(b, EncodeVarint(uint64(len(msg)))...)
+ b = append(b, msg...)
+
+ m[id] = Extension{enc: b}
+ }
+ return nil
+}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
new file mode 100644
index 0000000..94fa919
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,360 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build purego appengine js
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+ "reflect"
+ "sync"
+)
+
+const unsafeAllowed = false
+
+// A field identifies a field in a struct, accessible from a pointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// zeroField is a noop when calling pointer.offset.
+var zeroField = field([]int{})
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// The pointer type is for the table-driven decoder.
+// The implementation here uses a reflect.Value of pointer type to
+// create a generic pointer. In pointer_unsafe.go we use unsafe
+// instead of reflect to implement the same (but faster) interface.
+type pointer struct {
+ v reflect.Value
+}
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+ return pointer{v: reflect.ValueOf(*i)}
+}
+
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr, deref bool) pointer {
+ v := reflect.ValueOf(*i)
+ u := reflect.New(v.Type())
+ u.Elem().Set(v)
+ if deref {
+ u = u.Elem()
+ }
+ return pointer{v: u}
+}
+
+// valToPointer converts v to a pointer. v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+ return pointer{v: v}
+}
+
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+ return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
+}
+
+func (p pointer) isNil() bool {
+ return p.v.IsNil()
+}
+
+// grow updates the slice s in place to make it one element longer.
+// s must be addressable.
+// Returns the (addressable) new element.
+func grow(s reflect.Value) reflect.Value {
+ n, m := s.Len(), s.Cap()
+ if n < m {
+ s.SetLen(n + 1)
+ } else {
+ s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
+ }
+ return s.Index(n)
+}
+
+func (p pointer) toInt64() *int64 {
+ return p.v.Interface().(*int64)
+}
+func (p pointer) toInt64Ptr() **int64 {
+ return p.v.Interface().(**int64)
+}
+func (p pointer) toInt64Slice() *[]int64 {
+ return p.v.Interface().(*[]int64)
+}
+
+var int32ptr = reflect.TypeOf((*int32)(nil))
+
+func (p pointer) toInt32() *int32 {
+ return p.v.Convert(int32ptr).Interface().(*int32)
+}
+
+// The toInt32Ptr/Slice methods don't work because of enums.
+// Instead, we must use set/get methods for the int32ptr/slice case.
+/*
+ func (p pointer) toInt32Ptr() **int32 {
+ return p.v.Interface().(**int32)
+}
+ func (p pointer) toInt32Slice() *[]int32 {
+ return p.v.Interface().(*[]int32)
+}
+*/
+func (p pointer) getInt32Ptr() *int32 {
+ if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+ // raw int32 type
+ return p.v.Elem().Interface().(*int32)
+ }
+ // an enum
+ return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
+}
+func (p pointer) setInt32Ptr(v int32) {
+ // Allocate value in a *int32. Possibly convert that to a *enum.
+ // Then assign it to a **int32 or **enum.
+ // Note: we can convert *int32 to *enum, but we can't convert
+ // **int32 to **enum!
+ p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
+}
+
+// getInt32Slice copies []int32 from p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getInt32Slice() []int32 {
+ if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+ // raw int32 type
+ return p.v.Elem().Interface().([]int32)
+ }
+ // an enum
+ // Allocate a []int32, then assign []enum's values into it.
+ // Note: we can't convert []enum to []int32.
+ slice := p.v.Elem()
+ s := make([]int32, slice.Len())
+ for i := 0; i < slice.Len(); i++ {
+ s[i] = int32(slice.Index(i).Int())
+ }
+ return s
+}
+
+// setInt32Slice copies []int32 into p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setInt32Slice(v []int32) {
+ if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+ // raw int32 type
+ p.v.Elem().Set(reflect.ValueOf(v))
+ return
+ }
+ // an enum
+ // Allocate a []enum, then assign []int32's values into it.
+ // Note: we can't convert []enum to []int32.
+ slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
+ for i, x := range v {
+ slice.Index(i).SetInt(int64(x))
+ }
+ p.v.Elem().Set(slice)
+}
+func (p pointer) appendInt32Slice(v int32) {
+ grow(p.v.Elem()).SetInt(int64(v))
+}
+
+func (p pointer) toUint64() *uint64 {
+ return p.v.Interface().(*uint64)
+}
+func (p pointer) toUint64Ptr() **uint64 {
+ return p.v.Interface().(**uint64)
+}
+func (p pointer) toUint64Slice() *[]uint64 {
+ return p.v.Interface().(*[]uint64)
+}
+func (p pointer) toUint32() *uint32 {
+ return p.v.Interface().(*uint32)
+}
+func (p pointer) toUint32Ptr() **uint32 {
+ return p.v.Interface().(**uint32)
+}
+func (p pointer) toUint32Slice() *[]uint32 {
+ return p.v.Interface().(*[]uint32)
+}
+func (p pointer) toBool() *bool {
+ return p.v.Interface().(*bool)
+}
+func (p pointer) toBoolPtr() **bool {
+ return p.v.Interface().(**bool)
+}
+func (p pointer) toBoolSlice() *[]bool {
+ return p.v.Interface().(*[]bool)
+}
+func (p pointer) toFloat64() *float64 {
+ return p.v.Interface().(*float64)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+ return p.v.Interface().(**float64)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+ return p.v.Interface().(*[]float64)
+}
+func (p pointer) toFloat32() *float32 {
+ return p.v.Interface().(*float32)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+ return p.v.Interface().(**float32)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+ return p.v.Interface().(*[]float32)
+}
+func (p pointer) toString() *string {
+ return p.v.Interface().(*string)
+}
+func (p pointer) toStringPtr() **string {
+ return p.v.Interface().(**string)
+}
+func (p pointer) toStringSlice() *[]string {
+ return p.v.Interface().(*[]string)
+}
+func (p pointer) toBytes() *[]byte {
+ return p.v.Interface().(*[]byte)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+ return p.v.Interface().(*[][]byte)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+ return p.v.Interface().(*XXX_InternalExtensions)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+ return p.v.Interface().(*map[int32]Extension)
+}
+func (p pointer) getPointer() pointer {
+ return pointer{v: p.v.Elem()}
+}
+func (p pointer) setPointer(q pointer) {
+ p.v.Elem().Set(q.v)
+}
+func (p pointer) appendPointer(q pointer) {
+ grow(p.v.Elem()).Set(q.v)
+}
+
+// getPointerSlice copies []*T from p as a new []pointer.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getPointerSlice() []pointer {
+ if p.v.IsNil() {
+ return nil
+ }
+ n := p.v.Elem().Len()
+ s := make([]pointer, n)
+ for i := 0; i < n; i++ {
+ s[i] = pointer{v: p.v.Elem().Index(i)}
+ }
+ return s
+}
+
+// setPointerSlice copies []pointer into p as a new []*T.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setPointerSlice(v []pointer) {
+ if v == nil {
+ p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
+ return
+ }
+ s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
+ for _, p := range v {
+ s = reflect.Append(s, p.v)
+ }
+ p.v.Elem().Set(s)
+}
+
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed by p.
+func (p pointer) getInterfacePointer() pointer {
+ if p.v.Elem().IsNil() {
+ return pointer{v: p.v.Elem()}
+ }
+ return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
+}
+
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+ // TODO: check that p.v.Type().Elem() == t?
+ return p.v
+}
+
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
+}
+
+var atomicLock sync.Mutex
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 0000000..dbfffe0
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,313 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !purego,!appengine,!js
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+ "reflect"
+ "sync/atomic"
+ "unsafe"
+)
+
+const unsafeAllowed = true
+
+// A field identifies a field in a struct, accessible from a pointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// zeroField is a noop when calling pointer.offset.
+const zeroField = field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+ return f != invalidField
+}
+
+// The pointer type below is for the new table-driven encoder/decoder.
+// The implementation here uses unsafe.Pointer to create a generic pointer.
+// In pointer_reflect.go we use reflect instead of unsafe to implement
+// the same (but slower) interface.
+type pointer struct {
+ p unsafe.Pointer
+}
+
+// size of pointer
+var ptrSize = unsafe.Sizeof(uintptr(0))
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+ // Super-tricky - read pointer out of data word of interface value.
+ // Saves ~25ns over the equivalent:
+ // return valToPointer(reflect.ValueOf(*i))
+ return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+}
+
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) {
+ // Super-tricky - read or get the address of data word of interface value.
+ if isptr {
+ // The interface is of pointer type, thus it is a direct interface.
+ // The data word is the pointer data itself. We take its address.
+ p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
+ } else {
+ // The interface is not of pointer type. The data word is the pointer
+ // to the data.
+ p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+ }
+ if deref {
+ p.p = *(*unsafe.Pointer)(p.p)
+ }
+ return p
+}
+
+// valToPointer converts v to a pointer. v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+ return pointer{p: unsafe.Pointer(v.Pointer())}
+}
+
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+ // For safety, we should panic if !f.IsValid; however, calling panic would
+ // make this method no longer inlineable, which is a serious performance cost.
+ /*
+ if !f.IsValid() {
+ panic("invalid field")
+ }
+ */
+ return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
+}
+
+func (p pointer) isNil() bool {
+ return p.p == nil
+}
+
+func (p pointer) toInt64() *int64 {
+ return (*int64)(p.p)
+}
+func (p pointer) toInt64Ptr() **int64 {
+ return (**int64)(p.p)
+}
+func (p pointer) toInt64Slice() *[]int64 {
+ return (*[]int64)(p.p)
+}
+func (p pointer) toInt32() *int32 {
+ return (*int32)(p.p)
+}
+
+// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
+/*
+ func (p pointer) toInt32Ptr() **int32 {
+ return (**int32)(p.p)
+ }
+ func (p pointer) toInt32Slice() *[]int32 {
+ return (*[]int32)(p.p)
+ }
+*/
+func (p pointer) getInt32Ptr() *int32 {
+ return *(**int32)(p.p)
+}
+func (p pointer) setInt32Ptr(v int32) {
+ *(**int32)(p.p) = &v
+}
+
+// getInt32Slice loads a []int32 from p.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getInt32Slice() []int32 {
+ return *(*[]int32)(p.p)
+}
+
+// setInt32Slice stores a []int32 to p.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setInt32Slice(v []int32) {
+ *(*[]int32)(p.p) = v
+}
+
+// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
+func (p pointer) appendInt32Slice(v int32) {
+ s := (*[]int32)(p.p)
+ *s = append(*s, v)
+}
+
+func (p pointer) toUint64() *uint64 {
+ return (*uint64)(p.p)
+}
+func (p pointer) toUint64Ptr() **uint64 {
+ return (**uint64)(p.p)
+}
+func (p pointer) toUint64Slice() *[]uint64 {
+ return (*[]uint64)(p.p)
+}
+func (p pointer) toUint32() *uint32 {
+ return (*uint32)(p.p)
+}
+func (p pointer) toUint32Ptr() **uint32 {
+ return (**uint32)(p.p)
+}
+func (p pointer) toUint32Slice() *[]uint32 {
+ return (*[]uint32)(p.p)
+}
+func (p pointer) toBool() *bool {
+ return (*bool)(p.p)
+}
+func (p pointer) toBoolPtr() **bool {
+ return (**bool)(p.p)
+}
+func (p pointer) toBoolSlice() *[]bool {
+ return (*[]bool)(p.p)
+}
+func (p pointer) toFloat64() *float64 {
+ return (*float64)(p.p)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+ return (**float64)(p.p)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+ return (*[]float64)(p.p)
+}
+func (p pointer) toFloat32() *float32 {
+ return (*float32)(p.p)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+ return (**float32)(p.p)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+ return (*[]float32)(p.p)
+}
+func (p pointer) toString() *string {
+ return (*string)(p.p)
+}
+func (p pointer) toStringPtr() **string {
+ return (**string)(p.p)
+}
+func (p pointer) toStringSlice() *[]string {
+ return (*[]string)(p.p)
+}
+func (p pointer) toBytes() *[]byte {
+ return (*[]byte)(p.p)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+ return (*[][]byte)(p.p)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+ return (*XXX_InternalExtensions)(p.p)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+ return (*map[int32]Extension)(p.p)
+}
+
+// getPointerSlice loads []*T from p as a []pointer.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getPointerSlice() []pointer {
+ // Super-tricky - p should point to a []*T where T is a
+ // message type. We load it as []pointer.
+ return *(*[]pointer)(p.p)
+}
+
+// setPointerSlice stores []pointer into p as a []*T.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setPointerSlice(v []pointer) {
+ // Super-tricky - p should point to a []*T where T is a
+ // message type. We store it as []pointer.
+ *(*[]pointer)(p.p) = v
+}
+
+// getPointer loads the pointer at p and returns it.
+func (p pointer) getPointer() pointer {
+ return pointer{p: *(*unsafe.Pointer)(p.p)}
+}
+
+// setPointer stores the pointer q at p.
+func (p pointer) setPointer(q pointer) {
+ *(*unsafe.Pointer)(p.p) = q.p
+}
+
+// append q to the slice pointed to by p.
+func (p pointer) appendPointer(q pointer) {
+ s := (*[]unsafe.Pointer)(p.p)
+ *s = append(*s, q.p)
+}
+
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed by p.
+func (p pointer) getInterfacePointer() pointer {
+ // Super-tricky - read pointer out of data word of interface value.
+ return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
+}
+
+// asPointerTo returns a reflect.Value that is a pointer to an
+// object of type t stored at p.
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+ return reflect.NewAt(t, p.p)
+}
+
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+ return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+ return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+ return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+ return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 0000000..a4b8c0c
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,544 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for describing the properties of protocol buffer struct fields.
+ */
+
+import (
+ "fmt"
+ "log"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+ WireVarint = 0
+ WireFixed64 = 1
+ WireBytes = 2
+ WireStartGroup = 3
+ WireEndGroup = 4
+ WireFixed32 = 5
+)
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+ fastTags []int
+ slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+ if t > 0 && t < tagMapFastLimit {
+ if t >= len(p.fastTags) {
+ return 0, false
+ }
+ fi := p.fastTags[t]
+ return fi, fi >= 0
+ }
+ fi, ok := p.slowTags[t]
+ return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+ if t > 0 && t < tagMapFastLimit {
+ for len(p.fastTags) < t+1 {
+ p.fastTags = append(p.fastTags, -1)
+ }
+ p.fastTags[t] = fi
+ return
+ }
+ if p.slowTags == nil {
+ p.slowTags = make(map[int]int)
+ }
+ p.slowTags[t] = fi
+}
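+
+// Illustrative sketch (not part of the upstream sources): small tags land in
+// the fastTags slice, while large ones fall back to the slowTags map.
+func exampleTagMap() (int, bool) {
+ var tm tagMap
+ tm.put(3, 0) // tag 3 -> struct field index 0, stored in fastTags
+ tm.put(5000, 1) // beyond tagMapFastLimit, stored in slowTags
+ return tm.get(5000)
+}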
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+ Prop []*Properties // properties for each field
+ reqCount int // required count
+ decoderTags tagMap // map from proto tag to struct field number
+ decoderOrigNames map[string]int // map from original name to struct field number
+ order []int // list of struct field numbers in tag order
+
+ // OneofTypes contains information about the oneof fields in this message.
+ // It is keyed by the original name of a field.
+ OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+ Type reflect.Type // pointer to generated struct type for this oneof field
+ Field int // struct field number of the containing oneof in the message
+ Prop *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+ return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+ Name string // name of the field, for error messages
+ OrigName string // original name before protocol compiler (always set)
+ JSONName string // name to use for JSON; determined by protoc
+ Wire string
+ WireType int
+ Tag int
+ Required bool
+ Optional bool
+ Repeated bool
+ Packed bool // relevant for repeated primitives only
+ Enum string // set for enum types only
+ proto3 bool // whether this is known to be a proto3 field
+ oneof bool // whether this is a oneof field
+
+ Default string // default value
+ HasDefault bool // whether an explicit default was provided
+
+ stype reflect.Type // set for struct types only
+ sprop *StructProperties // set for struct types only
+
+ mtype reflect.Type // set for map types only
+ MapKeyProp *Properties // set for map types only
+ MapValProp *Properties // set for map types only
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+ s := p.Wire
+ s += ","
+ s += strconv.Itoa(p.Tag)
+ if p.Required {
+ s += ",req"
+ }
+ if p.Optional {
+ s += ",opt"
+ }
+ if p.Repeated {
+ s += ",rep"
+ }
+ if p.Packed {
+ s += ",packed"
+ }
+ s += ",name=" + p.OrigName
+ if p.JSONName != p.OrigName {
+ s += ",json=" + p.JSONName
+ }
+ if p.proto3 {
+ s += ",proto3"
+ }
+ if p.oneof {
+ s += ",oneof"
+ }
+ if len(p.Enum) > 0 {
+ s += ",enum=" + p.Enum
+ }
+ if p.HasDefault {
+ s += ",def=" + p.Default
+ }
+ return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+ // "bytes,49,opt,name=foo,def=hello!"
+ fields := strings.Split(s, ",") // breaks def=, but handled below.
+ if len(fields) < 2 {
+ log.Printf("proto: tag has too few fields: %q", s)
+ return
+ }
+
+ p.Wire = fields[0]
+ switch p.Wire {
+ case "varint":
+ p.WireType = WireVarint
+ case "fixed32":
+ p.WireType = WireFixed32
+ case "fixed64":
+ p.WireType = WireFixed64
+ case "zigzag32":
+ p.WireType = WireVarint
+ case "zigzag64":
+ p.WireType = WireVarint
+ case "bytes", "group":
+ p.WireType = WireBytes
+ // no numeric converter for non-numeric types
+ default:
+ log.Printf("proto: tag has unknown wire type: %q", s)
+ return
+ }
+
+ var err error
+ p.Tag, err = strconv.Atoi(fields[1])
+ if err != nil {
+ return
+ }
+
+outer:
+ for i := 2; i < len(fields); i++ {
+ f := fields[i]
+ switch {
+ case f == "req":
+ p.Required = true
+ case f == "opt":
+ p.Optional = true
+ case f == "rep":
+ p.Repeated = true
+ case f == "packed":
+ p.Packed = true
+ case strings.HasPrefix(f, "name="):
+ p.OrigName = f[5:]
+ case strings.HasPrefix(f, "json="):
+ p.JSONName = f[5:]
+ case strings.HasPrefix(f, "enum="):
+ p.Enum = f[5:]
+ case f == "proto3":
+ p.proto3 = true
+ case f == "oneof":
+ p.oneof = true
+ case strings.HasPrefix(f, "def="):
+ p.HasDefault = true
+ p.Default = f[4:] // rest of string
+ if i+1 < len(fields) {
+ // Commas aren't escaped, and def is always last.
+ p.Default += "," + strings.Join(fields[i+1:], ",")
+ break outer
+ }
+ }
+ }
+}
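+
+// Illustrative sketch (not part of the upstream sources): Parse decodes the
+// struct tag emitted by protoc-gen-go. The tag string below is hypothetical.
+func exampleParseTag() Properties {
+ var p Properties
+ p.Parse("bytes,3,opt,name=nick_name,json=nickName,def=anon")
+ // Now p.WireType == WireBytes, p.Tag == 3, p.OrigName == "nick_name",
+ // p.JSONName == "nickName", p.HasDefault == true and p.Default == "anon".
+ return p
+}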
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// setFieldProps initializes the field properties for submessages and maps.
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+ switch t1 := typ; t1.Kind() {
+ case reflect.Ptr:
+ if t1.Elem().Kind() == reflect.Struct {
+ p.stype = t1.Elem()
+ }
+
+ case reflect.Slice:
+ if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
+ p.stype = t2.Elem()
+ }
+
+ case reflect.Map:
+ p.mtype = t1
+ p.MapKeyProp = &Properties{}
+ p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+ p.MapValProp = &Properties{}
+ vtype := p.mtype.Elem()
+ if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+ // The value type is not a message (*T) or bytes ([]byte),
+ // so we need encoders for the pointer to this type.
+ vtype = reflect.PtrTo(vtype)
+ }
+ p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+ }
+
+ if p.stype != nil {
+ if lockGetProp {
+ p.sprop = GetProperties(p.stype)
+ } else {
+ p.sprop = getPropertiesLocked(p.stype)
+ }
+ }
+}
+
+var (
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+)
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+ p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+ // "bytes,49,opt,def=hello!"
+ p.Name = name
+ p.OrigName = name
+ if tag == "" {
+ return
+ }
+ p.Parse(tag)
+ p.setFieldProps(typ, f, lockGetProp)
+}
+
+var (
+ propertiesMu sync.RWMutex
+ propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+ if t.Kind() != reflect.Struct {
+ panic("proto: type must have kind struct")
+ }
+
+ // Most calls to GetProperties in a long-running program will be
+ // retrieving details for types we have seen before.
+ propertiesMu.RLock()
+ sprop, ok := propertiesMap[t]
+ propertiesMu.RUnlock()
+ if ok {
+ return sprop
+ }
+
+ propertiesMu.Lock()
+ sprop = getPropertiesLocked(t)
+ propertiesMu.Unlock()
+ return sprop
+}
+
+type (
+ oneofFuncsIface interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+ }
+ oneofWrappersIface interface {
+ XXX_OneofWrappers() []interface{}
+ }
+)
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+ if prop, ok := propertiesMap[t]; ok {
+ return prop
+ }
+
+ prop := new(StructProperties)
+ // in case of recursive protos, fill this in now.
+ propertiesMap[t] = prop
+
+ // build properties
+ prop.Prop = make([]*Properties, t.NumField())
+ prop.order = make([]int, t.NumField())
+
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ p := new(Properties)
+ name := f.Name
+ p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+ oneof := f.Tag.Get("protobuf_oneof") // special case
+ if oneof != "" {
+ // Oneof fields don't use the traditional protobuf tag.
+ p.OrigName = oneof
+ }
+ prop.Prop[i] = p
+ prop.order[i] = i
+ if debug {
+ print(i, " ", f.Name, " ", t.String(), " ")
+ if p.Tag > 0 {
+ print(p.String())
+ }
+ print("\n")
+ }
+ }
+
+ // Re-order prop.order.
+ sort.Sort(prop)
+
+ var oots []interface{}
+ switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+ case oneofFuncsIface:
+ _, _, _, oots = m.XXX_OneofFuncs()
+ case oneofWrappersIface:
+ oots = m.XXX_OneofWrappers()
+ }
+ if len(oots) > 0 {
+ // Interpret oneof metadata.
+ prop.OneofTypes = make(map[string]*OneofProperties)
+ for _, oot := range oots {
+ oop := &OneofProperties{
+ Type: reflect.ValueOf(oot).Type(), // *T
+ Prop: new(Properties),
+ }
+ sft := oop.Type.Elem().Field(0)
+ oop.Prop.Name = sft.Name
+ oop.Prop.Parse(sft.Tag.Get("protobuf"))
+ // There will be exactly one interface field that
+ // this new value is assignable to.
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Type.Kind() != reflect.Interface {
+ continue
+ }
+ if !oop.Type.AssignableTo(f.Type) {
+ continue
+ }
+ oop.Field = i
+ break
+ }
+ prop.OneofTypes[oop.Prop.OrigName] = oop
+ }
+ }
+
+ // build required counts
+ // build tags
+ reqCount := 0
+ prop.decoderOrigNames = make(map[string]int)
+ for i, p := range prop.Prop {
+ if strings.HasPrefix(p.Name, "XXX_") {
+ // Internal fields should not appear in tags/origNames maps.
+ // They are handled specially when encoding and decoding.
+ continue
+ }
+ if p.Required {
+ reqCount++
+ }
+ prop.decoderTags.put(p.Tag, i)
+ prop.decoderOrigNames[p.OrigName] = i
+ }
+ prop.reqCount = reqCount
+
+ return prop
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+ if _, ok := enumValueMaps[typeName]; ok {
+ panic("proto: duplicate enum registered: " + typeName)
+ }
+ enumValueMaps[typeName] = valueMap
+}
+
+// EnumValueMap returns the mapping from names to integers of the
+// enum type enumType, or nil if not found.
+func EnumValueMap(enumType string) map[string]int32 {
+ return enumValueMaps[enumType]
+}
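+
+// Illustrative sketch (not part of the upstream sources): generated code
+// registers each enum roughly like this, after which callers can resolve
+// symbolic names through EnumValueMap. The enum name here is hypothetical.
+func exampleEnumRegistry() int32 {
+ RegisterEnum("example.State",
+ map[int32]string{0: "UNKNOWN", 1: "STARTED"},
+ map[string]int32{"UNKNOWN": 0, "STARTED": 1})
+ return EnumValueMap("example.State")["STARTED"]
+}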
+
+// A registry of all linked message types.
+// The string is a fully-qualified proto name ("pkg.Message").
+var (
+ protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers
+ protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types
+ revProtoTypes = make(map[reflect.Type]string)
+)
+
+// RegisterType is called from generated code and maps from the fully qualified
+// proto name to the type (pointer to struct) of the protocol buffer.
+func RegisterType(x Message, name string) {
+ if _, ok := protoTypedNils[name]; ok {
+ // TODO: Some day, make this a panic.
+ log.Printf("proto: duplicate proto type registered: %s", name)
+ return
+ }
+ t := reflect.TypeOf(x)
+ if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
+ // Generated code always calls RegisterType with nil x.
+ // This check is just for extra safety.
+ protoTypedNils[name] = x
+ } else {
+ protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
+ }
+ revProtoTypes[t] = name
+}
+
+// RegisterMapType is called from generated code and maps from the fully qualified
+// proto name to the native map type of the proto map definition.
+func RegisterMapType(x interface{}, name string) {
+ if reflect.TypeOf(x).Kind() != reflect.Map {
+ panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
+ }
+ if _, ok := protoMapTypes[name]; ok {
+ log.Printf("proto: duplicate proto type registered: %s", name)
+ return
+ }
+ t := reflect.TypeOf(x)
+ protoMapTypes[name] = t
+ revProtoTypes[t] = name
+}
+
+// MessageName returns the fully-qualified proto name for the given message type.
+func MessageName(x Message) string {
+ type xname interface {
+ XXX_MessageName() string
+ }
+ if m, ok := x.(xname); ok {
+ return m.XXX_MessageName()
+ }
+ return revProtoTypes[reflect.TypeOf(x)]
+}
+
+// MessageType returns the message type (pointer to struct) for a named message.
+// The type is not guaranteed to implement proto.Message if the name refers to a
+// map entry.
+func MessageType(name string) reflect.Type {
+ if t, ok := protoTypedNils[name]; ok {
+ return reflect.TypeOf(t)
+ }
+ return protoMapTypes[name]
+}
+
+// A registry of all linked proto files.
+var (
+ protoFiles = make(map[string][]byte) // file name => fileDescriptor
+)
+
+// RegisterFile is called from generated code and maps from the
+// full file name of a .proto file to its compressed FileDescriptorProto.
+func RegisterFile(filename string, fileDescriptor []byte) {
+ protoFiles[filename] = fileDescriptor
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
+func FileDescriptor(filename string) []byte { return protoFiles[filename] }
diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go
new file mode 100644
index 0000000..5cb11fa
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go
@@ -0,0 +1,2776 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "unicode/utf8"
+)
+
+// A sizer takes a pointer to a field and the size of its tag, and computes
+// the size of the encoded data.
+type sizer func(pointer, int) int
+
+// A marshaler takes a byte slice, a pointer to a field, and its tag (in wire format),
+// marshals the field to the end of the slice, and returns the slice and an error (if any).
+type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
+
+// marshalInfo is the information used for marshaling a message.
+type marshalInfo struct {
+ typ reflect.Type
+ fields []*marshalFieldInfo
+ unrecognized field // offset of XXX_unrecognized
+ extensions field // offset of XXX_InternalExtensions
+ v1extensions field // offset of XXX_extensions
+ sizecache field // offset of XXX_sizecache
+ initialized int32 // 0 -- only typ is set, 1 -- fully initialized
+ messageset bool // uses message set wire format
+ hasmarshaler bool // has custom marshaler
+ sync.RWMutex // protect extElems map, also for initialization
+ extElems map[int32]*marshalElemInfo // info of extension elements
+}
+
+// marshalFieldInfo is the information used for marshaling a field of a message.
+type marshalFieldInfo struct {
+ field field
+ wiretag uint64 // tag in wire format
+ tagsize int // size of tag in wire format
+ sizer sizer
+ marshaler marshaler
+ isPointer bool
+ required bool // field is required
+ name string // name of the field, for error reporting
+ oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
+}
+
+// marshalElemInfo is the information used for marshaling an extension or oneof element.
+type marshalElemInfo struct {
+ wiretag uint64 // tag in wire format
+ tagsize int // size of tag in wire format
+ sizer sizer
+ marshaler marshaler
+ isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
+ deref bool // dereference the pointer before operating on it; implies isptr
+}
+
+var (
+ marshalInfoMap = map[reflect.Type]*marshalInfo{}
+ marshalInfoLock sync.Mutex
+)
+
+// getMarshalInfo returns the information to marshal a given type of message.
+// The info it returns may not necessarily be initialized.
+// t is the type of the message (NOT the pointer to it).
+func getMarshalInfo(t reflect.Type) *marshalInfo {
+ marshalInfoLock.Lock()
+ u, ok := marshalInfoMap[t]
+ if !ok {
+ u = &marshalInfo{typ: t}
+ marshalInfoMap[t] = u
+ }
+ marshalInfoLock.Unlock()
+ return u
+}
+
+// Size is the entry point from generated code,
+// and should ONLY be called by generated code.
+// It computes the size of the encoded data of msg.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Size(msg Message) int {
+ u := getMessageMarshalInfo(msg, a)
+ ptr := toPointer(&msg)
+ if ptr.isNil() {
+ // We get here if msg is a typed nil ((*SomeMessage)(nil)),
+ // so it satisfies the interface, and msg == nil wouldn't
+ // catch it. We don't want to crash in this case.
+ return 0
+ }
+ return u.size(ptr)
+}
+
+// Marshal is the entry point from generated code,
+// and should ONLY be called by generated code.
+// It marshals msg to the end of b.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
+ u := getMessageMarshalInfo(msg, a)
+ ptr := toPointer(&msg)
+ if ptr.isNil() {
+ // We get here if msg is a typed nil ((*SomeMessage)(nil)),
+ // so it satisfies the interface, and msg == nil wouldn't
+ // catch it. We don't want to crash in this case.
+ return b, ErrNil
+ }
+ return u.marshal(b, ptr, deterministic)
+}
+
+func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
+ // u := a.marshal, but atomically.
+ // We use an atomic here to ensure memory consistency.
+ u := atomicLoadMarshalInfo(&a.marshal)
+ if u == nil {
+ // Get marshal information from type of message.
+ t := reflect.ValueOf(msg).Type()
+ if t.Kind() != reflect.Ptr {
+ panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
+ }
+ u = getMarshalInfo(t.Elem())
+ // Store it in the cache for later users.
+ // a.marshal = u, but atomically.
+ atomicStoreMarshalInfo(&a.marshal, u)
+ }
+ return u
+}
+
+// size is the main function to compute the size of the encoded data of a message.
+// ptr is the pointer to the message.
+func (u *marshalInfo) size(ptr pointer) int {
+ if atomic.LoadInt32(&u.initialized) == 0 {
+ u.computeMarshalInfo()
+ }
+
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ if u.hasmarshaler {
+ m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+ b, _ := m.Marshal()
+ return len(b)
+ }
+
+ n := 0
+ for _, f := range u.fields {
+ if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+ // nil pointer always marshals to nothing
+ continue
+ }
+ n += f.sizer(ptr.offset(f.field), f.tagsize)
+ }
+ if u.extensions.IsValid() {
+ e := ptr.offset(u.extensions).toExtensions()
+ if u.messageset {
+ n += u.sizeMessageSet(e)
+ } else {
+ n += u.sizeExtensions(e)
+ }
+ }
+ if u.v1extensions.IsValid() {
+ m := *ptr.offset(u.v1extensions).toOldExtensions()
+ n += u.sizeV1Extensions(m)
+ }
+ if u.unrecognized.IsValid() {
+ s := *ptr.offset(u.unrecognized).toBytes()
+ n += len(s)
+ }
+ // cache the result for use in marshal
+ if u.sizecache.IsValid() {
+ atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
+ }
+ return n
+}
+
+// cachedsize gets the size from the cache. If there is no cache (i.e. the message is not generated),
+// it falls back to computing the size.
+func (u *marshalInfo) cachedsize(ptr pointer) int {
+ if u.sizecache.IsValid() {
+ return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
+ }
+ return u.size(ptr)
+}
+
+// marshal is the main function to marshal a message. It takes a byte slice, appends
+// the encoded data to the end of the slice, and returns the slice and an error (if any).
+// ptr is the pointer to the message.
+// If deterministic is true, maps are marshaled in deterministic order.
+func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
+ if atomic.LoadInt32(&u.initialized) == 0 {
+ u.computeMarshalInfo()
+ }
+
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ if u.hasmarshaler {
+ m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+ b1, err := m.Marshal()
+ b = append(b, b1...)
+ return b, err
+ }
+
+ var err, errLater error
+ // The old marshaler encodes extensions at the beginning.
+ if u.extensions.IsValid() {
+ e := ptr.offset(u.extensions).toExtensions()
+ if u.messageset {
+ b, err = u.appendMessageSet(b, e, deterministic)
+ } else {
+ b, err = u.appendExtensions(b, e, deterministic)
+ }
+ if err != nil {
+ return b, err
+ }
+ }
+ if u.v1extensions.IsValid() {
+ m := *ptr.offset(u.v1extensions).toOldExtensions()
+ b, err = u.appendV1Extensions(b, m, deterministic)
+ if err != nil {
+ return b, err
+ }
+ }
+ for _, f := range u.fields {
+ if f.required {
+ if ptr.offset(f.field).getPointer().isNil() {
+ // Required field is not set.
+ // We record the error but keep going, to give a complete marshaling.
+ if errLater == nil {
+ errLater = &RequiredNotSetError{f.name}
+ }
+ continue
+ }
+ }
+ if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+ // nil pointer always marshals to nothing
+ continue
+ }
+ b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic)
+ if err != nil {
+ if err1, ok := err.(*RequiredNotSetError); ok {
+ // Required field in submessage is not set.
+ // We record the error but keep going, to give a complete marshaling.
+ if errLater == nil {
+ errLater = &RequiredNotSetError{f.name + "." + err1.field}
+ }
+ continue
+ }
+ if err == errRepeatedHasNil {
+ err = errors.New("proto: repeated field " + f.name + " has nil element")
+ }
+ if err == errInvalidUTF8 {
+ if errLater == nil {
+ fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+ errLater = &invalidUTF8Error{fullName}
+ }
+ continue
+ }
+ return b, err
+ }
+ }
+ if u.unrecognized.IsValid() {
+ s := *ptr.offset(u.unrecognized).toBytes()
+ b = append(b, s...)
+ }
+ return b, errLater
+}
+
+// computeMarshalInfo initializes the marshal info.
+func (u *marshalInfo) computeMarshalInfo() {
+ u.Lock()
+ defer u.Unlock()
+ if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock
+ return
+ }
+
+ t := u.typ
+ u.unrecognized = invalidField
+ u.extensions = invalidField
+ u.v1extensions = invalidField
+ u.sizecache = invalidField
+
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ if reflect.PtrTo(t).Implements(marshalerType) {
+ u.hasmarshaler = true
+ atomic.StoreInt32(&u.initialized, 1)
+ return
+ }
+
+ // get oneof implementers
+ var oneofImplementers []interface{}
+ switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+ case oneofFuncsIface:
+ _, _, _, oneofImplementers = m.XXX_OneofFuncs()
+ case oneofWrappersIface:
+ oneofImplementers = m.XXX_OneofWrappers()
+ }
+
+ n := t.NumField()
+
+ // deal with XXX fields first
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if !strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ switch f.Name {
+ case "XXX_sizecache":
+ u.sizecache = toField(&f)
+ case "XXX_unrecognized":
+ u.unrecognized = toField(&f)
+ case "XXX_InternalExtensions":
+ u.extensions = toField(&f)
+ u.messageset = f.Tag.Get("protobuf_messageset") == "1"
+ case "XXX_extensions":
+ u.v1extensions = toField(&f)
+ case "XXX_NoUnkeyedLiteral":
+ // nothing to do
+ default:
+ panic("unknown XXX field: " + f.Name)
+ }
+ n--
+ }
+
+ // normal fields
+ fields := make([]marshalFieldInfo, n) // batch allocation
+ u.fields = make([]*marshalFieldInfo, 0, n)
+ for i, j := 0, 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ field := &fields[j]
+ j++
+ field.name = f.Name
+ u.fields = append(u.fields, field)
+ if f.Tag.Get("protobuf_oneof") != "" {
+ field.computeOneofFieldInfo(&f, oneofImplementers)
+ continue
+ }
+ if f.Tag.Get("protobuf") == "" {
+ // field has no tag (not in generated message), ignore it
+ u.fields = u.fields[:len(u.fields)-1]
+ j--
+ continue
+ }
+ field.computeMarshalFieldInfo(&f)
+ }
+
+ // fields are marshaled in tag order on the wire.
+ sort.Sort(byTag(u.fields))
+
+ atomic.StoreInt32(&u.initialized, 1)
+}
+
+// helper for sorting fields by tag
+type byTag []*marshalFieldInfo
+
+func (a byTag) Len() int { return len(a) }
+func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag }
+
+// getExtElemInfo returns the information to marshal an extension element.
+// The info it returns is initialized.
+func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
+ // get from cache first
+ u.RLock()
+ e, ok := u.extElems[desc.Field]
+ u.RUnlock()
+ if ok {
+ return e
+ }
+
+ t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct
+ tags := strings.Split(desc.Tag, ",")
+ tag, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("tag is not an integer")
+ }
+ wt := wiretype(tags[0])
+ if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct {
+ t = t.Elem()
+ }
+ sizer, marshaler := typeMarshaler(t, tags, false, false)
+ var deref bool
+ if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+ t = reflect.PtrTo(t)
+ deref = true
+ }
+ e = &marshalElemInfo{
+ wiretag: uint64(tag)<<3 | wt,
+ tagsize: SizeVarint(uint64(tag) << 3),
+ sizer: sizer,
+ marshaler: marshaler,
+ isptr: t.Kind() == reflect.Ptr,
+ deref: deref,
+ }
+
+ // update cache
+ u.Lock()
+ if u.extElems == nil {
+ u.extElems = make(map[int32]*marshalElemInfo)
+ }
+ u.extElems[desc.Field] = e
+ u.Unlock()
+ return e
+}
+
+// computeMarshalFieldInfo fills in the information needed to marshal a field.
+func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
+ // parse the protobuf tag of the field.
+ // the tag has the format "bytes,49,opt,name=foo,def=hello!"
+ tags := strings.Split(f.Tag.Get("protobuf"), ",")
+ if tags[0] == "" {
+ return
+ }
+ tag, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("tag is not an integer")
+ }
+ wt := wiretype(tags[0])
+ if tags[2] == "req" {
+ fi.required = true
+ }
+ fi.setTag(f, tag, wt)
+ fi.setMarshaler(f, tags)
+}
+
+func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
+ fi.field = toField(f)
+ fi.wiretag = math.MaxInt32 // Use a large tag number so oneofs sort at the end. This tag will not appear on the wire.
+ fi.isPointer = true
+ fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
+ fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
+
+ ityp := f.Type // interface type
+ for _, o := range oneofImplementers {
+ t := reflect.TypeOf(o)
+ if !t.Implements(ityp) {
+ continue
+ }
+ sf := t.Elem().Field(0) // oneof implementer is a struct with a single field
+ tags := strings.Split(sf.Tag.Get("protobuf"), ",")
+ tag, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("tag is not an integer")
+ }
+ wt := wiretype(tags[0])
+ sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value
+ fi.oneofElems[t.Elem()] = &marshalElemInfo{
+ wiretag: uint64(tag)<<3 | wt,
+ tagsize: SizeVarint(uint64(tag) << 3),
+ sizer: sizer,
+ marshaler: marshaler,
+ }
+ }
+}
+
+// wiretype returns the wire encoding of the type.
+func wiretype(encoding string) uint64 {
+ switch encoding {
+ case "fixed32":
+ return WireFixed32
+ case "fixed64":
+ return WireFixed64
+ case "varint", "zigzag32", "zigzag64":
+ return WireVarint
+ case "bytes":
+ return WireBytes
+ case "group":
+ return WireStartGroup
+ }
+ panic("unknown wire type " + encoding)
+}
+
+// setTag fills in the tag (in wire format) and its size in the info of a field.
+func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) {
+ fi.field = toField(f)
+ fi.wiretag = uint64(tag)<<3 | wt
+ fi.tagsize = SizeVarint(uint64(tag) << 3)
+}
+
+// setMarshaler fills in the sizer and marshaler in the info of a field.
+func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) {
+ switch f.Type.Kind() {
+ case reflect.Map:
+ // map field
+ fi.isPointer = true
+ fi.sizer, fi.marshaler = makeMapMarshaler(f)
+ return
+ case reflect.Ptr, reflect.Slice:
+ fi.isPointer = true
+ }
+ fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false)
+}
+
+// typeMarshaler returns the sizer and marshaler of a given field.
+// t is the type of the field.
+// tags is the generated "protobuf" tag of the field.
+// If nozero is true, the zero value is not marshaled to the wire.
+// If oneof is true, it is a oneof field.
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) {
+ encoding := tags[0]
+
+ pointer := false
+ slice := false
+ if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+ slice = true
+ t = t.Elem()
+ }
+ if t.Kind() == reflect.Ptr {
+ pointer = true
+ t = t.Elem()
+ }
+
+ packed := false
+ proto3 := false
+ validateUTF8 := true
+ for i := 2; i < len(tags); i++ {
+ if tags[i] == "packed" {
+ packed = true
+ }
+ if tags[i] == "proto3" {
+ proto3 = true
+ }
+ }
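+ // Only proto3 string fields are checked for valid UTF-8; see the reflect.String case below.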
+ validateUTF8 = validateUTF8 && proto3
+
+ switch t.Kind() {
+ case reflect.Bool:
+ if pointer {
+ return sizeBoolPtr, appendBoolPtr
+ }
+ if slice {
+ if packed {
+ return sizeBoolPackedSlice, appendBoolPackedSlice
+ }
+ return sizeBoolSlice, appendBoolSlice
+ }
+ if nozero {
+ return sizeBoolValueNoZero, appendBoolValueNoZero
+ }
+ return sizeBoolValue, appendBoolValue
+ case reflect.Uint32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return sizeFixed32Ptr, appendFixed32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixed32PackedSlice, appendFixed32PackedSlice
+ }
+ return sizeFixed32Slice, appendFixed32Slice
+ }
+ if nozero {
+ return sizeFixed32ValueNoZero, appendFixed32ValueNoZero
+ }
+ return sizeFixed32Value, appendFixed32Value
+ case "varint":
+ if pointer {
+ return sizeVarint32Ptr, appendVarint32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarint32PackedSlice, appendVarint32PackedSlice
+ }
+ return sizeVarint32Slice, appendVarint32Slice
+ }
+ if nozero {
+ return sizeVarint32ValueNoZero, appendVarint32ValueNoZero
+ }
+ return sizeVarint32Value, appendVarint32Value
+ }
+ case reflect.Int32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return sizeFixedS32Ptr, appendFixedS32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixedS32PackedSlice, appendFixedS32PackedSlice
+ }
+ return sizeFixedS32Slice, appendFixedS32Slice
+ }
+ if nozero {
+ return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero
+ }
+ return sizeFixedS32Value, appendFixedS32Value
+ case "varint":
+ if pointer {
+ return sizeVarintS32Ptr, appendVarintS32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarintS32PackedSlice, appendVarintS32PackedSlice
+ }
+ return sizeVarintS32Slice, appendVarintS32Slice
+ }
+ if nozero {
+ return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero
+ }
+ return sizeVarintS32Value, appendVarintS32Value
+ case "zigzag32":
+ if pointer {
+ return sizeZigzag32Ptr, appendZigzag32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeZigzag32PackedSlice, appendZigzag32PackedSlice
+ }
+ return sizeZigzag32Slice, appendZigzag32Slice
+ }
+ if nozero {
+ return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero
+ }
+ return sizeZigzag32Value, appendZigzag32Value
+ }
+ case reflect.Uint64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return sizeFixed64Ptr, appendFixed64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixed64PackedSlice, appendFixed64PackedSlice
+ }
+ return sizeFixed64Slice, appendFixed64Slice
+ }
+ if nozero {
+ return sizeFixed64ValueNoZero, appendFixed64ValueNoZero
+ }
+ return sizeFixed64Value, appendFixed64Value
+ case "varint":
+ if pointer {
+ return sizeVarint64Ptr, appendVarint64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarint64PackedSlice, appendVarint64PackedSlice
+ }
+ return sizeVarint64Slice, appendVarint64Slice
+ }
+ if nozero {
+ return sizeVarint64ValueNoZero, appendVarint64ValueNoZero
+ }
+ return sizeVarint64Value, appendVarint64Value
+ }
+ case reflect.Int64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return sizeFixedS64Ptr, appendFixedS64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixedS64PackedSlice, appendFixedS64PackedSlice
+ }
+ return sizeFixedS64Slice, appendFixedS64Slice
+ }
+ if nozero {
+ return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero
+ }
+ return sizeFixedS64Value, appendFixedS64Value
+ case "varint":
+ if pointer {
+ return sizeVarintS64Ptr, appendVarintS64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarintS64PackedSlice, appendVarintS64PackedSlice
+ }
+ return sizeVarintS64Slice, appendVarintS64Slice
+ }
+ if nozero {
+ return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero
+ }
+ return sizeVarintS64Value, appendVarintS64Value
+ case "zigzag64":
+ if pointer {
+ return sizeZigzag64Ptr, appendZigzag64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeZigzag64PackedSlice, appendZigzag64PackedSlice
+ }
+ return sizeZigzag64Slice, appendZigzag64Slice
+ }
+ if nozero {
+ return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero
+ }
+ return sizeZigzag64Value, appendZigzag64Value
+ }
+ case reflect.Float32:
+ if pointer {
+ return sizeFloat32Ptr, appendFloat32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFloat32PackedSlice, appendFloat32PackedSlice
+ }
+ return sizeFloat32Slice, appendFloat32Slice
+ }
+ if nozero {
+ return sizeFloat32ValueNoZero, appendFloat32ValueNoZero
+ }
+ return sizeFloat32Value, appendFloat32Value
+ case reflect.Float64:
+ if pointer {
+ return sizeFloat64Ptr, appendFloat64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFloat64PackedSlice, appendFloat64PackedSlice
+ }
+ return sizeFloat64Slice, appendFloat64Slice
+ }
+ if nozero {
+ return sizeFloat64ValueNoZero, appendFloat64ValueNoZero
+ }
+ return sizeFloat64Value, appendFloat64Value
+ case reflect.String:
+ if validateUTF8 {
+ if pointer {
+ return sizeStringPtr, appendUTF8StringPtr
+ }
+ if slice {
+ return sizeStringSlice, appendUTF8StringSlice
+ }
+ if nozero {
+ return sizeStringValueNoZero, appendUTF8StringValueNoZero
+ }
+ return sizeStringValue, appendUTF8StringValue
+ }
+ if pointer {
+ return sizeStringPtr, appendStringPtr
+ }
+ if slice {
+ return sizeStringSlice, appendStringSlice
+ }
+ if nozero {
+ return sizeStringValueNoZero, appendStringValueNoZero
+ }
+ return sizeStringValue, appendStringValue
+ case reflect.Slice:
+ if slice {
+ return sizeBytesSlice, appendBytesSlice
+ }
+ if oneof {
+ // A oneof bytes field may also have the "proto3" tag.
+ // We want to marshal it as a oneof field, so do this
+ // check before the proto3 check.
+ return sizeBytesOneof, appendBytesOneof
+ }
+ if proto3 {
+ return sizeBytes3, appendBytes3
+ }
+ return sizeBytes, appendBytes
+ case reflect.Struct:
+ switch encoding {
+ case "group":
+ if slice {
+ return makeGroupSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeGroupMarshaler(getMarshalInfo(t))
+ case "bytes":
+ if slice {
+ return makeMessageSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeMessageMarshaler(getMarshalInfo(t))
+ }
+ }
+ panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding))
+}
+
+// Below are functions to size/marshal a specific type of a field.
+// They are stored in the field's info, and called by function pointers.
+// They have type sizer or marshaler.
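+// Naming convention: Value handles a plain scalar field, ValueNoZero skips the
+// zero value, Ptr handles an optional (pointer) field, Slice a repeated field,
+// and PackedSlice a packed repeated field.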
+
+func sizeFixed32Value(_ pointer, tagsize int) int {
+ return 4 + tagsize
+}
+func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixed32Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixed32Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ return (4 + tagsize) * len(s)
+}
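+// A packed repeated field is encoded as a single length-delimited record:
+// the tag, a varint byte count, then the concatenated payload.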
+func sizeFixed32PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFixedS32Value(_ pointer, tagsize int) int {
+ return 4 + tagsize
+}
+func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixedS32Ptr(ptr pointer, tagsize int) int {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixedS32Slice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ return (4 + tagsize) * len(s)
+}
+func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFloat32Value(_ pointer, tagsize int) int {
+ return 4 + tagsize
+}
+func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int {
+ v := math.Float32bits(*ptr.toFloat32())
+ if v == 0 {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFloat32Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toFloat32Ptr()
+ if p == nil {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFloat32Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat32Slice()
+ return (4 + tagsize) * len(s)
+}
+func sizeFloat32PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFixed64Value(_ pointer, tagsize int) int {
+ return 8 + tagsize
+}
+func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixed64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixed64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ return (8 + tagsize) * len(s)
+}
+func sizeFixed64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFixedS64Value(_ pointer, tagsize int) int {
+ return 8 + tagsize
+}
+func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixedS64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixedS64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ return (8 + tagsize) * len(s)
+}
+func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFloat64Value(_ pointer, tagsize int) int {
+ return 8 + tagsize
+}
+func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int {
+ v := math.Float64bits(*ptr.toFloat64())
+ if v == 0 {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFloat64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toFloat64Ptr()
+ if p == nil {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFloat64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat64Slice()
+ return (8 + tagsize) * len(s)
+}
+func sizeFloat64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeVarint32Value(ptr pointer, tagsize int) int {
+ v := *ptr.toUint32()
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarint32Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v)) + tagsize
+ }
+ return n
+}
+func sizeVarint32PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS32Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32Ptr(ptr pointer, tagsize int) int {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS32Slice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v)) + tagsize
+ }
+ return n
+}
+func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarint64Value(ptr pointer, tagsize int) int {
+ v := *ptr.toUint64()
+ return SizeVarint(v) + tagsize
+}
+func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(v) + tagsize
+}
+func sizeVarint64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(*p) + tagsize
+}
+func sizeVarint64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(v) + tagsize
+ }
+ return n
+}
+func sizeVarint64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(v)
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS64Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v)) + tagsize
+ }
+ return n
+}
+func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
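+// Zigzag encoding maps signed values to unsigned ones so that numbers of small
+// magnitude produce small varints: n is encoded as (n << 1) ^ (n >> 31)
+// (or >> 63 for 64-bit), using an arithmetic right shift.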
+func sizeZigzag32Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Ptr(ptr pointer, tagsize int) int {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return 0
+ }
+ v := *p
+ return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Slice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+ }
+ return n
+}
+func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeZigzag64Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return 0
+ }
+ v := *p
+ return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+ }
+ return n
+}
+func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeBoolValue(_ pointer, tagsize int) int {
+ return 1 + tagsize
+}
+func sizeBoolValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toBool()
+ if !v {
+ return 0
+ }
+ return 1 + tagsize
+}
+func sizeBoolPtr(ptr pointer, tagsize int) int {
+ p := *ptr.toBoolPtr()
+ if p == nil {
+ return 0
+ }
+ return 1 + tagsize
+}
+func sizeBoolSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toBoolSlice()
+ return (1 + tagsize) * len(s)
+}
+func sizeBoolPackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toBoolSlice()
+ if len(s) == 0 {
+ return 0
+ }
+ return len(s) + SizeVarint(uint64(len(s))) + tagsize
+}
+func sizeStringValue(ptr pointer, tagsize int) int {
+ v := *ptr.toString()
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toString()
+ if v == "" {
+ return 0
+ }
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringPtr(ptr pointer, tagsize int) int {
+ p := *ptr.toStringPtr()
+ if p == nil {
+ return 0
+ }
+ v := *p
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toStringSlice()
+ n := 0
+ for _, v := range s {
+ n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+ }
+ return n
+}
+func sizeBytes(ptr pointer, tagsize int) int {
+ v := *ptr.toBytes()
+ if v == nil {
+ return 0
+ }
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytes3(ptr pointer, tagsize int) int {
+ v := *ptr.toBytes()
+ if len(v) == 0 {
+ return 0
+ }
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesOneof(ptr pointer, tagsize int) int {
+ v := *ptr.toBytes()
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toBytesSlice()
+ n := 0
+ for _, v := range s {
+ n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+ }
+ return n
+}
+
+// appendFixed32 appends an encoded fixed32 to b.
+func appendFixed32(b []byte, v uint32) []byte {
+ b = append(b,
+ byte(v),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24))
+ return b
+}
+
+// appendFixed64 appends an encoded fixed64 to b.
+func appendFixed64(b []byte, v uint64) []byte {
+ b = append(b,
+ byte(v),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24),
+ byte(v>>32),
+ byte(v>>40),
+ byte(v>>48),
+ byte(v>>56))
+ return b
+}
+
+// appendVarint appends an encoded varint to b.
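+// Each byte holds 7 bits of the value in its low bits; the high bit is set on
+// every byte except the last as a continuation flag.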
+func appendVarint(b []byte, v uint64) []byte {
+ // TODO: make the 1-byte (maybe 2-byte) case inline-able, once we
+ // have a non-leaf inliner.
+ switch {
+ case v < 1<<7:
+ b = append(b, byte(v))
+ case v < 1<<14:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte(v>>7))
+ case v < 1<<21:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte(v>>14))
+ case v < 1<<28:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte(v>>21))
+ case v < 1<<35:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte(v>>28))
+ case v < 1<<42:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte(v>>35))
+ case v < 1<<49:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte(v>>42))
+ case v < 1<<56:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte(v>>49))
+ case v < 1<<63:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte((v>>49)&0x7f|0x80),
+ byte(v>>56))
+ default:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte((v>>49)&0x7f|0x80),
+ byte((v>>56)&0x7f|0x80),
+ 1)
+ }
+ return b
+}
+
+func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, *p)
+ return b, nil
+}
+func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ }
+ return b, nil
+}
+func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(4*len(s)))
+ for _, v := range s {
+ b = appendFixed32(b, v)
+ }
+ return b, nil
+}
+func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(v))
+ return b, nil
+}
+func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(v))
+ return b, nil
+}
+func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(*p))
+ return b, nil
+}
+func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(v))
+ }
+ return b, nil
+}
+func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(4*len(s)))
+ for _, v := range s {
+ b = appendFixed32(b, uint32(v))
+ }
+ return b, nil
+}
+func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float32bits(*ptr.toFloat32())
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float32bits(*ptr.toFloat32())
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toFloat32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, math.Float32bits(*p))
+ return b, nil
+}
+func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, math.Float32bits(v))
+ }
+ return b, nil
+}
+func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(4*len(s)))
+ for _, v := range s {
+ b = appendFixed32(b, math.Float32bits(v))
+ }
+ return b, nil
+}
+func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, *p)
+ return b, nil
+}
+func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ }
+ return b, nil
+}
+func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(8*len(s)))
+ for _, v := range s {
+ b = appendFixed64(b, v)
+ }
+ return b, nil
+}
+func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(v))
+ return b, nil
+}
+func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(v))
+ return b, nil
+}
+func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(*p))
+ return b, nil
+}
+func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(v))
+ }
+ return b, nil
+}
+func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(8*len(s)))
+ for _, v := range s {
+ b = appendFixed64(b, uint64(v))
+ }
+ return b, nil
+}
+func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float64bits(*ptr.toFloat64())
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float64bits(*ptr.toFloat64())
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toFloat64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, math.Float64bits(*p))
+ return b, nil
+}
+func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, math.Float64bits(v))
+ }
+ return b, nil
+}
+func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(8*len(s)))
+ for _, v := range s {
+ b = appendFixed64(b, math.Float64bits(v))
+ }
+ return b, nil
+}
+func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(*p))
+ return b, nil
+}
+func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(*p))
+ return b, nil
+}
+func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, v)
+ return b, nil
+}
+func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, v)
+ return b, nil
+}
+func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, *p)
+ return b, nil
+}
+func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, v)
+ }
+ return b, nil
+}
+func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(v)
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, v)
+ }
+ return b, nil
+}
+func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(*p))
+ return b, nil
+}
+func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ return b, nil
+}
+func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ return b, nil
+}
+func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ v := *p
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ return b, nil
+}
+func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ }
+ return b, nil
+}
+func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ }
+ return b, nil
+}
+func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ return b, nil
+}
+func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ return b, nil
+}
+func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ v := *p
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ return b, nil
+}
+func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ }
+ return b, nil
+}
+func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ }
+ return b, nil
+}
+func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBool()
+ b = appendVarint(b, wiretag)
+ if v {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ return b, nil
+}
+func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBool()
+ if !v {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = append(b, 1)
+ return b, nil
+}
+
+func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toBoolPtr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ if *p {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ return b, nil
+}
+func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toBoolSlice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ if v {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ }
+ return b, nil
+}
+func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toBoolSlice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(len(s)))
+ for _, v := range s {
+ if v {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ }
+ return b, nil
+}
+func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toString()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toString()
+ if v == "" {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toStringPtr()
+ if p == nil {
+ return b, nil
+ }
+ v := *p
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toStringSlice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ }
+ return b, nil
+}
+func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
+ v := *ptr.toString()
+ if !utf8.ValidString(v) {
+ invalidUTF8 = true
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
+ return b, nil
+}
+func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
+ v := *ptr.toString()
+ if v == "" {
+ return b, nil
+ }
+ if !utf8.ValidString(v) {
+ invalidUTF8 = true
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
+ return b, nil
+}
+func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
+ p := *ptr.toStringPtr()
+ if p == nil {
+ return b, nil
+ }
+ v := *p
+ if !utf8.ValidString(v) {
+ invalidUTF8 = true
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
+ return b, nil
+}
+func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
+ s := *ptr.toStringSlice()
+ for _, v := range s {
+ if !utf8.ValidString(v) {
+ invalidUTF8 = true
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ }
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
+ return b, nil
+}
+func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBytes()
+ if v == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBytes()
+ if len(v) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBytes()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toBytesSlice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ }
+ return b, nil
+}
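The string and bytes appenders above all emit the same length-delimited layout: the field's tag varint (field number shifted left by three, OR'd with wire type 2), a varint byte count, and then the raw payload. A minimal standalone sketch of that layout (not part of the vendored file; the field number 1 and the single-byte tag and length are illustrative assumptions):

    package main

    import "fmt"

    // appendLenDelimited mirrors the tag/length/payload pattern used by
    // appendStringValue and appendBytes, for the common case where both the
    // tag and the length fit in one byte.
    func appendLenDelimited(b []byte, fieldNum byte, payload []byte) []byte {
        b = append(b, fieldNum<<3|2) // wire type 2 = length-delimited
        b = append(b, byte(len(payload)))
        return append(b, payload...)
    }

    func main() {
        fmt.Printf("% x\n", appendLenDelimited(nil, 1, []byte("hi"))) // 0a 02 68 69
    }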
+
+// makeGroupMarshaler returns the sizer and marshaler for a group.
+// u is the marshal info of the underlying message.
+func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return 0
+ }
+ return u.size(p) + 2*tagsize
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return b, nil
+ }
+ var err error
+ b = appendVarint(b, wiretag) // start group
+ b, err = u.marshal(b, p, deterministic)
+ b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+ return b, err
+ }
+}
+
+// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice.
+// u is the marshal info of the underlying message.
+func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getPointerSlice()
+ n := 0
+ for _, v := range s {
+ if v.isNil() {
+ continue
+ }
+ n += u.size(v) + 2*tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getPointerSlice()
+ var err error
+ var nerr nonFatal
+ for _, v := range s {
+ if v.isNil() {
+ return b, errRepeatedHasNil
+ }
+ b = appendVarint(b, wiretag) // start group
+ b, err = u.marshal(b, v, deterministic)
+ b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+ if !nerr.Merge(err) {
+ if err == ErrNil {
+ err = errRepeatedHasNil
+ }
+ return b, err
+ }
+ }
+ return b, nerr.E
+ }
+}
+
+// makeMessageMarshaler returns the sizer and marshaler for a message field.
+// u is the marshal info of the message.
+func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return 0
+ }
+ siz := u.size(p)
+ return siz + SizeVarint(uint64(siz)) + tagsize
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ siz := u.cachedsize(p)
+ b = appendVarint(b, uint64(siz))
+ return u.marshal(b, p, deterministic)
+ }
+}
+
+// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice.
+// u is the marshal info of the message.
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getPointerSlice()
+ n := 0
+ for _, v := range s {
+ if v.isNil() {
+ continue
+ }
+ siz := u.size(v)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getPointerSlice()
+ var err error
+ var nerr nonFatal
+ for _, v := range s {
+ if v.isNil() {
+ return b, errRepeatedHasNil
+ }
+ b = appendVarint(b, wiretag)
+ siz := u.cachedsize(v)
+ b = appendVarint(b, uint64(siz))
+ b, err = u.marshal(b, v, deterministic)
+
+ if !nerr.Merge(err) {
+ if err == ErrNil {
+ err = errRepeatedHasNil
+ }
+ return b, err
+ }
+ }
+ return b, nerr.E
+ }
+}
+
+// makeMapMarshaler returns the sizer and marshaler for a map field.
+// f is the pointer to the reflect data structure of the field.
+func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
+ // figure out key and value type
+ t := f.Type
+ keyType := t.Key()
+ valType := t.Elem()
+ keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",")
+ valTags := strings.Split(f.Tag.Get("protobuf_val"), ",")
+ keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map
+ valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map
+ keyWireTag := 1<<3 | wiretype(keyTags[0])
+ valWireTag := 2<<3 | wiretype(valTags[0])
+
+ // We create an interface to get the addresses of the map key and value.
+ // If value is pointer-typed, the interface is a direct interface, the
+ // idata itself is the value. Otherwise, the idata is the pointer to the
+ // value.
+ // Key cannot be pointer-typed.
+ valIsPtr := valType.Kind() == reflect.Ptr
+
+ // If value is a message with nested maps, calling
+ // valSizer in marshal may be quadratic. We should use
+ // cached version in marshal (but not in size).
+ // If value is not message type, we don't have size cache,
+ // but it cannot be nested either. Just use valSizer.
+ valCachedSizer := valSizer
+ if valIsPtr && valType.Elem().Kind() == reflect.Struct {
+ u := getMarshalInfo(valType.Elem())
+ valCachedSizer = func(ptr pointer, tagsize int) int {
+ // Same as message sizer, but use cache.
+ p := ptr.getPointer()
+ if p.isNil() {
+ return 0
+ }
+ siz := u.cachedsize(p)
+ return siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ }
+ return func(ptr pointer, tagsize int) int {
+ m := ptr.asPointerTo(t).Elem() // the map
+ n := 0
+ for _, k := range m.MapKeys() {
+ ki := k.Interface()
+ vi := m.MapIndex(k).Interface()
+ kaddr := toAddrPointer(&ki, false, false) // pointer to key
+ vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
+ siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) {
+ m := ptr.asPointerTo(t).Elem() // the map
+ var err error
+ keys := m.MapKeys()
+ if len(keys) > 1 && deterministic {
+ sort.Sort(mapKeys(keys))
+ }
+
+ var nerr nonFatal
+ for _, k := range keys {
+ ki := k.Interface()
+ vi := m.MapIndex(k).Interface()
+ kaddr := toAddrPointer(&ki, false, false) // pointer to key
+ vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
+ b = appendVarint(b, tag)
+ siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+ b = appendVarint(b, uint64(siz))
+ b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ b, err = valMarshaler(b, vaddr, valWireTag, deterministic)
+ if err != ErrNil && !nerr.Merge(err) { // allow nil value in map
+ return b, err
+ }
+ }
+ return b, nerr.E
+ }
+}
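makeMapMarshaler encodes every map entry as an implicit two-field message, key as field 1 and value as field 2, wrapped length-delimited under the map field's own tag; that is why both sizers are invoked with a tag size of 1. A rough standalone sketch of the resulting bytes, assuming a hypothetical field map<string, int32> labels = 4 holding the single entry {"a": 1}:

    package main

    import "fmt"

    func main() {
        // Entry body: key as field 1 (length-delimited), value as field 2 (varint).
        entry := []byte{
            0x0a, 0x01, 'a', // field 1, wire type 2, length 1, "a"
            0x10, 0x01, // field 2, wire type 0, varint 1
        }
        // The entry is wrapped under the map field's tag (hypothetical field 4).
        msg := append([]byte{4<<3 | 2, byte(len(entry))}, entry...)
        fmt.Printf("% x\n", msg) // 22 05 0a 01 61 10 01
    }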
+
+// makeOneOfMarshaler returns the sizer and marshaler for a oneof field.
+// fi is the marshal info of the field.
+// f is the pointer to the reflect data structure of the field.
+func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) {
+ // Oneof field is an interface. We need to get the actual data type on the fly.
+ t := f.Type
+ return func(ptr pointer, _ int) int {
+ p := ptr.getInterfacePointer()
+ if p.isNil() {
+ return 0
+ }
+ v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+ telem := v.Type()
+ e := fi.oneofElems[telem]
+ return e.sizer(p, e.tagsize)
+ },
+ func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) {
+ p := ptr.getInterfacePointer()
+ if p.isNil() {
+ return b, nil
+ }
+ v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+ telem := v.Type()
+ if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() {
+ return b, errOneofHasNil
+ }
+ e := fi.oneofElems[telem]
+ return e.marshaler(b, p, e.wiretag, deterministic)
+ }
+}
+
+// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field.
+func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return 0
+ }
+ mu.Lock()
+
+ n := 0
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
+ n += ei.sizer(p, ei.tagsize)
+ }
+ mu.Unlock()
+ return n
+}
+
+// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b.
+func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return b, nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+
+ var err error
+ var nerr nonFatal
+
+ // Fast-path for common cases: zero or one extensions.
+ // Don't bother sorting the keys.
+ if len(m) <= 1 {
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ b = append(b, e.enc...)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
+ b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ }
+ return b, nerr.E
+ }
+
+ // Sort the keys to provide a deterministic encoding.
+ // Not sure this is required, but the old code does it.
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, k := range keys {
+ e := m[int32(k)]
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ b = append(b, e.enc...)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
+ b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ }
+ return b, nerr.E
+}
+
+// message set format is:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
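The framing constants used by sizeMessageSet and appendMessageSet below follow directly from that schema. Assuming the standard wire-type values (varint = 0, length-delimited bytes = 2, start group = 3, end group = 4), the per-item tag bytes work out as in this small sketch (standalone; the constants are redeclared locally so it compiles on its own):

    package main

    import "fmt"

    const (
        wireVarint     = 0
        wireBytes      = 2
        wireStartGroup = 3
        wireEndGroup   = 4
    )

    func main() {
        fmt.Printf("%#x %#x %#x %#x\n",
            1<<3|wireStartGroup, // 0xb: Item start group
            2<<3|wireVarint,     // 0x10: type_id tag
            3<<3|wireBytes,      // 0x1a: message tag
            1<<3|wireEndGroup)   // 0xc: Item end group
    }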
+
+// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field
+// in message set format (above).
+func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return 0
+ }
+ mu.Lock()
+
+ n := 0
+ for id, e := range m {
+ n += 2 // start group, end group. tag = 1 (size=1)
+ n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1)
+
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+ siz := len(msgWithLen)
+ n += siz + 1 // message, tag = 3 (size=1)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
+ n += ei.sizer(p, 1) // message, tag = 3 (size=1)
+ }
+ mu.Unlock()
+ return n
+}
+
+// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above)
+// to the end of byte slice b.
+func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return b, nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+
+ var err error
+ var nerr nonFatal
+
+ // Fast-path for common cases: zero or one extensions.
+ // Don't bother sorting the keys.
+ if len(m) <= 1 {
+ for id, e := range m {
+ b = append(b, 1<<3|WireStartGroup)
+ b = append(b, 2<<3|WireVarint)
+ b = appendVarint(b, uint64(id))
+
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+ b = append(b, 3<<3|WireBytes)
+ b = append(b, msgWithLen...)
+ b = append(b, 1<<3|WireEndGroup)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
+ b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ b = append(b, 1<<3|WireEndGroup)
+ }
+ return b, nerr.E
+ }
+
+ // Sort the keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, id := range keys {
+ e := m[int32(id)]
+ b = append(b, 1<<3|WireStartGroup)
+ b = append(b, 2<<3|WireVarint)
+ b = appendVarint(b, uint64(id))
+
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+ b = append(b, 3<<3|WireBytes)
+ b = append(b, msgWithLen...)
+ b = append(b, 1<<3|WireEndGroup)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
+ b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+ b = append(b, 1<<3|WireEndGroup)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ }
+ return b, nerr.E
+}
+
+// sizeV1Extensions computes the size of encoded data for a V1-API extension field.
+func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
+ if m == nil {
+ return 0
+ }
+
+ n := 0
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
+ n += ei.sizer(p, ei.tagsize)
+ }
+ return n
+}
+
+// appendV1Extensions marshals a V1-API extension field to the end of byte slice b.
+func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) {
+ if m == nil {
+ return b, nil
+ }
+
+ // Sort the keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ var err error
+ var nerr nonFatal
+ for _, k := range keys {
+ e := m[int32(k)]
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ b = append(b, e.enc...)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
+ b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ }
+ return b, nerr.E
+}
+
+// newMarshaler is the interface representing objects that can marshal themselves.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newMarshaler interface {
+ XXX_Size() int
+ XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
+}
+
+// Size returns the encoded size of a protocol buffer message.
+// This is the main entry point.
+func Size(pb Message) int {
+ if m, ok := pb.(newMarshaler); ok {
+ return m.XXX_Size()
+ }
+ if m, ok := pb.(Marshaler); ok {
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ b, _ := m.Marshal()
+ return len(b)
+ }
+ // in case somehow we didn't generate the wrapper
+ if pb == nil {
+ return 0
+ }
+ var info InternalMessageInfo
+ return info.Size(pb)
+}
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, returning the data.
+// This is the main entry point.
+func Marshal(pb Message) ([]byte, error) {
+ if m, ok := pb.(newMarshaler); ok {
+ siz := m.XXX_Size()
+ b := make([]byte, 0, siz)
+ return m.XXX_Marshal(b, false)
+ }
+ if m, ok := pb.(Marshaler); ok {
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ return m.Marshal()
+ }
+ // in case somehow we didn't generate the wrapper
+ if pb == nil {
+ return nil, ErrNil
+ }
+ var info InternalMessageInfo
+ siz := info.Size(pb)
+ b := make([]byte, 0, siz)
+ return info.Marshal(b, pb, false)
+}
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+// This is an alternative entry point. It is not necessary to use
+// a Buffer for most applications.
+func (p *Buffer) Marshal(pb Message) error {
+ var err error
+ if m, ok := pb.(newMarshaler); ok {
+ siz := m.XXX_Size()
+ p.grow(siz) // make sure buf has enough capacity
+ p.buf, err = m.XXX_Marshal(p.buf, p.deterministic)
+ return err
+ }
+ if m, ok := pb.(Marshaler); ok {
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ b, err := m.Marshal()
+ p.buf = append(p.buf, b...)
+ return err
+ }
+ // in case somehow we didn't generate the wrapper
+ if pb == nil {
+ return ErrNil
+ }
+ var info InternalMessageInfo
+ siz := info.Size(pb)
+ p.grow(siz) // make sure buf has enough capacity
+ p.buf, err = info.Marshal(p.buf, pb, p.deterministic)
+ return err
+}
+
+// grow grows the buffer's capacity, if necessary, to guarantee space for
+// another n bytes. After grow(n), at least n bytes can be written to the
+// buffer without another allocation.
+func (p *Buffer) grow(n int) {
+ need := len(p.buf) + n
+ if need <= cap(p.buf) {
+ return
+ }
+ newCap := len(p.buf) * 2
+ if newCap < need {
+ newCap = need
+ }
+ p.buf = append(make([]byte, 0, newCap), p.buf...)
+}
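Size, Marshal, and Buffer.Marshal above are the package's encoding entry points. A hedged usage sketch follows; pb.Telemetry, its fields, and the import path are placeholders for any protoc-gen-go generated message, not something defined in this repository:

    package main

    import (
        "log"

        "github.com/golang/protobuf/proto"

        pb "example.com/project/telemetry" // hypothetical generated package
    )

    func main() {
        msg := &pb.Telemetry{Speed: 1.5} // hypothetical generated message

        n := proto.Size(msg)            // fast path uses the generated XXX_Size
        data, err := proto.Marshal(msg) // pre-allocates a buffer of the exact size, then fills it
        if err != nil {
            log.Fatal(err)
        }
        _, _ = n, data

        // A reused Buffer grows amortized (see grow above) and can request
        // deterministic output, which sorts map keys while marshaling.
        var buf proto.Buffer
        buf.SetDeterministic(true)
        if err := buf.Marshal(msg); err != nil {
            log.Fatal(err)
        }
    }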
diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go
new file mode 100644
index 0000000..5525def
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_merge.go
@@ -0,0 +1,654 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "sync/atomic"
+)
+
+// Merge merges the src message into dst.
+// This assumes that dst and src of the same type and are non-nil.
+func (a *InternalMessageInfo) Merge(dst, src Message) {
+ mi := atomicLoadMergeInfo(&a.merge)
+ if mi == nil {
+ mi = getMergeInfo(reflect.TypeOf(dst).Elem())
+ atomicStoreMergeInfo(&a.merge, mi)
+ }
+ mi.merge(toPointer(&dst), toPointer(&src))
+}
+
+type mergeInfo struct {
+ typ reflect.Type
+
+ initialized int32 // 0: only typ is valid, 1: everything is valid
+ lock sync.Mutex
+
+ fields []mergeFieldInfo
+ unrecognized field // Offset of XXX_unrecognized
+}
+
+type mergeFieldInfo struct {
+ field field // Offset of field, guaranteed to be valid
+
+ // isPointer reports whether the value in the field is a pointer.
+ // This is true for the following situations:
+ // * Pointer to struct
+ // * Pointer to basic type (proto2 only)
+ // * Slice (first value in slice header is a pointer)
+ // * String (first value in string header is a pointer)
+ isPointer bool
+
+ // basicWidth reports the width of the field assuming that it is directly
+ // embedded in the struct (as is the case for basic types in proto3).
+ // The possible values are:
+ // 0: invalid
+ // 1: bool
+ // 4: int32, uint32, float32
+ // 8: int64, uint64, float64
+ basicWidth int
+
+ // Where dst and src are pointers to the types being merged.
+ merge func(dst, src pointer)
+}
+
+var (
+ mergeInfoMap = map[reflect.Type]*mergeInfo{}
+ mergeInfoLock sync.Mutex
+)
+
+func getMergeInfo(t reflect.Type) *mergeInfo {
+ mergeInfoLock.Lock()
+ defer mergeInfoLock.Unlock()
+ mi := mergeInfoMap[t]
+ if mi == nil {
+ mi = &mergeInfo{typ: t}
+ mergeInfoMap[t] = mi
+ }
+ return mi
+}
+
+// merge merges src into dst assuming they are both of type *mi.typ.
+func (mi *mergeInfo) merge(dst, src pointer) {
+ if dst.isNil() {
+ panic("proto: nil destination")
+ }
+ if src.isNil() {
+ return // Nothing to do.
+ }
+
+ if atomic.LoadInt32(&mi.initialized) == 0 {
+ mi.computeMergeInfo()
+ }
+
+ for _, fi := range mi.fields {
+ sfp := src.offset(fi.field)
+
+ // As an optimization, we can avoid the merge function call cost
+ // if we know for sure that the source will have no effect
+ // by checking if it is the zero value.
+ if unsafeAllowed {
+ if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
+ continue
+ }
+ if fi.basicWidth > 0 {
+ switch {
+ case fi.basicWidth == 1 && !*sfp.toBool():
+ continue
+ case fi.basicWidth == 4 && *sfp.toUint32() == 0:
+ continue
+ case fi.basicWidth == 8 && *sfp.toUint64() == 0:
+ continue
+ }
+ }
+ }
+
+ dfp := dst.offset(fi.field)
+ fi.merge(dfp, sfp)
+ }
+
+ // TODO: Make this faster?
+ out := dst.asPointerTo(mi.typ).Elem()
+ in := src.asPointerTo(mi.typ).Elem()
+ if emIn, err := extendable(in.Addr().Interface()); err == nil {
+ emOut, _ := extendable(out.Addr().Interface())
+ mIn, muIn := emIn.extensionsRead()
+ if mIn != nil {
+ mOut := emOut.extensionsWrite()
+ muIn.Lock()
+ mergeExtension(mOut, mIn)
+ muIn.Unlock()
+ }
+ }
+
+ if mi.unrecognized.IsValid() {
+ if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
+ *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
+ }
+ }
+}
+
+func (mi *mergeInfo) computeMergeInfo() {
+ mi.lock.Lock()
+ defer mi.lock.Unlock()
+ if mi.initialized != 0 {
+ return
+ }
+ t := mi.typ
+ n := t.NumField()
+
+ props := GetProperties(t)
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+
+ mfi := mergeFieldInfo{field: toField(&f)}
+ tf := f.Type
+
+ // As an optimization, we can avoid the merge function call cost
+ // if we know for sure that the source will have no effect
+ // by checking if it is the zero value.
+ if unsafeAllowed {
+ switch tf.Kind() {
+ case reflect.Ptr, reflect.Slice, reflect.String:
+ // As a special case, we assume slices and strings are pointers
+ // since we know that the first field in the SliceHeader or

+ // StringHeader is a data pointer.
+ mfi.isPointer = true
+ case reflect.Bool:
+ mfi.basicWidth = 1
+ case reflect.Int32, reflect.Uint32, reflect.Float32:
+ mfi.basicWidth = 4
+ case reflect.Int64, reflect.Uint64, reflect.Float64:
+ mfi.basicWidth = 8
+ }
+ }
+
+ // Unwrap tf to get at its most basic type.
+ var isPointer, isSlice bool
+ if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+ isSlice = true
+ tf = tf.Elem()
+ }
+ if tf.Kind() == reflect.Ptr {
+ isPointer = true
+ tf = tf.Elem()
+ }
+ if isPointer && isSlice && tf.Kind() != reflect.Struct {
+ panic("both pointer and slice for basic type in " + tf.Name())
+ }
+
+ switch tf.Kind() {
+ case reflect.Int32:
+ switch {
+ case isSlice: // E.g., []int32
+ mfi.merge = func(dst, src pointer) {
+ // NOTE: toInt32Slice is not defined (see pointer_reflect.go).
+ /*
+ sfsp := src.toInt32Slice()
+ if *sfsp != nil {
+ dfsp := dst.toInt32Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []int64{}
+ }
+ }
+ */
+ sfs := src.getInt32Slice()
+ if sfs != nil {
+ dfs := dst.getInt32Slice()
+ dfs = append(dfs, sfs...)
+ if dfs == nil {
+ dfs = []int32{}
+ }
+ dst.setInt32Slice(dfs)
+ }
+ }
+ case isPointer: // E.g., *int32
+ mfi.merge = func(dst, src pointer) {
+ // NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
+ /*
+ sfpp := src.toInt32Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toInt32Ptr()
+ if *dfpp == nil {
+ *dfpp = Int32(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ */
+ sfp := src.getInt32Ptr()
+ if sfp != nil {
+ dfp := dst.getInt32Ptr()
+ if dfp == nil {
+ dst.setInt32Ptr(*sfp)
+ } else {
+ *dfp = *sfp
+ }
+ }
+ }
+ default: // E.g., int32
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toInt32(); v != 0 {
+ *dst.toInt32() = v
+ }
+ }
+ }
+ case reflect.Int64:
+ switch {
+ case isSlice: // E.g., []int64
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toInt64Slice()
+ if *sfsp != nil {
+ dfsp := dst.toInt64Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []int64{}
+ }
+ }
+ }
+ case isPointer: // E.g., *int64
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toInt64Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toInt64Ptr()
+ if *dfpp == nil {
+ *dfpp = Int64(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., int64
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toInt64(); v != 0 {
+ *dst.toInt64() = v
+ }
+ }
+ }
+ case reflect.Uint32:
+ switch {
+ case isSlice: // E.g., []uint32
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toUint32Slice()
+ if *sfsp != nil {
+ dfsp := dst.toUint32Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []uint32{}
+ }
+ }
+ }
+ case isPointer: // E.g., *uint32
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toUint32Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toUint32Ptr()
+ if *dfpp == nil {
+ *dfpp = Uint32(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., uint32
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toUint32(); v != 0 {
+ *dst.toUint32() = v
+ }
+ }
+ }
+ case reflect.Uint64:
+ switch {
+ case isSlice: // E.g., []uint64
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toUint64Slice()
+ if *sfsp != nil {
+ dfsp := dst.toUint64Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []uint64{}
+ }
+ }
+ }
+ case isPointer: // E.g., *uint64
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toUint64Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toUint64Ptr()
+ if *dfpp == nil {
+ *dfpp = Uint64(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., uint64
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toUint64(); v != 0 {
+ *dst.toUint64() = v
+ }
+ }
+ }
+ case reflect.Float32:
+ switch {
+ case isSlice: // E.g., []float32
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toFloat32Slice()
+ if *sfsp != nil {
+ dfsp := dst.toFloat32Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []float32{}
+ }
+ }
+ }
+ case isPointer: // E.g., *float32
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toFloat32Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toFloat32Ptr()
+ if *dfpp == nil {
+ *dfpp = Float32(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., float32
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toFloat32(); v != 0 {
+ *dst.toFloat32() = v
+ }
+ }
+ }
+ case reflect.Float64:
+ switch {
+ case isSlice: // E.g., []float64
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toFloat64Slice()
+ if *sfsp != nil {
+ dfsp := dst.toFloat64Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []float64{}
+ }
+ }
+ }
+ case isPointer: // E.g., *float64
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toFloat64Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toFloat64Ptr()
+ if *dfpp == nil {
+ *dfpp = Float64(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., float64
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toFloat64(); v != 0 {
+ *dst.toFloat64() = v
+ }
+ }
+ }
+ case reflect.Bool:
+ switch {
+ case isSlice: // E.g., []bool
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toBoolSlice()
+ if *sfsp != nil {
+ dfsp := dst.toBoolSlice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []bool{}
+ }
+ }
+ }
+ case isPointer: // E.g., *bool
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toBoolPtr()
+ if *sfpp != nil {
+ dfpp := dst.toBoolPtr()
+ if *dfpp == nil {
+ *dfpp = Bool(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., bool
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toBool(); v {
+ *dst.toBool() = v
+ }
+ }
+ }
+ case reflect.String:
+ switch {
+ case isSlice: // E.g., []string
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toStringSlice()
+ if *sfsp != nil {
+ dfsp := dst.toStringSlice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []string{}
+ }
+ }
+ }
+ case isPointer: // E.g., *string
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toStringPtr()
+ if *sfpp != nil {
+ dfpp := dst.toStringPtr()
+ if *dfpp == nil {
+ *dfpp = String(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., string
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toString(); v != "" {
+ *dst.toString() = v
+ }
+ }
+ }
+ case reflect.Slice:
+ isProto3 := props.Prop[i].proto3
+ switch {
+ case isPointer:
+ panic("bad pointer in byte slice case in " + tf.Name())
+ case tf.Elem().Kind() != reflect.Uint8:
+ panic("bad element kind in byte slice case in " + tf.Name())
+ case isSlice: // E.g., [][]byte
+ mfi.merge = func(dst, src pointer) {
+ sbsp := src.toBytesSlice()
+ if *sbsp != nil {
+ dbsp := dst.toBytesSlice()
+ for _, sb := range *sbsp {
+ if sb == nil {
+ *dbsp = append(*dbsp, nil)
+ } else {
+ *dbsp = append(*dbsp, append([]byte{}, sb...))
+ }
+ }
+ if *dbsp == nil {
+ *dbsp = [][]byte{}
+ }
+ }
+ }
+ default: // E.g., []byte
+ mfi.merge = func(dst, src pointer) {
+ sbp := src.toBytes()
+ if *sbp != nil {
+ dbp := dst.toBytes()
+ if !isProto3 || len(*sbp) > 0 {
+ *dbp = append([]byte{}, *sbp...)
+ }
+ }
+ }
+ }
+ case reflect.Struct:
+ switch {
+ case !isPointer:
+ panic(fmt.Sprintf("message field %s without pointer", tf))
+ case isSlice: // E.g., []*pb.T
+ mi := getMergeInfo(tf)
+ mfi.merge = func(dst, src pointer) {
+ sps := src.getPointerSlice()
+ if sps != nil {
+ dps := dst.getPointerSlice()
+ for _, sp := range sps {
+ var dp pointer
+ if !sp.isNil() {
+ dp = valToPointer(reflect.New(tf))
+ mi.merge(dp, sp)
+ }
+ dps = append(dps, dp)
+ }
+ if dps == nil {
+ dps = []pointer{}
+ }
+ dst.setPointerSlice(dps)
+ }
+ }
+ default: // E.g., *pb.T
+ mi := getMergeInfo(tf)
+ mfi.merge = func(dst, src pointer) {
+ sp := src.getPointer()
+ if !sp.isNil() {
+ dp := dst.getPointer()
+ if dp.isNil() {
+ dp = valToPointer(reflect.New(tf))
+ dst.setPointer(dp)
+ }
+ mi.merge(dp, sp)
+ }
+ }
+ }
+ case reflect.Map:
+ switch {
+ case isPointer || isSlice:
+ panic("bad pointer or slice in map case in " + tf.Name())
+ default: // E.g., map[K]V
+ mfi.merge = func(dst, src pointer) {
+ sm := src.asPointerTo(tf).Elem()
+ if sm.Len() == 0 {
+ return
+ }
+ dm := dst.asPointerTo(tf).Elem()
+ if dm.IsNil() {
+ dm.Set(reflect.MakeMap(tf))
+ }
+
+ switch tf.Elem().Kind() {
+ case reflect.Ptr: // Proto struct (e.g., *T)
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ val = reflect.ValueOf(Clone(val.Interface().(Message)))
+ dm.SetMapIndex(key, val)
+ }
+ case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ dm.SetMapIndex(key, val)
+ }
+ default: // Basic type (e.g., string)
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ dm.SetMapIndex(key, val)
+ }
+ }
+ }
+ }
+ case reflect.Interface:
+ // Must be oneof field.
+ switch {
+ case isPointer || isSlice:
+ panic("bad pointer or slice in interface case in " + tf.Name())
+ default: // E.g., interface{}
+ // TODO: Make this faster?
+ mfi.merge = func(dst, src pointer) {
+ su := src.asPointerTo(tf).Elem()
+ if !su.IsNil() {
+ du := dst.asPointerTo(tf).Elem()
+ typ := su.Elem().Type()
+ if du.IsNil() || du.Elem().Type() != typ {
+ du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
+ }
+ sv := su.Elem().Elem().Field(0)
+ if sv.Kind() == reflect.Ptr && sv.IsNil() {
+ return
+ }
+ dv := du.Elem().Elem().Field(0)
+ if dv.Kind() == reflect.Ptr && dv.IsNil() {
+ dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
+ }
+ switch sv.Type().Kind() {
+ case reflect.Ptr: // Proto struct (e.g., *T)
+ Merge(dv.Interface().(Message), sv.Interface().(Message))
+ case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+ dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
+ default: // Basic type (e.g., string)
+ dv.Set(sv)
+ }
+ }
+ }
+ }
+ default:
+ panic(fmt.Sprintf("merger not found for type:%s", tf))
+ }
+ mi.fields = append(mi.fields, mfi)
+ }
+
+ mi.unrecognized = invalidField
+ if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+ if f.Type != reflect.TypeOf([]byte{}) {
+ panic("expected XXX_unrecognized to be of type []byte")
+ }
+ mi.unrecognized = toField(&f)
+ }
+
+ atomic.StoreInt32(&mi.initialized, 1)
+}
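The merge rules computed above reduce to: non-zero scalars in src overwrite dst, repeated fields are appended, byte slices and map values are copied, and sub-messages are merged recursively. A small illustrative sketch of those semantics, again using a hypothetical generated type (pb.Telemetry, its fields, and the import path are assumptions, not part of this repository):

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"

        pb "example.com/project/telemetry" // hypothetical generated package
    )

    func main() {
        dst := &pb.Telemetry{Name: "car", Tags: []string{"a"}}
        src := &pb.Telemetry{Speed: 2.0, Tags: []string{"b"}}

        proto.Merge(dst, src)

        // Expected, per the rules above:
        //   dst.Name  == "car"      (src's zero-value string is skipped)
        //   dst.Speed == 2.0        (non-zero scalar overwrites)
        //   dst.Tags  == ["a" "b"]  (repeated fields are appended)
        fmt.Println(dst.Name, dst.Speed, dst.Tags)
    }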
diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
new file mode 100644
index 0000000..acee2fc
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
@@ -0,0 +1,2053 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "unicode/utf8"
+)
+
+// Unmarshal is the entry point from the generated .pb.go files.
+// This function is not intended to be used by non-generated code.
+// This function is not subject to any compatibility guarantee.
+// msg contains a pointer to a protocol buffer struct.
+// b is the data to be unmarshaled into the protocol buffer.
+// a is a pointer to a place to store cached unmarshal information.
+func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error {
+ // Load the unmarshal information for this message type.
+ // The atomic load ensures memory consistency.
+ u := atomicLoadUnmarshalInfo(&a.unmarshal)
+ if u == nil {
+ // Slow path: find unmarshal info for msg, update a with it.
+ u = getUnmarshalInfo(reflect.TypeOf(msg).Elem())
+ atomicStoreUnmarshalInfo(&a.unmarshal, u)
+ }
+ // Then do the unmarshaling.
+ err := u.unmarshal(toPointer(&msg), b)
+ return err
+}
+
+type unmarshalInfo struct {
+ typ reflect.Type // type of the protobuf struct
+
+ // 0 = only typ field is initialized
+ // 1 = completely initialized
+ initialized int32
+ lock sync.Mutex // prevents double initialization
+ dense []unmarshalFieldInfo // fields indexed by tag #
+ sparse map[uint64]unmarshalFieldInfo // fields indexed by tag #
+ reqFields []string // names of required fields
+ reqMask uint64 // 1<<len(reqFields)-1 when reqFields is all set
+ unrecognized field // offset of []byte to put unrecognized data (or invalidField if we should throw it away)
+ extensions field // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist
+ oldExtensions field // offset of old-form extensions field (of type map[int]Extension), or invalidField if it does not exist
+ extensionRanges []ExtensionRange // extension ranges supported by this protocol buffer struct
+ isMessageSet bool // true if this is a message set
+}
+
+// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
+// It decodes the field, stores it at f, and returns the unused bytes.
+// w is the wire encoding.
+// b is the data after the tag and wire encoding have been read.
+type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
+
+type unmarshalFieldInfo struct {
+ // location of the field in the proto message structure.
+ field field
+
+ // function to unmarshal the data for the field.
+ unmarshal unmarshaler
+
+ // if a required field, contains a single set bit at this field's index in the required field list.
+ reqMask uint64
+
+ name string // name of the field, for error reporting
+}
+
+var (
+ unmarshalInfoMap = map[reflect.Type]*unmarshalInfo{}
+ unmarshalInfoLock sync.Mutex
+)
+
+// getUnmarshalInfo returns the data structure which can be
+// subsequently used to unmarshal a message of the given type.
+// t is the type of the message (note: not pointer to message).
+func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
+ // It would be correct to return a new unmarshalInfo
+ // unconditionally. We would end up allocating one
+ // per occurrence of that type as a message or submessage.
+ // We use a cache here just to reduce memory usage.
+ unmarshalInfoLock.Lock()
+ defer unmarshalInfoLock.Unlock()
+ u := unmarshalInfoMap[t]
+ if u == nil {
+ u = &unmarshalInfo{typ: t}
+ // Note: we just set the type here. The rest of the fields
+ // will be initialized on first use.
+ unmarshalInfoMap[t] = u
+ }
+ return u
+}
+
+// unmarshal does the main work of unmarshaling a message.
+// u provides type information used to unmarshal the message.
+// m is a pointer to a protocol buffer message.
+// b is a byte stream to unmarshal into m.
+// This is the top routine used when recursively unmarshaling submessages.
+func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
+ if atomic.LoadInt32(&u.initialized) == 0 {
+ u.computeUnmarshalInfo()
+ }
+ if u.isMessageSet {
+ return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+ }
+ var reqMask uint64 // bitmask of required fields we've seen.
+ var errLater error
+ for len(b) > 0 {
+ // Read tag and wire type.
+ // Special case 1 and 2 byte varints.
+ var x uint64
+ if b[0] < 128 {
+ x = uint64(b[0])
+ b = b[1:]
+ } else if len(b) >= 2 && b[1] < 128 {
+ x = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ b = b[2:]
+ } else {
+ var n int
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ }
+ tag := x >> 3
+ wire := int(x) & 7
+
+ // Dispatch on the tag to one of the unmarshal* functions below.
+ var f unmarshalFieldInfo
+ if tag < uint64(len(u.dense)) {
+ f = u.dense[tag]
+ } else {
+ f = u.sparse[tag]
+ }
+ if fn := f.unmarshal; fn != nil {
+ var err error
+ b, err = fn(b, m.offset(f.field), wire)
+ if err == nil {
+ reqMask |= f.reqMask
+ continue
+ }
+ if r, ok := err.(*RequiredNotSetError); ok {
+ // Remember this error, but keep parsing. We need to produce
+ // a full parse even if a required field is missing.
+ if errLater == nil {
+ errLater = r
+ }
+ reqMask |= f.reqMask
+ continue
+ }
+ if err != errInternalBadWireType {
+ if err == errInvalidUTF8 {
+ if errLater == nil {
+ fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+ errLater = &invalidUTF8Error{fullName}
+ }
+ continue
+ }
+ return err
+ }
+ // Fragments with bad wire type are treated as unknown fields.
+ }
+
+ // Unknown tag.
+ if !u.unrecognized.IsValid() {
+ // Don't keep unrecognized data; just skip it.
+ var err error
+ b, err = skipField(b, wire)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+ // Keep unrecognized data around.
+ // maybe in extensions, maybe in the unrecognized field.
+ z := m.offset(u.unrecognized).toBytes()
+ var emap map[int32]Extension
+ var e Extension
+ for _, r := range u.extensionRanges {
+ if uint64(r.Start) <= tag && tag <= uint64(r.End) {
+ if u.extensions.IsValid() {
+ mp := m.offset(u.extensions).toExtensions()
+ emap = mp.extensionsWrite()
+ e = emap[int32(tag)]
+ z = &e.enc
+ break
+ }
+ if u.oldExtensions.IsValid() {
+ p := m.offset(u.oldExtensions).toOldExtensions()
+ emap = *p
+ if emap == nil {
+ emap = map[int32]Extension{}
+ *p = emap
+ }
+ e = emap[int32(tag)]
+ z = &e.enc
+ break
+ }
+ panic("no extensions field available")
+ }
+ }
+
+ // Use wire type to skip data.
+ var err error
+ b0 := b
+ b, err = skipField(b, wire)
+ if err != nil {
+ return err
+ }
+ *z = encodeVarint(*z, tag<<3|uint64(wire))
+ *z = append(*z, b0[:len(b0)-len(b)]...)
+
+ if emap != nil {
+ emap[int32(tag)] = e
+ }
+ }
+ if reqMask != u.reqMask && errLater == nil {
+ // A required field of this message is missing.
+ for _, n := range u.reqFields {
+ if reqMask&1 == 0 {
+ errLater = &RequiredNotSetError{n}
+ }
+ reqMask >>= 1
+ }
+ }
+ return errLater
+}
+
+// computeUnmarshalInfo fills in u with information for use
+// in unmarshaling protocol buffers of type u.typ.
+func (u *unmarshalInfo) computeUnmarshalInfo() {
+ u.lock.Lock()
+ defer u.lock.Unlock()
+ if u.initialized != 0 {
+ return
+ }
+ t := u.typ
+ n := t.NumField()
+
+ // Set up the "not found" value for the unrecognized byte buffer.
+ // This is the default for proto3.
+ u.unrecognized = invalidField
+ u.extensions = invalidField
+ u.oldExtensions = invalidField
+
+ // List of the generated type and offset for each oneof field.
+ type oneofField struct {
+ ityp reflect.Type // interface type of oneof field
+ field field // offset in containing message
+ }
+ var oneofFields []oneofField
+
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ if f.Name == "XXX_unrecognized" {
+ // The byte slice used to hold unrecognized input is special.
+ if f.Type != reflect.TypeOf(([]byte)(nil)) {
+ panic("bad type for XXX_unrecognized field: " + f.Type.Name())
+ }
+ u.unrecognized = toField(&f)
+ continue
+ }
+ if f.Name == "XXX_InternalExtensions" {
+ // Ditto here.
+ if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) {
+ panic("bad type for XXX_InternalExtensions field: " + f.Type.Name())
+ }
+ u.extensions = toField(&f)
+ if f.Tag.Get("protobuf_messageset") == "1" {
+ u.isMessageSet = true
+ }
+ continue
+ }
+ if f.Name == "XXX_extensions" {
+ // An older form of the extensions field.
+ if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) {
+ panic("bad type for XXX_extensions field: " + f.Type.Name())
+ }
+ u.oldExtensions = toField(&f)
+ continue
+ }
+ if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" {
+ continue
+ }
+
+ oneof := f.Tag.Get("protobuf_oneof")
+ if oneof != "" {
+ oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)})
+ // The rest of oneof processing happens below.
+ continue
+ }
+
+ tags := f.Tag.Get("protobuf")
+ tagArray := strings.Split(tags, ",")
+ if len(tagArray) < 2 {
+ panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags)
+ }
+ tag, err := strconv.Atoi(tagArray[1])
+ if err != nil {
+ panic("protobuf tag field not an integer: " + tagArray[1])
+ }
+
+ name := ""
+ for _, tag := range tagArray[3:] {
+ if strings.HasPrefix(tag, "name=") {
+ name = tag[5:]
+ }
+ }
+
+ // Extract unmarshaling function from the field (its type and tags).
+ unmarshal := fieldUnmarshaler(&f)
+
+ // Required field?
+ var reqMask uint64
+ if tagArray[2] == "req" {
+ bit := len(u.reqFields)
+ u.reqFields = append(u.reqFields, name)
+ reqMask = uint64(1) << uint(bit)
+ // TODO: if we have more than 64 required fields, we end up
+ // not verifying that all required fields are present.
+ // Fix this, perhaps using a count of required fields?
+ }
+
+ // Store the info in the correct slot in the message.
+ u.setTag(tag, toField(&f), unmarshal, reqMask, name)
+ }
+
+ // Find any types associated with oneof fields.
+ var oneofImplementers []interface{}
+ switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+ case oneofFuncsIface:
+ _, _, _, oneofImplementers = m.XXX_OneofFuncs()
+ case oneofWrappersIface:
+ oneofImplementers = m.XXX_OneofWrappers()
+ }
+ for _, v := range oneofImplementers {
+ tptr := reflect.TypeOf(v) // *Msg_X
+ typ := tptr.Elem() // Msg_X
+
+ f := typ.Field(0) // oneof implementers have one field
+ baseUnmarshal := fieldUnmarshaler(&f)
+ tags := strings.Split(f.Tag.Get("protobuf"), ",")
+ fieldNum, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("protobuf tag field not an integer: " + tags[1])
+ }
+ var name string
+ for _, tag := range tags {
+ if strings.HasPrefix(tag, "name=") {
+ name = strings.TrimPrefix(tag, "name=")
+ break
+ }
+ }
+
+ // Find the oneof field that this struct implements.
+ // Might take O(n^2) to process all of the oneofs, but who cares.
+ for _, of := range oneofFields {
+ if tptr.Implements(of.ityp) {
+ // We have found the corresponding interface for this struct.
+ // That lets us know where this struct should be stored
+ // when we encounter it during unmarshaling.
+ unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+ u.setTag(fieldNum, of.field, unmarshal, 0, name)
+ }
+ }
+
+ }
+
+ // Get extension ranges, if any.
+ fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+ if fn.IsValid() {
+ if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
+ panic("a message with extensions, but no extensions field in " + t.Name())
+ }
+ u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
+ }
+
+ // Explicitly disallow tag 0. This will ensure we flag an error
+ // when decoding a buffer of all zeros. Without this code, we
+ // would decode and skip an all-zero buffer of even length.
+ // [0 0] is [tag=0/wiretype=varint varint-encoded-0].
+ u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
+ return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
+ }, 0, "")
+
+ // Set mask for required field check.
+ u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
+
+ atomic.StoreInt32(&u.initialized, 1)
+}
+
+// setTag stores the unmarshal information for the given tag.
+// tag = tag # for field
+// field/unmarshal = unmarshal info for that field.
+// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
+// name = short name of the field.
+func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
+ i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
+ n := u.typ.NumField()
+ if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
+ for len(u.dense) <= tag {
+ u.dense = append(u.dense, unmarshalFieldInfo{})
+ }
+ u.dense[tag] = i
+ return
+ }
+ if u.sparse == nil {
+ u.sparse = map[uint64]unmarshalFieldInfo{}
+ }
+ u.sparse[uint64(tag)] = i
+}
+
+// fieldUnmarshaler returns an unmarshaler for the given field.
+func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
+ if f.Type.Kind() == reflect.Map {
+ return makeUnmarshalMap(f)
+ }
+ return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
+}
+
+// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
+ tagArray := strings.Split(tags, ",")
+ encoding := tagArray[0]
+ name := "unknown"
+ proto3 := false
+ validateUTF8 := true
+ for _, tag := range tagArray[3:] {
+ if strings.HasPrefix(tag, "name=") {
+ name = tag[5:]
+ }
+ if tag == "proto3" {
+ proto3 = true
+ }
+ }
+ validateUTF8 = validateUTF8 && proto3
+
+ // Figure out packaging (pointer, slice, or both)
+ slice := false
+ pointer := false
+ if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+ slice = true
+ t = t.Elem()
+ }
+ if t.Kind() == reflect.Ptr {
+ pointer = true
+ t = t.Elem()
+ }
+
+ // We'll never have both pointer and slice for basic types.
+ if pointer && slice && t.Kind() != reflect.Struct {
+ panic("both pointer and slice for basic type in " + t.Name())
+ }
+
+ switch t.Kind() {
+ case reflect.Bool:
+ if pointer {
+ return unmarshalBoolPtr
+ }
+ if slice {
+ return unmarshalBoolSlice
+ }
+ return unmarshalBoolValue
+ case reflect.Int32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return unmarshalFixedS32Ptr
+ }
+ if slice {
+ return unmarshalFixedS32Slice
+ }
+ return unmarshalFixedS32Value
+ case "varint":
+ // this could be int32 or enum
+ if pointer {
+ return unmarshalInt32Ptr
+ }
+ if slice {
+ return unmarshalInt32Slice
+ }
+ return unmarshalInt32Value
+ case "zigzag32":
+ if pointer {
+ return unmarshalSint32Ptr
+ }
+ if slice {
+ return unmarshalSint32Slice
+ }
+ return unmarshalSint32Value
+ }
+ case reflect.Int64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return unmarshalFixedS64Ptr
+ }
+ if slice {
+ return unmarshalFixedS64Slice
+ }
+ return unmarshalFixedS64Value
+ case "varint":
+ if pointer {
+ return unmarshalInt64Ptr
+ }
+ if slice {
+ return unmarshalInt64Slice
+ }
+ return unmarshalInt64Value
+ case "zigzag64":
+ if pointer {
+ return unmarshalSint64Ptr
+ }
+ if slice {
+ return unmarshalSint64Slice
+ }
+ return unmarshalSint64Value
+ }
+ case reflect.Uint32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return unmarshalFixed32Ptr
+ }
+ if slice {
+ return unmarshalFixed32Slice
+ }
+ return unmarshalFixed32Value
+ case "varint":
+ if pointer {
+ return unmarshalUint32Ptr
+ }
+ if slice {
+ return unmarshalUint32Slice
+ }
+ return unmarshalUint32Value
+ }
+ case reflect.Uint64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return unmarshalFixed64Ptr
+ }
+ if slice {
+ return unmarshalFixed64Slice
+ }
+ return unmarshalFixed64Value
+ case "varint":
+ if pointer {
+ return unmarshalUint64Ptr
+ }
+ if slice {
+ return unmarshalUint64Slice
+ }
+ return unmarshalUint64Value
+ }
+ case reflect.Float32:
+ if pointer {
+ return unmarshalFloat32Ptr
+ }
+ if slice {
+ return unmarshalFloat32Slice
+ }
+ return unmarshalFloat32Value
+ case reflect.Float64:
+ if pointer {
+ return unmarshalFloat64Ptr
+ }
+ if slice {
+ return unmarshalFloat64Slice
+ }
+ return unmarshalFloat64Value
+ case reflect.Map:
+ panic("map type in typeUnmarshaler in " + t.Name())
+ case reflect.Slice:
+ if pointer {
+ panic("bad pointer in slice case in " + t.Name())
+ }
+ if slice {
+ return unmarshalBytesSlice
+ }
+ return unmarshalBytesValue
+ case reflect.String:
+ if validateUTF8 {
+ if pointer {
+ return unmarshalUTF8StringPtr
+ }
+ if slice {
+ return unmarshalUTF8StringSlice
+ }
+ return unmarshalUTF8StringValue
+ }
+ if pointer {
+ return unmarshalStringPtr
+ }
+ if slice {
+ return unmarshalStringSlice
+ }
+ return unmarshalStringValue
+ case reflect.Struct:
+ // message or group field
+ if !pointer {
+ panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding))
+ }
+ switch encoding {
+ case "bytes":
+ if slice {
+ return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name)
+ }
+ return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name)
+ case "group":
+ if slice {
+ return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name)
+ }
+ return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name)
+ }
+ }
+ panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding))
+}
+
+// Below are all the unmarshalers for individual fields of various types.
+
+func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ *f.toInt64() = v
+ return b, nil
+}
+
+func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ *f.toInt64Ptr() = &v
+ return b, nil
+}
+
+func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ return b, nil
+}
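unmarshalInt64Slice accepts both encodings of a repeated varint field: the packed form, where the WireBytes branch reads one length prefix and then loops over the block, and the unpacked form, one element per tag. A standalone sketch of the packed bytes it would consume, assuming a hypothetical field repeated int64 ids = 2 holding 1, 2, 300:

    package main

    import "fmt"

    func main() {
        payload := []byte{
            0x01,       // 1
            0x02,       // 2
            0xac, 0x02, // 300 encoded as a two-byte varint
        }
        // Tag for hypothetical field 2, wire type 2, then the block length.
        packed := append([]byte{2<<3 | 2, byte(len(payload))}, payload...)
        fmt.Printf("% x\n", packed) // 12 04 01 02 ac 02
    }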
+
+func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ *f.toInt64() = v
+ return b, nil
+}
+
+func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ *f.toInt64Ptr() = &v
+ return b, nil
+}
+
+func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ return b, nil
+}
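The sint64 unmarshalers above undo zigzag encoding with int64(x>>1) ^ int64(x)<<63>>63, mapping 0, 1, 2, 3, ... back to 0, -1, 1, -2, ... A minimal standalone sketch of that mapping (the helper name is ours, not the library's):

    package main

    import "fmt"

    // zigzagDecode64 repeats the expression used by unmarshalSint64Value: the low
    // bit selects the sign, the remaining bits carry the magnitude.
    func zigzagDecode64(x uint64) int64 {
        return int64(x>>1) ^ int64(x)<<63>>63
    }

    func main() {
        for _, x := range []uint64{0, 1, 2, 3, 4, 5} {
            fmt.Printf("%d -> %d\n", x, zigzagDecode64(x)) // 0 -1 1 -2 2 -3
        }
    }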
+
+func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ *f.toUint64() = v
+ return b, nil
+}
+
+func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ *f.toUint64Ptr() = &v
+ return b, nil
+}
+
+func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ return b, nil
+}
+
+func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ *f.toInt32() = v
+ return b, nil
+}
+
+func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ f.setInt32Ptr(v)
+ return b, nil
+}
+
+func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ f.appendInt32Slice(v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ f.appendInt32Slice(v)
+ return b, nil
+}
+
+func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ *f.toInt32() = v
+ return b, nil
+}
+
+func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ f.setInt32Ptr(v)
+ return b, nil
+}
+
+func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ f.appendInt32Slice(v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ f.appendInt32Slice(v)
+ return b, nil
+}
+
+func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ *f.toUint32() = v
+ return b, nil
+}
+
+func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ *f.toUint32Ptr() = &v
+ return b, nil
+}
+
+func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ return b, nil
+}
+
+func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
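+ // Assemble the eight bytes little-endian into a uint64 (fixed64 wire format).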
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ *f.toUint64() = v
+ return b[8:], nil
+}
+
+func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ *f.toUint64Ptr() = &v
+ return b[8:], nil
+}
+
+func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ b = b[8:]
+ }
+ return res, nil
+ }
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ return b[8:], nil
+}
+
+func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ *f.toInt64() = v
+ return b[8:], nil
+}
+
+func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ *f.toInt64Ptr() = &v
+ return b[8:], nil
+}
+
+func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ b = b[8:]
+ }
+ return res, nil
+ }
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ return b[8:], nil
+}
+
+func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ *f.toUint32() = v
+ return b[4:], nil
+}
+
+func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ *f.toUint32Ptr() = &v
+ return b[4:], nil
+}
+
+func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ b = b[4:]
+ }
+ return res, nil
+ }
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ return b[4:], nil
+}
+
+func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ *f.toInt32() = v
+ return b[4:], nil
+}
+
+func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ f.setInt32Ptr(v)
+ return b[4:], nil
+}
+
+func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ f.appendInt32Slice(v)
+ b = b[4:]
+ }
+ return res, nil
+ }
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ f.appendInt32Slice(v)
+ return b[4:], nil
+}
+
+func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ // Note: any length varint is allowed, even though any sane
+ // encoder will use one byte.
+ // See https://github.com/golang/protobuf/issues/76
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // TODO: check if x>1? Tests seem to indicate no.
+ v := x != 0
+ *f.toBool() = v
+ return b[n:], nil
+}
+
+func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := x != 0
+ *f.toBoolPtr() = &v
+ return b[n:], nil
+}
+
+func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := x != 0
+ s := f.toBoolSlice()
+ *s = append(*s, v)
+ b = b[n:]
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := x != 0
+ s := f.toBoolSlice()
+ *s = append(*s, v)
+ return b[n:], nil
+}
+
+func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ *f.toFloat64() = v
+ return b[8:], nil
+}
+
+func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ *f.toFloat64Ptr() = &v
+ return b[8:], nil
+}
+
+func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ s := f.toFloat64Slice()
+ *s = append(*s, v)
+ b = b[8:]
+ }
+ return res, nil
+ }
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ s := f.toFloat64Slice()
+ *s = append(*s, v)
+ return b[8:], nil
+}
+
+func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ *f.toFloat32() = v
+ return b[4:], nil
+}
+
+func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ *f.toFloat32Ptr() = &v
+ return b[4:], nil
+}
+
+func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ s := f.toFloat32Slice()
+ *s = append(*s, v)
+ b = b[4:]
+ }
+ return res, nil
+ }
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ s := f.toFloat32Slice()
+ *s = append(*s, v)
+ return b[4:], nil
+}
+
+func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ *f.toString() = v
+ return b[x:], nil
+}
+
+func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ *f.toStringPtr() = &v
+ return b[x:], nil
+}
+
+func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ s := f.toStringSlice()
+ *s = append(*s, v)
+ return b[x:], nil
+}
+
+func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ *f.toString() = v
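+ // The value is stored and the bytes consumed even when the string is not
+ // valid UTF-8; the error is returned alongside the remaining input.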
+ if !utf8.ValidString(v) {
+ return b[x:], errInvalidUTF8
+ }
+ return b[x:], nil
+}
+
+func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ *f.toStringPtr() = &v
+ if !utf8.ValidString(v) {
+ return b[x:], errInvalidUTF8
+ }
+ return b[x:], nil
+}
+
+func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ s := f.toStringSlice()
+ *s = append(*s, v)
+ if !utf8.ValidString(v) {
+ return b[x:], errInvalidUTF8
+ }
+ return b[x:], nil
+}
+
+var emptyBuf [0]byte
+
+func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // The use of append here is a trick which avoids the zeroing
+ // that would be required if we used a make/copy pair.
+ // We append to emptyBuf instead of nil because we want
+ // a non-nil result even when the length is 0.
+ v := append(emptyBuf[:], b[:x]...)
+ *f.toBytes() = v
+ return b[x:], nil
+}
+
+func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := append(emptyBuf[:], b[:x]...)
+ s := f.toBytesSlice()
+ *s = append(*s, v)
+ return b[x:], nil
+}
+
+func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // First read the message field to see if something is there.
+ // The semantics of multiple submessages are weird. Instead of
+ // the last one winning (as it is for all other fields), multiple
+ // submessages are merged.
+ v := f.getPointer()
+ if v.isNil() {
+ v = valToPointer(reflect.New(sub.typ))
+ f.setPointer(v)
+ }
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ return b[x:], err
+ }
+}
+
+func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := valToPointer(reflect.New(sub.typ))
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ f.appendPointer(v)
+ return b[x:], err
+ }
+}
+
+func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireStartGroup {
+ return b, errInternalBadWireType
+ }
+ x, y := findEndGroup(b)
+ if x < 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := f.getPointer()
+ if v.isNil() {
+ v = valToPointer(reflect.New(sub.typ))
+ f.setPointer(v)
+ }
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ return b[y:], err
+ }
+}
+
+func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireStartGroup {
+ return b, errInternalBadWireType
+ }
+ x, y := findEndGroup(b)
+ if x < 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := valToPointer(reflect.New(sub.typ))
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ f.appendPointer(v)
+ return b[y:], err
+ }
+}
+
+func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
+ t := f.Type
+ kt := t.Key()
+ vt := t.Elem()
+ unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key"))
+ unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val"))
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ // The map entry is a submessage. Figure out how big it is.
+ if w != WireBytes {
+ return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes)
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ r := b[x:] // unused data to return
+ b = b[:x] // data for map entry
+
+ // Note: we could use #keys * #values ~= 200 functions
+ // to do map decoding without reflection. Probably not worth it.
+ // Maps will be somewhat slow. Oh well.
+
+ // Read key and value from data.
+ var nerr nonFatal
+ k := reflect.New(kt)
+ v := reflect.New(vt)
+ for len(b) > 0 {
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ wire := int(x) & 7
+ b = b[n:]
+
+ var err error
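+ // A map entry is encoded like a message with field 1 = key and field 2 = value.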
+ switch x >> 3 {
+ case 1:
+ b, err = unmarshalKey(b, valToPointer(k), wire)
+ case 2:
+ b, err = unmarshalVal(b, valToPointer(v), wire)
+ default:
+ err = errInternalBadWireType // skip unknown tag
+ }
+
+ if nerr.Merge(err) {
+ continue
+ }
+ if err != errInternalBadWireType {
+ return nil, err
+ }
+
+ // Skip past unknown fields.
+ b, err = skipField(b, wire)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Get map, allocate if needed.
+ m := f.asPointerTo(t).Elem() // an addressable map[K]T
+ if m.IsNil() {
+ m.Set(reflect.MakeMap(t))
+ }
+
+ // Insert into map.
+ m.SetMapIndex(k.Elem(), v.Elem())
+
+ return r, nerr.E
+ }
+}
+
+// makeUnmarshalOneof makes an unmarshaler for oneof fields.
+// for:
+// message Msg {
+// oneof F {
+// int64 X = 1;
+// double Y = 2;
+// }
+// }
+// typ is the type of the concrete entry for a oneof case (e.g. Msg_X).
+// ityp is the interface type of the oneof field (e.g. isMsg_F).
+// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64).
+// Note that this function will be called once for each case in the oneof.
+func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler {
+ sf := typ.Field(0)
+ field0 := toField(&sf)
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ // Allocate holder for value.
+ v := reflect.New(typ)
+
+ // Unmarshal data into holder.
+ // We unmarshal into the first field of the holder object.
+ var err error
+ var nerr nonFatal
+ b, err = unmarshal(b, valToPointer(v).offset(field0), w)
+ if !nerr.Merge(err) {
+ return nil, err
+ }
+
+ // Write pointer to holder into target field.
+ f.asPointerTo(ityp).Elem().Set(v)
+
+ return b, nerr.E
+ }
+}
+
+// Error used by decode internally.
+var errInternalBadWireType = errors.New("proto: internal error: bad wiretype")
+
+// skipField skips past a field of type wire and returns the remaining bytes.
+func skipField(b []byte, wire int) ([]byte, error) {
+ switch wire {
+ case WireVarint:
+ _, k := decodeVarint(b)
+ if k == 0 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[k:]
+ case WireFixed32:
+ if len(b) < 4 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[4:]
+ case WireFixed64:
+ if len(b) < 8 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[8:]
+ case WireBytes:
+ m, k := decodeVarint(b)
+ if k == 0 || uint64(len(b)-k) < m {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[uint64(k)+m:]
+ case WireStartGroup:
+ _, i := findEndGroup(b)
+ if i == -1 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[i:]
+ default:
+ return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire)
+ }
+ return b, nil
+}
+
+// findEndGroup finds the index of the next EndGroup tag.
+// Groups may be nested, so the "next" EndGroup tag is the first
+// unpaired EndGroup.
+// findEndGroup returns the indexes of the start and end of the EndGroup tag.
+// Returns (-1,-1) if it can't find one.
+func findEndGroup(b []byte) (int, int) {
+ depth := 1
+ i := 0
+ for {
+ x, n := decodeVarint(b[i:])
+ if n == 0 {
+ return -1, -1
+ }
+ j := i
+ i += n
+ switch x & 7 {
+ case WireVarint:
+ _, k := decodeVarint(b[i:])
+ if k == 0 {
+ return -1, -1
+ }
+ i += k
+ case WireFixed32:
+ if len(b)-4 < i {
+ return -1, -1
+ }
+ i += 4
+ case WireFixed64:
+ if len(b)-8 < i {
+ return -1, -1
+ }
+ i += 8
+ case WireBytes:
+ m, k := decodeVarint(b[i:])
+ if k == 0 {
+ return -1, -1
+ }
+ i += k
+ if uint64(len(b)-i) < m {
+ return -1, -1
+ }
+ i += int(m)
+ case WireStartGroup:
+ depth++
+ case WireEndGroup:
+ depth--
+ if depth == 0 {
+ return j, i
+ }
+ default:
+ return -1, -1
+ }
+ }
+}
+
+// encodeVarint appends a varint-encoded integer to b and returns the result.
+func encodeVarint(b []byte, x uint64) []byte {
+ for x >= 1<<7 {
+ b = append(b, byte(x&0x7f|0x80))
+ x >>= 7
+ }
+ return append(b, byte(x))
+}
+
+// decodeVarint reads a varint-encoded integer from b.
+// Returns the decoded integer and the number of bytes read.
+// If there is an error, it returns 0,0.
+func decodeVarint(b []byte) (uint64, int) {
+ var x, y uint64
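+ // The decode loop is unrolled by hand: each step adds the next byte shifted
+ // into place and, if its continuation bit (0x80) was set, subtracts that
+ // bit's contribution before processing the following byte.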
+ if len(b) == 0 {
+ goto bad
+ }
+ x = uint64(b[0])
+ if x < 0x80 {
+ return x, 1
+ }
+ x -= 0x80
+
+ if len(b) <= 1 {
+ goto bad
+ }
+ y = uint64(b[1])
+ x += y << 7
+ if y < 0x80 {
+ return x, 2
+ }
+ x -= 0x80 << 7
+
+ if len(b) <= 2 {
+ goto bad
+ }
+ y = uint64(b[2])
+ x += y << 14
+ if y < 0x80 {
+ return x, 3
+ }
+ x -= 0x80 << 14
+
+ if len(b) <= 3 {
+ goto bad
+ }
+ y = uint64(b[3])
+ x += y << 21
+ if y < 0x80 {
+ return x, 4
+ }
+ x -= 0x80 << 21
+
+ if len(b) <= 4 {
+ goto bad
+ }
+ y = uint64(b[4])
+ x += y << 28
+ if y < 0x80 {
+ return x, 5
+ }
+ x -= 0x80 << 28
+
+ if len(b) <= 5 {
+ goto bad
+ }
+ y = uint64(b[5])
+ x += y << 35
+ if y < 0x80 {
+ return x, 6
+ }
+ x -= 0x80 << 35
+
+ if len(b) <= 6 {
+ goto bad
+ }
+ y = uint64(b[6])
+ x += y << 42
+ if y < 0x80 {
+ return x, 7
+ }
+ x -= 0x80 << 42
+
+ if len(b) <= 7 {
+ goto bad
+ }
+ y = uint64(b[7])
+ x += y << 49
+ if y < 0x80 {
+ return x, 8
+ }
+ x -= 0x80 << 49
+
+ if len(b) <= 8 {
+ goto bad
+ }
+ y = uint64(b[8])
+ x += y << 56
+ if y < 0x80 {
+ return x, 9
+ }
+ x -= 0x80 << 56
+
+ if len(b) <= 9 {
+ goto bad
+ }
+ y = uint64(b[9])
+ x += y << 63
+ if y < 2 {
+ return x, 10
+ }
+
+bad:
+ return 0, 0
+}
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
new file mode 100644
index 0000000..1aaee72
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text.go
@@ -0,0 +1,843 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+ "bufio"
+ "bytes"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "math"
+ "reflect"
+ "sort"
+ "strings"
+)
+
+var (
+ newline = []byte("\n")
+ spaces = []byte(" ")
+ endBraceNewline = []byte("}\n")
+ backslashN = []byte{'\\', 'n'}
+ backslashR = []byte{'\\', 'r'}
+ backslashT = []byte{'\\', 't'}
+ backslashDQ = []byte{'\\', '"'}
+ backslashBS = []byte{'\\', '\\'}
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+type writer interface {
+ io.Writer
+ WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ ind int
+ complete bool // if the current position is a complete line
+ compact bool // whether to write out as a one-liner
+ w writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+ if !strings.Contains(s, "\n") {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.complete = false
+ return io.WriteString(w.w, s)
+ }
+ // WriteString is typically called without newlines, so this
+ // codepath and its copy are rare. We copy to avoid
+ // duplicating all of Write's logic here.
+ return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ n, err = w.w.Write(p)
+ w.complete = false
+ return n, err
+ }
+
+ frags := bytes.SplitN(p, newline, newlines+1)
+ if w.compact {
+ for i, frag := range frags {
+ if i > 0 {
+ if err := w.w.WriteByte(' '); err != nil {
+ return n, err
+ }
+ n++
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+ }
+
+ for i, frag := range frags {
+ if w.complete {
+ w.writeIndent()
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ if i+1 < len(frags) {
+ if err := w.w.WriteByte('\n'); err != nil {
+ return n, err
+ }
+ n++
+ }
+ }
+ w.complete = len(frags[len(frags)-1]) == 0
+ return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+ if w.compact && c == '\n' {
+ c = ' '
+ }
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ err := w.w.WriteByte(c)
+ w.complete = c == '\n'
+ return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+ if w.ind == 0 {
+ log.Print("proto: textWriter unindented too far")
+ return
+ }
+ w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+ if _, err := w.WriteString(props.OrigName); err != nil {
+ return err
+ }
+ if props.Wire != "group" {
+ return w.WriteByte(':')
+ }
+ return nil
+}
+
+func requiresQuotes(u string) bool {
+ // When the type URL contains any character outside [0-9A-Za-z._/], it must be quoted.
+ for _, ch := range u {
+ switch {
+ case ch == '.' || ch == '/' || ch == '_':
+ continue
+ case '0' <= ch && ch <= '9':
+ continue
+ case 'A' <= ch && ch <= 'Z':
+ continue
+ case 'a' <= ch && ch <= 'z':
+ continue
+ default:
+ return true
+ }
+ }
+ return false
+}
+
+// isAny reports whether sv is a google.protobuf.Any message
+func isAny(sv reflect.Value) bool {
+ type wkt interface {
+ XXX_WellKnownType() string
+ }
+ t, ok := sv.Addr().Interface().(wkt)
+ return ok && t.XXX_WellKnownType() == "Any"
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
+// required messages are not linked in).
+//
+// It returns (true, error) when sv was written in expanded format or an error
+// was encountered.
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
+ turl := sv.FieldByName("TypeUrl")
+ val := sv.FieldByName("Value")
+ if !turl.IsValid() || !val.IsValid() {
+ return true, errors.New("proto: invalid google.protobuf.Any message")
+ }
+
+ b, ok := val.Interface().([]byte)
+ if !ok {
+ return true, errors.New("proto: invalid google.protobuf.Any message")
+ }
+
+ parts := strings.Split(turl.String(), "/")
+ mt := MessageType(parts[len(parts)-1])
+ if mt == nil {
+ return false, nil
+ }
+ m := reflect.New(mt.Elem())
+ if err := Unmarshal(b, m.Interface().(Message)); err != nil {
+ return false, nil
+ }
+ w.Write([]byte("["))
+ u := turl.String()
+ if requiresQuotes(u) {
+ writeString(w, u)
+ } else {
+ w.Write([]byte(u))
+ }
+ if w.compact {
+ w.Write([]byte("]:<"))
+ } else {
+ w.Write([]byte("]: <\n"))
+ w.ind++
+ }
+ if err := tm.writeStruct(w, m.Elem()); err != nil {
+ return true, err
+ }
+ if w.compact {
+ w.Write([]byte("> "))
+ } else {
+ w.ind--
+ w.Write([]byte(">\n"))
+ }
+ return true, nil
+}
+
+func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
+ if tm.ExpandAny && isAny(sv) {
+ if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
+ return err
+ }
+ }
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < sv.NumField(); i++ {
+ fv := sv.Field(i)
+ props := sprops.Prop[i]
+ name := st.Field(i).Name
+
+ if name == "XXX_NoUnkeyedLiteral" {
+ continue
+ }
+
+ if strings.HasPrefix(name, "XXX_") {
+ // There are two XXX_ fields:
+ // XXX_unrecognized []byte
+ // XXX_extensions map[int32]proto.Extension
+ // The first is handled here;
+ // the second is handled at the bottom of this function.
+ if name == "XXX_unrecognized" && !fv.IsNil() {
+ if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Field not filled in. This could be an optional field or
+ // a required field that wasn't filled in. Either way, there
+ // isn't anything we can show for it.
+ continue
+ }
+ if fv.Kind() == reflect.Slice && fv.IsNil() {
+ // Repeated field that is empty, or a bytes field that is unused.
+ continue
+ }
+
+ if props.Repeated && fv.Kind() == reflect.Slice {
+ // Repeated field.
+ for j := 0; j < fv.Len(); j++ {
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ v := fv.Index(j)
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ // A nil message in a repeated field is not valid,
+ // but we can handle that more gracefully than panicking.
+ if _, err := w.Write([]byte("\n")); err != nil {
+ return err
+ }
+ continue
+ }
+ if err := tm.writeAny(w, v, props); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Map {
+ // Map fields are rendered as a repeated struct with key/value fields.
+ keys := fv.MapKeys()
+ sort.Sort(mapKeys(keys))
+ for _, key := range keys {
+ val := fv.MapIndex(key)
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ // open struct
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ // key
+ if _, err := w.WriteString("key:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ // nil values aren't legal, but we can avoid panicking because of them.
+ if val.Kind() != reflect.Ptr || !val.IsNil() {
+ // value
+ if _, err := w.WriteString("value:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, val, props.MapValProp); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ // close struct
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+ // empty bytes field
+ continue
+ }
+ if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+ // proto3 non-repeated scalar field; skip if zero value
+ if isProto3Zero(fv) {
+ continue
+ }
+ }
+
+ if fv.Kind() == reflect.Interface {
+ // Check if it is a oneof.
+ if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+ // fv is nil, or holds a pointer to generated struct.
+ // That generated struct has exactly one field,
+ // which has a protobuf struct tag.
+ if fv.IsNil() {
+ continue
+ }
+ inner := fv.Elem().Elem() // interface -> *T -> T
+ tag := inner.Type().Field(0).Tag.Get("protobuf")
+ props = new(Properties) // Overwrite the outer props var, but not its pointee.
+ props.Parse(tag)
+ // Write the value in the oneof, not the oneof itself.
+ fv = inner.Field(0)
+
+ // Special case to cope with malformed messages gracefully:
+ // If the value in the oneof is a nil pointer, don't panic
+ // in writeAny.
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Use errors.New so writeAny won't render quotes.
+ msg := errors.New("/* nil */")
+ fv = reflect.ValueOf(&msg).Elem()
+ }
+ }
+ }
+
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+
+ // Enums have a String method, so writeAny will work fine.
+ if err := tm.writeAny(w, fv, props); err != nil {
+ return err
+ }
+
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+
+ // Extensions (the XXX_extensions field).
+ pv := sv.Addr()
+ if _, err := extendable(pv.Interface()); err == nil {
+ if err := tm.writeExtensions(w, pv); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// writeAny writes an arbitrary field.
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+ v = reflect.Indirect(v)
+
+ // Floats have special cases.
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+ x := v.Float()
+ var b []byte
+ switch {
+ case math.IsInf(x, 1):
+ b = posInf
+ case math.IsInf(x, -1):
+ b = negInf
+ case math.IsNaN(x):
+ b = nan
+ }
+ if b != nil {
+ _, err := w.Write(b)
+ return err
+ }
+ // Other values are handled below.
+ }
+
+ // We don't attempt to serialise every possible value type; only those
+ // that can occur in protocol buffers.
+ switch v.Kind() {
+ case reflect.Slice:
+ // Should only be a []byte; repeated fields are handled in writeStruct.
+ if err := writeString(w, string(v.Bytes())); err != nil {
+ return err
+ }
+ case reflect.String:
+ if err := writeString(w, v.String()); err != nil {
+ return err
+ }
+ case reflect.Struct:
+ // Required/optional group/message.
+ var bra, ket byte = '<', '>'
+ if props != nil && props.Wire == "group" {
+ bra, ket = '{', '}'
+ }
+ if err := w.WriteByte(bra); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if v.CanAddr() {
+ // Calling v.Interface on a struct causes the reflect package to
+ // copy the entire struct. This is racy with the new Marshaler
+ // since we atomically update the XXX_sizecache.
+ //
+ // Thus, we retrieve a pointer to the struct if possible to avoid
+ // a race since v.Interface on the pointer doesn't copy the struct.
+ //
+ // If v is not addressable, then we are not worried about a race
+ // since it implies that the binary Marshaler cannot possibly be
+ // mutating this value.
+ v = v.Addr()
+ }
+ if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
+ text, err := etm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = w.Write(text); err != nil {
+ return err
+ }
+ } else {
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+ if err := tm.writeStruct(w, v); err != nil {
+ return err
+ }
+ }
+ w.unindent()
+ if err := w.WriteByte(ket); err != nil {
+ return err
+ }
+ default:
+ _, err := fmt.Fprint(w, v.Interface())
+ return err
+ }
+ return nil
+}
+
+// equivalent to C's isprint.
+func isprint(c byte) bool {
+ return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(w *textWriter, s string) error {
+ // use WriteByte here to get any needed indent
+ if err := w.WriteByte('"'); err != nil {
+ return err
+ }
+ // Loop over the bytes, not the runes.
+ for i := 0; i < len(s); i++ {
+ var err error
+ // Divergence from C++: we don't escape apostrophes.
+ // There's no need to escape them, and the C++ parser
+ // copes with a naked apostrophe.
+ switch c := s[i]; c {
+ case '\n':
+ _, err = w.w.Write(backslashN)
+ case '\r':
+ _, err = w.w.Write(backslashR)
+ case '\t':
+ _, err = w.w.Write(backslashT)
+ case '"':
+ _, err = w.w.Write(backslashDQ)
+ case '\\':
+ _, err = w.w.Write(backslashBS)
+ default:
+ if isprint(c) {
+ err = w.w.WriteByte(c)
+ } else {
+ _, err = fmt.Fprintf(w.w, "\\%03o", c)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return w.WriteByte('"')
+}
+
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+ if !w.compact {
+ if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+ return err
+ }
+ }
+ b := NewBuffer(data)
+ for b.index < len(b.buf) {
+ x, err := b.DecodeVarint()
+ if err != nil {
+ _, err := fmt.Fprintf(w, "/* %v */\n", err)
+ return err
+ }
+ wire, tag := x&7, x>>3
+ if wire == WireEndGroup {
+ w.unindent()
+ if _, err := w.Write(endBraceNewline); err != nil {
+ return err
+ }
+ continue
+ }
+ if _, err := fmt.Fprint(w, tag); err != nil {
+ return err
+ }
+ if wire != WireStartGroup {
+ if err := w.WriteByte(':'); err != nil {
+ return err
+ }
+ }
+ if !w.compact || wire == WireStartGroup {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ switch wire {
+ case WireBytes:
+ buf, e := b.DecodeRawBytes(false)
+ if e == nil {
+ _, err = fmt.Fprintf(w, "%q", buf)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", e)
+ }
+ case WireFixed32:
+ x, err = b.DecodeFixed32()
+ err = writeUnknownInt(w, x, err)
+ case WireFixed64:
+ x, err = b.DecodeFixed64()
+ err = writeUnknownInt(w, x, err)
+ case WireStartGroup:
+ err = w.WriteByte('{')
+ w.indent()
+ case WireVarint:
+ x, err = b.DecodeVarint()
+ err = writeUnknownInt(w, x, err)
+ default:
+ _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+ }
+ if err != nil {
+ return err
+ }
+ if err = w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+ if err == nil {
+ _, err = fmt.Fprint(w, x)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", err)
+ }
+ return err
+}
+
+type int32Slice []int32
+
+func (s int32Slice) Len() int { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
+ emap := extensionMaps[pv.Type().Elem()]
+ ep, _ := extendable(pv.Interface())
+
+ // Order the extensions by ID.
+ // This isn't strictly necessary, but it will give us
+ // canonical output, which will also make testing easier.
+ m, mu := ep.extensionsRead()
+ if m == nil {
+ return nil
+ }
+ mu.Lock()
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids))
+ mu.Unlock()
+
+ for _, extNum := range ids {
+ ext := m[extNum]
+ var desc *ExtensionDesc
+ if emap != nil {
+ desc = emap[extNum]
+ }
+ if desc == nil {
+ // Unknown extension.
+ if err := writeUnknownStruct(w, ext.enc); err != nil {
+ return err
+ }
+ continue
+ }
+
+ pb, err := GetExtension(ep, desc)
+ if err != nil {
+ return fmt.Errorf("failed getting extension: %v", err)
+ }
+
+ // Repeated extensions will appear as a slice.
+ if !desc.repeated() {
+ if err := tm.writeExtension(w, desc.Name, pb); err != nil {
+ return err
+ }
+ } else {
+ v := reflect.ValueOf(pb)
+ for i := 0; i < v.Len(); i++ {
+ if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
+ if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (w *textWriter) writeIndent() {
+ if !w.complete {
+ return
+ }
+ remain := w.ind * 2
+ for remain > 0 {
+ n := remain
+ if n > len(spaces) {
+ n = len(spaces)
+ }
+ w.w.Write(spaces[:n])
+ remain -= n
+ }
+ w.complete = false
+}
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+ Compact bool // use compact text format (one line).
+ ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
+ val := reflect.ValueOf(pb)
+ if pb == nil || val.IsNil() {
+ w.Write([]byte(""))
+ return nil
+ }
+ var bw *bufio.Writer
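+ // If w does not already support WriteByte, wrap it in a bufio.Writer and flush it at the end.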
+ ww, ok := w.(writer)
+ if !ok {
+ bw = bufio.NewWriter(w)
+ ww = bw
+ }
+ aw := &textWriter{
+ w: ww,
+ complete: true,
+ compact: tm.Compact,
+ }
+
+ if etm, ok := pb.(encoding.TextMarshaler); ok {
+ text, err := etm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = aw.Write(text); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+ }
+ // Dereference the received pointer so we don't have outer < and >.
+ v := reflect.Indirect(val)
+ if err := tm.writeStruct(aw, v); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+}
+
+// Text is the same as Marshal, but returns the string directly.
+func (tm *TextMarshaler) Text(pb Message) string {
+ var buf bytes.Buffer
+ tm.Marshal(&buf, pb)
+ return buf.String()
+}
+
+var (
+ defaultTextMarshaler = TextMarshaler{}
+ compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// TODO: consider removing some of the Marshal functions below.
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
new file mode 100644
index 0000000..bb55a3a
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -0,0 +1,880 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the Text protocol buffer format.
+// TODO: message sets.
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+// Error string emitted when deserializing Any and fields are already set
+const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
+
+type ParseError struct {
+ Message string
+ Line int // 1-based line number
+ Offset int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+ if p.Line == 1 {
+ // show offset only for first line
+ return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+ }
+ return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
+
+type token struct {
+ value string
+ err *ParseError
+ line int // line number
+ offset int // byte number from start of input, not start of line
+ unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+ if t.err == nil {
+ return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+ }
+ return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+ s string // remaining input
+ done bool // whether the parsing is finished (success or error)
+ backed bool // whether back() was called
+ offset, line int
+ cur token
+}
+
+func newTextParser(s string) *textParser {
+ p := new(textParser)
+ p.s = s
+ p.line = 1
+ p.cur.line = 1
+ return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+ pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+ switch {
+ case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+ return true
+ case '0' <= c && c <= '9':
+ return true
+ }
+ switch c {
+ case '-', '+', '.', '_':
+ return true
+ }
+ return false
+}
+
+func isWhitespace(c byte) bool {
+ switch c {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+func isQuote(c byte) bool {
+ switch c {
+ case '"', '\'':
+ return true
+ }
+ return false
+}
+
+func (p *textParser) skipWhitespace() {
+ i := 0
+ for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+ if p.s[i] == '#' {
+ // comment; skip to end of line or input
+ for i < len(p.s) && p.s[i] != '\n' {
+ i++
+ }
+ if i == len(p.s) {
+ break
+ }
+ }
+ if p.s[i] == '\n' {
+ p.line++
+ }
+ i++
+ }
+ p.offset += i
+ p.s = p.s[i:len(p.s)]
+ if len(p.s) == 0 {
+ p.done = true
+ }
+}
+
+func (p *textParser) advance() {
+ // Skip whitespace
+ p.skipWhitespace()
+ if p.done {
+ return
+ }
+
+ // Start of non-whitespace
+ p.cur.err = nil
+ p.cur.offset, p.cur.line = p.offset, p.line
+ p.cur.unquoted = ""
+ switch p.s[0] {
+ case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+ // Single symbol
+ p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+ case '"', '\'':
+ // Quoted string
+ i := 1
+ for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+ if p.s[i] == '\\' && i+1 < len(p.s) {
+ // skip escaped char
+ i++
+ }
+ i++
+ }
+ if i >= len(p.s) || p.s[i] != p.s[0] {
+ p.errorf("unmatched quote")
+ return
+ }
+ unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+ if err != nil {
+ p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+ return
+ }
+ p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+ p.cur.unquoted = unq
+ default:
+ i := 0
+ for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+ i++
+ }
+ if i == 0 {
+ p.errorf("unexpected byte %#x", p.s[0])
+ return
+ }
+ p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+ }
+ p.offset += len(p.cur.value)
+}
+
+var (
+ errBadUTF8 = errors.New("proto: bad UTF-8")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+ // This is based on C++'s tokenizer.cc.
+ // Despite its name, this is *not* parsing C syntax.
+ // For instance, "\0" is an invalid quoted string.
+
+ // Avoid allocation in trivial cases.
+ simple := true
+ for _, r := range s {
+ if r == '\\' || r == quote {
+ simple = false
+ break
+ }
+ }
+ if simple {
+ return s, nil
+ }
+
+ buf := make([]byte, 0, 3*len(s)/2)
+ for len(s) > 0 {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", errBadUTF8
+ }
+ s = s[n:]
+ if r != '\\' {
+ if r < utf8.RuneSelf {
+ buf = append(buf, byte(r))
+ } else {
+ buf = append(buf, string(r)...)
+ }
+ continue
+ }
+
+ ch, tail, err := unescape(s)
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, ch...)
+ s = tail
+ }
+ return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", "", errBadUTF8
+ }
+ s = s[n:]
+ switch r {
+ case 'a':
+ return "\a", s, nil
+ case 'b':
+ return "\b", s, nil
+ case 'f':
+ return "\f", s, nil
+ case 'n':
+ return "\n", s, nil
+ case 'r':
+ return "\r", s, nil
+ case 't':
+ return "\t", s, nil
+ case 'v':
+ return "\v", s, nil
+ case '?':
+ return "?", s, nil // trigraph workaround
+ case '\'', '"', '\\':
+ return string(r), s, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ if len(s) < 2 {
+ return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+ }
+ ss := string(r) + s[:2]
+ s = s[2:]
+ i, err := strconv.ParseUint(ss, 8, 8)
+ if err != nil {
+ return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
+ }
+ return string([]byte{byte(i)}), s, nil
+ case 'x', 'X', 'u', 'U':
+ var n int
+ switch r {
+ case 'x', 'X':
+ n = 2
+ case 'u':
+ n = 4
+ case 'U':
+ n = 8
+ }
+ if len(s) < n {
+ return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
+ }
+ ss := s[:n]
+ s = s[n:]
+ i, err := strconv.ParseUint(ss, 16, 64)
+ if err != nil {
+ return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
+ }
+ if r == 'x' || r == 'X' {
+ return string([]byte{byte(i)}), s, nil
+ }
+ if i > utf8.MaxRune {
+ return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
+ }
+ return string(i), s, nil
+ }
+ return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+ if p.backed || p.done {
+ p.backed = false
+ return &p.cur
+ }
+ p.advance()
+ if p.done {
+ p.cur.value = ""
+ } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+ // Look for multiple quoted strings separated by whitespace,
+ // and concatenate them.
+ cat := p.cur
+ for {
+ p.skipWhitespace()
+ if p.done || !isQuote(p.s[0]) {
+ break
+ }
+ p.advance()
+ if p.cur.err != nil {
+ return &p.cur
+ }
+ cat.value += " " + p.cur.value
+ cat.unquoted += p.cur.unquoted
+ }
+ p.done = false // parser may have seen EOF, but we want to return cat
+ p.cur = cat
+ }
+ return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != s {
+ p.back()
+ return p.errorf("expected %q, found %q", s, tok.value)
+ }
+ return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < st.NumField(); i++ {
+ if !isNil(sv.Field(i)) {
+ continue
+ }
+
+ props := sprops.Prop[i]
+ if props.Required {
+ return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+ }
+ }
+ return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen
+}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+ i, ok := sprops.decoderOrigNames[name]
+ if ok {
+ return i, sprops.Prop[i], true
+ }
+ return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ":" {
+ // Colon is optional when the field is a group or message.
+ needColon := true
+ switch props.Wire {
+ case "group":
+ needColon = false
+ case "bytes":
+ // A "bytes" field is either a message, a string, or a repeated field;
+ // those three become *T, *string and []T respectively, so we can check for
+ // this field being a pointer to a non-string.
+ if typ.Kind() == reflect.Ptr {
+ // *T or *string
+ if typ.Elem().Kind() == reflect.String {
+ break
+ }
+ } else if typ.Kind() == reflect.Slice {
+ // []T or []*T
+ if typ.Elem().Kind() != reflect.Ptr {
+ break
+ }
+ } else if typ.Kind() == reflect.String {
+ // The proto3 exception is for a string field,
+ // which requires a colon.
+ break
+ }
+ needColon = false
+ }
+ if needColon {
+ return p.errorf("expected ':', found %q", tok.value)
+ }
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ reqCount := sprops.reqCount
+ var reqFieldErr error
+ fieldSet := make(map[string]bool)
+ // A struct is a sequence of "name: value", terminated by one of
+ // '>' or '}', or the end of the input. A name may also be
+ // "[extension]" or "[type/url]".
+ //
+ // The whole struct can also be an expanded Any message, like:
+ // [type/url] < ... struct contents ... >
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ if tok.value == "[" {
+ // Looks like an extension or an Any.
+ //
+ // TODO: Check whether we need to handle
+ // namespace rooted names (e.g. ".something.Foo").
+ extName, err := p.consumeExtName()
+ if err != nil {
+ return err
+ }
+
+ if s := strings.LastIndex(extName, "/"); s >= 0 {
+ // If it contains a slash, it's an Any type URL.
+ messageName := extName[s+1:]
+ mt := MessageType(messageName)
+ if mt == nil {
+ return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
+ }
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ // consume an optional colon
+ if tok.value == ":" {
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ }
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ v := reflect.New(mt.Elem())
+ if pe := p.readStruct(v.Elem(), terminator); pe != nil {
+ return pe
+ }
+ b, err := Marshal(v.Interface().(Message))
+ if err != nil {
+ return p.errorf("failed to marshal message of type %q: %v", messageName, err)
+ }
+ if fieldSet["type_url"] {
+ return p.errorf(anyRepeatedlyUnpacked, "type_url")
+ }
+ if fieldSet["value"] {
+ return p.errorf(anyRepeatedlyUnpacked, "value")
+ }
+ sv.FieldByName("TypeUrl").SetString(extName)
+ sv.FieldByName("Value").SetBytes(b)
+ fieldSet["type_url"] = true
+ fieldSet["value"] = true
+ continue
+ }
+
+ var desc *ExtensionDesc
+ // This could be faster, but it's functional.
+ // TODO: Do something smarter than a linear scan.
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+ if d.Name == extName {
+ desc = d
+ break
+ }
+ }
+ if desc == nil {
+ return p.errorf("unrecognized extension %q", extName)
+ }
+
+ props := &Properties{}
+ props.Parse(desc.Tag)
+
+ typ := reflect.TypeOf(desc.ExtensionType)
+ if err := p.checkForColon(props, typ); err != nil {
+ return err
+ }
+
+ rep := desc.repeated()
+
+ // Read the extension structure, and set it in
+ // the value we're constructing.
+ var ext reflect.Value
+ if !rep {
+ ext = reflect.New(typ).Elem()
+ } else {
+ ext = reflect.New(typ.Elem()).Elem()
+ }
+ if err := p.readAny(ext, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ ep := sv.Addr().Interface().(Message)
+ if !rep {
+ SetExtension(ep, desc, ext.Interface())
+ } else {
+ old, err := GetExtension(ep, desc)
+ var sl reflect.Value
+ if err == nil {
+ sl = reflect.ValueOf(old) // existing slice
+ } else {
+ sl = reflect.MakeSlice(typ, 0, 1)
+ }
+ sl = reflect.Append(sl, ext)
+ SetExtension(ep, desc, sl.Interface())
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // This is a normal, non-extension field.
+ name := tok.value
+ var dst reflect.Value
+ fi, props, ok := structFieldByName(sprops, name)
+ if ok {
+ dst = sv.Field(fi)
+ } else if oop, ok := sprops.OneofTypes[name]; ok {
+ // It is a oneof.
+ props = oop.Prop
+ nv := reflect.New(oop.Type.Elem())
+ dst = nv.Elem().Field(0)
+ field := sv.Field(oop.Field)
+ if !field.IsNil() {
+ return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
+ }
+ field.Set(nv)
+ }
+ if !dst.IsValid() {
+ return p.errorf("unknown field name %q in %v", name, st)
+ }
+
+ if dst.Kind() == reflect.Map {
+ // Consume any colon.
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Construct the map if it doesn't already exist.
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+ key := reflect.New(dst.Type().Key()).Elem()
+ val := reflect.New(dst.Type().Elem()).Elem()
+
+ // The map entry should be this sequence of tokens:
+ // < key : KEY value : VALUE >
+ // However, implementations may omit key or value, and technically
+ // we should support them in any order. See b/28924776 for a time
+ // this went wrong.
+
+ tok := p.next()
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ switch tok.value {
+ case "key":
+ if err := p.consumeToken(":"); err != nil {
+ return err
+ }
+ if err := p.readAny(key, props.MapKeyProp); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ case "value":
+ if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
+ return err
+ }
+ if err := p.readAny(val, props.MapValProp); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ default:
+ p.back()
+ return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+ }
+ }
+
+ dst.SetMapIndex(key, val)
+ continue
+ }
+
+ // Check that it's not already set if it's not a repeated field.
+ if !props.Repeated && fieldSet[name] {
+ return p.errorf("non-repeated field %q was repeated", name)
+ }
+
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Parse into the field.
+ fieldSet[name] = true
+ if err := p.readAny(dst, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ if props.Required {
+ reqCount--
+ }
+
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+
+ }
+
+ if reqCount > 0 {
+ return p.missingRequiredFieldError(sv)
+ }
+ return reqFieldErr
+}
+
+// consumeExtName consumes extension name or expanded Any type URL and the
+// following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtName() (string, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return "", tok.err
+ }
+
+ // If extension name or type url is quoted, it's a single token.
+ if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+ name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+ if err != nil {
+ return "", err
+ }
+ return name, p.consumeToken("]")
+ }
+
+ // Consume everything up to "]"
+ var parts []string
+ for tok.value != "]" {
+ parts = append(parts, tok.value)
+ tok = p.next()
+ if tok.err != nil {
+ return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+ }
+ if p.done && tok.value != "]" {
+ return "", p.errorf("unclosed type_url or extension name")
+ }
+ }
+ return strings.Join(parts, ""), nil
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "" {
+ return p.errorf("unexpected EOF")
+ }
+
+ switch fv := v; fv.Kind() {
+ case reflect.Slice:
+ at := v.Type()
+ if at.Elem().Kind() == reflect.Uint8 {
+ // Special case for []byte
+ if tok.value[0] != '"' && tok.value[0] != '\'' {
+ // Deliberately written out here, as the error after
+ // this switch statement would write "invalid []byte: ...",
+ // which is not as user-friendly.
+ return p.errorf("invalid string: %v", tok.value)
+ }
+ bytes := []byte(tok.unquoted)
+ fv.Set(reflect.ValueOf(bytes))
+ return nil
+ }
+ // Repeated field.
+ if tok.value == "[" {
+ // Repeated field with list notation, like [1,2,3].
+ for {
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ err := p.readAny(fv.Index(fv.Len()-1), props)
+ if err != nil {
+ return err
+ }
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "]" {
+ break
+ }
+ if tok.value != "," {
+ return p.errorf("Expected ']' or ',' found %q", tok.value)
+ }
+ }
+ return nil
+ }
+ // One value of the repeated field.
+ p.back()
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ return p.readAny(fv.Index(fv.Len()-1), props)
+ case reflect.Bool:
+ // true/1/t/True or false/f/0/False.
+ switch tok.value {
+ case "true", "1", "t", "True":
+ fv.SetBool(true)
+ return nil
+ case "false", "0", "f", "False":
+ fv.SetBool(false)
+ return nil
+ }
+ case reflect.Float32, reflect.Float64:
+ v := tok.value
+ // Ignore 'f' for compatibility with output generated by C++, but don't
+ // remove 'f' when the value is "-inf" or "inf".
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+ v = v[:len(v)-1]
+ }
+ if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+ fv.SetFloat(f)
+ return nil
+ }
+ case reflect.Int32:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ if len(props.Enum) == 0 {
+ break
+ }
+ m, ok := enumValueMaps[props.Enum]
+ if !ok {
+ break
+ }
+ x, ok := m[tok.value]
+ if !ok {
+ break
+ }
+ fv.SetInt(int64(x))
+ return nil
+ case reflect.Int64:
+ if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ case reflect.Ptr:
+ // A basic field (indirected through pointer), or a repeated message/group
+ p.back()
+ fv.Set(reflect.New(fv.Type().Elem()))
+ return p.readAny(fv.Elem(), props)
+ case reflect.String:
+ if tok.value[0] == '"' || tok.value[0] == '\'' {
+ fv.SetString(tok.unquoted)
+ return nil
+ }
+ case reflect.Struct:
+ var terminator string
+ switch tok.value {
+ case "{":
+ terminator = "}"
+ case "<":
+ terminator = ">"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ // TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+ return p.readStruct(fv, terminator)
+ case reflect.Uint32:
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ fv.SetUint(uint64(x))
+ return nil
+ }
+ case reflect.Uint64:
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ fv.SetUint(x)
+ return nil
+ }
+ }
+ return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
+func UnmarshalText(s string, pb Message) error {
+ if um, ok := pb.(encoding.TextUnmarshaler); ok {
+ return um.UnmarshalText([]byte(s))
+ }
+ pb.Reset()
+ v := reflect.ValueOf(pb)
+ return newTextParser(s).readStruct(v.Elem(), "")
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
new file mode 100644
index 0000000..31cd846
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
@@ -0,0 +1,179 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/timestamp.proto
+
+package timestamp
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// A Timestamp represents a point in time independent of any time zone
+// or calendar, represented as seconds and fractions of seconds at
+// nanosecond resolution in UTC Epoch time. It is encoded using the
+// Proleptic Gregorian Calendar which extends the Gregorian calendar
+// backwards to year one. It is encoded assuming all minutes are 60
+// seconds long, i.e. leap seconds are "smeared" so that no leap second
+// table is needed for interpretation. Range is from
+// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
+// By restricting to that range, we ensure that we can convert to
+// and from RFC 3339 date strings.
+// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
+//
+// # Examples
+//
+// Example 1: Compute Timestamp from POSIX `time()`.
+//
+// Timestamp timestamp;
+// timestamp.set_seconds(time(NULL));
+// timestamp.set_nanos(0);
+//
+// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
+//
+// struct timeval tv;
+// gettimeofday(&tv, NULL);
+//
+// Timestamp timestamp;
+// timestamp.set_seconds(tv.tv_sec);
+// timestamp.set_nanos(tv.tv_usec * 1000);
+//
+// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
+//
+// FILETIME ft;
+// GetSystemTimeAsFileTime(&ft);
+// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+//
+// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
+// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
+// Timestamp timestamp;
+// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+//
+// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
+//
+// long millis = System.currentTimeMillis();
+//
+// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+// .setNanos((int) ((millis % 1000) * 1000000)).build();
+//
+//
+// Example 5: Compute Timestamp from current time in Python.
+//
+// timestamp = Timestamp()
+// timestamp.GetCurrentTime()
+//
+// # JSON Mapping
+//
+// In JSON format, the Timestamp type is encoded as a string in the
+// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
+// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
+// where {year} is always expressed using four digits while {month}, {day},
+// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
+// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
+// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
+// is required. A proto3 JSON serializer should always use UTC (as indicated by
+// "Z") when printing the Timestamp type and a proto3 JSON parser should be
+// able to accept both UTC and other timezones (as indicated by an offset).
+//
+// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
+// 01:30 UTC on January 15, 2017.
+//
+// In JavaScript, one can convert a Date object to this format using the
+// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
+// method. In Python, a standard `datetime.datetime` object can be converted
+// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
+// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
+// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
+// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
+// ) to obtain a formatter capable of generating timestamps in this format.
+//
+//
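+// Illustrative Go sketch (not part of the upstream comment): constructing a
+// Timestamp for the current instant with the standard time package.
+//
+//     now := time.Now()
+//     ts := &Timestamp{Seconds: now.Unix(), Nanos: int32(now.Nanosecond())}
+//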
+type Timestamp struct {
+ // Represents seconds of UTC time since Unix epoch
+ // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+ // 9999-12-31T23:59:59Z inclusive.
+ Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+ // Non-negative fractions of a second at nanosecond resolution. Negative
+ // second values with fractions must still have non-negative nanos values
+ // that count forward in time. Must be from 0 to 999,999,999
+ // inclusive.
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Timestamp) Reset() { *m = Timestamp{} }
+func (m *Timestamp) String() string { return proto.CompactTextString(m) }
+func (*Timestamp) ProtoMessage() {}
+func (*Timestamp) Descriptor() ([]byte, []int) {
+ return fileDescriptor_292007bbfe81227e, []int{0}
+}
+
+func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
+
+func (m *Timestamp) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Timestamp.Unmarshal(m, b)
+}
+func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
+}
+func (m *Timestamp) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Timestamp.Merge(m, src)
+}
+func (m *Timestamp) XXX_Size() int {
+ return xxx_messageInfo_Timestamp.Size(m)
+}
+func (m *Timestamp) XXX_DiscardUnknown() {
+ xxx_messageInfo_Timestamp.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Timestamp proto.InternalMessageInfo
+
+func (m *Timestamp) GetSeconds() int64 {
+ if m != nil {
+ return m.Seconds
+ }
+ return 0
+}
+
+func (m *Timestamp) GetNanos() int32 {
+ if m != nil {
+ return m.Nanos
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
+}
+
+func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) }
+
+var fileDescriptor_292007bbfe81227e = []byte{
+ // 191 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
+ 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
+ 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28,
+ 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5,
+ 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89,
+ 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70,
+ 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51,
+ 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89,
+ 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71,
+ 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a,
+ 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43,
+ 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
new file mode 100644
index 0000000..eafb3fa
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
@@ -0,0 +1,135 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/timestamp";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "TimestampProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// A Timestamp represents a point in time independent of any time zone
+// or calendar, represented as seconds and fractions of seconds at
+// nanosecond resolution in UTC Epoch time. It is encoded using the
+// Proleptic Gregorian Calendar which extends the Gregorian calendar
+// backwards to year one. It is encoded assuming all minutes are 60
+// seconds long, i.e. leap seconds are "smeared" so that no leap second
+// table is needed for interpretation. Range is from
+// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
+// By restricting to that range, we ensure that we can convert to
+// and from RFC 3339 date strings.
+// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
+//
+// # Examples
+//
+// Example 1: Compute Timestamp from POSIX `time()`.
+//
+// Timestamp timestamp;
+// timestamp.set_seconds(time(NULL));
+// timestamp.set_nanos(0);
+//
+// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
+//
+// struct timeval tv;
+// gettimeofday(&tv, NULL);
+//
+// Timestamp timestamp;
+// timestamp.set_seconds(tv.tv_sec);
+// timestamp.set_nanos(tv.tv_usec * 1000);
+//
+// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
+//
+// FILETIME ft;
+// GetSystemTimeAsFileTime(&ft);
+// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+//
+// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
+// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
+// Timestamp timestamp;
+// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+//
+// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
+//
+// long millis = System.currentTimeMillis();
+//
+// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+// .setNanos((int) ((millis % 1000) * 1000000)).build();
+//
+//
+// Example 5: Compute Timestamp from current time in Python.
+//
+// timestamp = Timestamp()
+// timestamp.GetCurrentTime()
+//
+// # JSON Mapping
+//
+// In JSON format, the Timestamp type is encoded as a string in the
+// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
+// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
+// where {year} is always expressed using four digits while {month}, {day},
+// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
+// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
+// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
+// is required. A proto3 JSON serializer should always use UTC (as indicated by
+// "Z") when printing the Timestamp type and a proto3 JSON parser should be
+// able to accept both UTC and other timezones (as indicated by an offset).
+//
+// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
+// 01:30 UTC on January 15, 2017.
+//
+// In JavaScript, one can convert a Date object to this format using the
+// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
+// method. In Python, a standard `datetime.datetime` object can be converted
+// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
+// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
+// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
+// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
+// ) to obtain a formatter capable of generating timestamps in this format.
+//
+//
+message Timestamp {
+
+ // Represents seconds of UTC time since Unix epoch
+ // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+ // 9999-12-31T23:59:59Z inclusive.
+ int64 seconds = 1;
+
+ // Non-negative fractions of a second at nanosecond resolution. Negative
+ // second values with fractions must still have non-negative nanos values
+ // that count forward in time. Must be from 0 to 999,999,999
+ // inclusive.
+ int32 nanos = 2;
+}
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE b/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
new file mode 100644
index 0000000..14127cd
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
@@ -0,0 +1,9 @@
+(The MIT License)
+
+Copyright (c) 2017 marvin + konsorten GmbH (open-source@konsorten.de)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md b/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md
new file mode 100644
index 0000000..195333e
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md
@@ -0,0 +1,41 @@
+# Windows Terminal Sequences
+
+This library allows enabling Windows terminal color support for Go.
+
+See [Console Virtual Terminal Sequences](https://docs.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences) for details.
+
+## Usage
+
+```go
+import (
+ "syscall"
+
+ sequences "github.com/konsorten/go-windows-terminal-sequences"
+)
+
+func main() {
+ sequences.EnableVirtualTerminalProcessing(syscall.Stdout, true)
+}
+
+```
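+
+The function returns an error when the console does not support virtual
+terminal sequences, so callers will usually want to check it. A minimal
+sketch (Windows only, matching the signature above):
+
+```go
+package main
+
+import (
+ "log"
+ "syscall"
+
+ sequences "github.com/konsorten/go-windows-terminal-sequences"
+)
+
+func main() {
+ // Enable ANSI color handling on stdout; fall back to plain output on error.
+ if err := sequences.EnableVirtualTerminalProcessing(syscall.Stdout, true); err != nil {
+  log.Printf("virtual terminal sequences not enabled: %v", err)
+ }
+}
+```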
+
+## Authors
+
+This library is sponsored by the [marvin + konsorten GmbH](http://www.konsorten.de).
+
+We thank all the authors who provided code to this library:
+
+* Felix Kollmann
+* Nicolas Perraut
+
+## License
+
+(The MIT License)
+
+Copyright (c) 2018 marvin + konsorten GmbH (open-source@konsorten.de)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod b/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod
new file mode 100644
index 0000000..716c613
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod
@@ -0,0 +1 @@
+module github.com/konsorten/go-windows-terminal-sequences
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go
new file mode 100644
index 0000000..ef18d8f
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go
@@ -0,0 +1,36 @@
+// +build windows
+
+package sequences
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var (
+ kernel32Dll *syscall.LazyDLL = syscall.NewLazyDLL("Kernel32.dll")
+ setConsoleMode *syscall.LazyProc = kernel32Dll.NewProc("SetConsoleMode")
+)
+
+func EnableVirtualTerminalProcessing(stream syscall.Handle, enable bool) error {
+ const ENABLE_VIRTUAL_TERMINAL_PROCESSING uint32 = 0x4
+
+ var mode uint32
+ // Query the current mode of the stream whose mode is being changed.
+ err := syscall.GetConsoleMode(stream, &mode)
+ if err != nil {
+ return err
+ }
+
+ if enable {
+ mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING
+ } else {
+ mode &^= ENABLE_VIRTUAL_TERMINAL_PROCESSING
+ }
+
+ ret, _, err := setConsoleMode.Call(uintptr(unsafe.Pointer(stream)), uintptr(mode))
+ if ret == 0 {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go
new file mode 100644
index 0000000..df61a6f
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go
@@ -0,0 +1,11 @@
+// +build linux darwin
+
+package sequences
+
+import (
+ "fmt"
+)
+
+func EnableVirtualTerminalProcessing(stream uintptr, enable bool) error {
+ return fmt.Errorf("windows only package")
+}
diff --git a/vendor/github.com/sirupsen/logrus/.gitignore b/vendor/github.com/sirupsen/logrus/.gitignore
new file mode 100644
index 0000000..6b7d7d1
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/.gitignore
@@ -0,0 +1,2 @@
+logrus
+vendor
diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml
new file mode 100644
index 0000000..848938a
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/.travis.yml
@@ -0,0 +1,25 @@
+language: go
+go_import_path: github.com/sirupsen/logrus
+git:
+ depth: 1
+env:
+ - GO111MODULE=on
+ - GO111MODULE=off
+go: [ 1.11.x, 1.12.x ]
+os: [ linux, osx ]
+matrix:
+ exclude:
+ - go: 1.12.x
+ env: GO111MODULE=off
+ - go: 1.11.x
+ os: osx
+install:
+ - ./travis/install.sh
+ - if [[ "$GO111MODULE" == "on" ]]; then go mod download; fi
+ - if [[ "$GO111MODULE" == "off" ]]; then go get github.com/stretchr/testify/assert golang.org/x/sys/unix github.com/konsorten/go-windows-terminal-sequences; fi
+script:
+ - ./travis/cross_build.sh
+ - export GOMAXPROCS=4
+ - export GORACE=halt_on_error=1
+ - go test -race -v ./...
+ - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then go test -race -v -tags appengine ./... ; fi
diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
new file mode 100644
index 0000000..51a7ab0
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
@@ -0,0 +1,200 @@
+# 1.4.2
+ * Fixes build break for plan9, nacl, solaris
+# 1.4.1
+This new release introduces:
+ * Enhance TextFormatter to not print caller information when they are empty (#944)
+ * Remove dependency on golang.org/x/crypto (#932, #943)
+
+Fixes:
+ * Fix Entry.WithContext method to return a copy of the initial entry (#941)
+
+# 1.4.0
+This new release introduces:
+ * Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848).
+ * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter` (#909, #911)
+ * Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919).
+
+Fixes:
+ * Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893).
+ * Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903)
+ * Fix infinite recursion on unknown `Level.String()` (#907)
+ * Fix race condition in `getCaller` (#916).
+
+
+# 1.3.0
+This new release introduces:
+ * Log, Logf, Logln functions for Logger and Entry that take a Level
+
+Fixes:
+ * Building prometheus node_exporter on AIX (#840)
+ * Race condition in TextFormatter (#468)
+ * Travis CI import path (#868)
+ * Remove coloured output on Windows (#862)
+ * Pointer to func as field in JSONFormatter (#870)
+ * Properly marshal Levels (#873)
+
+# 1.2.0
+This new release introduces:
+ * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued
+ * A new trace level named `Trace` whose level is below `Debug`
+ * A configurable exit function to be called upon a Fatal trace
+ * The `Level` object now implements `encoding.TextUnmarshaler` interface
+
+# 1.1.1
+This is a bug fix release.
+ * fix the build break on Solaris
+ * don't drop a whole trace in JSONFormatter when a field param is a function pointer which can not be serialized
+
+# 1.1.0
+This new release introduces:
+ * several fixes:
+ * a fix for a race condition on entry formatting
+ * proper cleanup of previously used entries before putting them back in the pool
+ * the extra new line at the end of message in text formatter has been removed
+ * a new global public API to check if a level is activated: IsLevelEnabled
+ * the following methods have been added to the Logger object
+ * IsLevelEnabled
+ * SetFormatter
+ * SetOutput
+ * ReplaceHooks
+ * introduction of go module
+ * an indent configuration for the json formatter
+ * output colour support for windows
+ * the field sort function is now configurable for text formatter
+ * the CLICOLOR and CLICOLOR\_FORCE environment variable support in text formatter
+
+# 1.0.6
+
+This new release introduces:
+ * a new API, WithTime, which makes it easy to force the time of the log entry,
+ which is mostly useful for logger wrappers
+ * a fix reverting the immutability of the entry given as parameter to the hooks
+ * a new configuration field of the json formatter in order to put all the fields
+ in a nested dictionary
+ * a new SetOutput method in the Logger
+ * a new configuration of the textformatter to configure the name of the default keys
+ * a new configuration of the text formatter to disable the level truncation
+
+# 1.0.5
+
+* Fix hooks race (#707)
+* Fix panic deadlock (#695)
+
+# 1.0.4
+
+* Fix race when adding hooks (#612)
+* Fix terminal check in AppEngine (#635)
+
+# 1.0.3
+
+* Replace example files with testable examples
+
+# 1.0.2
+
+* bug: quote non-string values in text formatter (#583)
+* Make (*Logger) SetLevel a public method
+
+# 1.0.1
+
+* bug: fix escaping in text formatter (#575)
+
+# 1.0.0
+
+* Officially changed name to lower-case
+* bug: colors on Windows 10 (#541)
+* bug: fix race in accessing level (#512)
+
+# 0.11.5
+
+* feature: add writer and writerlevel to entry (#372)
+
+# 0.11.4
+
+* bug: fix undefined variable on solaris (#493)
+
+# 0.11.3
+
+* formatter: configure quoting of empty values (#484)
+* formatter: configure quoting character (default is `"`) (#484)
+* bug: fix not importing io correctly in non-linux environments (#481)
+
+# 0.11.2
+
+* bug: fix windows terminal detection (#476)
+
+# 0.11.1
+
+* bug: fix tty detection with custom out (#471)
+
+# 0.11.0
+
+* performance: Use bufferpool to allocate (#370)
+* terminal: terminal detection for app-engine (#343)
+* feature: exit handler (#375)
+
+# 0.10.0
+
+* feature: Add a test hook (#180)
+* feature: `ParseLevel` is now case-insensitive (#326)
+* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
+* performance: avoid re-allocations on `WithFields` (#335)
+
+# 0.9.0
+
+* logrus/text_formatter: don't emit empty msg
+* logrus/hooks/airbrake: move out of main repository
+* logrus/hooks/sentry: move out of main repository
+* logrus/hooks/papertrail: move out of main repository
+* logrus/hooks/bugsnag: move out of main repository
+* logrus/core: run tests with `-race`
+* logrus/core: detect TTY based on `stderr`
+* logrus/core: support `WithError` on logger
+* logrus/core: Solaris support
+
+# 0.8.7
+
+* logrus/core: fix possible race (#216)
+* logrus/doc: small typo fixes and doc improvements
+
+
+# 0.8.6
+
+* hooks/raven: allow passing an initialized client
+
+# 0.8.5
+
+* logrus/core: revert #208
+
+# 0.8.4
+
+* formatter/text: fix data race (#218)
+
+# 0.8.3
+
+* logrus/core: fix entry log level (#208)
+* logrus/core: improve performance of text formatter by 40%
+* logrus/core: expose `LevelHooks` type
+* logrus/core: add support for DragonflyBSD and NetBSD
+* formatter/text: print structs more verbosely
+
+# 0.8.2
+
+* logrus: fix more Fatal family functions
+
+# 0.8.1
+
+* logrus: fix not exiting on `Fatalf` and `Fatalln`
+
+# 0.8.0
+
+* logrus: defaults to stderr instead of stdout
+* hooks/sentry: add special field for `*http.Request`
+* formatter/text: ignore Windows for colors
+
+# 0.7.3
+
+* formatter/\*: allow configuration of timestamp layout
+
+# 0.7.2
+
+* formatter/text: Add configuration option for time format (#158)
diff --git a/vendor/github.com/sirupsen/logrus/LICENSE b/vendor/github.com/sirupsen/logrus/LICENSE
new file mode 100644
index 0000000..f090cb4
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md
new file mode 100644
index 0000000..a4796eb
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/README.md
@@ -0,0 +1,495 @@
+# Logrus
+[![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus)
+
+Logrus is a structured logger for Go (golang), completely API compatible with
+the standard library logger.
+
+**Seeing weird case-sensitive problems?** In the past it was possible to
+import Logrus as both upper- and lower-case. Due to the Go package environment,
+this caused issues in the community and we needed a standard. Some environments
+experienced problems with the upper-case variant, so the lower-case was decided on.
+Everything using `logrus` will need to use the lower-case:
+`github.com/sirupsen/logrus`. Any package that isn't should be changed.
+
+To fix Glide, see [these
+comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437).
+For an in-depth explanation of the casing issue, see [this
+comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276).
+
+**Are you interested in assisting in maintaining Logrus?** Currently I have a
+lot of obligations, and I am unable to provide Logrus with the maintainership it
+needs. If you'd like to help, please reach out to me at `simon at author's
+username dot com`.
+
+Nicely color-coded in development (when a TTY is attached, otherwise just
+plain text):
+
+
+![Colored](http://i.imgur.com/PY7qMwd.png)
+
+With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
+or Splunk:
+
+```json
+{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
+ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
+
+{"level":"warning","msg":"The group's number increased tremendously!",
+"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
+"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
+"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
+
+{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
+"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
+```
+
+With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
+attached, the output is compatible with the
+[logfmt](http://godoc.org/github.com/kr/logfmt) format:
+
+```text
+time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
+time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
+time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
+time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
+time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
+```
+To ensure this behaviour even if a TTY is attached, set your formatter as follows:
+
+```go
+ log.SetFormatter(&log.TextFormatter{
+ DisableColors: true,
+ FullTimestamp: true,
+ })
+```
+
+#### Logging Method Name
+
+If you wish to add the calling method as a field, instruct the logger via:
+```go
+log.SetReportCaller(true)
+```
+This adds the caller as 'method' like so:
+
+```json
+{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by",
+"time":"2014-03-10 19:57:38.562543129 -0400 EDT"}
+```
+
+```text
+time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin
+```
+Note that this does add measurable overhead - the cost will depend on the version of Go, but is
+between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your
+environment via benchmarks:
+```
+go test -bench=.*CallerTracing
+```
+
+
+#### Case-sensitivity
+
+The organization's name was changed to lower-case--and this will not be changed
+back. If you are getting import conflicts due to case sensitivity, please use
+the lower-case import: `github.com/sirupsen/logrus`.
+
+#### Example
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+```go
+package main
+
+import (
+ log "github.com/sirupsen/logrus"
+)
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ }).Info("A walrus appears")
+}
+```
+
+Note that it's completely api-compatible with the stdlib logger, so you can
+replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"`
+and you'll now have the flexibility of Logrus. You can customize it all you
+want:
+
+```go
+package main
+
+import (
+ "os"
+ log "github.com/sirupsen/logrus"
+)
+
+func init() {
+ // Log as JSON instead of the default ASCII formatter.
+ log.SetFormatter(&log.JSONFormatter{})
+
+ // Output to stdout instead of the default stderr
+ // Can be any io.Writer, see below for File example
+ log.SetOutput(os.Stdout)
+
+ // Only log the warning severity or above.
+ log.SetLevel(log.WarnLevel)
+}
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+
+ // A common pattern is to re-use fields between logging statements by re-using
+ // the logrus.Entry returned from WithFields()
+ contextLogger := log.WithFields(log.Fields{
+ "common": "this is a common field",
+ "other": "I also should be logged always",
+ })
+
+ contextLogger.Info("I'll be logged with common and other field")
+ contextLogger.Info("Me too")
+}
+```
+
+For more advanced usage such as logging to multiple locations from the same
+application, you can also create an instance of the `logrus` Logger:
+
+```go
+package main
+
+import (
+ "os"
+ "github.com/sirupsen/logrus"
+)
+
+// Create a new instance of the logger. You can have any number of instances.
+var log = logrus.New()
+
+func main() {
+ // The API for setting attributes is a little different than the package level
+ // exported logger. See Godoc.
+ log.Out = os.Stdout
+
+ // You could set this to any `io.Writer` such as a file
+ // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666)
+ // if err == nil {
+ // log.Out = file
+ // } else {
+ // log.Info("Failed to log to file, using default stderr")
+ // }
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+}
+```
+
+#### Fields
+
+Logrus encourages careful, structured logging through logging fields instead of
+long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
+to send event %s to topic %s with key %d")`, you should log the much more
+discoverable:
+
+```go
+log.WithFields(log.Fields{
+ "event": event,
+ "topic": topic,
+ "key": key,
+}).Fatal("Failed to send event")
+```
+
+We've found this API forces you to think about logging in a way that produces
+much more useful logging messages. We've been in countless situations where just
+a single added field to a log statement that was already there would've saved us
+hours. The `WithFields` call is optional.
+
+In general, with Logrus using any of the `printf`-family functions should be
+seen as a hint you should add a field, however, you can still use the
+`printf`-family functions with Logrus.
+
+#### Default Fields
+
+Often it's helpful to have fields _always_ attached to log statements in an
+application or parts of one. For example, you may want to always log the
+`request_id` and `user_ip` in the context of a request. Instead of writing
+`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
+every line, you can create a `logrus.Entry` to pass around instead:
+
+```go
+requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
+requestLogger.Info("something happened on that request") # will log request_id and user_ip
+requestLogger.Warn("something not great happened")
+```
+
+#### Hooks
+
+You can add hooks for logging levels. For example, to send errors to an exception
+tracking service on `Error`, `Fatal` and `Panic`, send info to StatsD, or log to
+multiple places simultaneously, e.g. syslog.
+
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
+`init`:
+
+```go
+import (
+ log "github.com/sirupsen/logrus"
+ "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
+ logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
+ "log/syslog"
+)
+
+func init() {
+
+ // Use the Airbrake hook to report errors that have Error severity or above to
+ // an exception tracker. You can create custom hooks, see the Hooks section.
+ log.AddHook(airbrake.NewHook(123, "xyz", "production"))
+
+ hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+ if err != nil {
+ log.Error("Unable to connect to local syslog daemon")
+ } else {
+ log.AddHook(hook)
+ }
+}
+```
+Note: the Syslog hook also supports connecting to a local syslog (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
+
+A list of currently known service hooks can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks).
+
+
+#### Level logging
+
+Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic.
+
+```go
+log.Trace("Something very low level.")
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`, then it will only log entries with
+that severity or anything above it:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application has that.
+
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields` some fields are
+automatically added to all logging events:
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+ the `WithFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
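+
+For illustration, with the default `TextFormatter` a plain
+`log.Info("Something noteworthy happened!")` call produces a line similar to:
+
+```text
+time="2015-03-26T01:27:38-04:00" level=info msg="Something noteworthy happened!"
+```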
+
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment you
+could do:
+
+```go
+import (
+ log "github.com/sirupsen/logrus"
+)
+
+func init() {
+ // do something here to set environment depending on an environment variable
+ // or command-line flag
+ if Environment == "production" {
+ log.SetFormatter(&log.JSONFormatter{})
+ } else {
+ // The TextFormatter is default, you don't actually have to do this.
+ log.SetFormatter(&log.TextFormatter{})
+ }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+ without colors.
+ * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+ field to `true`. To force no colored output even if there is a TTY set the
+ `DisableColors` field to `true`. For Windows, see
+ [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
+ * When colors are enabled, levels are truncated to 4 characters by default. To disable
+ truncation set the `DisableLevelTruncation` field to `true`.
+ * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
+* `logrus.JSONFormatter`. Logs fields as JSON.
+ * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
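+
+As a short sketch of the `TextFormatter` options mentioned above (both fields
+are listed in the generated docs):
+
+```go
+log.SetFormatter(&log.TextFormatter{
+ ForceColors: true, // keep colors even when no TTY is attached
+ DisableLevelTruncation: true, // print "warning" instead of the truncated "warn"
+})
+```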
+
+Third party logging formatters:
+
+* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine.
+* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html).
+* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
+* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
+* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure.
+
+You can define your formatter by implementing the `Formatter` interface,
+requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see Entries section above):
+
+```go
+type MyJSONFormatter struct {
+}
+
+// Register the formatter (e.g. in init or main):
+//   log.SetFormatter(new(MyJSONFormatter))
+
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
+ // Note this doesn't include Time, Level and Message which are available on
+ // the Entry. Consult `godoc` on information about those fields or read the
+ // source of the official loggers.
+ serialized, err := json.Marshal(entry.Data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
+```
+
+#### Logger as an `io.Writer`
+
+Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
+
+```go
+w := logger.Writer()
+defer w.Close()
+
+srv := http.Server{
+ // create a stdlib log.Logger that writes to
+ // logrus.Logger.
+ ErrorLog: log.New(w, "", 0),
+}
+```
+
+Each line written to that writer will be printed the usual way, using formatters
+and hooks. The level for those entries is `info`.
+
+This means that we can override the standard library logger easily:
+
+```go
+logger := logrus.New()
+logger.Formatter = &logrus.JSONFormatter{}
+
+// Use logrus for standard log output
+// Note that `log` here references stdlib's log
+// Not logrus imported under the name `log`.
+log.SetOutput(logger.Writer())
+```
+
+#### Rotation
+
+Log rotation is not provided with Logrus. Log rotation should be done by an
+external program (like `logrotate(8)`) that can compress and delete old log
+entries. It should not be a feature of the application-level logger.
+
+#### Tools
+
+| Tool | Description |
+| ---- | ----------- |
+|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus to manage loggers: you can set a logger's level, hooks and formatter from a config file, so loggers are generated with different configs in different environments.|
+|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus that wraps it with spf13/Viper to load configuration with fangs! It also simplifies Logrus configuration by reusing some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
+
+#### Testing
+
+Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
+
+* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
+* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
+
+```go
+import(
+ "github.com/sirupsen/logrus"
+ "github.com/sirupsen/logrus/hooks/test"
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+func TestSomething(t *testing.T) {
+ logger, hook := test.NewNullLogger()
+ logger.Error("Helloerror")
+
+ assert.Equal(t, 1, len(hook.Entries))
+ assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level)
+ assert.Equal(t, "Helloerror", hook.LastEntry().Message)
+
+ hook.Reset()
+ assert.Nil(t, hook.LastEntry())
+}
+```
+
+#### Fatal handlers
+
+Logrus can register one or more functions that will be called when any `fatal`
+level message is logged. The registered handlers will be executed before
+logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
+to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.
+
+```
+...
+handler := func() {
+ // gracefully shutdown something...
+}
+logrus.RegisterExitHandler(handler)
+...
+```
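+
+Putting it together, a minimal sketch that releases a hypothetical resource
+before the process exits on a `Fatal` log (`doWork` and the lock file are
+illustrative only):
+
+```go
+lock, _ := os.Create("/tmp/myapp.lock") // error handling omitted in this sketch
+
+logrus.RegisterExitHandler(func() {
+  // Runs before the os.Exit(1) triggered by any Fatal log entry.
+  lock.Close()
+  os.Remove("/tmp/myapp.lock")
+})
+
+if err := doWork(); err != nil {
+  logrus.WithError(err).Fatal("giving up")
+}
+```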
+
+#### Thread safety
+
+By default, the Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs.
+If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking (see the sketch at the end of this section).
+
+Situations when locking is not needed include:
+
+* You have no hooks registered, or hook calling is already thread-safe.
+
+* Writing to logger.Out is already thread-safe, for example:
+
+  1) logger.Out is protected by locks.
+
+  2) logger.Out is an os.File handle opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing.)
+
+ (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
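+
+For the append-only file case above, disabling the lock looks like the sketch
+below. This is only worthwhile when no hooks are registered and the output is
+known to be safe for concurrent writes; measure before assuming the lock is a
+bottleneck.
+
+```go
+logger := logrus.New()
+
+f, err := os.OpenFile("app.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
+if err != nil {
+  logger.Fatal(err)
+}
+logger.SetOutput(f)
+
+// With O_APPEND and entries smaller than 4k, writes are atomic on Linux,
+// so the internal mutex can be skipped.
+logger.SetNoLock()
+```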
diff --git a/vendor/github.com/sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go
new file mode 100644
index 0000000..8fd189e
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/alt_exit.go
@@ -0,0 +1,76 @@
+package logrus
+
+// The following code was sourced and modified from the
+// https://github.com/tebeka/atexit package governed by the following license:
+//
+// Copyright (c) 2012 Miki Tebeka .
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software is furnished to do so,
+// subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import (
+ "fmt"
+ "os"
+)
+
+var handlers = []func(){}
+
+func runHandler(handler func()) {
+ defer func() {
+ if err := recover(); err != nil {
+ fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
+ }
+ }()
+
+ handler()
+}
+
+func runHandlers() {
+ for _, handler := range handlers {
+ runHandler(handler)
+ }
+}
+
+// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
+func Exit(code int) {
+ runHandlers()
+ os.Exit(code)
+}
+
+// RegisterExitHandler appends a Logrus Exit handler to the list of handlers,
+// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
+// any Fatal log entry is made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to shut down gracefully. An example use case could be
+// closing database connections, or sending an alert that the application is
+// closing.
+func RegisterExitHandler(handler func()) {
+ handlers = append(handlers, handler)
+}
+
+// DeferExitHandler prepends a Logrus Exit handler to the list of handlers,
+// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
+// any Fatal log entry is made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to shut down gracefully. An example use case could be
+// closing database connections, or sending an alert that the application is
+// closing.
+func DeferExitHandler(handler func()) {
+ handlers = append([]func(){handler}, handlers...)
+}
diff --git a/vendor/github.com/sirupsen/logrus/appveyor.yml b/vendor/github.com/sirupsen/logrus/appveyor.yml
new file mode 100644
index 0000000..96c2ce1
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/appveyor.yml
@@ -0,0 +1,14 @@
+version: "{build}"
+platform: x64
+clone_folder: c:\gopath\src\github.com\sirupsen\logrus
+environment:
+ GOPATH: c:\gopath
+branches:
+ only:
+ - master
+install:
+ - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
+ - go version
+build_script:
+ - go get -t
+ - go test
diff --git a/vendor/github.com/sirupsen/logrus/doc.go b/vendor/github.com/sirupsen/logrus/doc.go
new file mode 100644
index 0000000..da67aba
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/doc.go
@@ -0,0 +1,26 @@
+/*
+Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
+
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+ package main
+
+ import (
+ log "github.com/sirupsen/logrus"
+ )
+
+ func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "number": 1,
+ "size": 10,
+ }).Info("A walrus appears")
+ }
+
+Output:
+ time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
+
+For a full guide visit https://github.com/sirupsen/logrus
+*/
+package logrus
diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go
new file mode 100644
index 0000000..63e2558
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/entry.go
@@ -0,0 +1,407 @@
+package logrus
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "os"
+ "reflect"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+)
+
+var (
+ bufferPool *sync.Pool
+
+ // qualified package name, cached at first use
+ logrusPackage string
+
+ // Positions in the call stack when tracing to report the calling method
+ minimumCallerDepth int
+
+ // Used for caller information initialisation
+ callerInitOnce sync.Once
+)
+
+const (
+ maximumCallerDepth int = 25
+ knownLogrusFrames int = 4
+)
+
+func init() {
+ bufferPool = &sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+ }
+
+ // start at the bottom of the stack before the package-name cache is primed
+ minimumCallerDepth = 1
+}
+
+// Defines the key when adding errors using WithError.
+var ErrorKey = "error"
+
+// An entry is the final or intermediate Logrus logging entry. It contains all
+// the fields passed with WithField{,s}. It's finally logged when Trace, Debug,
+// Info, Warn, Error, Fatal or Panic is called on it. These objects can be
+// reused and passed around as much as you wish to avoid field duplication.
+type Entry struct {
+ Logger *Logger
+
+ // Contains all the fields set by the user.
+ Data Fields
+
+ // Time at which the log entry was created
+ Time time.Time
+
+ // Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic
+ // This field will be set on entry firing and the value will be equal to the one in Logger struct field.
+ Level Level
+
+ // Calling method, with package name
+ Caller *runtime.Frame
+
+ // Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic
+ Message string
+
+ // When formatter is called in entry.log(), a Buffer may be set to entry
+ Buffer *bytes.Buffer
+
+ // Contains the context set by the user. Useful for hook processing etc.
+ Context context.Context
+
+ // err may contain a field formatting error
+ err string
+}
+
+func NewEntry(logger *Logger) *Entry {
+ return &Entry{
+ Logger: logger,
+ // Default is three fields, plus one optional. Give a little extra room.
+ Data: make(Fields, 6),
+ }
+}
+
+// Returns the string representation from the reader and ultimately the
+// formatter.
+func (entry *Entry) String() (string, error) {
+ serialized, err := entry.Logger.Formatter.Format(entry)
+ if err != nil {
+ return "", err
+ }
+ str := string(serialized)
+ return str, nil
+}
+
+// Add an error as single field (using the key defined in ErrorKey) to the Entry.
+func (entry *Entry) WithError(err error) *Entry {
+ return entry.WithField(ErrorKey, err)
+}
+
+// Add a context to the Entry.
+func (entry *Entry) WithContext(ctx context.Context) *Entry {
+ return &Entry{Logger: entry.Logger, Data: entry.Data, Time: entry.Time, err: entry.err, Context: ctx}
+}
+
+// Add a single field to the Entry.
+func (entry *Entry) WithField(key string, value interface{}) *Entry {
+ return entry.WithFields(Fields{key: value})
+}
+
+// Add a map of fields to the Entry.
+func (entry *Entry) WithFields(fields Fields) *Entry {
+ data := make(Fields, len(entry.Data)+len(fields))
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ fieldErr := entry.err
+ for k, v := range fields {
+ isErrField := false
+ if t := reflect.TypeOf(v); t != nil {
+ switch t.Kind() {
+ case reflect.Func:
+ isErrField = true
+ case reflect.Ptr:
+ isErrField = t.Elem().Kind() == reflect.Func
+ }
+ }
+ if isErrField {
+ tmp := fmt.Sprintf("can not add field %q", k)
+ if fieldErr != "" {
+ fieldErr = entry.err + ", " + tmp
+ } else {
+ fieldErr = tmp
+ }
+ } else {
+ data[k] = v
+ }
+ }
+ return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context}
+}
+
+// Overrides the time of the Entry.
+func (entry *Entry) WithTime(t time.Time) *Entry {
+ return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t, err: entry.err, Context: entry.Context}
+}
+
+// getPackageName reduces a fully qualified function name to the package name
+// There really ought to be a better way...
+func getPackageName(f string) string {
+ for {
+ lastPeriod := strings.LastIndex(f, ".")
+ lastSlash := strings.LastIndex(f, "/")
+ if lastPeriod > lastSlash {
+ f = f[:lastPeriod]
+ } else {
+ break
+ }
+ }
+
+ return f
+}
+
+// getCaller retrieves the name of the first non-logrus calling function
+func getCaller() *runtime.Frame {
+
+ // cache this package's fully-qualified name
+ callerInitOnce.Do(func() {
+ pcs := make([]uintptr, 2)
+ _ = runtime.Callers(0, pcs)
+ logrusPackage = getPackageName(runtime.FuncForPC(pcs[1]).Name())
+
+ // now that we have the cache, we can skip a minimum count of known-logrus functions
+ // XXX this is dubious, the number of frames may vary
+ minimumCallerDepth = knownLogrusFrames
+ })
+
+ // Restrict the lookback frames to avoid runaway lookups
+ pcs := make([]uintptr, maximumCallerDepth)
+ depth := runtime.Callers(minimumCallerDepth, pcs)
+ frames := runtime.CallersFrames(pcs[:depth])
+
+ for f, again := frames.Next(); again; f, again = frames.Next() {
+ pkg := getPackageName(f.Function)
+
+ // If the caller isn't part of this package, we're done
+ if pkg != logrusPackage {
+ return &f
+ }
+ }
+
+ // if we got here, we failed to find the caller's context
+ return nil
+}
+
+func (entry Entry) HasCaller() (has bool) {
+ return entry.Logger != nil &&
+ entry.Logger.ReportCaller &&
+ entry.Caller != nil
+}
+
+// This function is not declared with a pointer value because otherwise
+// race conditions will occur when using multiple goroutines
+func (entry Entry) log(level Level, msg string) {
+ var buffer *bytes.Buffer
+
+ // Default to now, but allow users to override if they want.
+ //
+ // We don't have to worry about polluting future calls to Entry#log()
+ // with this assignment because this function is declared with a
+ // non-pointer receiver.
+ if entry.Time.IsZero() {
+ entry.Time = time.Now()
+ }
+
+ entry.Level = level
+ entry.Message = msg
+ if entry.Logger.ReportCaller {
+ entry.Caller = getCaller()
+ }
+
+ entry.fireHooks()
+
+ buffer = bufferPool.Get().(*bytes.Buffer)
+ buffer.Reset()
+ defer bufferPool.Put(buffer)
+ entry.Buffer = buffer
+
+ entry.write()
+
+ entry.Buffer = nil
+
+ // To avoid Entry#log() returning a value that only would make sense for
+ // panic() to use in Entry#Panic(), we avoid the allocation by checking
+ // directly here.
+ if level <= PanicLevel {
+ panic(&entry)
+ }
+}
+
+func (entry *Entry) fireHooks() {
+ entry.Logger.mu.Lock()
+ defer entry.Logger.mu.Unlock()
+ err := entry.Logger.Hooks.Fire(entry.Level, entry)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+ }
+}
+
+func (entry *Entry) write() {
+ entry.Logger.mu.Lock()
+ defer entry.Logger.mu.Unlock()
+ serialized, err := entry.Logger.Formatter.Format(entry)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+ } else {
+ _, err = entry.Logger.Out.Write(serialized)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+ }
+ }
+}
+
+func (entry *Entry) Log(level Level, args ...interface{}) {
+ if entry.Logger.IsLevelEnabled(level) {
+ entry.log(level, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Trace(args ...interface{}) {
+ entry.Log(TraceLevel, args...)
+}
+
+func (entry *Entry) Debug(args ...interface{}) {
+ entry.Log(DebugLevel, args...)
+}
+
+func (entry *Entry) Print(args ...interface{}) {
+ entry.Info(args...)
+}
+
+func (entry *Entry) Info(args ...interface{}) {
+ entry.Log(InfoLevel, args...)
+}
+
+func (entry *Entry) Warn(args ...interface{}) {
+ entry.Log(WarnLevel, args...)
+}
+
+func (entry *Entry) Warning(args ...interface{}) {
+ entry.Warn(args...)
+}
+
+func (entry *Entry) Error(args ...interface{}) {
+ entry.Log(ErrorLevel, args...)
+}
+
+func (entry *Entry) Fatal(args ...interface{}) {
+ entry.Log(FatalLevel, args...)
+ entry.Logger.Exit(1)
+}
+
+func (entry *Entry) Panic(args ...interface{}) {
+ entry.Log(PanicLevel, args...)
+ panic(fmt.Sprint(args...))
+}
+
+// Entry Printf family functions
+
+func (entry *Entry) Logf(level Level, format string, args ...interface{}) {
+ if entry.Logger.IsLevelEnabled(level) {
+ entry.Log(level, fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Tracef(format string, args ...interface{}) {
+ entry.Logf(TraceLevel, format, args...)
+}
+
+func (entry *Entry) Debugf(format string, args ...interface{}) {
+ entry.Logf(DebugLevel, format, args...)
+}
+
+func (entry *Entry) Infof(format string, args ...interface{}) {
+ entry.Logf(InfoLevel, format, args...)
+}
+
+func (entry *Entry) Printf(format string, args ...interface{}) {
+ entry.Infof(format, args...)
+}
+
+func (entry *Entry) Warnf(format string, args ...interface{}) {
+ entry.Logf(WarnLevel, format, args...)
+}
+
+func (entry *Entry) Warningf(format string, args ...interface{}) {
+ entry.Warnf(format, args...)
+}
+
+func (entry *Entry) Errorf(format string, args ...interface{}) {
+ entry.Logf(ErrorLevel, format, args...)
+}
+
+func (entry *Entry) Fatalf(format string, args ...interface{}) {
+ entry.Logf(FatalLevel, format, args...)
+ entry.Logger.Exit(1)
+}
+
+func (entry *Entry) Panicf(format string, args ...interface{}) {
+ entry.Logf(PanicLevel, format, args...)
+}
+
+// Entry Println family functions
+
+func (entry *Entry) Logln(level Level, args ...interface{}) {
+ if entry.Logger.IsLevelEnabled(level) {
+ entry.Log(level, entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Traceln(args ...interface{}) {
+ entry.Logln(TraceLevel, args...)
+}
+
+func (entry *Entry) Debugln(args ...interface{}) {
+ entry.Logln(DebugLevel, args...)
+}
+
+func (entry *Entry) Infoln(args ...interface{}) {
+ entry.Logln(InfoLevel, args...)
+}
+
+func (entry *Entry) Println(args ...interface{}) {
+ entry.Infoln(args...)
+}
+
+func (entry *Entry) Warnln(args ...interface{}) {
+ entry.Logln(WarnLevel, args...)
+}
+
+func (entry *Entry) Warningln(args ...interface{}) {
+ entry.Warnln(args...)
+}
+
+func (entry *Entry) Errorln(args ...interface{}) {
+ entry.Logln(ErrorLevel, args...)
+}
+
+func (entry *Entry) Fatalln(args ...interface{}) {
+ entry.Logln(FatalLevel, args...)
+ entry.Logger.Exit(1)
+}
+
+func (entry *Entry) Panicln(args ...interface{}) {
+ entry.Logln(PanicLevel, args...)
+}
+
+// Sprintlnn => Sprint no newline. This is to get the behavior of how
+// fmt.Sprintln where spaces are always added between operands, regardless of
+// their type. Instead of vendoring the Sprintln implementation to spare a
+// string allocation, we do the simplest thing.
+func (entry *Entry) sprintlnn(args ...interface{}) string {
+ msg := fmt.Sprintln(args...)
+ return msg[:len(msg)-1]
+}
diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go
new file mode 100644
index 0000000..62fc2f2
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/exported.go
@@ -0,0 +1,225 @@
+package logrus
+
+import (
+ "context"
+ "io"
+ "time"
+)
+
+var (
+ // std is the name of the standard logger in stdlib `log`
+ std = New()
+)
+
+func StandardLogger() *Logger {
+ return std
+}
+
+// SetOutput sets the standard logger output.
+func SetOutput(out io.Writer) {
+ std.SetOutput(out)
+}
+
+// SetFormatter sets the standard logger formatter.
+func SetFormatter(formatter Formatter) {
+ std.SetFormatter(formatter)
+}
+
+// SetReportCaller sets whether the standard logger will include the calling
+// method as a field.
+func SetReportCaller(include bool) {
+ std.SetReportCaller(include)
+}
+
+// SetLevel sets the standard logger level.
+func SetLevel(level Level) {
+ std.SetLevel(level)
+}
+
+// GetLevel returns the standard logger level.
+func GetLevel() Level {
+ return std.GetLevel()
+}
+
+// IsLevelEnabled checks if the log level of the standard logger is greater than the level param
+func IsLevelEnabled(level Level) bool {
+ return std.IsLevelEnabled(level)
+}
+
+// AddHook adds a hook to the standard logger hooks.
+func AddHook(hook Hook) {
+ std.AddHook(hook)
+}
+
+// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
+func WithError(err error) *Entry {
+ return std.WithField(ErrorKey, err)
+}
+
+// WithContext creates an entry from the standard logger and adds a context to it.
+func WithContext(ctx context.Context) *Entry {
+ return std.WithContext(ctx)
+}
+
+// WithField creates an entry from the standard logger and adds a field to
+// it. If you want multiple fields, use `WithFields`.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithField(key string, value interface{}) *Entry {
+ return std.WithField(key, value)
+}
+
+// WithFields creates an entry from the standard logger and adds multiple
+// fields to it. This is simply a helper for `WithField`, invoking it
+// once for each field.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithFields(fields Fields) *Entry {
+ return std.WithFields(fields)
+}
+
+// WithTime creates an entry from the standard logger and overrides the time of
+// logs generated with it.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithTime(t time.Time) *Entry {
+ return std.WithTime(t)
+}
+
+// Trace logs a message at level Trace on the standard logger.
+func Trace(args ...interface{}) {
+ std.Trace(args...)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+ std.Debug(args...)
+}
+
+// Print logs a message at level Info on the standard logger.
+func Print(args ...interface{}) {
+ std.Print(args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+ std.Info(args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+ std.Warn(args...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func Warning(args ...interface{}) {
+ std.Warning(args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+ std.Error(args...)
+}
+
+// Panic logs a message at level Panic on the standard logger.
+func Panic(args ...interface{}) {
+ std.Panic(args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
+func Fatal(args ...interface{}) {
+ std.Fatal(args...)
+}
+
+// Tracef logs a message at level Trace on the standard logger.
+func Tracef(format string, args ...interface{}) {
+ std.Tracef(format, args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+ std.Debugf(format, args...)
+}
+
+// Printf logs a message at level Info on the standard logger.
+func Printf(format string, args ...interface{}) {
+ std.Printf(format, args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+ std.Infof(format, args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+ std.Warnf(format, args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+ std.Warningf(format, args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+ std.Errorf(format, args...)
+}
+
+// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) {
+ std.Panicf(format, args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
+func Fatalf(format string, args ...interface{}) {
+ std.Fatalf(format, args...)
+}
+
+// Traceln logs a message at level Trace on the standard logger.
+func Traceln(args ...interface{}) {
+ std.Traceln(args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+ std.Debugln(args...)
+}
+
+// Println logs a message at level Info on the standard logger.
+func Println(args ...interface{}) {
+ std.Println(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+ std.Infoln(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+ std.Warnln(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+ std.Warningln(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+ std.Errorln(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+ std.Panicln(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
+func Fatalln(args ...interface{}) {
+ std.Fatalln(args...)
+}
diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go
new file mode 100644
index 0000000..4088837
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/formatter.go
@@ -0,0 +1,78 @@
+package logrus
+
+import "time"
+
+// Default key names for the default fields
+const (
+ defaultTimestampFormat = time.RFC3339
+ FieldKeyMsg = "msg"
+ FieldKeyLevel = "level"
+ FieldKeyTime = "time"
+ FieldKeyLogrusError = "logrus_error"
+ FieldKeyFunc = "func"
+ FieldKeyFile = "file"
+)
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]`. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return an array of bytes which are then
+// logged to `logger.Out`.
+type Formatter interface {
+ Format(*Entry) ([]byte, error)
+}
+
+// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when
+// dumping it. If this code wasn't there doing:
+//
+// logrus.WithField("level", 1).Info("hello")
+//
+// Would just silently drop the user provided level. Instead with this code
+// it'll be logged as:
+//
+// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way. It's to
+// avoid code duplication between the two default formatters.
+func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) {
+ timeKey := fieldMap.resolve(FieldKeyTime)
+ if t, ok := data[timeKey]; ok {
+ data["fields."+timeKey] = t
+ delete(data, timeKey)
+ }
+
+ msgKey := fieldMap.resolve(FieldKeyMsg)
+ if m, ok := data[msgKey]; ok {
+ data["fields."+msgKey] = m
+ delete(data, msgKey)
+ }
+
+ levelKey := fieldMap.resolve(FieldKeyLevel)
+ if l, ok := data[levelKey]; ok {
+ data["fields."+levelKey] = l
+ delete(data, levelKey)
+ }
+
+ logrusErrKey := fieldMap.resolve(FieldKeyLogrusError)
+ if l, ok := data[logrusErrKey]; ok {
+ data["fields."+logrusErrKey] = l
+ delete(data, logrusErrKey)
+ }
+
+ // If reportCaller is not set, 'func' will not conflict.
+ if reportCaller {
+ funcKey := fieldMap.resolve(FieldKeyFunc)
+ if l, ok := data[funcKey]; ok {
+ data["fields."+funcKey] = l
+ }
+ fileKey := fieldMap.resolve(FieldKeyFile)
+ if l, ok := data[fileKey]; ok {
+ data["fields."+fileKey] = l
+ }
+ }
+}
diff --git a/vendor/github.com/sirupsen/logrus/go.mod b/vendor/github.com/sirupsen/logrus/go.mod
new file mode 100644
index 0000000..12fdf98
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/go.mod
@@ -0,0 +1,10 @@
+module github.com/sirupsen/logrus
+
+require (
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/konsorten/go-windows-terminal-sequences v1.0.1
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/stretchr/objx v0.1.1 // indirect
+ github.com/stretchr/testify v1.2.2
+ golang.org/x/sys v0.0.0-20190422165155-953cdadca894
+)
diff --git a/vendor/github.com/sirupsen/logrus/go.sum b/vendor/github.com/sirupsen/logrus/go.sum
new file mode 100644
index 0000000..596c318
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/go.sum
@@ -0,0 +1,16 @@
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe h1:CHRGQ8V7OlCYtwaKPJi3iA7J+YdNKdo8j7nG5IgDhjs=
+github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/vendor/github.com/sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go
new file mode 100644
index 0000000..3f151cd
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/hooks.go
@@ -0,0 +1,34 @@
+package logrus
+
+// A hook to be fired when logging on the logging levels returned from
+// `Levels()` on your implementation of the interface. Note that this is not
+// fired in a goroutine or a channel with workers, you should handle such
+// functionality yourself if your call is non-blocking and you don't wish for
+// the logging calls for levels returned from `Levels()` to block.
+type Hook interface {
+ Levels() []Level
+ Fire(*Entry) error
+}
+
+// Internal type for storing the hooks on a logger instance.
+type LevelHooks map[Level][]Hook
+
+// Add a hook to an instance of logger. This is called with
+// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
+func (hooks LevelHooks) Add(hook Hook) {
+ for _, level := range hook.Levels() {
+ hooks[level] = append(hooks[level], hook)
+ }
+}
+
+// Fire all the hooks for the passed level. Used by `entry.log` to fire
+// appropriate hooks for a log entry.
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
+ for _, hook := range hooks[level] {
+ if err := hook.Fire(entry); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go
new file mode 100644
index 0000000..098a21a
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/json_formatter.go
@@ -0,0 +1,121 @@
+package logrus
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "runtime"
+)
+
+type fieldKey string
+
+// FieldMap allows customization of the key names for default fields.
+type FieldMap map[fieldKey]string
+
+func (f FieldMap) resolve(key fieldKey) string {
+ if k, ok := f[key]; ok {
+ return k
+ }
+
+ return string(key)
+}
+
+// JSONFormatter formats logs into parsable json
+type JSONFormatter struct {
+ // TimestampFormat sets the format used for marshaling timestamps.
+ TimestampFormat string
+
+ // DisableTimestamp allows disabling automatic timestamps in output
+ DisableTimestamp bool
+
+ // DataKey allows users to put all the log entry parameters into a nested dictionary at a given key.
+ DataKey string
+
+ // FieldMap allows users to customize the names of keys for default fields.
+ // As an example:
+ // formatter := &JSONFormatter{
+ // FieldMap: FieldMap{
+ // FieldKeyTime: "@timestamp",
+ // FieldKeyLevel: "@level",
+ // FieldKeyMsg: "@message",
+ // FieldKeyFunc: "@caller",
+ // },
+ // }
+ FieldMap FieldMap
+
+ // CallerPrettyfier can be set by the user to modify the content
+ // of the function and file keys in the json data when ReportCaller is
+// activated. If any of the returned values is the empty string the
+// corresponding key will be removed from the JSON fields.
+ CallerPrettyfier func(*runtime.Frame) (function string, file string)
+
+ // PrettyPrint will indent all json logs
+ PrettyPrint bool
+}
+
+// Format renders a single log entry
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+ data := make(Fields, len(entry.Data)+4)
+ for k, v := range entry.Data {
+ switch v := v.(type) {
+ case error:
+ // Otherwise errors are ignored by `encoding/json`
+ // https://github.com/sirupsen/logrus/issues/137
+ data[k] = v.Error()
+ default:
+ data[k] = v
+ }
+ }
+
+ if f.DataKey != "" {
+ newData := make(Fields, 4)
+ newData[f.DataKey] = data
+ data = newData
+ }
+
+ prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
+
+ timestampFormat := f.TimestampFormat
+ if timestampFormat == "" {
+ timestampFormat = defaultTimestampFormat
+ }
+
+ if entry.err != "" {
+ data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err
+ }
+ if !f.DisableTimestamp {
+ data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
+ }
+ data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
+ data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
+ if entry.HasCaller() {
+ funcVal := entry.Caller.Function
+ fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
+ if f.CallerPrettyfier != nil {
+ funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
+ }
+ if funcVal != "" {
+ data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal
+ }
+ if fileVal != "" {
+ data[f.FieldMap.resolve(FieldKeyFile)] = fileVal
+ }
+ }
+
+ var b *bytes.Buffer
+ if entry.Buffer != nil {
+ b = entry.Buffer
+ } else {
+ b = &bytes.Buffer{}
+ }
+
+ encoder := json.NewEncoder(b)
+ if f.PrettyPrint {
+ encoder.SetIndent("", " ")
+ }
+ if err := encoder.Encode(data); err != nil {
+ return nil, fmt.Errorf("failed to marshal fields to JSON, %v", err)
+ }
+
+ return b.Bytes(), nil
+}
diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go
new file mode 100644
index 0000000..c0c0b1e
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/logger.go
@@ -0,0 +1,351 @@
+package logrus
+
+import (
+ "context"
+ "io"
+ "os"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+type Logger struct {
+ // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
+ // file, or leave it default which is `os.Stderr`. You can also set this to
+ // something more adventurous, such as logging to Kafka.
+ Out io.Writer
+ // Hooks for the logger instance. These allow firing events based on logging
+ // levels and log entries. For example, to send errors to an error tracking
+ // service, log to StatsD or dump the core on fatal errors.
+ Hooks LevelHooks
+ // All log entries pass through the formatter before logged to Out. The
+ // included formatters are `TextFormatter` and `JSONFormatter` for which
+ // TextFormatter is the default. In development (when a TTY is attached) it
+ // logs with colors, but to a file it wouldn't. You can easily implement your
+ // own that implements the `Formatter` interface, see the `README` or included
+ // formatters for examples.
+ Formatter Formatter
+
+ // Flag for whether to log caller info (off by default)
+ ReportCaller bool
+
+ // The logging level the logger should log at. This is typically (and defaults
+ // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
+ // logged.
+ Level Level
+ // Used to sync writing to the log. Locking is enabled by Default
+ mu MutexWrap
+ // Reusable empty entry
+ entryPool sync.Pool
+ // Function to exit the application, defaults to `os.Exit()`
+ ExitFunc exitFunc
+}
+
+type exitFunc func(int)
+
+type MutexWrap struct {
+ lock sync.Mutex
+ disabled bool
+}
+
+func (mw *MutexWrap) Lock() {
+ if !mw.disabled {
+ mw.lock.Lock()
+ }
+}
+
+func (mw *MutexWrap) Unlock() {
+ if !mw.disabled {
+ mw.lock.Unlock()
+ }
+}
+
+func (mw *MutexWrap) Disable() {
+ mw.disabled = true
+}
+
+// Creates a new logger. Configuration should be set by changing `Formatter`,
+// `Out` and `Hooks` directly on the default logger instance. You can also just
+// instantiate your own:
+//
+// var log = &Logger{
+// Out: os.Stderr,
+// Formatter: new(JSONFormatter),
+// Hooks: make(LevelHooks),
+// Level: logrus.DebugLevel,
+// }
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+ return &Logger{
+ Out: os.Stderr,
+ Formatter: new(TextFormatter),
+ Hooks: make(LevelHooks),
+ Level: InfoLevel,
+ ExitFunc: os.Exit,
+ ReportCaller: false,
+ }
+}
+
+func (logger *Logger) newEntry() *Entry {
+ entry, ok := logger.entryPool.Get().(*Entry)
+ if ok {
+ return entry
+ }
+ return NewEntry(logger)
+}
+
+func (logger *Logger) releaseEntry(entry *Entry) {
+ entry.Data = map[string]interface{}{}
+ logger.entryPool.Put(entry)
+}
+
+// Adds a field to the log entry, note that it doesn't log until you call
+// Debug, Print, Info, Warn, Error, Fatal or Panic. It only creates a log entry.
+// If you want multiple fields, use `WithFields`.
+func (logger *Logger) WithField(key string, value interface{}) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithField(key, value)
+}
+
+// Adds a struct of fields to the log entry. All it does is call `WithField` for
+// each `Field`.
+func (logger *Logger) WithFields(fields Fields) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithFields(fields)
+}
+
+// Add an error as single field to the log entry. All it does is call
+// `WithError` for the given `error`.
+func (logger *Logger) WithError(err error) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithError(err)
+}
+
+// Add a context to the log entry.
+func (logger *Logger) WithContext(ctx context.Context) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithContext(ctx)
+}
+
+// Overrides the time of the log entry.
+func (logger *Logger) WithTime(t time.Time) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithTime(t)
+}
+
+func (logger *Logger) Logf(level Level, format string, args ...interface{}) {
+ if logger.IsLevelEnabled(level) {
+ entry := logger.newEntry()
+ entry.Logf(level, format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Tracef(format string, args ...interface{}) {
+ logger.Logf(TraceLevel, format, args...)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+ logger.Logf(DebugLevel, format, args...)
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+ logger.Logf(InfoLevel, format, args...)
+}
+
+func (logger *Logger) Printf(format string, args ...interface{}) {
+ entry := logger.newEntry()
+ entry.Printf(format, args...)
+ logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnf(format string, args ...interface{}) {
+ logger.Logf(WarnLevel, format, args...)
+}
+
+func (logger *Logger) Warningf(format string, args ...interface{}) {
+ logger.Warnf(format, args...)
+}
+
+func (logger *Logger) Errorf(format string, args ...interface{}) {
+ logger.Logf(ErrorLevel, format, args...)
+}
+
+func (logger *Logger) Fatalf(format string, args ...interface{}) {
+ logger.Logf(FatalLevel, format, args...)
+ logger.Exit(1)
+}
+
+func (logger *Logger) Panicf(format string, args ...interface{}) {
+ logger.Logf(PanicLevel, format, args...)
+}
+
+func (logger *Logger) Log(level Level, args ...interface{}) {
+ if logger.IsLevelEnabled(level) {
+ entry := logger.newEntry()
+ entry.Log(level, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Trace(args ...interface{}) {
+ logger.Log(TraceLevel, args...)
+}
+
+func (logger *Logger) Debug(args ...interface{}) {
+ logger.Log(DebugLevel, args...)
+}
+
+func (logger *Logger) Info(args ...interface{}) {
+ logger.Log(InfoLevel, args...)
+}
+
+func (logger *Logger) Print(args ...interface{}) {
+ entry := logger.newEntry()
+ entry.Print(args...)
+ logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warn(args ...interface{}) {
+ logger.Log(WarnLevel, args...)
+}
+
+func (logger *Logger) Warning(args ...interface{}) {
+ logger.Warn(args...)
+}
+
+func (logger *Logger) Error(args ...interface{}) {
+ logger.Log(ErrorLevel, args...)
+}
+
+func (logger *Logger) Fatal(args ...interface{}) {
+ logger.Log(FatalLevel, args...)
+ logger.Exit(1)
+}
+
+func (logger *Logger) Panic(args ...interface{}) {
+ logger.Log(PanicLevel, args...)
+}
+
+func (logger *Logger) Logln(level Level, args ...interface{}) {
+ if logger.IsLevelEnabled(level) {
+ entry := logger.newEntry()
+ entry.Logln(level, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Traceln(args ...interface{}) {
+ logger.Logln(TraceLevel, args...)
+}
+
+func (logger *Logger) Debugln(args ...interface{}) {
+ logger.Logln(DebugLevel, args...)
+}
+
+func (logger *Logger) Infoln(args ...interface{}) {
+ logger.Logln(InfoLevel, args...)
+}
+
+func (logger *Logger) Println(args ...interface{}) {
+ entry := logger.newEntry()
+ entry.Println(args...)
+ logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnln(args ...interface{}) {
+ logger.Logln(WarnLevel, args...)
+}
+
+func (logger *Logger) Warningln(args ...interface{}) {
+ logger.Warnln(args...)
+}
+
+func (logger *Logger) Errorln(args ...interface{}) {
+ logger.Logln(ErrorLevel, args...)
+}
+
+func (logger *Logger) Fatalln(args ...interface{}) {
+ logger.Logln(FatalLevel, args...)
+ logger.Exit(1)
+}
+
+func (logger *Logger) Panicln(args ...interface{}) {
+ logger.Logln(PanicLevel, args...)
+}
+
+func (logger *Logger) Exit(code int) {
+ runHandlers()
+ if logger.ExitFunc == nil {
+ logger.ExitFunc = os.Exit
+ }
+ logger.ExitFunc(code)
+}
+
+// When the file is opened in append mode, it's safe to write to it
+// concurrently (for messages under 4k on Linux).
+// In these cases the user can choose to disable the lock.
+func (logger *Logger) SetNoLock() {
+ logger.mu.Disable()
+}
+
+func (logger *Logger) level() Level {
+ return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
+}
+
+// SetLevel sets the logger level.
+func (logger *Logger) SetLevel(level Level) {
+ atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
+}
+
+// GetLevel returns the logger level.
+func (logger *Logger) GetLevel() Level {
+ return logger.level()
+}
+
+// AddHook adds a hook to the logger hooks.
+func (logger *Logger) AddHook(hook Hook) {
+ logger.mu.Lock()
+ defer logger.mu.Unlock()
+ logger.Hooks.Add(hook)
+}
+
+// IsLevelEnabled checks if the log level of the logger is greater than the level param
+func (logger *Logger) IsLevelEnabled(level Level) bool {
+ return logger.level() >= level
+}
+
+// SetFormatter sets the logger formatter.
+func (logger *Logger) SetFormatter(formatter Formatter) {
+ logger.mu.Lock()
+ defer logger.mu.Unlock()
+ logger.Formatter = formatter
+}
+
+// SetOutput sets the logger output.
+func (logger *Logger) SetOutput(output io.Writer) {
+ logger.mu.Lock()
+ defer logger.mu.Unlock()
+ logger.Out = output
+}
+
+func (logger *Logger) SetReportCaller(reportCaller bool) {
+ logger.mu.Lock()
+ defer logger.mu.Unlock()
+ logger.ReportCaller = reportCaller
+}
+
+// ReplaceHooks replaces the logger hooks and returns the old ones
+func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks {
+ logger.mu.Lock()
+ oldHooks := logger.Hooks
+ logger.Hooks = hooks
+ logger.mu.Unlock()
+ return oldHooks
+}
diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go
new file mode 100644
index 0000000..8644761
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/logrus.go
@@ -0,0 +1,186 @@
+package logrus
+
+import (
+ "fmt"
+ "log"
+ "strings"
+)
+
+// Fields type, used to pass to `WithFields`.
+type Fields map[string]interface{}
+
+// Level type
+type Level uint32
+
+// Convert the Level to a string. E.g. PanicLevel becomes "panic".
+func (level Level) String() string {
+ if b, err := level.MarshalText(); err == nil {
+ return string(b)
+ } else {
+ return "unknown"
+ }
+}
+
+// ParseLevel takes a string level and returns the Logrus log level constant.
+func ParseLevel(lvl string) (Level, error) {
+ switch strings.ToLower(lvl) {
+ case "panic":
+ return PanicLevel, nil
+ case "fatal":
+ return FatalLevel, nil
+ case "error":
+ return ErrorLevel, nil
+ case "warn", "warning":
+ return WarnLevel, nil
+ case "info":
+ return InfoLevel, nil
+ case "debug":
+ return DebugLevel, nil
+ case "trace":
+ return TraceLevel, nil
+ }
+
+ var l Level
+ return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (level *Level) UnmarshalText(text []byte) error {
+ l, err := ParseLevel(string(text))
+ if err != nil {
+ return err
+ }
+
+ *level = Level(l)
+
+ return nil
+}
+
+func (level Level) MarshalText() ([]byte, error) {
+ switch level {
+ case TraceLevel:
+ return []byte("trace"), nil
+ case DebugLevel:
+ return []byte("debug"), nil
+ case InfoLevel:
+ return []byte("info"), nil
+ case WarnLevel:
+ return []byte("warning"), nil
+ case ErrorLevel:
+ return []byte("error"), nil
+ case FatalLevel:
+ return []byte("fatal"), nil
+ case PanicLevel:
+ return []byte("panic"), nil
+ }
+
+ return nil, fmt.Errorf("not a valid logrus level %d", level)
+}
+
+// A constant exposing all logging levels
+var AllLevels = []Level{
+ PanicLevel,
+ FatalLevel,
+ ErrorLevel,
+ WarnLevel,
+ InfoLevel,
+ DebugLevel,
+ TraceLevel,
+}
+
+// These are the different logging levels. You can set the logging level to log
+// on your instance of logger, obtained with `logrus.New()`.
+const (
+ // PanicLevel level, highest level of severity. Logs and then calls panic with the
+ // message passed to Debug, Info, ...
+ PanicLevel Level = iota
+ // FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the
+ // logging level is set to Panic.
+ FatalLevel
+ // ErrorLevel level. Logs. Used for errors that should definitely be noted.
+ // Commonly used for hooks to send errors to an error tracking service.
+ ErrorLevel
+ // WarnLevel level. Non-critical entries that deserve eyes.
+ WarnLevel
+ // InfoLevel level. General operational entries about what's going on inside the
+ // application.
+ InfoLevel
+ // DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+ DebugLevel
+ // TraceLevel level. Designates finer-grained informational events than the Debug.
+ TraceLevel
+)
+
+// Won't compile if StdLogger can't be realized by a log.Logger
+var (
+ _ StdLogger = &log.Logger{}
+ _ StdLogger = &Entry{}
+ _ StdLogger = &Logger{}
+)
+
+// StdLogger is what your logrus-enabled library should take, that way
+// it'll accept a stdlib logger and a logrus logger. There's no standard
+// interface, this is the closest we get, unfortunately.
+type StdLogger interface {
+ Print(...interface{})
+ Printf(string, ...interface{})
+ Println(...interface{})
+
+ Fatal(...interface{})
+ Fatalf(string, ...interface{})
+ Fatalln(...interface{})
+
+ Panic(...interface{})
+ Panicf(string, ...interface{})
+ Panicln(...interface{})
+}
+
+// The FieldLogger interface generalizes the Entry and Logger types
+type FieldLogger interface {
+ WithField(key string, value interface{}) *Entry
+ WithFields(fields Fields) *Entry
+ WithError(err error) *Entry
+
+ Debugf(format string, args ...interface{})
+ Infof(format string, args ...interface{})
+ Printf(format string, args ...interface{})
+ Warnf(format string, args ...interface{})
+ Warningf(format string, args ...interface{})
+ Errorf(format string, args ...interface{})
+ Fatalf(format string, args ...interface{})
+ Panicf(format string, args ...interface{})
+
+ Debug(args ...interface{})
+ Info(args ...interface{})
+ Print(args ...interface{})
+ Warn(args ...interface{})
+ Warning(args ...interface{})
+ Error(args ...interface{})
+ Fatal(args ...interface{})
+ Panic(args ...interface{})
+
+ Debugln(args ...interface{})
+ Infoln(args ...interface{})
+ Println(args ...interface{})
+ Warnln(args ...interface{})
+ Warningln(args ...interface{})
+ Errorln(args ...interface{})
+ Fatalln(args ...interface{})
+ Panicln(args ...interface{})
+
+ // IsDebugEnabled() bool
+ // IsInfoEnabled() bool
+ // IsWarnEnabled() bool
+ // IsErrorEnabled() bool
+ // IsFatalEnabled() bool
+ // IsPanicEnabled() bool
+}
+
+// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is
+// here for consistency. Do not use. Use Logger or Entry instead.
+type Ext1FieldLogger interface {
+ FieldLogger
+ Tracef(format string, args ...interface{})
+ Trace(args ...interface{})
+ Traceln(args ...interface{})
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
new file mode 100644
index 0000000..2403de9
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
@@ -0,0 +1,11 @@
+// +build appengine
+
+package logrus
+
+import (
+ "io"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+ return true
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go
new file mode 100644
index 0000000..3c4f43f
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go
@@ -0,0 +1,13 @@
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package logrus
+
+import "golang.org/x/sys/unix"
+
+const ioctlReadTermios = unix.TIOCGETA
+
+func isTerminal(fd int) bool {
+ _, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
+ return err == nil
+}
+
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go
new file mode 100644
index 0000000..97af92c
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go
@@ -0,0 +1,11 @@
+// +build js nacl plan9
+
+package logrus
+
+import (
+ "io"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+ return false
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
new file mode 100644
index 0000000..3293fb3
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
@@ -0,0 +1,17 @@
+// +build !appengine,!js,!windows,!nacl,!plan9
+
+package logrus
+
+import (
+ "io"
+ "os"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+ switch v := w.(type) {
+ case *os.File:
+ return isTerminal(int(v.Fd()))
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go
new file mode 100644
index 0000000..f6710b3
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go
@@ -0,0 +1,11 @@
+package logrus
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func isTerminal(fd int) bool {
+ _, err := unix.IoctlGetTermio(fd, unix.TCGETA)
+ return err == nil
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
new file mode 100644
index 0000000..355dc96
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
@@ -0,0 +1,13 @@
+// +build linux aix
+
+package logrus
+
+import "golang.org/x/sys/unix"
+
+const ioctlReadTermios = unix.TCGETS
+
+func isTerminal(fd int) bool {
+ _, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
+ return err == nil
+}
+
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
new file mode 100644
index 0000000..572889d
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
@@ -0,0 +1,34 @@
+// +build !appengine,!js,windows
+
+package logrus
+
+import (
+ "io"
+ "os"
+ "syscall"
+
+ sequences "github.com/konsorten/go-windows-terminal-sequences"
+)
+
+func initTerminal(w io.Writer) {
+ switch v := w.(type) {
+ case *os.File:
+ sequences.EnableVirtualTerminalProcessing(syscall.Handle(v.Fd()), true)
+ }
+}
+
+func checkIfTerminal(w io.Writer) bool {
+ var ret bool
+ switch v := w.(type) {
+ case *os.File:
+ var mode uint32
+ err := syscall.GetConsoleMode(syscall.Handle(v.Fd()), &mode)
+ ret = (err == nil)
+ default:
+ ret = false
+ }
+ if ret {
+ initTerminal(w)
+ }
+ return ret
+}
diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go
new file mode 100644
index 0000000..e01587c
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/text_formatter.go
@@ -0,0 +1,295 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "runtime"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+)
+
+const (
+ red = 31
+ yellow = 33
+ blue = 36
+ gray = 37
+)
+
+var baseTimestamp time.Time
+
+func init() {
+ baseTimestamp = time.Now()
+}
+
+// TextFormatter formats logs into text
+type TextFormatter struct {
+ // Set to true to bypass checking for a TTY before outputting colors.
+ ForceColors bool
+
+ // Force disabling colors.
+ DisableColors bool
+
+ // Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/
+ EnvironmentOverrideColors bool
+
+ // Disable timestamp logging. useful when output is redirected to logging
+ // system that already adds timestamps.
+ DisableTimestamp bool
+
+ // Enable logging the full timestamp when a TTY is attached instead of just
+ // the time passed since beginning of execution.
+ FullTimestamp bool
+
+ // TimestampFormat to use for display when a full timestamp is printed
+ TimestampFormat string
+
+ // The fields are sorted by default for a consistent output. For applications
+ // that log extremely frequently and don't use the JSON formatter this may not
+ // be desired.
+ DisableSorting bool
+
+ // The keys sorting function, when uninitialized it uses sort.Strings.
+ SortingFunc func([]string)
+
+ // Disables the truncation of the level text to 4 characters.
+ DisableLevelTruncation bool
+
+ // QuoteEmptyFields will wrap empty fields in quotes if true
+ QuoteEmptyFields bool
+
+ // Whether the logger's out is to a terminal
+ isTerminal bool
+
+ // FieldMap allows users to customize the names of keys for default fields.
+ // As an example:
+ // formatter := &TextFormatter{
+ // FieldMap: FieldMap{
+ // FieldKeyTime: "@timestamp",
+ // FieldKeyLevel: "@level",
+ // FieldKeyMsg: "@message"}}
+ FieldMap FieldMap
+
+ // CallerPrettyfier can be set by the user to modify the content
+ // of the function and file keys in the data when ReportCaller is
+// activated. If any of the returned values is the empty string the
+// corresponding key will be removed from the fields.
+ CallerPrettyfier func(*runtime.Frame) (function string, file string)
+
+ terminalInitOnce sync.Once
+}
+
+func (f *TextFormatter) init(entry *Entry) {
+ if entry.Logger != nil {
+ f.isTerminal = checkIfTerminal(entry.Logger.Out)
+ }
+}
+
+func (f *TextFormatter) isColored() bool {
+ isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows"))
+
+ if f.EnvironmentOverrideColors {
+ if force, ok := os.LookupEnv("CLICOLOR_FORCE"); ok && force != "0" {
+ isColored = true
+ } else if ok && force == "0" {
+ isColored = false
+ } else if os.Getenv("CLICOLOR") == "0" {
+ isColored = false
+ }
+ }
+
+ return isColored && !f.DisableColors
+}
+
+// Format renders a single log entry
+func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
+ data := make(Fields)
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
+ keys := make([]string, 0, len(data))
+ for k := range data {
+ keys = append(keys, k)
+ }
+
+ var funcVal, fileVal string
+
+ fixedKeys := make([]string, 0, 4+len(data))
+ if !f.DisableTimestamp {
+ fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime))
+ }
+ fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel))
+ if entry.Message != "" {
+ fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg))
+ }
+ if entry.err != "" {
+ fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError))
+ }
+ if entry.HasCaller() {
+ if f.CallerPrettyfier != nil {
+ funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
+ } else {
+ funcVal = entry.Caller.Function
+ fileVal = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
+ }
+
+ if funcVal != "" {
+ fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFunc))
+ }
+ if fileVal != "" {
+ fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFile))
+ }
+ }
+
+ if !f.DisableSorting {
+ if f.SortingFunc == nil {
+ sort.Strings(keys)
+ fixedKeys = append(fixedKeys, keys...)
+ } else {
+ if !f.isColored() {
+ fixedKeys = append(fixedKeys, keys...)
+ f.SortingFunc(fixedKeys)
+ } else {
+ f.SortingFunc(keys)
+ }
+ }
+ } else {
+ fixedKeys = append(fixedKeys, keys...)
+ }
+
+ var b *bytes.Buffer
+ if entry.Buffer != nil {
+ b = entry.Buffer
+ } else {
+ b = &bytes.Buffer{}
+ }
+
+ f.terminalInitOnce.Do(func() { f.init(entry) })
+
+ timestampFormat := f.TimestampFormat
+ if timestampFormat == "" {
+ timestampFormat = defaultTimestampFormat
+ }
+ if f.isColored() {
+ f.printColored(b, entry, keys, data, timestampFormat)
+ } else {
+
+ for _, key := range fixedKeys {
+ var value interface{}
+ switch {
+ case key == f.FieldMap.resolve(FieldKeyTime):
+ value = entry.Time.Format(timestampFormat)
+ case key == f.FieldMap.resolve(FieldKeyLevel):
+ value = entry.Level.String()
+ case key == f.FieldMap.resolve(FieldKeyMsg):
+ value = entry.Message
+ case key == f.FieldMap.resolve(FieldKeyLogrusError):
+ value = entry.err
+ case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller():
+ value = funcVal
+ case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller():
+ value = fileVal
+ default:
+ value = data[key]
+ }
+ f.appendKeyValue(b, key, value)
+ }
+ }
+
+ b.WriteByte('\n')
+ return b.Bytes(), nil
+}
+
+func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) {
+ var levelColor int
+ switch entry.Level {
+ case DebugLevel, TraceLevel:
+ levelColor = gray
+ case WarnLevel:
+ levelColor = yellow
+ case ErrorLevel, FatalLevel, PanicLevel:
+ levelColor = red
+ default:
+ levelColor = blue
+ }
+
+ levelText := strings.ToUpper(entry.Level.String())
+ if !f.DisableLevelTruncation {
+ levelText = levelText[0:4]
+ }
+
+ // Remove a single newline if it already exists in the message to keep
+ // the behavior of logrus text_formatter the same as the stdlib log package
+ entry.Message = strings.TrimSuffix(entry.Message, "\n")
+
+ caller := ""
+ if entry.HasCaller() {
+ funcVal := fmt.Sprintf("%s()", entry.Caller.Function)
+ fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
+
+ if f.CallerPrettyfier != nil {
+ funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
+ }
+
+ if fileVal == "" {
+ caller = funcVal
+ } else if funcVal == "" {
+ caller = fileVal
+ } else {
+ caller = fileVal + " " + funcVal
+ }
+ }
+
+ if f.DisableTimestamp {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message)
+ } else if !f.FullTimestamp {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message)
+ } else {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message)
+ }
+ for _, k := range keys {
+ v := data[k]
+ fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
+ f.appendValue(b, v)
+ }
+}
+
+func (f *TextFormatter) needsQuoting(text string) bool {
+ if f.QuoteEmptyFields && len(text) == 0 {
+ return true
+ }
+ for _, ch := range text {
+ if !((ch >= 'a' && ch <= 'z') ||
+ (ch >= 'A' && ch <= 'Z') ||
+ (ch >= '0' && ch <= '9') ||
+ ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') {
+ return true
+ }
+ }
+ return false
+}
+
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
+ if b.Len() > 0 {
+ b.WriteByte(' ')
+ }
+ b.WriteString(key)
+ b.WriteByte('=')
+ f.appendValue(b, value)
+}
+
+func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
+ stringVal, ok := value.(string)
+ if !ok {
+ stringVal = fmt.Sprint(value)
+ }
+
+ if !f.needsQuoting(stringVal) {
+ b.WriteString(stringVal)
+ } else {
+ b.WriteString(fmt.Sprintf("%q", stringVal))
+ }
+}
diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go
new file mode 100644
index 0000000..9e1f751
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/writer.go
@@ -0,0 +1,64 @@
+package logrus
+
+import (
+ "bufio"
+ "io"
+ "runtime"
+)
+
+func (logger *Logger) Writer() *io.PipeWriter {
+ return logger.WriterLevel(InfoLevel)
+}
+
+func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
+ return NewEntry(logger).WriterLevel(level)
+}
+
+func (entry *Entry) Writer() *io.PipeWriter {
+ return entry.WriterLevel(InfoLevel)
+}
+
+func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
+ reader, writer := io.Pipe()
+
+ var printFunc func(args ...interface{})
+
+ switch level {
+ case TraceLevel:
+ printFunc = entry.Trace
+ case DebugLevel:
+ printFunc = entry.Debug
+ case InfoLevel:
+ printFunc = entry.Info
+ case WarnLevel:
+ printFunc = entry.Warn
+ case ErrorLevel:
+ printFunc = entry.Error
+ case FatalLevel:
+ printFunc = entry.Fatal
+ case PanicLevel:
+ printFunc = entry.Panic
+ default:
+ printFunc = entry.Print
+ }
+
+ go entry.writerScanner(reader, printFunc)
+ runtime.SetFinalizer(writer, writerFinalizer)
+
+ return writer
+}
+
+func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ printFunc(scanner.Text())
+ }
+ if err := scanner.Err(); err != nil {
+ entry.Errorf("Error while reading from Writer: %s", err)
+ }
+ reader.Close()
+}
+
+func writerFinalizer(writer *io.PipeWriter) {
+ writer.Close()
+}
diff --git a/vendor/gocv.io/x/gocv/.astylerc b/vendor/gocv.io/x/gocv/.astylerc
new file mode 100644
index 0000000..e05f9df
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/.astylerc
@@ -0,0 +1,28 @@
+--lineend=linux
+
+--style=google
+
+--indent=spaces=4
+--indent-col1-comments
+--convert-tabs
+
+--attach-return-type
+--attach-namespaces
+--attach-classes
+--attach-inlines
+
+--add-brackets
+--add-braces
+
+--align-pointer=type
+--align-reference=type
+
+--max-code-length=100
+--break-after-logical
+
+--pad-comma
+--pad-oper
+--unpad-paren
+
+--break-blocks
+--pad-header
diff --git a/vendor/gocv.io/x/gocv/.dockerignore b/vendor/gocv.io/x/gocv/.dockerignore
new file mode 100644
index 0000000..1d085ca
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/.dockerignore
@@ -0,0 +1 @@
+**
diff --git a/vendor/gocv.io/x/gocv/.gitignore b/vendor/gocv.io/x/gocv/.gitignore
new file mode 100644
index 0000000..7549fff
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/.gitignore
@@ -0,0 +1,11 @@
+profile.cov
+count.out
+*.swp
+*.snap
+/parts
+/prime
+/stage
+.vscode/
+/build
+.idea/
+contrib/data.yaml
diff --git a/vendor/gocv.io/x/gocv/.travis.yml b/vendor/gocv.io/x/gocv/.travis.yml
new file mode 100644
index 0000000..5030bb0
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/.travis.yml
@@ -0,0 +1,60 @@
+# Use new container infrastructure to enable caching
+sudo: required
+dist: trusty
+
+# language is go
+language: go
+go:
+ - "1.13"
+go_import_path: gocv.io/x/gocv
+
+addons:
+ apt:
+ packages:
+ - libgmp-dev
+ - build-essential
+ - cmake
+ - git
+ - libgtk2.0-dev
+ - pkg-config
+ - libavcodec-dev
+ - libavformat-dev
+ - libswscale-dev
+ - libtbb2
+ - libtbb-dev
+ - libjpeg-dev
+ - libpng-dev
+ - libtiff-dev
+ - libjasper-dev
+ - libdc1394-22-dev
+ - xvfb
+
+before_install:
+ - ./travis_build_opencv.sh
+ - export PKG_CONFIG_PATH=$(pkg-config --variable pc_path pkg-config):$HOME/usr/lib/pkgconfig
+ - export INCLUDE_PATH=$HOME/usr/include:${INCLUDE_PATH}
+ - export LD_LIBRARY_PATH=$HOME/usr/lib:${LD_LIBRARY_PATH}
+ - sudo ln /dev/null /dev/raw1394
+ - export DISPLAY=:99.0
+ - sh -e /etc/init.d/xvfb start
+
+before_cache:
+ - rm -f $HOME/fresh-cache
+
+script:
+ - export GOCV_CAFFE_TEST_FILES="${HOME}/testdata"
+ - export GOCV_TENSORFLOW_TEST_FILES="${HOME}/testdata"
+ - export OPENCV_ENABLE_NONFREE=ON
+ - echo "Ensuring code is well formatted"; ! gofmt -s -d . | read
+ - go test -v -coverprofile=coverage.txt -covermode=atomic -tags matprofile .
+ - go test -tags matprofile ./contrib -coverprofile=contrib.txt -covermode=atomic; cat contrib.txt >> coverage.txt; rm contrib.txt;
+
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
+
+# Caching so the next build will be fast as possible.
+cache:
+ timeout: 1000
+ directories:
+ - $HOME/usr
+ - $HOME/testdata
diff --git a/vendor/gocv.io/x/gocv/CHANGELOG.md b/vendor/gocv.io/x/gocv/CHANGELOG.md
new file mode 100644
index 0000000..bf90513
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/CHANGELOG.md
@@ -0,0 +1,716 @@
+0.22.0
+---
+* **bgsegm**
+ * Add BackgroundSubtractorCNT
+* **calib3d**
+ * Added undistort function (#520)
+* **core**
+ * add functions (singular value decomposition, multiply between matrices, transpose matrix) (#559)
+ * Add new funcs (#578)
+ * add setIdentity() method to Mat
+ * add String method (#552)
+ * MatType: add missing constants
+* **dnn**
+ * Adding GetLayerNames()
+ * respect the bit depth of the input image to set the expected output when converting an image to a blob
+* **doc**
+ * change opencv version 3.x to 4.x
+* **docker**
+ * use Go1.13.5 for image
+* **imgcodecs**
+ * Fix webp image decode error (#523)
+ * optimize copy of data used for IMDecode method
+* **imgproc**
+ * Add GetRectSubPix
+ * Added ClipLine
+ * Added InvertAffineTransform
+ * Added LinearPolar function (#524)
+ * correct ksize param used for MedianBlur unit test
+ * Feature/put text with line type (#527)
+ * FitEllipse
+ * In FillPoly and DrawContours functions, remove func() wrap to avoid memory freed before calling opencv functions. (#543)
+* **objdetect**
+ * Add support QR codes
+* **opencv**
+ * update to OpenCV 4.2.0 release
+* **openvino**
+ * Add openvino async
+* **test**
+ * Tolerate imprecise result in SolvePoly
+ * Tolerate imprecision in TestHoughLines
+
+0.21.0
+---
+* **build**
+ * added go clean --cache to clean target, see issue 458
+* **core**
+ * Add KMeans function
+ * added MeanWithMask function for Mats (#487)
+ * Fix possible resource leak
+* **cuda**
+ * added cudaoptflow
+ * added NewGpuMatFromMat which creates a GpuMat from a Mat
+ * Support for CUDA Image Warping (#494)
+* **dnn**
+ * add BlobFromImages (#467)
+ * add ImagesFromBlob (#468)
+* **docs**
+ * update ROADMAP with all recent contributions. Thank you!
+* **examples**
+ * face detection from image url by using IMDecode (#499)
+ * better format
+* **imgproc**
+ * Add calcBackProject
+ * Add CompareHist
+ * Add DistanceTransform and Watershed
+ * Add GrabCut
+ * Add Integral
+ * Add MorphologyExWithParams
+* **opencv**
+ * update to version 4.1.2
+* **openvino**
+ * updates needed for 2019 R3
+* **videoio**
+ * Added ToCodec to convert FOURCC string to numeric representation (#485)
+
+0.20.0
+---
+* **build**
+ * Use Go 1.12.x for build
+ * Update to OpenCV 4.1.0
+* **cuda**
+ * Initial cuda implementation
+* **docs**
+ * Fix the command to install xquartz via brew/cask
+* **features2d**
+ * Add support for SimpleBlobDetectorParams (#434)
+ * Added FastFeatureDetectorWithParams
+* **imgproc**
+ * Added function call to cv::morphologyDefaultBorderValue
+* **test**
+ * Increase test coverage for FP16BlobFromImage()
+* **video**
+ * Added calcOpticalFlowPyrLKWithParams
+ * Addition of MOG2/KNN constructor with options
+
+0.19.0
+---
+* **build**
+ * Adds Dockerfile. Updates Makefile and README.
+ * make maintainer tag same as dockerhub organization name
+ * make sure to run tests for non-free contrib algorithms
+ * update Appveyor build to use Go 1.12
+* **calib3d**
+ * add func InitUndistortRectifyMap (#405)
+* **cmd**
+ * correct formatting of code in example
+* **core**
+ * Added Bitwise Operations With Masks
+ * update to OpenCV4.0.1
+* **dnn**
+ * add new backend and target types for NVIDIA and FPGA
+ * Added blobFromImages in ROADMAP.md (#403)
+ * Implement dnn methods for loading in-memory models.
+* **docker**
+ * update Dockerfile to use OpenCV 4.0.1
+* **docs**
+ * update ROADMAP from recent contributions
+* **examples**
+ * Fixing filename in caffe-classifier example
+* **imgproc**
+ * Add 'MinEnclosingCircle' function
+ * added BoxPoints function and BorderIsolated const
+ * Added Connected Components
+ * Added the HoughLinesPointSet function.
+ * Implement CLAHE to imgproc
+* **openvino**
+ * remove lib no longer included during non-FPGA installations
+* **test**
+ * Add len(kp) == 232 to TestMSER, seems this is necessary for MacOS for some reason.
+
+0.18.0
+---
+* **build**
+ * add OPENCV_GENERATE_PKGCONFIG flag to generate pkg-config file
+ * Add required curl package to the RPM and DEBS
+ * correct name for zip directory used for code download
+ * Removing linking against face contrib module
+ * update CI to use 4.0.0 release
+ * update Makefile and Windows build command file to OpenCV 4.0.0
+ * use opencv4 file for pkg-config
+* **core**
+ * add ScaleAdd() method to Mat
+* **docs**
+ * replace OpenCV 3.4.3 references with OpenCV 4
+ * update macOS installation info to refer to new OpenCV 4.0 brew
+ * Updated function documentation with information about errors.
+* **examples**
+ * Improve accuracy in hand gesture sample
+* **features2d**
+ * update drawKeypoints() to use new stricter enum
+* **openvino**
+ * changes to accommodate release 2018R4
+* **profile**
+ * add build tag matprofile to allow for conditional inclusion of custom profile
+ * Add Mat profile wrapper in other areas of the library.
+ * Add MatProfile.
+ * Add MatProfileTest.
+ * move MatProfile tests into separate test file so they only run when custom profiler active
+* **test**
+ * Close images in tests.
+ * More Closes in tests.
+ * test that we are using 4.0.x version now
+* **videoio**
+ * Return the right type and error when opening VideoCapture fails
+
+0.17.0
+---
+* **build**
+ * Update Makefile
+ * update version of OpenCV used to 3.4.3
+ * use link to OpenCV 3.4.3 for Windows builds
+* **core**
+ * add mulSpectrums wrapper
+ * add PolarToCart() method to Mat
+ * add Reduce() method to Mat
+ * add Repeat() method to Mat
+ * add Solve() method to Mat
+ * add SolveCubic() method to Mat
+ * add SolvePoly() method to Mat
+ * add Sort() method to Mat
+ * add SortIdx() method to Mat
+ * add Trace() method to Mat
+ * Added new MatType
+ * Added Phase function
+* **dnn**
+ * update test to match OpenCV 3.4.3 behavior
+* **docs**
+ * Add example of how to run individual test
+ * adding instructions for installing pkgconfig for macOS
+ * fixed GOPATH bug.
+ * update ROADMAP from recent contributions
+* **examples**
+ * add condition to handle no circle found in circle detection example
+* **imgcodecs**
+ * Added IMEncodeWithParams function
+* **imgproc**
+ * Added Filter2D function
+ * Added fitLine function
+ * Added logPolar function
+ * Added Remap function
+ * Added SepFilter2D function
+ * Added Sobel function
+ * Added SpatialGradient function
+* **xfeatures2d**
+ * do not run SIFT test unless OpenCV was built using OPENCV_ENABLE_NONFREE
+ * do not run SURF test unless OpenCV was built using OPENCV_ENABLE_NONFREE
+
+0.16.0
+---
+* **build**
+ * add make task for Raspbian install with ARM hardware optimizations
+ * use all available cores to compile OpenCV on Windows as discussed in issue #275
+ * download performance improvements for OpenCV installs on Windows
+ * correct various errors and issues with OpenCV installs on Fedora and CentOS
+* **core**
+ * correct spelling error in constant to fix issue #269
+ * implemented & added test for Mat.SetTo
+ * improve Multiply() GoDoc and test showing Scalar() multiplication
+ * mutator functions for Mat add, subtract, multiply, and divide for uint8 and float32 values.
+* **dnn**
+ * add FP16BlobFromImage() function to convert an image Mat to a half-float aka FP16 slice of bytes
+* **docs**
+ * fix a variable error in example code in README
+
+0.15.0
+---
+* **build**
+ * add max to make -j
+ * improve path for Windows to use currently configured GOPATH
+* **core**
+ * Add Mat.DataPtr methods for direct access to OpenCV data
+ * Avoid extra copy in Mat.ToBytes + code review feedback
+* **dnn**
+ * add test coverage for ParseNetBackend and ParseNetTarget
+ * complete test coverage
+* **docs**
+ * minor cleanup of language for install
+ * use chdir instead of cd in Windows instructions
+* **examples**
+ * add 'hello, video' example to repo
+ * add HoughLinesP example
+ * correct message on device close to match actual event
+ * small change in display message for when file is input source
+ * use DrawContours in motion detect example
+* **imgproc**
+ * Add MinAreaRect() function
+* **test**
+ * filling test coverage gaps
+* **videoio**
+ * add test coverage for OpenVideoCapture
+
+0.14.0
+---
+* **build**
+ * Add -lopencv_calib3d341 to the linker
+ * auto-confirm on package installs from make deps command
+ * display PowerShell download status for OpenCV files
+ * obtain caffe test config file from new location in Travis build
+ * remove VS only dependencies from OpenCV build, copy caffe test config file from new location
+ * return back to GoCV directory after OpenCV install
+ * update for release of OpenCV v3.4.2
+ * use PowerShell for scripted OpenCV install for Windows
+ * win32 version number has not changed yet
+* **calib3d**
+ * Add Calibrate for Fisheye model(WIP)
+* **core**
+ * add GetTickCount function
+ * add GetTickFrequency function
+ * add Size() and FromPtr() methods to Mat
+ * add Total method to Mat
+ * Added RotateFlag type
+ * correct CopyTo to use pointer to Mat as destination
+ * functions converting Image to Mat
+ * rename implementation to avoid conflicts with Windows
+ * stricter use of reflect.SliceHeader
+* **dnn**
+ * add backend/device options to caffe and tensorflow DNN examples
+ * add Close to Layer
+ * add first version of dnn-pose-detection example
+ * add further comments to object detection/tracking DNN example
+ * add GetPerfProfile function to Net
+ * add initial Layer implementation alongside enhancements to Net
+ * add InputNameToIndex to Layer
+ * add new functions allowing DNN backends such as OpenVINO
+ * additional refactoring and comments in dnn-pose-detection example
+ * cleanup DNN face detection example
+ * correct const for device targets to be called Target
+ * correct test that expected init slice with blank entries
+ * do not init slice with blank entries, since added via append
+ * further cleanup of DNN face detection example
+ * make dnn-pose-detection example use Go channels for async operation
+ * refactoring and additional comments for object detection/tracking DNN example
+ * refine comment in header for style transfer example
+ * working style transfer example
+ * added ForwardLayers() to accommodate models with multiple output layers
+* **docs**
+ * add scripted Windows install info to README
+ * Added a sample gocv workflow contributing guideline
+ * mention docker image in README.
+ * mention work in progress on Android
+ * simplify and add missing step in Linux installation in README
+ * update contributing instructions to match latest version
+ * update ROADMAP from recent calib3d module contribution
+ * update ROADMAP from recent imgproc histogram contribution
+* **examples**
+ * cleanup header for caffe dnn classifier
+ * show how to use either Caffe or Tensorflow for DNN object detection
+ * further improve dnn samples
+ * rearrange and add comments to dnn style transfer example
+ * remove old copy of pose detector
+ * remove unused example
+* **features2d**
+ * free memory allocation bug for C.KeyPoints as pointed out by @tzununbekov
+ * Adding opencv::drawKeypoints() support
+* **imgproc**
+ * add equalizeHist function
+ * Added opencv::calcHist implementation
+* **openvino**
+ * add needed environment config to execute examples
+ * further details in README explaining how to use
+ * remove opencv contrib references as they are not included in OpenVINO
+* **videoio**
+ * Add OpenVideoCapture
+ * Use gocv.VideoCaptureFile if string is specified for device.
+
+0.13.0
+---
+* **build**
+ * Add cgo directives to contrib
+ * contrib subpackage also needs cpp 11 or greater for a warning free build on Linux
+ * Deprecate env scripts and update README
+ * Don't set --std=c++1z on non-macOS
+ * Remove CGO vars from CI and correct Windows cgo directives
+ * Support pkg-config via cgo directives
+ * we actually do need cpp 11 or greater for a warning free build on Linux
+* **docs**
+ * add a Github issue template to project
+ * provide specific examples of using custom environment
+* **imgproc**
+ * add HoughLinesPWithParams() function
+* **openvino**
+ * add build tag specific to openvino
+ * add roadmap info
+ * add smoke test for ie
+
+0.12.0
+---
+* **build**
+ * convert to CRLF
+ * Enable verbosity for travisCI
+ * Further improvements to Makefile
+* **core**
+ * Add Rotate, VConcat
+ * Adding InScalarRange and NewMatFromScalarWithSize functions
+ * Changed NewMatFromScalarWithSize to NewMatWithSizeFromScalar
+ * implement CheckRange(), Determinant(), EigenNonSymmetric(), Min(), and MinMaxIdx() functions
+ * implement PerspectiveTransform() and Sqrt() functions
+ * implement Transform() and Transpose() functions
+ * Make toByteArray safe for empty byte slices
+ * Renamed InScalarRange to InRangeWithScalar
+* **docs**
+ * nicer error if we can't read haarcascade_frontalface_default
+ * correct some ROADMAP links
+ * Fix example command.
+ * Fix executable name in help text.
+ * update ROADMAP from recent contributions
+* **imgproc**
+ * add BoxFilter and SqBoxFilter functions
+ * Fix the hack to convert C arrays to Go slices.
+* **videoio**
+ * Add isColor to VideoWriterFile
+ * Check numerical parameters for gocv.VideoWriterFile
+ * CodecString()
+* **features2d**
+ * add BFMatcher
+* **img_hash**
+ * Add contrib/img_hash module
+ * add GoDocs for new img_hash module
+ * Add img-similarity as an example for img_hash
+* **openvino**
+ * adds support for Intel OpenVINO toolkit PVL
+ * starting experimental work on OpenVINO IE
+ * update README files for Intel OpenVINO toolkit support
+ * WIP on IE can load an IR network
+
+0.11.0
+---
+* **build**
+ * Add astyle config
+ * Astyle cpp/h files
+ * remove duplication in Makefile for astyle
+* **core**
+ * Add GetVecfAt() function to Mat
+ * Add GetVeciAt() function to Mat
+ * Add Mat.ToImage()
+ * add MeanStdDev() method to Mat
+ * add more functions
+ * Compare Mat Type directly
+ * further cleanup for GoDocs and enforce type for covariance operations
+ * Make borderType in CopyMakeBorder be type BorderType
+ * Mat Type() should return MatType
+ * remove unused convenience functions
+ * use Mat* to indicate when a Mat is mutable aka an output parameter
+* **dnn**
+ * add a ssd sample and a GetBlobChannel helper
+ * added another helper func and a pose detection demo
+* **docs**
+ * add some additional detail about adding OpenCV functions to GoCV
+ * updates to contribution guidelines
+ * fill out complete list of needed imgproc functions for sections that have work started
+ * indicate that missing imgproc functions need implementation
+ * mention the WithParams patterns to be used for functions with default params
+ * update README for the Mat* based API changes
+ * update ROADMAP for recent changes especially awesome recent core contributions from @berak
+* **examples**
+ * Fix tf-classifier example
+ * move new DNN advanced examples into separate folders
+ * Update doc for the face contrib package
+ * Update links in caffe-classifier demo
+ * WIP on hand gestures tracking example
+* **highgui**
+ * fix constant in NewWindow
+* **imgproc**
+ * Add Ellipse() and FillPoly() functions
+ * Add HoughCirclesWithParams() func
+ * correct output Mat to for ConvexHull()
+ * rename param being used for Mat image to be modified
+* **tracking**
+ * add support for TrackerMIL, TrackerBoosting, TrackerMedianFlow, TrackerTLD, TrackerKCF, TrackerMOSSE, TrackerCSRT trackers
+ * removed multitracker, added Csrt, rebased
+ * update GoDocs and minor renaming based on gometalint output
+
+0.10.0
+---
+* **build**
+ * install unzip before build
+ * overwrite when unzipping file to install Tensorflow test model
+ * use -DCPU_DISPATCH= flag for build to avoid problem with disabled AVX on Windows
+ * update unzipped file when installing Tensorflow test model
+* **core**
+ * add Compare() and CountNonZero() functions
+ * add getter/setter using optional params for multi-dimensional Mat using row/col/channel
+ * Add mat subtract function
+ * add new toRectangle function to DRY up conversion from CRects to []image.Rectangle
+ * add split subtract sum wrappers
+ * Add toCPoints() helper function
+ * Added Mat.CopyToWithMask() per #47
+ * added Pow() method
+ * BatchDistance BorderInterpolate CalcCovarMatrix CartToPolar
+ * CompleteSymm ConvertScaleAbs CopyMakeBorder Dct
+ * divide, multiply
+ * Eigen Exp ExtractChannels
+ * operations on a 3d Mat are not the same as on a 2d multichannel Mat
+ * resolve merge conflict with duplicate Subtract() function
+ * run gofmt on core tests
+ * Updated type for Mat.GetUCharAt() and Mat.SetUCharAt() to reflect uint8 instead of int8
+* **docs**
+ * update ROADMAP of completed functions in core from recent contributions
+* **env**
+ * check loading resources
+ * Add distribution detection to deps rule
+ * Add needed environment variables for Linux
+* **highgui**
+ * add some missing test coverage on WaitKey()
+* **imgproc**
+ * Add adaptive threshold function
+ * Add pyrDown and pyrUp functions
+ * Expose DrawContours()
+ * Expose WarpPerspective and GetPerspectiveTransform
+ * implement ConvexHull() and ConvexityDefects() functions
+* **opencv**
+ * update to OpenCV version 3.4.1
+
+0.9.0
+---
+* **bugfix**
+ * correct several errors in size parameter ordering
+* **build**
+ * add missing opencv_face lib reference to env.sh
+ * Support for non-brew installs of opencv on Darwin
+* **core**
+ * add Channels() method to Mat
+ * add ConvertTo() and NewMatFromBytes() functions
+ * add Type() method to Mat
+ * implement ConvertFp16() function
+* **dnn**
+ * use correct size for blob used for Caffe/Tensorflow tests
+* **docs**
+ * Update copyright date and Apache 2.0 license to include full text
+* **examples**
+ * cleanup mjpeg streamer code
+ * cleanup motion detector comments
+ * correct use of defer in loop
+ * use correct size for blob used for Caffe/Tensorflow examples
+* **imgproc**
+ * Add cv::approxPolyDP() bindings.
+ * Add cv::arcLength() bindings.
+ * Add cv::matchTemplate() bindings.
+ * correct comment and link for Blur function
+ * correct docs for BilateralFilter()
+
+0.8.0
+---
+* **core**
+ * add ColorMapFunctions and their test
+ * add Mat ToBytes
+ * add Reshape and MinMaxLoc functions
+ * also delete points
+ * fix mistake in the norm function by taking NormType instead of int as parameter
+ * SetDoubleAt func and his test
+ * SetFloatAt func and his test
+ * SetIntAt func and his test
+ * SetSCharAt func and his test
+ * SetShortAt func and his test
+ * SetUCharAt fun and his test
+ * use correct delete operator for array of new, eliminates a bunch of memory leaks
+* **dnn**
+ * add support for loading Tensorflow models
+ * adjust test for Caffe now that we are auto-cropping blob
+ * first pass at adding Caffe support
+ * go back to older function signature to avoid version conflicts with Intel CV SDK
+ * properly close DNN Net class
+ * use approx. value from test result to account for windows precision differences
+* **features2d**
+ * implement GFTTDetector, KAZE, and MSER algorithms
+ * modify MSER test for Windows results
+* **highgui**
+ * un-deprecate WaitKey function needed for CLI apps
+* **imgcodec**
+ * add fileExt type
+* **imgproc**
+ * add the norm wrapper and use it in test for WarpAffine and WarpAffineWithParams
+ * GetRotationMatrix2D, WarpAffine and WarpAffineWithParams
+ * use NormL2 in wrap affine
+* **pvl**
+ * add support for FaceRecognizer
+ * complete wrappers for all missing FaceDetector functions
+ * update instructions to match R3 of Intel CV SDK
+* **docs**
+ * add more detail about exactly which functions are not yet implemented in the modules that are marked as 'Work Started'
+ * add reference to Tensorflow example, and also suggest brew upgrade for MacOS
+ * improve ROADMAP to help would-be contributors know where to get started
+ * in the readme, explain compiling to a static library
+ * remove many godoc warnings by improving function descriptions
+ * update all OpenCV 3.3.1 references to v3.4.0
+ * update CGO_LDFLAGS references to match latest requirements
+ * update contribution guidelines to try to make it more inviting
+* **examples**
+ * add Caffe classifier example
+ * add Tensorflow classifier example
+ * fixed closing window in examples in infinite loop
+ * fixed format of the examples with gofmt
+* **test**
+ * add helper function for test : floatEquals
+ * add some attribution from test function
+ * display OpenCV version in case that test fails
+ * add round function to allow for floating point accuracy differences due to GPU usage.
+* **build**
+ * improve search for already installed OpenCV on MacOS
+ * update Appveyor build to Opencv 3.4.0
+ * update to Opencv 3.4.0
+
+0.7.0
+---
+* **core**
+ * correct Merge implementation
+* **docs**
+ * change wording and formatting for roadmap
+ * update roadmap for a more complete list of OpenCV functionality
+ * sequence docs in README in same way as the web site, aka by OS
+ * show in README that some work was done on contrib face module
+* **face**
+ * LBPH facerecognizer bindings
+* **highgui**
+ * complete implementation for remaining API functions
+* **imgcodecs**
+ * add IMDecode function
+* **imgproc**
+ * elaborate on HoughLines & HoughLinesP tests to fetch a few individual results
+* **objdetect**
+ * add GroupRectangles function
+* **xfeatures2d**
+ * add SIFT and SURF algorithms from OpenCV contrib
+ * improve description for OpenCV contrib
+ * run tests from OpenCV contrib
+
+0.6.0
+---
+* **core**
+ * Add cv::LUT binding
+* **examples**
+ * do not try to go fullscreen, since does not work on OSX
+* **features2d**
+ * add AKAZE algorithm
+ * add BRISK algorithm
+ * add FastFeatureDetector algorithm
+ * implement AgastFeatureDetector algorithm
+ * implement ORB algorithm
+ * implement SimpleBlobDetector algorithm
+* **osx**
+ * Fix to get the OpenCV path with "brew info".
+* **highgui**
+ * use new Window with thread lock, and deprecate WaitKey() in favor of Window.WaitKey()
+ * use Window.WaitKey() in tests
+* **imgproc**
+ * add tests for HoughCircles
+* **pvl**
+ * use correct Ptr referencing
+* **video**
+ * use smart Ptr for Algorithms thanks to @alalek
+ * use unsafe.Pointer for Algorithm
+ * move tests to single file now that they all pass
+
+0.5.0
+---
+* **core**
+ * add TermCriteria for iterative algorithms
+* **imgproc**
+ * add CornerSubPix() and GoodFeaturesToTrack() for corner detection
+* **objdetect**
+ * add DetectMultiScaleWithParams() for HOGDescriptor
+ * add DetectMultiScaleWithParams() to allow override of defaults for CascadeClassifier
+* **video**
+ * add CalcOpticalFlowFarneback() for Farneback optical flow calculations
+ * add CalcOpticalFlowPyrLK() for Lucas-Kanade optical flow calculations
+* **videoio**
+ * use temp directory for Windows test compat.
+* **build**
+ * enable Appveyor build w/cache
+* **osx**
+ * update env path to always match installed OpenCV from Homebrew
+
+0.4.0
+---
+* **core**
+ * Added cv::mean binding with single argument
+ * fix the write-strings warning
+ * return temp pointer fix
+* **examples**
+ * add counter example
+ * add motion-detect command
+ * correct counter
+ * remove redundant cast and other small cleanup
+ * set motion detect example to fullscreen
+ * use MOG2 for continuous motion detection, instead of simplistic first frame only
+* **highgui**
+ * ability to better control the fullscreen window
+* **imgproc**
+ * add BorderType param type for GaussianBlur
+ * add BoundingRect() function
+ * add ContourArea() function
+ * add FindContours() function along with associated data types
+ * add Laplacian and Scharr functions
+ * add Moments() function
+ * add Threshold function
+* **pvl**
+ * add needed lib for linker missing in README
+* **test**
+ * slightly more permissive version test
+* **videoio**
+ * Add image compression flags for gocv.IMWrite
+ * Fixed possible looping out of compression parameters length
+ * Make dedicated function to run cv::imwrite with compression parameters
+
+0.3.1
+---
+* **overall**
+ * Update to use OpenCV 3.3.1
+
+0.3.0
+---
+* **docs**
+ * Correct Windows build location from same @jpfarias fix to gocv-site
+* **core**
+ * Add Resize
+ * Add Mat merge and Discrete Fourier Transform
+ * Add CopyTo() and Normalize()
+ * Implement various important Mat logical operations
+* **video**
+ * BackgroundSubtractorMOG2 algorithm now working
+ * Add BackgroundSubtractorKNN algorithm from video module
+* **videoio**
+ * Add VideoCapture::get
+* **imgproc**
+ * Add BilateralFilter and MedianBlur
+ * Additional drawing functions implemented
+ * Add HoughCircles filter
+ * Implement various morphological operations
+* **highgui**
+ * Add Trackbar support
+* **objdetect**
+ * Add HOGDescriptor
+* **build**
+ * Remove race from test on Travis, since it causes CGo segfault in MOG2
+
+0.2.0
+---
+* Switchover to custom domain for package import
+* Yes, we have Windows
+
+0.1.0
+---
+Initial release!
+
+- [X] Video capture
+- [X] GUI Window to display video
+- [X] Image load/save
+- [X] CascadeClassifier for object detection/face tracking/etc.
+- [X] Installation instructions for Ubuntu
+- [X] Installation instructions for OS X
+- [X] Code example to use VideoWriter
+- [X] Intel CV SDK PVL FaceTracker support
+- [X] imgproc Image processing
+- [X] Travis CI build
+- [X] At least minimal test coverage for each OpenCV class
+- [X] Implement more of imgproc Image processing
\ No newline at end of file
diff --git a/vendor/gocv.io/x/gocv/CONTRIBUTING.md b/vendor/gocv.io/x/gocv/CONTRIBUTING.md
new file mode 100644
index 0000000..3d1ae54
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/CONTRIBUTING.md
@@ -0,0 +1,136 @@
+# How to contribute
+
+Thank you for your interest in improving GoCV.
+
+We would like your help to make this project better, so we appreciate any contributions. See if one of the following descriptions matches your situation:
+
+### Newcomer to GoCV, to OpenCV, or to computer vision in general
+
+We'd love to get your feedback on getting started with GoCV. Run into any difficulty, confusion, or anything else? You are not alone. We want to know about your experience, so we can help the next people. Please open a Github issue with your questions, or get in touch directly with us.
+
+### Something in GoCV is not working as you expect
+
+Please open a Github issue with your problem, and we will be happy to assist.
+
+### Something you want/need from OpenCV does not appear to be in GoCV
+
+We probably have not implemented it yet. Please take a look at our [ROADMAP.md](ROADMAP.md). Your pull request adding the functionality to GoCV would be greatly appreciated.
+
+### You found some Python code on the Internet that performs some computer vision task, and you want to do it using GoCV
+
+Please open a Github issue with your needs, and we can see what we can do.
+
+## How to use our Github repository
+
+The `master` branch of this repo will always have the latest released version of GoCV. All of the active development work for the next release will take place in the `dev` branch. GoCV will use semantic versioning and will create a tag/release for each release.
+
+Here is how to contribute back some code or documentation:
+
+- Fork repo
+- Create a feature branch off of the `dev` branch
+- Make some useful change
+- Submit a pull request against the `dev` branch.
+- Be kind
+
+## How to add a function from OpenCV to GoCV
+
+Here are a few basic guidelines on how to add a function from OpenCV to GoCV:
+
+- Please open a Github issue. We want to help, and also make sure that there is no duplication of effort. Sometimes what you need is already being worked on by someone else.
+- Use the proper Go style naming `MissingFunction()` for the Go wrapper.
+- Make any output parameters `Mat*` to indicate to developers that the underlying OpenCV data will be changed by the function.
+- Use Go types as parameters when possible, for example `image.Point`, and then convert to the appropriate OpenCV struct. Also define a new type based on `int` and `const` values instead of just passing "magic numbers" as params; for example, the `VideoCaptureProperties` type used in `videoio.go` (see the sketch after the example below).
+- Always add the function to the GoCV file named the same as the OpenCV module to which the function belongs.
+- If the new function is in a module that is not yet implemented by GoCV, a new set of files for that module will need to be added.
+- Always add a "smoke" test for the new function being added. We are not testing OpenCV itself, but just the GoCV wrapper, so all that is needed generally is just exercising the new function.
+- If OpenCV has any default params for a function, we have been implementing two versions of the function, since Go does not support overloading. For example, with an OpenCV function:
+
+```c
+opencv::xYZ(int p1, int p2, int p3=2, int p4=3);
+```
+
+We would define 2 functions in GoCV:
+
+```go
+// uses default param values
+XYZ(p1, p2)
+
+// sets each param
+XYZWithParams(p1, p2, p3, p4)
+```
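+
+For instance, here is a minimal sketch of the "Go types and typed constants" guidelines above. The `ExampleMode` type and `MissingFunction` wrapper are hypothetical, shown only to illustrate the naming and conversion pattern; they are not part of the actual GoCV API:
+
+```go
+package gocv
+
+import "image"
+
+// ExampleMode is a hypothetical typed constant defined so that callers pass a
+// named value rather than a raw "magic number" to the OpenCV binding.
+type ExampleMode int
+
+const (
+	ExampleModeDefault ExampleMode = iota
+	ExampleModeFast
+)
+
+// MissingFunction is a hypothetical wrapper: it accepts Go types such as
+// image.Point and converts them before calling into the C binding, and it
+// takes dst as *Mat to signal that the underlying data will be modified.
+func MissingFunction(dst *Mat, center image.Point, mode ExampleMode) {
+	// convert center to the C struct and call the OpenCV binding here
+}
+```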
+
+## How to run tests
+
+To run the tests:
+
+```
+go test .
+go test ./contrib/.
+```
+
+If you want to run an individual test, you can provide a RegExp to the `-run` argument:
+```
+go test -run TestMat
+```
+
+If you are using Intel OpenVINO, you can run those tests using:
+
+```
+go test ./openvino/...
+```
+
+## Contributing workflow
+
+This section describes one of many possible workflows you can follow to contribute to `GoCV`. This workflow is based on multiple [git remotes](https://git-scm.com/docs/git-remote) and is by no means the only one you can use to contribute to `GoCV`. However, it's an option that might help you get started quickly without too much hassle, as it lets you work off the `gocv` repo directory path.
+
+Assuming you have already forked the `gocv` repo, you need to add a new `git remote` which will point to your GitHub fork. Notice below that you **must** `cd` to `gocv` repo directory before you add the new `git remote`:
+
+```shell
+cd $GOPATH/src/gocv.io/x/gocv
+git remote add gocv-fork https://github.com/YOUR_GH_HANDLE/gocv.git
+```
+
+Note that in the command above we named our new `git remote` **gocv-fork** for convenience, so we can easily recognize it. You are free to choose any remote name you like.
+
+You should now see your new `git remote` when running the command below:
+
+```shell
+git remote -v
+
+gocv-fork https://github.com/YOUR_GH_HANDLE/gocv.git (fetch)
+gocv-fork https://github.com/YOUR_GH_HANDLE/gocv.git (push)
+origin https://github.com/hybridgroup/gocv (fetch)
+origin https://github.com/hybridgroup/gocv (push)
+```
+
+Before you create a new branch from `dev`, you should fetch the latest commits from the `dev` branch:
+
+```shell
+git fetch origin dev
+```
+
+You want the `dev` branch in your `gocv` fork to be in sync with the `dev` branch of `gocv`, so push the commits you just fetched to your GitHub fork as shown below. Note that the `-f` force switch might not be needed:
+
+```shell
+git push gocv-fork dev -f
+```
+
+Create a new feature branch from `dev`:
+
+```shell
+git checkout -b new-feature
+```
+
+After you've made your changes, you can run the tests using the `make` command listed below. Note that you're still working off the `gocv` project root directory, so running this command does not require complicated `$GOPATH` rewrites:
+
+```shell
+make test
+```
+
+Once the tests have passed, commit your new code to the `new-feature` branch and push it to your fork by running the command below:
+
+```shell
+git push gocv-fork new-feature
+```
+
+You can now open a new PR from the `new-feature` branch in your forked repo against the `dev` branch of `gocv`.
diff --git a/vendor/gocv.io/x/gocv/Dockerfile b/vendor/gocv.io/x/gocv/Dockerfile
new file mode 100644
index 0000000..36851b0
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/Dockerfile
@@ -0,0 +1,60 @@
+FROM ubuntu:16.04 AS opencv
+LABEL maintainer="hybridgroup"
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ git build-essential cmake pkg-config unzip libgtk2.0-dev \
+ curl ca-certificates libcurl4-openssl-dev libssl-dev \
+ libavcodec-dev libavformat-dev libswscale-dev libtbb2 libtbb-dev \
+ libjpeg-dev libpng-dev libtiff-dev libdc1394-22-dev && \
+ rm -rf /var/lib/apt/lists/*
+
+ARG OPENCV_VERSION="4.2.0"
+ENV OPENCV_VERSION $OPENCV_VERSION
+
+RUN curl -Lo opencv.zip https://github.com/opencv/opencv/archive/${OPENCV_VERSION}.zip && \
+ unzip -q opencv.zip && \
+ curl -Lo opencv_contrib.zip https://github.com/opencv/opencv_contrib/archive/${OPENCV_VERSION}.zip && \
+ unzip -q opencv_contrib.zip && \
+ rm opencv.zip opencv_contrib.zip && \
+ cd opencv-${OPENCV_VERSION} && \
+ mkdir build && cd build && \
+ cmake -D CMAKE_BUILD_TYPE=RELEASE \
+ -D CMAKE_INSTALL_PREFIX=/usr/local \
+ -D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib-${OPENCV_VERSION}/modules \
+ -D WITH_JASPER=OFF \
+ -D BUILD_DOCS=OFF \
+ -D BUILD_EXAMPLES=OFF \
+ -D BUILD_TESTS=OFF \
+ -D BUILD_PERF_TESTS=OFF \
+ -D BUILD_opencv_java=NO \
+ -D BUILD_opencv_python=NO \
+ -D BUILD_opencv_python2=NO \
+ -D BUILD_opencv_python3=NO \
+ -D OPENCV_GENERATE_PKGCONFIG=ON .. && \
+ make -j $(nproc --all) && \
+ make preinstall && make install && ldconfig && \
+ cd / && rm -rf opencv*
+
+#################
+# Go + OpenCV #
+#################
+FROM opencv AS gocv
+LABEL maintainer="hybridgroup"
+
+ARG GOVERSION="1.13.5"
+ENV GOVERSION $GOVERSION
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ git software-properties-common && \
+ curl -Lo go${GOVERSION}.linux-amd64.tar.gz https://dl.google.com/go/go${GOVERSION}.linux-amd64.tar.gz && \
+ tar -C /usr/local -xzf go${GOVERSION}.linux-amd64.tar.gz && \
+ rm go${GOVERSION}.linux-amd64.tar.gz && \
+ rm -rf /var/lib/apt/lists/*
+
+ENV GOPATH /go
+ENV PATH $GOPATH/bin:/usr/local/go/bin:$PATH
+
+RUN mkdir -p "$GOPATH/src" "$GOPATH/bin" && chmod -R 777 "$GOPATH"
+WORKDIR $GOPATH
+
+RUN go get -u -d gocv.io/x/gocv && go run ${GOPATH}/src/gocv.io/x/gocv/cmd/version/main.go
diff --git a/vendor/gocv.io/x/gocv/LICENSE.txt b/vendor/gocv.io/x/gocv/LICENSE.txt
new file mode 100644
index 0000000..b68c83e
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright (c) 2017-2019 The Hybrid Group
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/gocv.io/x/gocv/Makefile b/vendor/gocv.io/x/gocv/Makefile
new file mode 100644
index 0000000..33a6140
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/Makefile
@@ -0,0 +1,138 @@
+.ONESHELL:
+.PHONY: test deps download build clean astyle cmds docker
+
+# OpenCV version to use.
+OPENCV_VERSION?=4.2.0
+
+# Go version to use when building Docker image
+GOVERSION?=1.13.1
+
+# Temporary directory to put files into.
+TMP_DIR?=/tmp/
+
+# Package list for each well-known Linux distribution
+RPMS=cmake curl git gtk2-devel libpng-devel libjpeg-devel libtiff-devel tbb tbb-devel libdc1394-devel unzip
+DEBS=unzip build-essential cmake curl git libgtk2.0-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev libdc1394-22-dev
+
+# Detect Linux distribution
+distro_deps=
+ifneq ($(shell which dnf 2>/dev/null),)
+ distro_deps=deps_fedora
+else
+ifneq ($(shell which apt-get 2>/dev/null),)
+ distro_deps=deps_debian
+else
+ifneq ($(shell which yum 2>/dev/null),)
+ distro_deps=deps_rh_centos
+endif
+endif
+endif
+
+# Install all necessary dependencies.
+deps: $(distro_deps)
+
+deps_rh_centos:
+ sudo yum -y install pkgconfig $(RPMS)
+
+deps_fedora:
+ sudo dnf -y install pkgconf-pkg-config $(RPMS)
+
+deps_debian:
+ sudo apt-get -y update
+ sudo apt-get -y install $(DEBS)
+
+
+# Download OpenCV source tarballs.
+download:
+ rm -rf $(TMP_DIR)opencv
+ mkdir $(TMP_DIR)opencv
+ cd $(TMP_DIR)opencv
+ curl -Lo opencv.zip https://github.com/opencv/opencv/archive/$(OPENCV_VERSION).zip
+ unzip -q opencv.zip
+ curl -Lo opencv_contrib.zip https://github.com/opencv/opencv_contrib/archive/$(OPENCV_VERSION).zip
+ unzip -q opencv_contrib.zip
+ rm opencv.zip opencv_contrib.zip
+ cd -
+
+# Build OpenCV.
+build:
+ cd $(TMP_DIR)opencv/opencv-$(OPENCV_VERSION)
+ mkdir build
+ cd build
+ cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D OPENCV_EXTRA_MODULES_PATH=$(TMP_DIR)opencv/opencv_contrib-$(OPENCV_VERSION)/modules -D BUILD_DOCS=OFF -D BUILD_EXAMPLES=OFF -D BUILD_TESTS=OFF -D BUILD_PERF_TESTS=OFF -D BUILD_opencv_java=NO -D BUILD_opencv_python=NO -D BUILD_opencv_python2=NO -D BUILD_opencv_python3=NO -D WITH_JASPER=OFF -DOPENCV_GENERATE_PKGCONFIG=ON ..
+ $(MAKE) -j $(shell nproc --all)
+ $(MAKE) preinstall
+ cd -
+
+# Build OpenCV on Raspbian with ARM hardware optimizations.
+build_raspi:
+ cd $(TMP_DIR)opencv/opencv-$(OPENCV_VERSION)
+ mkdir build
+ cd build
+ cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D OPENCV_EXTRA_MODULES_PATH=$(TMP_DIR)opencv/opencv_contrib-$(OPENCV_VERSION)/modules -D BUILD_DOCS=OFF -D BUILD_EXAMPLES=OFF -D BUILD_TESTS=OFF -D BUILD_PERF_TESTS=OFF -D BUILD_opencv_java=OFF -D BUILD_opencv_python=NO -D BUILD_opencv_python2=NO -D BUILD_opencv_python3=NO -D ENABLE_NEON=ON -D ENABLE_VFPV3=ON -D WITH_JASPER=OFF -D OPENCV_GENERATE_PKGCONFIG=ON ..
+ $(MAKE) -j $(shell nproc --all)
+ $(MAKE) preinstall
+ cd -
+
+# Build OpenCV with non-free contrib modules.
+build_nonfree:
+ cd $(TMP_DIR)opencv/opencv-$(OPENCV_VERSION)
+ mkdir build
+ cd build
+ cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D OPENCV_EXTRA_MODULES_PATH=$(TMP_DIR)opencv/opencv_contrib-$(OPENCV_VERSION)/modules -D BUILD_DOCS=OFF -D BUILD_EXAMPLES=OFF -D BUILD_TESTS=OFF -D BUILD_PERF_TESTS=OFF -D BUILD_opencv_java=NO -D BUILD_opencv_python=NO -D BUILD_opencv_python2=NO -D BUILD_opencv_python3=NO -D WITH_JASPER=OFF -DOPENCV_GENERATE_PKGCONFIG=ON -DOPENCV_ENABLE_NONFREE=ON ..
+ $(MAKE) -j $(shell nproc --all)
+ $(MAKE) preinstall
+ cd -
+
+# Build OpenCV with cuda.
+build_cuda:
+ cd $(TMP_DIR)opencv/opencv-$(OPENCV_VERSION)
+ mkdir build
+ cd build
+ cmake -j $(shell nproc --all) -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D OPENCV_EXTRA_MODULES_PATH=$(TMP_DIR)opencv/opencv_contrib-$(OPENCV_VERSION)/modules -D BUILD_DOCS=OFF -D BUILD_EXAMPLES=OFF -D BUILD_TESTS=OFF -D BUILD_PERF_TESTS=OFF -D BUILD_opencv_java=NO -D BUILD_opencv_python=NO -D BUILD_opencv_python2=NO -D BUILD_opencv_python3=NO -D WITH_JASPER=OFF -DOPENCV_GENERATE_PKGCONFIG=ON -DWITH_CUDA=ON -DENABLE_FAST_MATH=1 -DCUDA_FAST_MATH=1 -DWITH_CUBLAS=1 -DCUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda/ -DBUILD_opencv_cudacodec=OFF ..
+ $(MAKE) -j $(shell nproc --all)
+ $(MAKE) preinstall
+ cd -
+
+# Cleanup temporary build files.
+clean:
+ go clean --cache
+ rm -rf $(TMP_DIR)opencv
+
+# Do everything.
+install: deps download build sudo_install clean verify
+
+# Do everything on Raspbian.
+install_raspi: deps download build_raspi sudo_install clean verify
+
+# Do everything with cuda.
+install_cuda: deps download build_cuda sudo_install clean verify
+
+# Install system wide.
+sudo_install:
+ cd $(TMP_DIR)opencv/opencv-$(OPENCV_VERSION)/build
+ sudo $(MAKE) install
+ sudo ldconfig
+ cd -
+
+# Build a minimal Go app to confirm gocv works.
+verify:
+ go run ./cmd/version/main.go
+
+# Runs tests.
+# This assumes env.sh was already sourced.
+# pvl is not tested here since it requires additional dependencies.
+test:
+ go test . ./contrib
+
+docker:
+ docker build --build-arg OPENCV_VERSION=$(OPENCV_VERSION) --build-arg GOVERSION=$(GOVERSION) .
+
+astyle:
+ astyle --project=.astylerc --recursive *.cpp,*.h
+
+CMDS=basic-drawing caffe-classifier captest capwindow counter faceblur facedetect find-circles hand-gestures img-similarity mjpeg-streamer motion-detect pose saveimage savevideo showimage ssd-facedetect tf-classifier tracking version
+cmds:
+ for cmd in $(CMDS) ; do \
+ go build -o build/$$cmd cmd/$$cmd/main.go ;
+ done ; \
diff --git a/vendor/gocv.io/x/gocv/README.md b/vendor/gocv.io/x/gocv/README.md
new file mode 100644
index 0000000..19ff531
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/README.md
@@ -0,0 +1,559 @@
+# GoCV
+
+[GoCV](http://gocv.io/)
+
+[GoDoc](https://godoc.org/github.com/hybridgroup/gocv)
+[Travis CI](https://travis-ci.org/hybridgroup/gocv)
+[AppVeyor](https://ci.appveyor.com/project/deadprogram/gocv/branch/dev)
+[Codecov](https://codecov.io/gh/hybridgroup/gocv)
+[Go Report Card](https://goreportcard.com/report/github.com/hybridgroup/gocv)
+[License](https://github.com/hybridgroup/gocv/blob/master/LICENSE.txt)
+
+The GoCV package provides Go language bindings for the [OpenCV 4](http://opencv.org/) computer vision library.
+
+The GoCV package supports the latest releases of Go and OpenCV (v4.2.0) on Linux, macOS, and Windows. We intend to make the Go language a "first-class" client compatible with the latest developments in the OpenCV ecosystem.
+
+GoCV also supports [Intel OpenVINO](https://software.intel.com/en-us/openvino-toolkit). Check out the [OpenVINO README](./openvino/README.md) for more info on how to use GoCV with the Intel OpenVINO toolkit.
+
+## How to use
+
+### Hello, video
+
+This example opens a video capture device using device "0", reads frames, and shows the video in a GUI window:
+
+```go
+package main
+
+import (
+ "gocv.io/x/gocv"
+)
+
+func main() {
+ webcam, _ := gocv.OpenVideoCapture(0)
+ window := gocv.NewWindow("Hello")
+ img := gocv.NewMat()
+
+ for {
+ webcam.Read(&img)
+ window.IMShow(img)
+ window.WaitKey(1)
+ }
+}
+```
+
+### Face detect
+
+This is a more complete example that opens a video capture device using device "0". It also uses the CascadeClassifier class to load an external data file containing the classifier data. The program grabs each frame from the video, then uses the classifier to detect faces. If any faces are found, it draws a green rectangle around each one, then displays the video in an output window:
+
+```go
+package main
+
+import (
+ "fmt"
+ "image/color"
+
+ "gocv.io/x/gocv"
+)
+
+func main() {
+ // set to use a video capture device 0
+ deviceID := 0
+
+ // open webcam
+ webcam, err := gocv.OpenVideoCapture(deviceID)
+ if err != nil {
+ fmt.Println(err)
+ return
+ }
+ defer webcam.Close()
+
+ // open display window
+ window := gocv.NewWindow("Face Detect")
+ defer window.Close()
+
+ // prepare image matrix
+ img := gocv.NewMat()
+ defer img.Close()
+
+ // color for the rect when faces detected
+ blue := color.RGBA{0, 0, 255, 0}
+
+ // load classifier to recognize faces
+ classifier := gocv.NewCascadeClassifier()
+ defer classifier.Close()
+
+ if !classifier.Load("data/haarcascade_frontalface_default.xml") {
+ fmt.Println("Error reading cascade file: data/haarcascade_frontalface_default.xml")
+ return
+ }
+
+ fmt.Printf("start reading camera device: %v\n", deviceID)
+ for {
+ if ok := webcam.Read(&img); !ok {
+ fmt.Printf("cannot read device %v\n", deviceID)
+ return
+ }
+ if img.Empty() {
+ continue
+ }
+
+ // detect faces
+ rects := classifier.DetectMultiScale(img)
+ fmt.Printf("found %d faces\n", len(rects))
+
+ // draw a rectangle around each face on the original image
+ for _, r := range rects {
+ gocv.Rectangle(&img, r, blue, 3)
+ }
+
+ // show the image in the window, and wait 1 millisecond
+ window.IMShow(img)
+ window.WaitKey(1)
+ }
+}
+```
+
+### More examples
+
+There are examples in the [cmd directory](./cmd) of this repo in the form of various useful command line utilities, such as [capturing an image file](./cmd/saveimage), [streaming mjpeg video](./cmd/mjpeg-streamer), [counting objects that cross a line](./cmd/counter), and [using OpenCV with Tensorflow for object classification](./cmd/tf-classifier).
+
+## How to install
+
+To install GoCV, run the following command:
+
+```
+go get -u -d gocv.io/x/gocv
+```
+
+To run code that uses the GoCV package, you must also install OpenCV 4.2.0 on your system. Here are instructions for Ubuntu, Raspbian, macOS, and Windows.
+
+## Ubuntu/Linux
+
+### Installation
+
+You can use `make` to install OpenCV 4.2.0 with the handy `Makefile` included with this repo. If you already have installed OpenCV, you do not need to do so again. The installation performed by the `Makefile` is minimal, so it may remove OpenCV options such as Python or Java wrappers if you have already installed OpenCV some other way.
+
+#### Quick Install
+
+The following commands should do everything to download and install OpenCV 4.2.0 on Linux:
+
+ cd $GOPATH/src/gocv.io/x/gocv
+ make install
+
+If it works correctly, at the end of the entire process, the following message should be displayed:
+
+ gocv version: 0.22.0
+ opencv lib version: 4.2.0
+
+That's it, now you are ready to use GoCV.
+
+#### Complete Install
+
+If you have already done the "Quick Install" as described above, you do not need to run any further commands. For the curious, or for custom installations, here are the details for each of the steps that are performed when you run `make install`.
+
+#### Install required packages
+
+First, you need to change the current directory to the location of the GoCV repo, so you can access the `Makefile`:
+
+ cd $GOPATH/src/gocv.io/x/gocv
+
+Next, you need to update the system, and install any required packages:
+
+ make deps
+
+#### Download source
+
+Now, download the OpenCV 4.2.0 and OpenCV Contrib source code:
+
+ make download
+
+#### Build
+
+Build everything. This will take quite a while:
+
+ make build
+
+#### Install
+
+Once the code is built, you are ready to install:
+
+ make sudo_install
+
+### Verifying the installation
+
+To verify your installation you can run one of the included examples.
+
+First, change the current directory to the location of the GoCV repo:
+
+ cd $GOPATH/src/gocv.io/x/gocv
+
+Now you should be able to build or run any of the examples:
+
+ go run ./cmd/version/main.go
+
+The version program should output the following:
+
+ gocv version: 0.22.0
+ opencv lib version: 4.2.0
+
+#### Cleanup extra files
+
+After the installation is complete, you can remove the extra files and folders:
+
+ make clean
+
+### Cache builds
+
+If you are running a version of Go older than v1.10 and not modifying GoCV source, precompile the GoCV package to significantly decrease your build times:
+
+ go install gocv.io/x/gocv
+
+### Custom Environment
+
+By default, pkg-config is used to determine the correct flags for compiling and linking OpenCV. This behavior can be disabled by supplying `-tags customenv` when building/running your application. When building with this tag you will need to supply the CGO environment variables yourself.
+
+For example:
+
+ export CGO_CPPFLAGS="-I/usr/local/include"
+ export CGO_LDFLAGS="-L/usr/local/lib -lopencv_core -lopencv_face -lopencv_videoio -lopencv_imgproc -lopencv_highgui -lopencv_imgcodecs -lopencv_objdetect -lopencv_features2d -lopencv_video -lopencv_dnn -lopencv_xfeatures2d"
+
+Please note that you will need to run these 2 lines one time in your current session to set up the needed environment variables before building or running the code. Once you have done so, you can execute code that uses GoCV with your custom environment like this:
+
+ go run -tags customenv ./cmd/version/main.go
+
+### Docker
+
+The project now provides a `Dockerfile` which lets you build a [GoCV](https://gocv.io/) Docker image that you can then use to build and run `GoCV` applications in Docker containers. The `Makefile` contains a `docker` target which lets you build the Docker image with a single command:
+
+```
+make docker
+```
+
+By default, the Docker image built by running the command above ships [Go](https://golang.org/) version `1.13.5`, but if you would like to build an image that uses a different version of `Go`, you can override the default value when running the target command:
+
+```
+make docker GOVERSION='1.13.5'
+```
+
+#### Running GUI programs in Docker on macOS
+
+Sometimes your `GoCV` programs create graphical interfaces such as windows, e.g. when you use the `gocv.Window` type to display an image or video stream. Running programs which create graphical interfaces in a Docker container on macOS is unfortunately a bit elaborate, but not impossible. First you need to satisfy the following prerequisites:
+* install [xquartz](https://www.xquartz.org/). You can also install xquartz using [homebrew](https://brew.sh/) by running `brew cask install xquartz`
+* install [socat](https://linux.die.net/man/1/socat) `brew install socat`
+
+Note, you will have to log out and log back in to your machine once you have installed `xquartz`. This is so the X window system is reloaded.
+
+Once you have installed all the prerequisites, you need to allow connections from network clients to `xquartz`. Here is how you do that. First, run the following command to open `xquartz` so you can configure it:
+
+```shell
+open -a xquartz
+```
+Click on the *Security* tab in the preferences and check the "Allow connections" box:
+
+Next, you need to create a TCP proxy using `socat` which will stream [X Window](https://en.wikipedia.org/wiki/X_Window_System) data into `xquartz`. Before you start the proxy, you need to make sure that there is no process listening on port `6000`. The following command should **not** return any results:
+
+```shell
+lsof -i TCP:6000
+```
+Now you can start a local proxy which will forward the X Window traffic to `xquartz`, which acts as your local X server:
+
+```shell
+socat TCP-LISTEN:6000,reuseaddr,fork UNIX-CLIENT:\"$DISPLAY\"
+```
+
+You are now finally ready to run your `GoCV` GUI programs in Docker containers. In order to make everything work you must set the `DISPLAY` environment variable as shown in the sample command below:
+
+```shell
+docker run -it --rm -e DISPLAY=docker.for.mac.host.internal:0 your-gocv-app
+```
+
+**Note: since Docker for macOS does not provide any video device support, you won't be able to run GoCV apps which require a camera.**
+
+### Alpine 3.7 Docker image
+
+There is a Docker image with Alpine 3.7 that has been created by project contributor [@denismakogon](https://github.com/denismakogon). You can find it located at [https://github.com/denismakogon/gocv-alpine](https://github.com/denismakogon/gocv-alpine).
+
+## Raspbian
+
+### Installation
+
+We have a special installation for the Raspberry Pi that includes some hardware optimizations. You can use `make` to install OpenCV 4.2.0 with the handy `Makefile` included with this repo. If you have already installed OpenCV, you do not need to do so again. The installation performed by the `Makefile` is minimal, so it may remove OpenCV options such as Python or Java wrappers if you have already installed OpenCV some other way.
+
+#### Quick Install
+
+The following commands should do everything to download and install OpenCV 4.2.0 on Raspbian:
+
+ cd $GOPATH/src/gocv.io/x/gocv
+ make install_raspi
+
+If it works correctly, at the end of the entire process, the following message should be displayed:
+
+ gocv version: 0.22.0
+ opencv lib version: 4.2.0
+
+That's it, now you are ready to use GoCV.
+
+## macOS
+
+### Installation
+
+You can install OpenCV 4.2.0 using Homebrew.
+
+If you already have an earlier version of OpenCV (3.4.x) installed, you should probably remove it before installing the new version:
+
+ brew uninstall opencv
+
+You can then install OpenCV 4.2.0:
+
+ brew install opencv
+
+### pkgconfig Installation
+pkg-config is used to determine the correct flags for compiling and linking OpenCV.
+You can install it by using Homebrew:
+
+ brew install pkgconfig
+
+### Verifying the installation
+
+To verify your installation you can run one of the included examples.
+
+First, change the current directory to the location of the GoCV repo:
+
+ cd $GOPATH/src/gocv.io/x/gocv
+
+Now you should be able to build or run any of the examples:
+
+ go run ./cmd/version/main.go
+
+The version program should output the following:
+
+ gocv version: 0.22.0
+ opencv lib version: 4.2.0
+
+### Cache builds
+
+If you are running a version of Go older than v1.10 and not modifying GoCV source, precompile the GoCV package to significantly decrease your build times:
+
+ go install gocv.io/x/gocv
+
+### Custom Environment
+
+By default, pkg-config is used to determine the correct flags for compiling and linking OpenCV. This behavior can be disabled by supplying `-tags customenv` when building/running your application. When building with this tag you will need to supply the CGO environment variables yourself.
+
+For example:
+
+ export CGO_CXXFLAGS="--std=c++11"
+ export CGO_CPPFLAGS="-I/usr/local/Cellar/opencv/4.2.0/include"
+ export CGO_LDFLAGS="-L/usr/local/Cellar/opencv/4.2.0/lib -lopencv_stitching -lopencv_superres -lopencv_videostab -lopencv_aruco -lopencv_bgsegm -lopencv_bioinspired -lopencv_ccalib -lopencv_dnn_objdetect -lopencv_dpm -lopencv_face -lopencv_photo -lopencv_fuzzy -lopencv_hfs -lopencv_img_hash -lopencv_line_descriptor -lopencv_optflow -lopencv_reg -lopencv_rgbd -lopencv_saliency -lopencv_stereo -lopencv_structured_light -lopencv_phase_unwrapping -lopencv_surface_matching -lopencv_tracking -lopencv_datasets -lopencv_dnn -lopencv_plot -lopencv_xfeatures2d -lopencv_shape -lopencv_video -lopencv_ml -lopencv_ximgproc -lopencv_calib3d -lopencv_features2d -lopencv_highgui -lopencv_videoio -lopencv_flann -lopencv_xobjdetect -lopencv_imgcodecs -lopencv_objdetect -lopencv_xphoto -lopencv_imgproc -lopencv_core"
+
+Please note that you will need to run these 3 lines one time in your current session to set up the needed environment variables before building or running the code. Once you have done so, you can execute code that uses GoCV with your custom environment like this:
+
+ go run -tags customenv ./cmd/version/main.go
+
+## Windows
+
+### Installation
+
+The following assumes that you are running a 64-bit version of Windows 10.
+
+In order to build and install OpenCV 4.2.0 on Windows, you must first download and install MinGW-W64 and CMake, as follows.
+
+#### MinGW-W64
+
+Download and run the MinGW-W64 compiler installer from [https://sourceforge.net/projects/mingw-w64/?source=typ_redirect](https://sourceforge.net/projects/mingw-w64/?source=typ_redirect).
+
+The latest version of the MinGW-W64 toolchain is `7.3.0`, but any version from `7.X` on should work.
+
+Choose the options for "posix" threads, and for "seh" exceptions handling, then install to the default location `c:\Program Files\mingw-w64\x86_64-7.3.0-posix-seh-rt_v5-rev2`.
+
+Add the `C:\Program Files\mingw-w64\x86_64-7.3.0-posix-seh-rt_v5-rev2\mingw64\bin` path to your System Path.
+
+#### CMake
+
+Download and install CMake from [https://cmake.org/download/](https://cmake.org/download/) to the default location. The CMake installer will add CMake to your system path.
+
+#### OpenCV 4.2.0 and OpenCV Contrib Modules
+
+The following commands should do everything to download and install OpenCV 4.2.0 on Windows:
+
+ chdir %GOPATH%\src\gocv.io\x\gocv
+ win_build_opencv.cmd
+
+It might take up to one hour.
+
+Last, add `C:\opencv\build\install\x64\mingw\bin` to your System Path.
+
+### Verifying the installation
+
+Change the current directory to the location of the GoCV repo:
+
+ chdir %GOPATH%\src\gocv.io\x\gocv
+
+Now you should be able to build or run any of the command examples:
+
+ go run cmd\version\main.go
+
+The version program should output the following:
+
+ gocv version: 0.22.0
+ opencv lib version: 4.2.0
+
+That's it, now you are ready to use GoCV.
+
+### Cache builds
+
+If you are running a version of Go older than v1.10 and not modifying GoCV source, precompile the GoCV package to significantly decrease your build times:
+
+ go install gocv.io/x/gocv
+
+### Custom Environment
+
+By default, OpenCV is expected to be in `C:\opencv\build\install\include`. This behavior can be disabled by supplying `-tags customenv` when building/running your application. When building with this tag you will need to supply the CGO environment variables yourself.
+
+Due to the way OpenCV produces DLLs, including the version in the name, using this method is required if you're using a different version of OpenCV.
+
+For example:
+
+ set CGO_CXXFLAGS="--std=c++11"
+ set CGO_CPPFLAGS=-IC:\opencv\build\install\include
+    set CGO_LDFLAGS=-LC:\opencv\build\install\x64\mingw\lib -lopencv_core420 -lopencv_face420 -lopencv_videoio420 -lopencv_imgproc420 -lopencv_highgui420 -lopencv_imgcodecs420 -lopencv_objdetect420 -lopencv_features2d420 -lopencv_video420 -lopencv_dnn420 -lopencv_xfeatures2d420 -lopencv_plot420 -lopencv_tracking420 -lopencv_img_hash420
+
+Please note that you will need to run these 3 lines one time in your current session to set up the needed environment variables before building or running the code. Once you have done so, you can execute code that uses GoCV with your custom environment like this:
+
+ go run -tags customenv cmd\version\main.go
+
+## Android
+
+There is some work in progress for running GoCV on Android using Gomobile. For information on how to install OpenCV/GoCV for Android, please see:
+https://gist.github.com/ogero/c19458cf64bd3e91faae85c3ac887481
+
+See original discussion here:
+https://github.com/hybridgroup/gocv/issues/235
+
+## Profiling
+
+Since memory allocations for images in GoCV are done through C based code, the Go garbage collector will not clean all resources associated with a `Mat`. As a result, any `Mat` created *must* be closed to avoid memory leaks.
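+
+For example, a minimal sketch of this pattern (using a hypothetical `process` function) pairs every allocation with a deferred close:
+
+```go
+func process() {
+	img := gocv.NewMat()
+	defer img.Close() // every Mat allocated through GoCV must be closed explicitly
+
+	// ... use img ...
+}
+```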
+
+To ease the detection and repair of the resource leaks, GoCV provides a `Mat` profiler that records when each `Mat` is created and closed. Each time a `Mat` is allocated, the stack trace is added to the profile. When it is closed, the stack trace is removed. See the [runtime/pprof documentation](https://golang.org/pkg/runtime/pprof/#Profile).
+
+In order to include the MatProfile custom profiler, you MUST build or run your application or tests using the `-tags matprofile` build tag. For example:
+
+ go run -tags matprofile cmd/version/main.go
+
+You can get the profile's count at any time using:
+
+```go
+gocv.MatProfile.Count()
+```
+
+You can display the current entries (the stack traces) with:
+
+```go
+var b bytes.Buffer
+gocv.MatProfile.WriteTo(&b, 1)
+fmt.Print(b.String())
+```
+
+This can be very helpful to track down a leak. For example, suppose you have
+the following nonsense program:
+
+```go
+package main
+
+import (
+ "bytes"
+ "fmt"
+
+ "gocv.io/x/gocv"
+)
+
+func leak() {
+ gocv.NewMat()
+}
+
+func main() {
+ fmt.Printf("initial MatProfile count: %v\n", gocv.MatProfile.Count())
+ leak()
+
+ fmt.Printf("final MatProfile count: %v\n", gocv.MatProfile.Count())
+ var b bytes.Buffer
+ gocv.MatProfile.WriteTo(&b, 1)
+ fmt.Print(b.String())
+}
+```
+
+Running this program produces the following output:
+
+```
+initial MatProfile count: 0
+final MatProfile count: 1
+gocv.io/x/gocv.Mat profile: total 1
+1 @ 0x40b936c 0x40b93b7 0x40b94e2 0x40b95af 0x402cd87 0x40558e1
+# 0x40b936b gocv.io/x/gocv.newMat+0x4b /go/src/gocv.io/x/gocv/core.go:153
+# 0x40b93b6 gocv.io/x/gocv.NewMat+0x26 /go/src/gocv.io/x/gocv/core.go:159
+# 0x40b94e1 main.leak+0x21 /go/src/github.com/dougnd/gocvprofexample/main.go:11
+# 0x40b95ae main.main+0xae /go/src/github.com/dougnd/gocvprofexample/main.go:16
+# 0x402cd86 runtime.main+0x206 /usr/local/Cellar/go/1.11.1/libexec/src/runtime/proc.go:201
+```
+
+We can see that this program would leak memory. As it exited, it had one `Mat` that was never closed. The stack trace points to exactly which line the allocation happened on (line 11, the `gocv.NewMat()`).
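+
+A minimal sketch of the corresponding fix, assuming the `Mat` is only needed inside `leak()`, closes it before the function returns:
+
+```go
+func leak() {
+	m := gocv.NewMat()
+	defer m.Close() // closing the Mat removes its entry from MatProfile
+}
+```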
+
+Furthermore, if the program is a long running process or if GoCV is being used on a web server, it may be helpful to install the HTTP interface (see the [net/http/pprof documentation](https://golang.org/pkg/net/http/pprof/)). For example:
+
+```go
+package main
+
+import (
+ "net/http"
+ _ "net/http/pprof"
+ "time"
+
+ "gocv.io/x/gocv"
+)
+
+func leak() {
+ gocv.NewMat()
+}
+
+func main() {
+ go func() {
+ ticker := time.NewTicker(time.Second)
+ for {
+ <-ticker.C
+ leak()
+ }
+ }()
+
+ http.ListenAndServe("localhost:6060", nil)
+}
+
+```
+
+This will leak a `Mat` once per second. You can see the current profile count and stack traces by going to the installed HTTP debug interface: [http://localhost:6060/debug/pprof/gocv.io/x/gocv.Mat](http://localhost:6060/debug/pprof/gocv.io/x/gocv.Mat?debug=1).
+
+
+## How to contribute
+
+Please take a look at our [CONTRIBUTING.md](./CONTRIBUTING.md) document to understand our contribution guidelines.
+
+Then check out our [ROADMAP.md](./ROADMAP.md) document to know what to work on next.
+
+## Why this project exists
+
+The [https://github.com/go-opencv/go-opencv](https://github.com/go-opencv/go-opencv) package for Go and OpenCV does not support any version above OpenCV 2.x, and work on adding support for OpenCV 3 had stalled for over a year, mostly due to the complexity of [SWIG](http://swig.org/). That is why we started this project.
+
+The GoCV package uses a C-style wrapper around the OpenCV 4 C++ classes to avoid having to deal with applying SWIG to a huge existing codebase. The mappings are intended to match as closely as possible to the original OpenCV project structure, to make it easier to find things, and to be able to figure out where to add support to GoCV for additional OpenCV image filters, algorithms, and other features.
+
+For example, the [OpenCV `videoio` module](https://github.com/opencv/opencv/tree/master/modules/videoio) wrappers can be found in the GoCV package in the `videoio.*` files.
+
+This package was inspired by the original https://github.com/go-opencv/go-opencv project, the blog post https://medium.com/@peterleyssens/using-opencv-3-from-golang-5510c312a3c and the repo at https://github.com/sensorbee/opencv. Thank you all!
+
+## License
+
+Licensed under the Apache 2.0 license. Copyright (c) 2017-2019 The Hybrid Group.
+
+Logo generated by GopherizeMe - https://gopherize.me
diff --git a/vendor/gocv.io/x/gocv/ROADMAP.md b/vendor/gocv.io/x/gocv/ROADMAP.md
new file mode 100644
index 0000000..b917a34
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/ROADMAP.md
@@ -0,0 +1,262 @@
+# Roadmap
+
+This is a list of all of the functionality areas within OpenCV and OpenCV Contrib.
+
+Any section listed with an "X" means that all of the relevant OpenCV functionality has been wrapped for use within GoCV.
+
+Any section listed with **WORK STARTED** indicates that some work has been done, but not all functionality in that module has been completed. Any functions listed under a section marked **WORK STARTED** still require a wrapper to be implemented.
+
+Any section that is simply listed indicates that, so far, no work has been done on that module.
+
+Your pull requests will be greatly appreciated!
+
+## Modules list
+
+- [ ] **core. Core functionality - WORK STARTED**
+ - [ ] **Basic structures - WORK STARTED**
+ - [ ] **Operations on arrays - WORK STARTED**. The following functions still need implementation:
+ - [ ] [Mahalanobis](https://docs.opencv.org/master/d2/de8/group__core__array.html#ga4493aee129179459cbfc6064f051aa7d)
+ - [ ] [mixChannels](https://docs.opencv.org/master/d2/de8/group__core__array.html#ga51d768c270a1cdd3497255017c4504be)
+ - [ ] [mulTransposed](https://docs.opencv.org/master/d2/de8/group__core__array.html#gadc4e49f8f7a155044e3be1b9e3b270ab)
+ - [ ] [PCABackProject](https://docs.opencv.org/master/d2/de8/group__core__array.html#gab26049f30ee8e94f7d69d82c124faafc)
+ - [ ] [PCACompute](https://docs.opencv.org/master/d2/de8/group__core__array.html#ga4e2073c7311f292a0648f04c37b73781)
+ - [ ] [PCAProject](https://docs.opencv.org/master/d2/de8/group__core__array.html#ga6b9fbc7b3a99ebfd441bbec0a6bc4f88)
+ - [ ] [PSNR](https://docs.opencv.org/master/d2/de8/group__core__array.html#ga07aaf34ae31d226b1b847d8bcff3698f)
+ - [ ] [randn](https://docs.opencv.org/master/d2/de8/group__core__array.html#gaeff1f61e972d133a04ce3a5f81cf6808)
+ - [ ] [randShuffle](https://docs.opencv.org/master/d2/de8/group__core__array.html#ga6a789c8a5cb56c6dd62506179808f763)
+ - [ ] [randu](https://docs.opencv.org/master/d2/de8/group__core__array.html#ga1ba1026dca0807b27057ba6a49d258c0)
+ - [x] [setIdentity](https://docs.opencv.org/master/d2/de8/group__core__array.html#ga388d7575224a4a277ceb98ccaa327c99)
+ - [ ] [setRNGSeed](https://docs.opencv.org/master/d2/de8/group__core__array.html#ga757e657c037410d9e19e819569e7de0f)
+ - [ ] [SVBackSubst](https://docs.opencv.org/master/d2/de8/group__core__array.html#gab4e620e6fc6c8a27bb2be3d50a840c0b)
+ - [ ] [SVDecomp](https://docs.opencv.org/master/d2/de8/group__core__array.html#gab477b5b7b39b370bb03e75b19d2d5109)
+ - [ ] [theRNG](https://docs.opencv.org/master/d2/de8/group__core__array.html#ga75843061d150ad6564b5447e38e57722)
+ - [ ] XML/YAML Persistence
+ - [ ] **Clustering - WORK STARTED**. The following functions still need implementation:
+ - [ ] [partition](https://docs.opencv.org/master/d5/d38/group__core__cluster.html#ga2037c989e69b499c1aa271419f3a9b34)
+
+ - [ ] Utility and system functions and macros
+ - [ ] OpenGL interoperability
+ - [ ] Intel IPP Asynchronous C/C++ Converters
+ - [ ] Optimization Algorithms
+ - [ ] OpenCL support
+
+- [ ] **imgproc. Image processing - WORK STARTED**
+ - [ ] **Image Filtering - WORK STARTED** The following functions still need implementation:
+ - [ ] [buildPyramid](https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#gacfdda2bc1ac55e96de7e9f0bce7238c0)
+ - [ ] [getDerivKernels](https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#ga6d6c23f7bd3f5836c31cfae994fc4aea)
+ - [ ] [getGaborKernel](https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#gae84c92d248183bd92fa713ce51cc3599)
+ - [ ] [getGaussianKernel](https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#gac05a120c1ae92a6060dd0db190a61afa)
+ - [ ] [morphologyExWithParams](https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#ga67493776e3ad1a3df63883829375201f)
+ - [ ] [pyrMeanShiftFiltering](https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#ga9fabdce9543bd602445f5db3827e4cc0)
+
+ - [ ] **Geometric Image Transformations - WORK STARTED** The following functions still need implementation:
+ - [ ] [convertMaps](https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#ga9156732fa8f01be9ebd1a194f2728b7f)
+ - [ ] [getAffineTransform](https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#ga8f6d378f9f8eebb5cb55cd3ae295a999)
+ - [ ] [getDefaultNewCameraMatrix](https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#ga744529385e88ef7bc841cbe04b35bfbf)
+ - [X] [getRectSubPix](https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#ga77576d06075c1a4b6ba1a608850cd614)
+ - [ ] [initUndistortRectifyMap](https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#ga7dfb72c9cf9780a347fbe3d1c47e5d5a)
+ - [ ] [initWideAngleProjMap](https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#gaceb049ec48898d1dadd5b50c604429c8)
+ - [ ] [undistort](https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#ga69f2545a8b62a6b0fc2ee060dc30559d)
+ - [ ] [undistortPoints](https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#ga55c716492470bfe86b0ee9bf3a1f0f7e)
+
+ - [ ] **Miscellaneous Image Transformations - WORK STARTED** The following functions still need implementation:
+ - [ ] [cvtColorTwoPlane](https://docs.opencv.org/master/d7/d1b/group__imgproc__misc.html#ga8e873314e72a1a6c0252375538fbf753)
+ - [ ] [floodFill](https://docs.opencv.org/master/d7/d1b/group__imgproc__misc.html#gaf1f55a048f8a45bc3383586e80b1f0d0)
+
+ - [ ] **Drawing Functions - WORK STARTED** The following functions still need implementation:
+ - [X] [clipLine](https://docs.opencv.org/master/d6/d6e/group__imgproc__draw.html#gaf483cb46ad6b049bc35ec67052ef1c2c)
+ - [ ] [drawMarker](https://docs.opencv.org/master/d6/d6e/group__imgproc__draw.html#ga482fa7b0f578fcdd8a174904592a6250)
+ - [ ] [ellipse2Poly](https://docs.opencv.org/master/d6/d6e/group__imgproc__draw.html#ga727a72a3f6a625a2ae035f957c61051f)
+ - [ ] [fillConvexPoly](https://docs.opencv.org/master/d6/d6e/group__imgproc__draw.html#ga906aae1606ea4ed2f27bec1537f6c5c2)
+ - [ ] [getFontScaleFromHeight](https://docs.opencv.org/master/d6/d6e/group__imgproc__draw.html#ga442ff925c1a957794a1309e0ed3ba2c3)
+ - [ ] [polylines](https://docs.opencv.org/master/d6/d6e/group__imgproc__draw.html#ga444cb8a2666320f47f09d5af08d91ffb)
+
+ - [ ] ColorMaps in OpenCV
+ - [ ] Planar Subdivision
+ - [ ] **Histograms - WORK STARTED** The following functions still need implementation:
+ - [ ] [EMD](https://docs.opencv.org/master/d6/dc7/group__imgproc__hist.html#ga902b8e60cc7075c8947345489221e0e0)
+ - [ ] [wrapperEMD](https://docs.opencv.org/master/d6/dc7/group__imgproc__hist.html#ga31fdda0864e64ca6b9de252a2611758b)
+
+ - [ ] **Structural Analysis and Shape Descriptors - WORK STARTED** The following functions still need implementation:
+ - [ ] [fitEllipse](https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#gaf259efaad93098103d6c27b9e4900ffa)
+ - [ ] [fitEllipseAMS](https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#ga69e90cda55c4e192a8caa0b99c3e4550)
+ - [ ] [fitEllipseDirect](https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#ga6421884fd411923a74891998bbe9e813)
+ - [ ] [HuMoments](https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#gab001db45c1f1af6cbdbe64df04c4e944)
+ - [ ] [intersectConvexConvex](https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#ga8e840f3f3695613d32c052bec89e782c)
+ - [ ] [isContourConvex](https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#ga8abf8010377b58cbc16db6734d92941b)
+ - [ ] [matchShapes](https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#gaadc90cb16e2362c9bd6e7363e6e4c317)
+ - [ ] [minEnclosingTriangle](https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#ga1513e72f6bbdfc370563664f71e0542f)
+ - [ ] [pointPolygonTest](https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#ga1a539e8db2135af2566103705d7a5722)
+ - [ ] [rotatedRectangleIntersection](https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#ga8740e7645628c59d238b0b22c2abe2d4)
+
+ - [ ] Motion Analysis and Object Tracking
+ - [ ] **Feature Detection - WORK STARTED** The following functions still need implementation:
+ - [ ] [cornerEigenValsAndVecs](https://docs.opencv.org/master/dd/d1a/group__imgproc__feature.html#ga4055896d9ef77dd3cacf2c5f60e13f1c)
+ - [ ] [cornerHarris](https://docs.opencv.org/master/dd/d1a/group__imgproc__feature.html#gac1fc3598018010880e370e2f709b4345)
+ - [ ] [cornerMinEigenVal](https://docs.opencv.org/master/dd/d1a/group__imgproc__feature.html#ga3dbce297c1feb859ee36707e1003e0a8)
+ - [ ] [createLineSegmentDetector](https://docs.opencv.org/master/dd/d1a/group__imgproc__feature.html#ga6b2ad2353c337c42551b521a73eeae7d)
+ - [ ] [preCornerDetect](https://docs.opencv.org/master/dd/d1a/group__imgproc__feature.html#gaa819f39b5c994871774081803ae22586)
+
+ - [X] **Object Detection**
+
+- [X] **imgcodecs. Image file reading and writing.**
+- [X] **videoio. Video I/O**
+- [X] **highgui. High-level GUI**
+- [ ] **video. Video Analysis - WORK STARTED**
+ - [X] **Motion Analysis**
+ - [ ] **Object Tracking - WORK STARTED** The following functions still need implementation:
+ - [ ] [buildOpticalFlowPyramid](https://docs.opencv.org/master/dc/d6b/group__video__track.html#ga86640c1c470f87b2660c096d2b22b2ce)
+ - [ ] [estimateRigidTransform](https://docs.opencv.org/master/dc/d6b/group__video__track.html#ga762cbe5efd52cf078950196f3c616d48)
+ - [ ] [findTransformECC](https://docs.opencv.org/master/dc/d6b/group__video__track.html#ga7ded46f9a55c0364c92ccd2019d43e3a)
+ - [ ] [meanShift](https://docs.opencv.org/master/dc/d6b/group__video__track.html#ga7ded46f9a55c0364c92ccd2019d43e3a)
+ - [ ] [CamShift](https://docs.opencv.org/master/dc/d6b/group__video__track.html#gaef2bd39c8356f423124f1fe7c44d54a1)
+ - [ ] [DualTVL1OpticalFlow](https://docs.opencv.org/master/dc/d47/classcv_1_1DualTVL1OpticalFlow.html)
+ - [ ] [FarnebackOpticalFlow](https://docs.opencv.org/master/de/d9e/classcv_1_1FarnebackOpticalFlow.html)
+ - [ ] [KalmanFilter](https://docs.opencv.org/master/dd/d6a/classcv_1_1KalmanFilter.html)
+ - [ ] [SparsePyrLKOpticalFlow](https://docs.opencv.org/master/d7/d08/classcv_1_1SparsePyrLKOpticalFlow.html)
+
+- [ ] **calib3d. Camera Calibration and 3D Reconstruction - WORK STARTED**. The following functions still need implementation:
+ - [ ] **Camera Calibration - WORK STARTED** The following functions still need implementation:
+ - [ ] [calibrateCamera](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [calibrateCameraRO](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [calibrateHandEye](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [calibrationMatrixValues](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [checkChessboard](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [composeRT](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [computeCorrespondEpilines](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [convertPointsFromHomogeneous](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [convertPointsHomogeneous](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [convertPointsToHomogeneous](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [correctMatches](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [decomposeEssentialMat](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [decomposeHomographyMat](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [decomposeProjectionMatrix](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [drawChessboardCorners](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [drawFrameAxes](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [estimateAffine2D](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [estimateAffine3D](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [estimateAffinePartial2D](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [filterHomographyDecompByVisibleRefpoints](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [filterSpeckles](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [find4QuadCornerSubpix](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [findChessboardCorners](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [findChessboardCornersSB](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [findCirclesGrid](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [findEssentialMat](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [findFundamentalMat](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [findHomography](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [getDefaultNewCameraMatrix](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [getOptimalNewCameraMatrix](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [getValidDisparityROI](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [initCameraMatrix2D](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [initUndistortRectifyMap](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [initWideAngleProjMap](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [matMulDeriv](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [projectPoints](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [recoverPose](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [rectify3Collinear](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [reprojectImageTo3D](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [Rodrigues](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [RQDecomp3x3](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [sampsonDistance](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [solveP3P](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [solvePnP](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [solvePnPGeneric](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [solvePnPRansac](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [solvePnPRefineLM](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [solvePnPRefineVVS](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [stereoCalibrate](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [stereoRectify](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [stereoRectifyUncalibrated](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [triangulatePoints](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [x] [undistort](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [undistortPoints](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+ - [ ] [validateDisparity](https://docs.opencv.org/master/d9/d0c/group__calib3d.html)
+
+ - [ ] **Fisheye - WORK STARTED** The following functions still need implementation:
+ - [ ] [calibrate](https://docs.opencv.org/master/db/d58/group__calib3d__fisheye.html#gad626a78de2b1dae7489e152a5a5a89e1)
+ - [ ] [distortPoints](https://docs.opencv.org/master/db/d58/group__calib3d__fisheye.html#ga75d8877a98e38d0b29b6892c5f8d7765)
+ - [ ] [estimateNewCameraMatrixForUndistortRectify](https://docs.opencv.org/master/db/d58/group__calib3d__fisheye.html#ga384940fdf04c03e362e94b6eb9b673c9)
+ - [ ] [projectPoints](https://docs.opencv.org/master/db/d58/group__calib3d__fisheye.html#gab1ad1dc30c42ee1a50ce570019baf2c4)
+ - [ ] [stereoCalibrate](https://docs.opencv.org/master/db/d58/group__calib3d__fisheye.html#gadbb3a6ca6429528ef302c784df47949b)
+ - [ ] [stereoRectify](https://docs.opencv.org/master/db/d58/group__calib3d__fisheye.html#gac1af58774006689056b0f2ef1db55ecc)
+ - [ ] [undistortPoints](https://docs.opencv.org/master/db/d58/group__calib3d__fisheye.html#gab738cdf90ceee97b2b52b0d0e7511541)
+
+- [ ] **features2d. 2D Features Framework - WORK STARTED**
+ - [X] **Feature Detection and Description**
+ - [ ] **Descriptor Matchers - WORK STARTED** The following functions still need implementation:
+ - [ ] [FlannBasedMatcher](https://docs.opencv.org/master/dc/de2/classcv_1_1FlannBasedMatcher.html)
+ - [ ] **Drawing Function of Keypoints and Matches - WORK STARTED** The following function still needs implementation:
+ - [ ] [drawMatches](https://docs.opencv.org/master/d4/d5d/group__features2d__draw.html#ga7421b3941617d7267e3f2311582f49e1)
+ - [ ] Object Categorization
+ - [ ] [BOWImgDescriptorExtractor](https://docs.opencv.org/master/d2/d6b/classcv_1_1BOWImgDescriptorExtractor.html)
+ - [ ] [BOWKMeansTrainer](https://docs.opencv.org/master/d4/d72/classcv_1_1BOWKMeansTrainer.html)
+
+- [X] **objdetect. Object Detection**
+- [ ] **dnn. Deep Neural Network module - WORK STARTED** The following functions still need implementation:
+ - [ ] [NMSBoxes](https://docs.opencv.org/master/d6/d0f/group__dnn.html#ga9d118d70a1659af729d01b10233213ee)
+
+- [ ] ml. Machine Learning
+- [ ] flann. Clustering and Search in Multi-Dimensional Spaces
+- [ ] photo. Computational Photography
+- [ ] stitching. Images stitching
+- [ ] cudaarithm. Operations on Matrices
+- [ ] cudabgsegm. Background Segmentation
+- [ ] cudacodec. Video Encoding/Decoding
+- [ ] cudafeatures2d. Feature Detection and Description
+- [ ] cudafilters. Image Filtering
+- [ ] cudaimgproc. Image Processing
+- [ ] cudalegacy. Legacy support
+- [ ] cudaobjdetect. Object Detection
+- [ ] **cudaoptflow. Optical Flow - WORK STARTED**
+ - [ ] [BroxOpticalFlow](https://docs.opencv.org/master/d7/d18/classcv_1_1cuda_1_1BroxOpticalFlow.html)
+ - [ ] [DenseOpticalFlow](https://docs.opencv.org/master/d6/d4a/classcv_1_1cuda_1_1DenseOpticalFlow.html)
+ - [ ] [DensePyrLKOpticalFlow](https://docs.opencv.org/master/d0/da4/classcv_1_1cuda_1_1DensePyrLKOpticalFlow.html)
+ - [ ] [FarnebackOpticalFlow](https://docs.opencv.org/master/d9/d30/classcv_1_1cuda_1_1FarnebackOpticalFlow.html)
+ - [ ] [NvidiaHWOpticalFlow](https://docs.opencv.org/master/d5/d26/classcv_1_1cuda_1_1NvidiaHWOpticalFlow.html)
+ - [ ] [NvidiaOpticalFlow_1_0](https://docs.opencv.org/master/dc/d9d/classcv_1_1cuda_1_1NvidiaOpticalFlow__1__0.html)
+ - [ ] [SparseOpticalFlow](https://docs.opencv.org/master/d5/dcf/classcv_1_1cuda_1_1SparseOpticalFlow.html)
+ - [ ] **[SparsePyrLKOpticalFlow](https://docs.opencv.org/master/d7/d05/classcv_1_1cuda_1_1SparsePyrLKOpticalFlow.html) - WORK STARTED**
+
+- [ ] cudastereo. Stereo Correspondence
+- [X] **cudawarping. Image Warping**
+- [ ] cudev. Device layer
+- [ ] shape. Shape Distance and Matching
+- [ ] superres. Super Resolution
+- [ ] videostab. Video Stabilization
+- [ ] viz. 3D Visualizer
+
+## Contrib modules list
+
+- [ ] aruco. ArUco Marker Detection
+- [X] **bgsegm. Improved Background-Foreground Segmentation Methods - WORK STARTED**
+- [ ] bioinspired. Biologically inspired vision models and derived tools
+- [ ] ccalib. Custom Calibration Pattern for 3D reconstruction
+- [ ] cnn_3dobj. 3D object recognition and pose estimation API
+- [ ] cvv. GUI for Interactive Visual Debugging of Computer Vision Programs
+- [ ] datasets. Framework for working with different datasets
+- [ ] dnn_modern. Deep Learning Modern Module
+- [ ] dpm. Deformable Part-based Models
+- [ ] **face. Face Recognition - WORK STARTED**
+- [ ] freetype. Drawing UTF-8 strings with freetype/harfbuzz
+- [ ] fuzzy. Image processing based on fuzzy mathematics
+- [ ] hdf. Hierarchical Data Format I/O routines
+- [X] **img_hash. The module brings implementations of different image hashing algorithms.**
+- [ ] line_descriptor. Binary descriptors for lines extracted from an image
+- [ ] matlab. MATLAB Bridge
+- [ ] optflow. Optical Flow Algorithms
+- [ ] phase_unwrapping. Phase Unwrapping API
+- [ ] plot. Plot function for Mat data
+- [ ] reg. Image Registration
+- [ ] rgbd. RGB-Depth Processing
+- [ ] saliency. Saliency API
+- [ ] sfm. Structure From Motion
+- [ ] stereo. Stereo Correspondence Algorithms
+- [ ] structured_light. Structured Light API
+- [ ] surface_matching. Surface Matching
+- [ ] text. Scene Text Detection and Recognition
+- [ ] **tracking. Tracking API - WORK STARTED**
+- [ ] **xfeatures2d. Extra 2D Features Framework - WORK STARTED**
+- [ ] ximgproc. Extended Image Processing
+- [ ] xobjdetect. Extended object detection
+- [ ] xphoto. Additional photo processing algorithms
diff --git a/vendor/gocv.io/x/gocv/appveyor.yml b/vendor/gocv.io/x/gocv/appveyor.yml
new file mode 100644
index 0000000..4e98ade
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/appveyor.yml
@@ -0,0 +1,35 @@
+version: "{build}"
+
+clone_folder: c:\gopath\src\gocv.io\x\gocv
+
+platform:
+ - MinGW_x64
+
+environment:
+ GOPATH: c:\gopath
+ GOROOT: c:\go
+ GOVERSION: 1.13
+ TEST_EXTERNAL: 1
+ APPVEYOR_SAVE_CACHE_ON_ERROR: true
+
+cache:
+ - C:\opencv -> appveyor_build_opencv.cmd
+
+install:
+ - if not exist "C:\opencv" appveyor_build_opencv.cmd
+ - set PATH=C:\Perl\site\bin;C:\Perl\bin;C:\Windows\system32;C:\Windows;C:\Windows\System32\Wbem;C:\Windows\System32\WindowsPowerShell\v1.0\;C:\Program Files\7-Zip;C:\Program Files\Microsoft\Web Platform Installer\;C:\Tools\PsTools;C:\Program Files (x86)\CMake\bin;C:\go\bin;C:\Tools\NuGet;C:\Program Files\LLVM\bin;C:\Tools\curl\bin;C:\ProgramData\chocolatey\bin;C:\Program Files (x86)\Yarn\bin;C:\Users\appveyor\AppData\Local\Yarn\bin;C:\Program Files\AppVeyor\BuildAgent\
+ - set PATH=%PATH%;C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin
+ - set PATH=%PATH%;C:\Tools\GitVersion;C:\Program Files\Git LFS;C:\Program Files\Git\cmd;C:\Program Files\Git\usr\bin;C:\opencv\build\install\x64\mingw\bin;
+ - echo %PATH%
+ - echo %GOPATH%
+ - go version
+ - cd c:\gopath\src\gocv.io\x\gocv
+ - go get -d .
+ - set GOCV_CAFFE_TEST_FILES=C:\opencv\testdata
+ - set GOCV_TENSORFLOW_TEST_FILES=C:\opencv\testdata
+ - set OPENCV_ENABLE_NONFREE=ON
+ - go env
+
+build_script:
+ - go test -tags matprofile -v .
+ - go test -tags matprofile -v ./contrib
diff --git a/vendor/gocv.io/x/gocv/appveyor_build_opencv.cmd b/vendor/gocv.io/x/gocv/appveyor_build_opencv.cmd
new file mode 100644
index 0000000..7813c25
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/appveyor_build_opencv.cmd
@@ -0,0 +1,23 @@
+if not exist "C:\opencv" mkdir "C:\opencv"
+if not exist "C:\opencv\build" mkdir "C:\opencv\build"
+if not exist "C:\opencv\testdata" mkdir "C:\opencv\testdata"
+
+appveyor DownloadFile https://github.com/opencv/opencv/archive/4.2.0.zip -FileName c:\opencv\opencv-4.2.0.zip
+7z x c:\opencv\opencv-4.2.0.zip -oc:\opencv -y
+del c:\opencv\opencv-4.2.0.zip /q
+appveyor DownloadFile https://github.com/opencv/opencv_contrib/archive/4.2.0.zip -FileName c:\opencv\opencv_contrib-4.2.0.zip
+7z x c:\opencv\opencv_contrib-4.2.0.zip -oc:\opencv -y
+del c:\opencv\opencv_contrib-4.2.0.zip /q
+cd C:\opencv\build
+set PATH=C:\Perl\site\bin;C:\Perl\bin;C:\Windows\system32;C:\Windows;C:\Windows\System32\Wbem;C:\Windows\System32\WindowsPowerShell\v1.0\;C:\Program Files\7-Zip;C:\Program Files\Microsoft\Web Platform Installer\;C:\Tools\PsTools;C:\Program Files (x86)\CMake\bin;C:\go\bin;C:\Tools\NuGet;C:\Program Files\LLVM\bin;C:\Tools\curl\bin;C:\ProgramData\chocolatey\bin;C:\Program Files (x86)\Yarn\bin;C:\Users\appveyor\AppData\Local\Yarn\bin;C:\Program Files\AppVeyor\BuildAgent\
+set PATH=%PATH%;C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin
+dir C:\opencv
+cmake C:\opencv\opencv-4.2.0 -G "MinGW Makefiles" -BC:\opencv\build -DENABLE_CXX11=ON -DOPENCV_EXTRA_MODULES_PATH=C:\opencv\opencv_contrib-4.2.0\modules -DBUILD_SHARED_LIBS=ON -DWITH_IPP=OFF -DWITH_MSMF=OFF -DBUILD_EXAMPLES=OFF -DBUILD_TESTS=OFF -DBUILD_PERF_TESTS=OFF -DBUILD_opencv_java=OFF -DBUILD_opencv_python=OFF -DBUILD_opencv_python2=OFF -DBUILD_opencv_python3=OFF -DBUILD_DOCS=OFF -DENABLE_PRECOMPILED_HEADERS=OFF -DBUILD_opencv_saliency=OFF -DCPU_DISPATCH= -DBUILD_opencv_gapi=OFF -DOPENCV_GENERATE_PKGCONFIG=ON -DOPENCV_ENABLE_NONFREE=ON -DWITH_OPENCL_D3D11_NV=OFF -Wno-dev
+mingw32-make -j%NUMBER_OF_PROCESSORS%
+mingw32-make install
+appveyor DownloadFile https://raw.githubusercontent.com/opencv/opencv_extra/master/testdata/dnn/bvlc_googlenet.prototxt -FileName C:\opencv\testdata\bvlc_googlenet.prototxt
+appveyor DownloadFile http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel -FileName C:\opencv\testdata\bvlc_googlenet.caffemodel
+appveyor DownloadFile https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip -FileName C:\opencv\testdata\inception5h.zip
+7z x C:\opencv\testdata\inception5h.zip -oC:\opencv\testdata tensorflow_inception_graph.pb -y
+rmdir c:\opencv\opencv-4.2.0 /s /q
+rmdir c:\opencv\opencv_contrib-4.2.0 /s /q
diff --git a/vendor/gocv.io/x/gocv/asyncarray.cpp b/vendor/gocv.io/x/gocv/asyncarray.cpp
new file mode 100644
index 0000000..b457ff8
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/asyncarray.cpp
@@ -0,0 +1,28 @@
+// +build openvino
+
+#include <string.h>
+#include "asyncarray.h"
+
+
+// AsyncArray_New creates a new empty AsyncArray
+AsyncArray AsyncArray_New() {
+ return new cv::AsyncArray();
+}
+
+// AsyncArray_Close deletes an existing AsyncArray
+void AsyncArray_Close(AsyncArray a) {
+ delete a;
+}
+
+const char* AsyncArray_GetAsync(AsyncArray async_out,Mat out) {
+ try {
+ async_out->get(*out);
+ } catch(cv::Exception ex) {
+ return ex.err.c_str();
+ }
+ return "";
+}
+
+AsyncArray Net_forwardAsync(Net net, const char* outputName) {
+ return new cv::AsyncArray(net->forwardAsync(outputName));
+}
diff --git a/vendor/gocv.io/x/gocv/asyncarray.go b/vendor/gocv.io/x/gocv/asyncarray.go
new file mode 100644
index 0000000..345ec5e
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/asyncarray.go
@@ -0,0 +1,52 @@
+// +build openvino
+
+package gocv
+
+import (
+ "errors"
+)
+
+/*
+#include <stdlib.h>
+#include "dnn.h"
+#include "asyncarray.h"
+#include "core.h"
+*/
+import "C"
+
+type AsyncArray struct {
+ p C.AsyncArray
+}
+
+// NewAsyncArray returns a new empty AsyncArray.
+func NewAsyncArray() AsyncArray {
+ return newAsyncArray(C.AsyncArray_New())
+}
+
+// Ptr returns the AsyncArray's underlying object pointer.
+func (a *AsyncArray) Ptr() C.AsyncArray {
+ return a.p
+}
+
+// Get retrieves the Mat result of the asynchronous operation, blocking until it is available.
+func (m *AsyncArray) Get(mat *Mat) error {
+ result := C.AsyncArray_GetAsync(m.p, mat.p)
+ err := C.GoString(result)
+
+ if len(err) > 0 {
+ return errors.New(err)
+ }
+ return nil
+}
+
+// newAsyncArray returns a new AsyncArray from a C AsyncArray
+func newAsyncArray(p C.AsyncArray) AsyncArray {
+ return AsyncArray{p: p}
+}
+
+// Close the AsyncArray object.
+func (a *AsyncArray) Close() error {
+ C.AsyncArray_Close(a.p)
+ a.p = nil
+ return nil
+}
diff --git a/vendor/gocv.io/x/gocv/asyncarray.h b/vendor/gocv.io/x/gocv/asyncarray.h
new file mode 100644
index 0000000..cf894b6
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/asyncarray.h
@@ -0,0 +1,23 @@
+#ifdef __cplusplus
+#include <opencv2/opencv.hpp>
+extern "C" {
+#endif
+
+#include "core.h"
+#include "dnn.h"
+
+#ifdef __cplusplus
+typedef cv::AsyncArray* AsyncArray;
+#else
+typedef void* AsyncArray;
+#endif
+
+AsyncArray AsyncArray_New();
+const char* AsyncArray_GetAsync(AsyncArray async_out,Mat out);
+void AsyncArray_Close(AsyncArray a);
+AsyncArray Net_forwardAsync(Net net, const char* outputName);
+
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/vendor/gocv.io/x/gocv/calib3d.cpp b/vendor/gocv.io/x/gocv/calib3d.cpp
new file mode 100644
index 0000000..9a3edbd
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/calib3d.cpp
@@ -0,0 +1,33 @@
+#include "calib3d.h"
+
+
+void Fisheye_UndistortImage(Mat distorted, Mat undistorted, Mat k, Mat d) {
+ cv::fisheye::undistortImage(*distorted, *undistorted, *k, *d);
+}
+
+void Fisheye_UndistortImageWithParams(Mat distorted, Mat undistorted, Mat k, Mat d, Mat knew, Size size) {
+ cv::Size sz(size.width, size.height);
+ cv::fisheye::undistortImage(*distorted, *undistorted, *k, *d, *knew, sz);
+}
+
+void InitUndistortRectifyMap(Mat cameraMatrix,Mat distCoeffs,Mat r,Mat newCameraMatrix,Size size,int m1type,Mat map1,Mat map2) {
+ cv::Size sz(size.width, size.height);
+ cv::initUndistortRectifyMap(*cameraMatrix,*distCoeffs,*r,*newCameraMatrix,sz,m1type,*map1,*map2);
+}
+
+Mat GetOptimalNewCameraMatrixWithParams(Mat cameraMatrix,Mat distCoeffs,Size size,double alpha,Size newImgSize,Rect* validPixROI,bool centerPrincipalPoint) {
+ cv::Size sz(size.width, size.height);
+ cv::Size newSize(newImgSize.width, newImgSize.height);
+ cv::Rect rect(validPixROI->x,validPixROI->y,validPixROI->width,validPixROI->height);
+ cv::Mat* mat = new cv::Mat(cv::getOptimalNewCameraMatrix(*cameraMatrix,*distCoeffs,sz,alpha,newSize,&rect,centerPrincipalPoint));
+ validPixROI->x = rect.x;
+ validPixROI->y = rect.y;
+ validPixROI->width = rect.width;
+ validPixROI->height = rect.height;
+ return mat;
+}
+
+void Undistort(Mat src, Mat dst, Mat cameraMatrix, Mat distCoeffs, Mat newCameraMatrix) {
+ cv::undistort(*src, *dst, *cameraMatrix, *distCoeffs, *newCameraMatrix);
+}
+
diff --git a/vendor/gocv.io/x/gocv/calib3d.go b/vendor/gocv.io/x/gocv/calib3d.go
new file mode 100644
index 0000000..3246933
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/calib3d.go
@@ -0,0 +1,103 @@
+package gocv
+
+/*
+#include <stdlib.h>
+#include "calib3d.h"
+*/
+import "C"
+import "image"
+
+// This file wraps functions from OpenCV's "Camera Calibration and 3D Reconstruction"
+// (calib3d) module, including the Fisheye camera model.
+//
+// For more details, please see:
+// https://docs.opencv.org/trunk/db/d58/group__calib3d__fisheye.html
+
+// CalibFlag value for calibration
+type CalibFlag int32
+
+const (
+ // CalibUseIntrinsicGuess indicates that cameraMatrix contains valid initial values
+ // of fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially
+ // set to the image center ( imageSize is used), and focal distances are computed
+ // in a least-squares fashion.
+ CalibUseIntrinsicGuess CalibFlag = 1 << iota
+
+ // CalibRecomputeExtrinsic indicates that extrinsic will be recomputed after each
+ // iteration of intrinsic optimization.
+ CalibRecomputeExtrinsic
+
+ // CalibCheckCond indicates that the functions will check validity of condition number
+ CalibCheckCond
+
+ // CalibFixSkew indicates that skew coefficient (alpha) is set to zero and stay zero
+ CalibFixSkew
+
+ // CalibFixK1 indicates that selected distortion coefficients are set to zeros and stay zero
+ CalibFixK1
+
+ // CalibFixK2 indicates that selected distortion coefficients are set to zeros and stay zero
+ CalibFixK2
+
+ // CalibFixK3 indicates that selected distortion coefficients are set to zeros and stay zero
+ CalibFixK3
+
+ // CalibFixK4 indicates that selected distortion coefficients are set to zeros and stay zero
+ CalibFixK4
+
+	// CalibFixIntrinsic indicates that K1, K2 and D1, D2 are fixed so that only the R, T matrices are estimated
+ CalibFixIntrinsic
+
+ // CalibFixPrincipalPoint indicates that the principal point is not changed during the global optimization.
+ // It stays at the center or at a different location specified when CalibUseIntrinsicGuess is set too.
+ CalibFixPrincipalPoint
+)
+
+// FisheyeUndistortImage transforms an image to compensate for fisheye lens distortion
+func FisheyeUndistortImage(distorted Mat, undistorted *Mat, k, d Mat) {
+ C.Fisheye_UndistortImage(distorted.Ptr(), undistorted.Ptr(), k.Ptr(), d.Ptr())
+}
+
+// FisheyeUndistortImageWithParams transforms an image to compensate for fisheye lens distortion with Knew matrix
+func FisheyeUndistortImageWithParams(distorted Mat, undistorted *Mat, k, d, knew Mat, size image.Point) {
+ sz := C.struct_Size{
+ width: C.int(size.X),
+ height: C.int(size.Y),
+ }
+ C.Fisheye_UndistortImageWithParams(distorted.Ptr(), undistorted.Ptr(), k.Ptr(), d.Ptr(), knew.Ptr(), sz)
+}
+
+// InitUndistortRectifyMap computes the joint undistortion and rectification transformation and represents the result in the form of maps for remap
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga7dfb72c9cf9780a347fbe3d1c47e5d5a
+//
+func InitUndistortRectifyMap(cameraMatrix Mat, distCoeffs Mat, r Mat, newCameraMatrix Mat, size image.Point, m1type int, map1 Mat, map2 Mat) {
+ sz := C.struct_Size{
+ width: C.int(size.X),
+ height: C.int(size.Y),
+ }
+ C.InitUndistortRectifyMap(cameraMatrix.Ptr(), distCoeffs.Ptr(), r.Ptr(), newCameraMatrix.Ptr(), sz, C.int(m1type), map1.Ptr(), map2.Ptr())
+}
+
+// GetOptimalNewCameraMatrixWithParams computes and returns the optimal new camera matrix based on the free scaling parameter.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga7a6c4e032c97f03ba747966e6ad862b1
+//
+func GetOptimalNewCameraMatrixWithParams(cameraMatrix Mat, distCoeffs Mat, imageSize image.Point, alpha float64, newImgSize image.Point, centerPrincipalPoint bool) (Mat, image.Rectangle) {
+ sz := C.struct_Size{
+ width: C.int(imageSize.X),
+ height: C.int(imageSize.Y),
+ }
+ newSize := C.struct_Size{
+ width: C.int(newImgSize.X),
+ height: C.int(newImgSize.Y),
+ }
+ rt := C.struct_Rect{}
+ return newMat(C.GetOptimalNewCameraMatrixWithParams(cameraMatrix.Ptr(), distCoeffs.Ptr(), sz, C.double(alpha), newSize, &rt, C.bool(centerPrincipalPoint))), toRect(rt)
+}
+
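+// Undistort transforms an image to compensate for lens distortion.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#ga69f2545a8b62a6b0fc2ee060dc30559d
+//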
+func Undistort(src Mat, dst *Mat, cameraMatrix Mat, distCoeffs Mat, newCameraMatrix Mat) {
+ C.Undistort(src.Ptr(), dst.Ptr(), cameraMatrix.Ptr(), distCoeffs.Ptr(), newCameraMatrix.Ptr())
+}
diff --git a/vendor/gocv.io/x/gocv/calib3d.h b/vendor/gocv.io/x/gocv/calib3d.h
new file mode 100644
index 0000000..f93918f
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/calib3d.h
@@ -0,0 +1,25 @@
+#ifndef _OPENCV3_CALIB_H_
+#define _OPENCV3_CALIB_H_
+
+#ifdef __cplusplus
+#include <opencv2/opencv.hpp>
+#include <opencv2/calib3d.hpp>
+
+
+extern "C" {
+#endif
+
+#include "core.h"
+
+//Calib
+void Fisheye_UndistortImage(Mat distorted, Mat undistorted, Mat k, Mat d);
+void Fisheye_UndistortImageWithParams(Mat distorted, Mat undistorted, Mat k, Mat d, Mat knew, Size size);
+
+void InitUndistortRectifyMap(Mat cameraMatrix,Mat distCoeffs,Mat r,Mat newCameraMatrix,Size size,int m1type,Mat map1,Mat map2);
+Mat GetOptimalNewCameraMatrixWithParams(Mat cameraMatrix,Mat distCoeffs,Size size,double alpha,Size newImgSize,Rect* validPixROI,bool centerPrincipalPoint);
+void Undistort(Mat src, Mat dst, Mat cameraMatrix, Mat distCoeffs, Mat newCameraMatrix);
+#ifdef __cplusplus
+}
+#endif
+
+#endif //_OPENCV3_CALIB_H
diff --git a/vendor/gocv.io/x/gocv/calib3d_string.go b/vendor/gocv.io/x/gocv/calib3d_string.go
new file mode 100644
index 0000000..baa5451
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/calib3d_string.go
@@ -0,0 +1,27 @@
+package gocv
+
+func (c CalibFlag) String() string {
+ switch c {
+ case CalibUseIntrinsicGuess:
+ return "calib-use-intrinsec-guess"
+ case CalibRecomputeExtrinsic:
+ return "calib-recompute-extrinsic"
+ case CalibCheckCond:
+ return "calib-check-cond"
+ case CalibFixSkew:
+ return "calib-fix-skew"
+ case CalibFixK1:
+ return "calib-fix-k1"
+ case CalibFixK2:
+ return "calib-fix-k2"
+ case CalibFixK3:
+ return "calib-fix-k3"
+ case CalibFixK4:
+ return "calib-fix-k4"
+ case CalibFixIntrinsic:
+ return "calib-fix-intrinsic"
+ case CalibFixPrincipalPoint:
+ return "calib-fix-principal-point"
+ }
+ return ""
+}
diff --git a/vendor/gocv.io/x/gocv/cgo.go b/vendor/gocv.io/x/gocv/cgo.go
new file mode 100644
index 0000000..8862cb0
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/cgo.go
@@ -0,0 +1,13 @@
+// +build !customenv,!openvino
+
+package gocv
+
+// Changes here should be mirrored in contrib/cgo.go and cuda/cgo.go.
+
+/*
+#cgo !windows pkg-config: opencv4
+#cgo CXXFLAGS: --std=c++11
+#cgo windows CPPFLAGS: -IC:/opencv/build/install/include
+#cgo windows LDFLAGS: -LC:/opencv/build/install/x64/mingw/lib -lopencv_core420 -lopencv_face420 -lopencv_videoio420 -lopencv_imgproc420 -lopencv_highgui420 -lopencv_imgcodecs420 -lopencv_objdetect420 -lopencv_features2d420 -lopencv_video420 -lopencv_dnn420 -lopencv_xfeatures2d420 -lopencv_plot420 -lopencv_tracking420 -lopencv_img_hash420 -lopencv_calib3d420
+*/
+import "C"
diff --git a/vendor/gocv.io/x/gocv/codecov.yml b/vendor/gocv.io/x/gocv/codecov.yml
new file mode 100644
index 0000000..f608620
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/codecov.yml
@@ -0,0 +1,3 @@
+ignore:
+ - "*_string.go"
+ - "*/*_string.go"
diff --git a/vendor/gocv.io/x/gocv/core.cpp b/vendor/gocv.io/x/gocv/core.cpp
new file mode 100644
index 0000000..2d312f2
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/core.cpp
@@ -0,0 +1,763 @@
+#include "core.h"
+#include <string.h>
+
+// Mat_New creates a new empty Mat
+Mat Mat_New() {
+ return new cv::Mat();
+}
+
+// Mat_NewWithSize creates a new Mat with a specific size dimension and number of channels.
+Mat Mat_NewWithSize(int rows, int cols, int type) {
+ return new cv::Mat(rows, cols, type, 0.0);
+}
+
+// Mat_NewFromScalar creates a new Mat from a Scalar. Intended to be used
+// for Mat comparison operation such as InRange.
+Mat Mat_NewFromScalar(Scalar ar, int type) {
+ cv::Scalar c = cv::Scalar(ar.val1, ar.val2, ar.val3, ar.val4);
+ return new cv::Mat(1, 1, type, c);
+}
+
+// Mat_NewWithSizeFromScalar creates a new Mat from a Scalar with a specific size dimension and number of channels
+Mat Mat_NewWithSizeFromScalar(Scalar ar, int rows, int cols, int type) {
+ cv::Scalar c = cv::Scalar(ar.val1, ar.val2, ar.val3, ar.val4);
+ return new cv::Mat(rows, cols, type, c);
+}
+
+Mat Mat_NewFromBytes(int rows, int cols, int type, struct ByteArray buf) {
+ return new cv::Mat(rows, cols, type, buf.data);
+}
+
+Mat Mat_FromPtr(Mat m, int rows, int cols, int type, int prow, int pcol) {
+ return new cv::Mat(rows, cols, type, m->ptr(prow, pcol));
+}
+
+// Mat_Close deletes an existing Mat
+void Mat_Close(Mat m) {
+ delete m;
+}
+
+// Mat_Empty tests if a Mat is empty
+int Mat_Empty(Mat m) {
+ return m->empty();
+}
+
+// Mat_Clone returns a clone of this Mat
+Mat Mat_Clone(Mat m) {
+ return new cv::Mat(m->clone());
+}
+
+// Mat_CopyTo copies this Mat to another Mat.
+void Mat_CopyTo(Mat m, Mat dst) {
+ m->copyTo(*dst);
+}
+
+// Mat_CopyToWithMask copies this Mat to another Mat while applying the mask
+void Mat_CopyToWithMask(Mat m, Mat dst, Mat mask) {
+ m->copyTo(*dst, *mask);
+}
+
+void Mat_ConvertTo(Mat m, Mat dst, int type) {
+ m->convertTo(*dst, type);
+}
+
+// Mat_ToBytes returns the bytes representation of the underlying data.
+struct ByteArray Mat_ToBytes(Mat m) {
+ return toByteArray(reinterpret_cast<const char*>(m->data), m->total() * m->elemSize());
+}
+
+struct ByteArray Mat_DataPtr(Mat m) {
+ return ByteArray {reinterpret_cast<char*>(m->data), static_cast<int>(m->total() * m->elemSize())};
+}
+
+// Mat_Region returns a Mat of a region of another Mat
+Mat Mat_Region(Mat m, Rect r) {
+ return new cv::Mat(*m, cv::Rect(r.x, r.y, r.width, r.height));
+}
+
+Mat Mat_Reshape(Mat m, int cn, int rows) {
+ return new cv::Mat(m->reshape(cn, rows));
+}
+
+void Mat_PatchNaNs(Mat m) {
+ cv::patchNaNs(*m);
+}
+
+Mat Mat_ConvertFp16(Mat m) {
+ Mat dst = new cv::Mat();
+ cv::convertFp16(*m, *dst);
+ return dst;
+}
+
+Mat Mat_Sqrt(Mat m) {
+ Mat dst = new cv::Mat();
+ cv::sqrt(*m, *dst);
+ return dst;
+}
+
+// Mat_Mean calculates the mean value M of array elements, independently for each channel, and returns it as a Scalar vector
+Scalar Mat_Mean(Mat m) {
+ cv::Scalar c = cv::mean(*m);
+ Scalar scal = Scalar();
+ scal.val1 = c.val[0];
+ scal.val2 = c.val[1];
+ scal.val3 = c.val[2];
+ scal.val4 = c.val[3];
+ return scal;
+}
+
+// Mat_MeanWithMask calculates the mean value M of array elements,
+// independently for each channel, and returns it as Scalar vector
+// while applying the mask.
+
+Scalar Mat_MeanWithMask(Mat m, Mat mask){
+ cv::Scalar c = cv::mean(*m, *mask);
+ Scalar scal = Scalar();
+ scal.val1 = c.val[0];
+ scal.val2 = c.val[1];
+ scal.val3 = c.val[2];
+ scal.val4 = c.val[3];
+ return scal;
+}
+
+void LUT(Mat src, Mat lut, Mat dst) {
+ cv::LUT(*src, *lut, *dst);
+}
+
+// Mat_Rows returns how many rows in this Mat.
+int Mat_Rows(Mat m) {
+ return m->rows;
+}
+
+// Mat_Cols returns how many columns in this Mat.
+int Mat_Cols(Mat m) {
+ return m->cols;
+}
+
+// Mat_Channels returns how many channels in this Mat.
+int Mat_Channels(Mat m) {
+ return m->channels();
+}
+
+// Mat_Type returns the type from this Mat.
+int Mat_Type(Mat m) {
+ return m->type();
+}
+
+// Mat_Step returns the number of bytes each matrix row occupies.
+int Mat_Step(Mat m) {
+ return m->step;
+}
+
+int Mat_Total(Mat m) {
+ return m->total();
+}
+
+void Mat_Size(Mat m, IntVector* res) {
+ cv::MatSize ms(m->size);
+ int* ids = new int[ms.dims()];
+
+ for (size_t i = 0; i < ms.dims(); ++i) {
+ ids[i] = ms[i];
+ }
+
+ res->length = ms.dims();
+ res->val = ids;
+ return;
+}
+
+// Mat_GetUChar returns a specific row/col value from this Mat expecting
+// each element to contain an uchar aka CV_8U.
+uint8_t Mat_GetUChar(Mat m, int row, int col) {
+ return m->at<uchar>(row, col);
+}
+
+uint8_t Mat_GetUChar3(Mat m, int x, int y, int z) {
+ return m->at<uchar>(x, y, z);
+}
+
+// Mat_GetSChar returns a specific row/col value from this Mat expecting
+// each element to contain a schar aka CV_8S.
+int8_t Mat_GetSChar(Mat m, int row, int col) {
+ return m->at<schar>(row, col);
+}
+
+int8_t Mat_GetSChar3(Mat m, int x, int y, int z) {
+ return m->at<schar>(x, y, z);
+}
+
+// Mat_GetShort returns a specific row/col value from this Mat expecting
+// each element to contain a short aka CV_16S.
+int16_t Mat_GetShort(Mat m, int row, int col) {
+ return m->at<short>(row, col);
+}
+
+int16_t Mat_GetShort3(Mat m, int x, int y, int z) {
+ return m->at<short>(x, y, z);
+}
+
+// Mat_GetInt returns a specific row/col value from this Mat expecting
+// each element to contain an int aka CV_32S.
+int32_t Mat_GetInt(Mat m, int row, int col) {
+ return m->at<int>(row, col);
+}
+
+int32_t Mat_GetInt3(Mat m, int x, int y, int z) {
+ return m->at<int>(x, y, z);
+}
+
+// Mat_GetFloat returns a specific row/col value from this Mat expecting
+// each element to contain a float aka CV_32F.
+float Mat_GetFloat(Mat m, int row, int col) {
+ return m->at<float>(row, col);
+}
+
+float Mat_GetFloat3(Mat m, int x, int y, int z) {
+ return m->at<float>(x, y, z);
+}
+
+// Mat_GetDouble returns a specific row/col value from this Mat expecting
+// each element to contain a double aka CV_64F.
+double Mat_GetDouble(Mat m, int row, int col) {
+ return m->at<double>(row, col);
+}
+
+double Mat_GetDouble3(Mat m, int x, int y, int z) {
+ return m->at<double>(x, y, z);
+}
+
+void Mat_SetTo(Mat m, Scalar value) {
+ cv::Scalar c_value(value.val1, value.val2, value.val3, value.val4);
+ m->setTo(c_value);
+}
+
+// Mat_SetUChar sets a specific row/col value from this Mat expecting
+// each element to contain an uchar aka CV_8U.
+void Mat_SetUChar(Mat m, int row, int col, uint8_t val) {
+ m->at<uchar>(row, col) = val;
+}
+
+void Mat_SetUChar3(Mat m, int x, int y, int z, uint8_t val) {
+ m->at<uchar>(x, y, z) = val;
+}
+
+// Mat_SetSChar sets a specific row/col value from this Mat expecting
+// each element to contain a schar aka CV_8S.
+void Mat_SetSChar(Mat m, int row, int col, int8_t val) {
+ m->at<schar>(row, col) = val;
+}
+
+void Mat_SetSChar3(Mat m, int x, int y, int z, int8_t val) {
+ m->at<schar>(x, y, z) = val;
+}
+
+// Mat_SetShort sets a specific row/col value from this Mat expecting
+// each element to contain a short aka CV_16S.
+void Mat_SetShort(Mat m, int row, int col, int16_t val) {
+ m->at<short>(row, col) = val;
+}
+
+void Mat_SetShort3(Mat m, int x, int y, int z, int16_t val) {
+ m->at<short>(x, y, z) = val;
+}
+
+// Mat_SetInt sets a specific row/col value from this Mat expecting
+// each element to contain an int aka CV_32S.
+void Mat_SetInt(Mat m, int row, int col, int32_t val) {
+ m->at<int>(row, col) = val;
+}
+
+void Mat_SetInt3(Mat m, int x, int y, int z, int32_t val) {
+ m->at<int>(x, y, z) = val;
+}
+
+// Mat_SetFloat sets a specific row/col value from this Mat expecting
+// each element to contain a float aka CV_32F.
+void Mat_SetFloat(Mat m, int row, int col, float val) {
+ m->at<float>(row, col) = val;
+}
+
+void Mat_SetFloat3(Mat m, int x, int y, int z, float val) {
+ m->at<float>(x, y, z) = val;
+}
+
+// Mat_SetDouble sets a specific row/col value from this Mat expecting
+// each element to contain a double aka CV_64F.
+void Mat_SetDouble(Mat m, int row, int col, double val) {
+ m->at<double>(row, col) = val;
+}
+
+void Mat_SetDouble3(Mat m, int x, int y, int z, double val) {
+ m->at<double>(x, y, z) = val;
+}
+
+void Mat_AddUChar(Mat m, uint8_t val) {
+ *m += val;
+}
+
+void Mat_SubtractUChar(Mat m, uint8_t val) {
+ *m -= val;
+}
+
+void Mat_MultiplyUChar(Mat m, uint8_t val) {
+ *m *= val;
+}
+
+void Mat_DivideUChar(Mat m, uint8_t val) {
+ *m /= val;
+}
+
+void Mat_AddFloat(Mat m, float val) {
+ *m += val;
+}
+
+void Mat_SubtractFloat(Mat m, float val) {
+ *m -= val;
+}
+
+void Mat_MultiplyFloat(Mat m, float val) {
+ *m *= val;
+}
+
+void Mat_DivideFloat(Mat m, float val) {
+ *m /= val;
+}
+
+Mat Mat_MultiplyMatrix(Mat x, Mat y) {
+ return new cv::Mat((*x) * (*y));
+}
+
+Mat Mat_T(Mat x) {
+ return new cv::Mat(x->t());
+}
+
+void Mat_AbsDiff(Mat src1, Mat src2, Mat dst) {
+ cv::absdiff(*src1, *src2, *dst);
+}
+
+void Mat_Add(Mat src1, Mat src2, Mat dst) {
+ cv::add(*src1, *src2, *dst);
+}
+
+void Mat_AddWeighted(Mat src1, double alpha, Mat src2, double beta, double gamma, Mat dst) {
+ cv::addWeighted(*src1, alpha, *src2, beta, gamma, *dst);
+}
+
+void Mat_BitwiseAnd(Mat src1, Mat src2, Mat dst) {
+ cv::bitwise_and(*src1, *src2, *dst);
+}
+
+void Mat_BitwiseAndWithMask(Mat src1, Mat src2, Mat dst, Mat mask){
+ cv::bitwise_and(*src1, *src2, *dst, *mask);
+}
+
+void Mat_BitwiseNot(Mat src1, Mat dst) {
+ cv::bitwise_not(*src1, *dst);
+}
+
+void Mat_BitwiseNotWithMask(Mat src1, Mat dst, Mat mask) {
+ cv::bitwise_not(*src1, *dst, *mask);
+}
+
+void Mat_BitwiseOr(Mat src1, Mat src2, Mat dst) {
+ cv::bitwise_or(*src1, *src2, *dst);
+}
+
+void Mat_BitwiseOrWithMask(Mat src1, Mat src2, Mat dst, Mat mask) {
+ cv::bitwise_or(*src1, *src2, *dst, *mask);
+}
+
+void Mat_BitwiseXor(Mat src1, Mat src2, Mat dst) {
+ cv::bitwise_xor(*src1, *src2, *dst);
+}
+
+void Mat_BitwiseXorWithMask(Mat src1, Mat src2, Mat dst, Mat mask) {
+ cv::bitwise_xor(*src1, *src2, *dst, *mask);
+}
+
+void Mat_BatchDistance(Mat src1, Mat src2, Mat dist, int dtype, Mat nidx, int normType, int K,
+ Mat mask, int update, bool crosscheck) {
+ cv::batchDistance(*src1, *src2, *dist, dtype, *nidx, normType, K, *mask, update, crosscheck);
+}
+
+int Mat_BorderInterpolate(int p, int len, int borderType) {
+ return cv::borderInterpolate(p, len, borderType);
+}
+
+void Mat_CalcCovarMatrix(Mat samples, Mat covar, Mat mean, int flags, int ctype) {
+ cv::calcCovarMatrix(*samples, *covar, *mean, flags, ctype);
+}
+
+void Mat_CartToPolar(Mat x, Mat y, Mat magnitude, Mat angle, bool angleInDegrees) {
+ cv::cartToPolar(*x, *y, *magnitude, *angle, angleInDegrees);
+}
+
+bool Mat_CheckRange(Mat m) {
+ return cv::checkRange(*m);
+}
+
+void Mat_Compare(Mat src1, Mat src2, Mat dst, int ct) {
+ cv::compare(*src1, *src2, *dst, ct);
+}
+
+int Mat_CountNonZero(Mat src) {
+ return cv::countNonZero(*src);
+}
+
+
+void Mat_CompleteSymm(Mat m, bool lowerToUpper) {
+ cv::completeSymm(*m, lowerToUpper);
+}
+
+void Mat_ConvertScaleAbs(Mat src, Mat dst, double alpha, double beta) {
+ cv::convertScaleAbs(*src, *dst, alpha, beta);
+}
+
+void Mat_CopyMakeBorder(Mat src, Mat dst, int top, int bottom, int left, int right, int borderType,
+ Scalar value) {
+ cv::Scalar c_value(value.val1, value.val2, value.val3, value.val4);
+ cv::copyMakeBorder(*src, *dst, top, bottom, left, right, borderType, c_value);
+}
+
+void Mat_DCT(Mat src, Mat dst, int flags) {
+ cv::dct(*src, *dst, flags);
+}
+
+double Mat_Determinant(Mat m) {
+ return cv::determinant(*m);
+}
+
+void Mat_DFT(Mat m, Mat dst, int flags) {
+ cv::dft(*m, *dst, flags);
+}
+
+void Mat_Divide(Mat src1, Mat src2, Mat dst) {
+ cv::divide(*src1, *src2, *dst);
+}
+
+bool Mat_Eigen(Mat src, Mat eigenvalues, Mat eigenvectors) {
+ return cv::eigen(*src, *eigenvalues, *eigenvectors);
+}
+
+void Mat_EigenNonSymmetric(Mat src, Mat eigenvalues, Mat eigenvectors) {
+ cv::eigenNonSymmetric(*src, *eigenvalues, *eigenvectors);
+}
+
+void Mat_Exp(Mat src, Mat dst) {
+ cv::exp(*src, *dst);
+}
+
+void Mat_ExtractChannel(Mat src, Mat dst, int coi) {
+ cv::extractChannel(*src, *dst, coi);
+}
+
+void Mat_FindNonZero(Mat src, Mat idx) {
+ cv::findNonZero(*src, *idx);
+}
+
+void Mat_Flip(Mat src, Mat dst, int flipCode) {
+ cv::flip(*src, *dst, flipCode);
+}
+
+void Mat_Gemm(Mat src1, Mat src2, double alpha, Mat src3, double beta, Mat dst, int flags) {
+ cv::gemm(*src1, *src2, alpha, *src3, beta, *dst, flags);
+}
+
+int Mat_GetOptimalDFTSize(int vecsize) {
+ return cv::getOptimalDFTSize(vecsize);
+}
+
+void Mat_Hconcat(Mat src1, Mat src2, Mat dst) {
+ cv::hconcat(*src1, *src2, *dst);
+}
+
+void Mat_Vconcat(Mat src1, Mat src2, Mat dst) {
+ cv::vconcat(*src1, *src2, *dst);
+}
+
+void Rotate(Mat src, Mat dst, int rotateCode) {
+ cv::rotate(*src, *dst, rotateCode);
+}
+
+void Mat_Idct(Mat src, Mat dst, int flags) {
+ cv::idct(*src, *dst, flags);
+}
+
+void Mat_Idft(Mat src, Mat dst, int flags, int nonzeroRows) {
+ cv::idft(*src, *dst, flags, nonzeroRows);
+}
+
+void Mat_InRange(Mat src, Mat lowerb, Mat upperb, Mat dst) {
+ cv::inRange(*src, *lowerb, *upperb, *dst);
+}
+
+void Mat_InRangeWithScalar(Mat src, Scalar lowerb, Scalar upperb, Mat dst) {
+ cv::Scalar lb = cv::Scalar(lowerb.val1, lowerb.val2, lowerb.val3, lowerb.val4);
+ cv::Scalar ub = cv::Scalar(upperb.val1, upperb.val2, upperb.val3, upperb.val4);
+ cv::inRange(*src, lb, ub, *dst);
+}
+
+void Mat_InsertChannel(Mat src, Mat dst, int coi) {
+ cv::insertChannel(*src, *dst, coi);
+}
+
+double Mat_Invert(Mat src, Mat dst, int flags) {
+ double ret = cv::invert(*src, *dst, flags);
+ return ret;
+}
+
+double KMeans(Mat data, int k, Mat bestLabels, TermCriteria criteria, int attempts, int flags, Mat centers) {
+ double ret = cv::kmeans(*data, k, *bestLabels, *criteria, attempts, flags, *centers);
+ return ret;
+}
+
+double KMeansPoints(Contour points, int k, Mat bestLabels, TermCriteria criteria, int attempts, int flags, Mat centers) {
+ std::vector<cv::Point2f> pts;
+
+ for (size_t i = 0; i < points.length; i++) {
+ pts.push_back(cv::Point2f(points.points[i].x, points.points[i].y));
+ }
+ double ret = cv::kmeans(pts, k, *bestLabels, *criteria, attempts, flags, *centers);
+ return ret;
+}
+
+void Mat_Log(Mat src, Mat dst) {
+ cv::log(*src, *dst);
+}
+
+void Mat_Magnitude(Mat x, Mat y, Mat magnitude) {
+ cv::magnitude(*x, *y, *magnitude);
+}
+
+void Mat_Max(Mat src1, Mat src2, Mat dst) {
+ cv::max(*src1, *src2, *dst);
+}
+
+void Mat_MeanStdDev(Mat src, Mat dstMean, Mat dstStdDev) {
+ cv::meanStdDev(*src, *dstMean, *dstStdDev);
+}
+
+void Mat_Merge(struct Mats mats, Mat dst) {
+ std::vector<cv::Mat> images;
+
+ for (int i = 0; i < mats.length; ++i) {
+ images.push_back(*mats.mats[i]);
+ }
+
+ cv::merge(images, *dst);
+}
+
+void Mat_Min(Mat src1, Mat src2, Mat dst) {
+ cv::min(*src1, *src2, *dst);
+}
+
+void Mat_MinMaxIdx(Mat m, double* minVal, double* maxVal, int* minIdx, int* maxIdx) {
+ cv::minMaxIdx(*m, minVal, maxVal, minIdx, maxIdx);
+}
+
+void Mat_MinMaxLoc(Mat m, double* minVal, double* maxVal, Point* minLoc, Point* maxLoc) {
+ cv::Point cMinLoc;
+ cv::Point cMaxLoc;
+ cv::minMaxLoc(*m, minVal, maxVal, &cMinLoc, &cMaxLoc);
+
+ minLoc->x = cMinLoc.x;
+ minLoc->y = cMinLoc.y;
+ maxLoc->x = cMaxLoc.x;
+ maxLoc->y = cMaxLoc.y;
+}
+
+void Mat_MulSpectrums(Mat a, Mat b, Mat c, int flags) {
+ cv::mulSpectrums(*a, *b, *c, flags);
+}
+
+void Mat_Multiply(Mat src1, Mat src2, Mat dst) {
+ cv::multiply(*src1, *src2, *dst);
+}
+
+void Mat_Normalize(Mat src, Mat dst, double alpha, double beta, int typ) {
+ cv::normalize(*src, *dst, alpha, beta, typ);
+}
+
+double Norm(Mat src1, int normType) {
+ return cv::norm(*src1, normType);
+}
+
+void Mat_PerspectiveTransform(Mat src, Mat dst, Mat tm) {
+ cv::perspectiveTransform(*src, *dst, *tm);
+}
+
+bool Mat_Solve(Mat src1, Mat src2, Mat dst, int flags) {
+ return cv::solve(*src1, *src2, *dst, flags);
+}
+
+int Mat_SolveCubic(Mat coeffs, Mat roots) {
+ return cv::solveCubic(*coeffs, *roots);
+}
+
+double Mat_SolvePoly(Mat coeffs, Mat roots, int maxIters) {
+ return cv::solvePoly(*coeffs, *roots, maxIters);
+}
+
+void Mat_Reduce(Mat src, Mat dst, int dim, int rType, int dType) {
+ cv::reduce(*src, *dst, dim, rType, dType);
+}
+
+void Mat_Repeat(Mat src, int nY, int nX, Mat dst) {
+ cv::repeat(*src, nY, nX, *dst);
+}
+
+void Mat_ScaleAdd(Mat src1, double alpha, Mat src2, Mat dst) {
+ cv::scaleAdd(*src1, alpha, *src2, *dst);
+}
+
+void Mat_SetIdentity(Mat src, double scalar) {
+ cv::setIdentity(*src, scalar);
+}
+
+void Mat_Sort(Mat src, Mat dst, int flags) {
+ cv::sort(*src, *dst, flags);
+}
+
+void Mat_SortIdx(Mat src, Mat dst, int flags) {
+ cv::sortIdx(*src, *dst, flags);
+}
+
+void Mat_Split(Mat src, struct Mats* mats) {
+ std::vector<cv::Mat> channels;
+ cv::split(*src, channels);
+ mats->mats = new Mat[channels.size()];
+
+ for (size_t i = 0; i < channels.size(); ++i) {
+ mats->mats[i] = new cv::Mat(channels[i]);
+ }
+
+ mats->length = (int)channels.size();
+}
+
+void Mat_Subtract(Mat src1, Mat src2, Mat dst) {
+ cv::subtract(*src1, *src2, *dst);
+}
+
+Scalar Mat_Trace(Mat src) {
+ cv::Scalar c = cv::trace(*src);
+ Scalar scal = Scalar();
+ scal.val1 = c.val[0];
+ scal.val2 = c.val[1];
+ scal.val3 = c.val[2];
+ scal.val4 = c.val[3];
+ return scal;
+}
+
+void Mat_Transform(Mat src, Mat dst, Mat tm) {
+ cv::transform(*src, *dst, *tm);
+}
+
+void Mat_Transpose(Mat src, Mat dst) {
+ cv::transpose(*src, *dst);
+}
+
+void Mat_PolarToCart(Mat magnitude, Mat degree, Mat x, Mat y, bool angleInDegrees) {
+ cv::polarToCart(*magnitude, *degree, *x, *y, angleInDegrees);
+}
+
+void Mat_Pow(Mat src, double power, Mat dst) {
+ cv::pow(*src, power, *dst);
+}
+
+void Mat_Phase(Mat x, Mat y, Mat angle, bool angleInDegrees) {
+ cv::phase(*x, *y, *angle, angleInDegrees);
+}
+
+
+Scalar Mat_Sum(Mat src) {
+ cv::Scalar c = cv::sum(*src);
+ Scalar scal = Scalar();
+ scal.val1 = c.val[0];
+ scal.val2 = c.val[1];
+ scal.val3 = c.val[2];
+ scal.val4 = c.val[3];
+ return scal;
+}
+
+// TermCriteria_New creates a new TermCriteria
+TermCriteria TermCriteria_New(int typ, int maxCount, double epsilon) {
+ return new cv::TermCriteria(typ, maxCount, epsilon);
+}
+
+void Contours_Close(struct Contours cs) {
+ for (int i = 0; i < cs.length; i++) {
+ Points_Close(cs.contours[i]);
+ }
+
+ delete[] cs.contours;
+}
+
+void KeyPoints_Close(struct KeyPoints ks) {
+ delete[] ks.keypoints;
+}
+
+void Points_Close(Points ps) {
+ for (size_t i = 0; i < ps.length; i++) {
+ Point_Close(ps.points[i]);
+ }
+
+ delete[] ps.points;
+}
+
+void Point_Close(Point p) {}
+
+void Rects_Close(struct Rects rs) {
+ delete[] rs.rects;
+}
+
+void DMatches_Close(struct DMatches ds) {
+ delete[] ds.dmatches;
+}
+
+void MultiDMatches_Close(struct MultiDMatches mds) {
+ for (size_t i = 0; i < mds.length; i++) {
+ DMatches_Close(mds.dmatches[i]);
+ }
+
+ delete[] mds.dmatches;
+}
+
+struct DMatches MultiDMatches_get(struct MultiDMatches mds, int index) {
+ return mds.dmatches[index];
+}
+
+// since it is next to impossible to iterate over mats.mats on the cgo side
+Mat Mats_get(struct Mats mats, int i) {
+ return mats.mats[i];
+}
+
+void Mats_Close(struct Mats mats) {
+ delete[] mats.mats;
+}
+
+void ByteArray_Release(struct ByteArray buf) {
+ delete[] buf.data;
+}
+
+struct ByteArray toByteArray(const char* buf, int len) {
+ ByteArray ret = {new char[len], len};
+ memcpy(ret.data, buf, len);
+ return ret;
+}
+
+int64 GetCVTickCount() {
+ return cv::getTickCount();
+}
+
+double GetTickFrequency() {
+ return cv::getTickFrequency();
+}
+
+Mat Mat_rowRange(Mat m,int startrow,int endrow) {
+ return new cv::Mat(m->rowRange(startrow,endrow));
+}
+
+Mat Mat_colRange(Mat m,int startrow,int endrow) {
+ return new cv::Mat(m->colRange(startrow,endrow));
+}
+
diff --git a/vendor/gocv.io/x/gocv/core.go b/vendor/gocv.io/x/gocv/core.go
new file mode 100644
index 0000000..031be3c
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/core.go
@@ -0,0 +1,1975 @@
+package gocv
+
+/*
+#include <stdlib.h>
+#include "core.h"
+*/
+import "C"
+import (
+ "errors"
+ "image"
+ "image/color"
+ "reflect"
+ "unsafe"
+)
+
+const (
+ // MatChannels1 is a single channel Mat.
+ MatChannels1 = 0
+
+ // MatChannels2 is a 2-channel Mat.
+ MatChannels2 = 8
+
+ // MatChannels3 is a 3-channel Mat.
+ MatChannels3 = 16
+
+ // MatChannels4 is a 4-channel Mat.
+ MatChannels4 = 24
+)
+
+// MatType is the type for the various different kinds of Mat you can create.
+type MatType int
+
+const (
+ // MatTypeCV8U is a Mat of 8-bit unsigned int
+ MatTypeCV8U MatType = 0
+
+ // MatTypeCV8S is a Mat of 8-bit signed int
+ MatTypeCV8S = 1
+
+ // MatTypeCV16U is a Mat of 16-bit unsigned int
+ MatTypeCV16U = 2
+
+ // MatTypeCV16S is a Mat of 16-bit signed int
+ MatTypeCV16S = 3
+
+ // MatTypeCV16SC2 is a Mat of 16-bit signed int with 2 channels
+ MatTypeCV16SC2 = MatTypeCV16S + MatChannels2
+
+ // MatTypeCV32S is a Mat of 32-bit signed int
+ MatTypeCV32S = 4
+
+ // MatTypeCV32F is a Mat of 32-bit float
+ MatTypeCV32F = 5
+
+ // MatTypeCV64F is a Mat of 64-bit float
+ MatTypeCV64F = 6
+
+ // MatTypeCV8UC1 is a Mat of 8-bit unsigned int with a single channel
+ MatTypeCV8UC1 = MatTypeCV8U + MatChannels1
+
+ // MatTypeCV8UC2 is a Mat of 8-bit unsigned int with 2 channels
+ MatTypeCV8UC2 = MatTypeCV8U + MatChannels2
+
+ // MatTypeCV8UC3 is a Mat of 8-bit unsigned int with 3 channels
+ MatTypeCV8UC3 = MatTypeCV8U + MatChannels3
+
+ // MatTypeCV8UC4 is a Mat of 8-bit unsigned int with 4 channels
+ MatTypeCV8UC4 = MatTypeCV8U + MatChannels4
+
+ // MatTypeCV8SC1 is a Mat of 8-bit signed int with a single channel
+ MatTypeCV8SC1 = MatTypeCV8S + MatChannels1
+
+ // MatTypeCV8SC2 is a Mat of 8-bit signed int with 2 channels
+ MatTypeCV8SC2 = MatTypeCV8S + MatChannels2
+
+ // MatTypeCV8SC3 is a Mat of 8-bit signed int with 3 channels
+ MatTypeCV8SC3 = MatTypeCV8S + MatChannels3
+
+ // MatTypeCV8SC4 is a Mat of 8-bit signed int with 4 channels
+ MatTypeCV8SC4 = MatTypeCV8S + MatChannels4
+
+ // MatTypeCV16UC1 is a Mat of 16-bit unsigned int with a single channel
+ MatTypeCV16UC1 = MatTypeCV16U + MatChannels1
+
+ // MatTypeCV16UC2 is a Mat of 16-bit unsigned int with 2 channels
+ MatTypeCV16UC2 = MatTypeCV16U + MatChannels2
+
+ // MatTypeCV16UC3 is a Mat of 16-bit unsigned int with 3 channels
+ MatTypeCV16UC3 = MatTypeCV16U + MatChannels3
+
+ // MatTypeCV16UC4 is a Mat of 16-bit unsigned int with 4 channels
+ MatTypeCV16UC4 = MatTypeCV16U + MatChannels4
+
+ // MatTypeCV16SC1 is a Mat of 16-bit signed int with a single channel
+ MatTypeCV16SC1 = MatTypeCV16S + MatChannels1
+
+ // MatTypeCV16SC3 is a Mat of 16-bit signed int with 3 channels
+ MatTypeCV16SC3 = MatTypeCV16S + MatChannels3
+
+ // MatTypeCV16SC4 is a Mat of 16-bit signed int with 4 channels
+ MatTypeCV16SC4 = MatTypeCV16S + MatChannels4
+
+ // MatTypeCV32SC1 is a Mat of 32-bit signed int with a single channel
+ MatTypeCV32SC1 = MatTypeCV32S + MatChannels1
+
+ // MatTypeCV32SC2 is a Mat of 32-bit signed int with 2 channels
+ MatTypeCV32SC2 = MatTypeCV32S + MatChannels2
+
+ // MatTypeCV32SC3 is a Mat of 32-bit signed int with 3 channels
+ MatTypeCV32SC3 = MatTypeCV32S + MatChannels3
+
+ // MatTypeCV32SC4 is a Mat of 32-bit signed int with 4 channels
+ MatTypeCV32SC4 = MatTypeCV32S + MatChannels4
+
+ // MatTypeCV32FC1 is a Mat of 32-bit float with a single channel
+ MatTypeCV32FC1 = MatTypeCV32F + MatChannels1
+
+ // MatTypeCV32FC2 is a Mat of 32-bit float with 2 channels
+ MatTypeCV32FC2 = MatTypeCV32F + MatChannels2
+
+ // MatTypeCV32FC3 is a Mat of 32-bit float with 3 channels
+ MatTypeCV32FC3 = MatTypeCV32F + MatChannels3
+
+ // MatTypeCV32FC4 is a Mat of 32-bit float with 4 channels
+ MatTypeCV32FC4 = MatTypeCV32F + MatChannels4
+
+ // MatTypeCV64FC1 is a Mat of 64-bit float with a single channel
+ MatTypeCV64FC1 = MatTypeCV64F + MatChannels1
+
+ // MatTypeCV64FC2 is a Mat of 64-bit float with 2 channels
+ MatTypeCV64FC2 = MatTypeCV64F + MatChannels2
+
+ // MatTypeCV64FC3 is a Mat of 64-bit float with 3 channels
+ MatTypeCV64FC3 = MatTypeCV64F + MatChannels3
+
+ // MatTypeCV64FC4 is a Mat of 64-bit float with 4 channels
+ MatTypeCV64FC4 = MatTypeCV64F + MatChannels4
+)
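Each composed constant is simply the OpenCV depth value plus a channel offset, so the results line up with OpenCV's own CV_8UC3, CV_32FC2, and so on. A small sketch that checks the arithmetic against a freshly created Mat:

package main

import (
	"fmt"

	"gocv.io/x/gocv"
)

func main() {
	fmt.Println(int(gocv.MatTypeCV8UC3))  // 0 + 16 = 16, i.e. OpenCV's CV_8UC3
	fmt.Println(int(gocv.MatTypeCV32FC2)) // 5 + 8 = 13, i.e. OpenCV's CV_32FC2

	m := gocv.NewMatWithSize(2, 2, gocv.MatTypeCV8UC3)
	defer m.Close()
	fmt.Println(m.Type() == gocv.MatTypeCV8UC3, m.Channels()) // true 3
}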
+
+// CompareType is used for Compare operations to indicate which kind of
+// comparison to use.
+type CompareType int
+
+const (
+ // CompareEQ src1 is equal to src2.
+ CompareEQ CompareType = 0
+
+ // CompareGT src1 is greater than src2.
+ CompareGT = 1
+
+ // CompareGE src1 is greater than or equal to src2.
+ CompareGE = 2
+
+ // CompareLT src1 is less than src2.
+ CompareLT = 3
+
+ // CompareLE src1 is less than or equal to src2.
+ CompareLE = 4
+
+ // CompareNE src1 is unequal to src2.
+ CompareNE = 5
+)
+
+var ErrEmptyByteSlice = errors.New("empty byte array")
+
+// Mat represents an n-dimensional dense numerical single-channel
+// or multi-channel array. It can be used to store real or complex-valued
+// vectors and matrices, grayscale or color images, voxel volumes,
+// vector fields, point clouds, tensors, and histograms.
+//
+// For further details, please see:
+// http://docs.opencv.org/master/d3/d63/classcv_1_1Mat.html
+//
+type Mat struct {
+ p C.Mat
+}
+
+// NewMat returns a new empty Mat.
+func NewMat() Mat {
+ return newMat(C.Mat_New())
+}
+
+// NewMatWithSize returns a new Mat with a specific size and type.
+func NewMatWithSize(rows int, cols int, mt MatType) Mat {
+ return newMat(C.Mat_NewWithSize(C.int(rows), C.int(cols), C.int(mt)))
+}
+
+// NewMatFromScalar returns a new Mat for a specific Scalar value
+func NewMatFromScalar(s Scalar, mt MatType) Mat {
+ sVal := C.struct_Scalar{
+ val1: C.double(s.Val1),
+ val2: C.double(s.Val2),
+ val3: C.double(s.Val3),
+ val4: C.double(s.Val4),
+ }
+
+ return newMat(C.Mat_NewFromScalar(sVal, C.int(mt)))
+}
+
+// NewMatWithSizeFromScalar returns a new Mat for a specific Scalar value with a specific size and type.
+// This simplifies creation of specific color filters or creating Mats of specific colors and sizes
+func NewMatWithSizeFromScalar(s Scalar, rows int, cols int, mt MatType) Mat {
+ sVal := C.struct_Scalar{
+ val1: C.double(s.Val1),
+ val2: C.double(s.Val2),
+ val3: C.double(s.Val3),
+ val4: C.double(s.Val4),
+ }
+
+ return newMat(C.Mat_NewWithSizeFromScalar(sVal, C.int(rows), C.int(cols), C.int(mt)))
+}
+
+// NewMatFromBytes returns a new Mat with a specific size and type, initialized from a []byte.
+func NewMatFromBytes(rows int, cols int, mt MatType, data []byte) (Mat, error) {
+ cBytes, err := toByteArray(data)
+ if err != nil {
+ return Mat{}, err
+ }
+ return newMat(C.Mat_NewFromBytes(C.int(rows), C.int(cols), C.int(mt), *cBytes)), nil
+}
+
+// FromPtr returns a new Mat with a specific size and type, initialized from a Mat Ptr.
+func (m *Mat) FromPtr(rows int, cols int, mt MatType, prow int, pcol int) (Mat, error) {
+ return newMat(C.Mat_FromPtr(m.p, C.int(rows), C.int(cols), C.int(mt), C.int(prow), C.int(pcol))), nil
+}
+
+// Ptr returns the Mat's underlying object pointer.
+func (m *Mat) Ptr() C.Mat {
+ return m.p
+}
+
+// Empty determines if the Mat is empty or not.
+func (m *Mat) Empty() bool {
+ isEmpty := C.Mat_Empty(m.p)
+ return isEmpty != 0
+}
+
+// Clone returns a cloned full copy of the Mat.
+func (m *Mat) Clone() Mat {
+ return newMat(C.Mat_Clone(m.p))
+}
+
+// CopyTo copies Mat into destination Mat.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d3/d63/classcv_1_1Mat.html#a33fd5d125b4c302b0c9aa86980791a77
+//
+func (m *Mat) CopyTo(dst *Mat) {
+ C.Mat_CopyTo(m.p, dst.p)
+ return
+}
+
+// CopyToWithMask copies Mat into destination Mat after applying the mask Mat.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d3/d63/classcv_1_1Mat.html#a626fe5f96d02525e2604d2ad46dd574f
+//
+func (m *Mat) CopyToWithMask(dst *Mat, mask Mat) {
+ C.Mat_CopyToWithMask(m.p, dst.p, mask.p)
+ return
+}
+
+// ConvertTo converts Mat into destination Mat.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d3/d63/classcv_1_1Mat.html#adf88c60c5b4980e05bb556080916978b
+//
+func (m *Mat) ConvertTo(dst *Mat, mt MatType) {
+ C.Mat_ConvertTo(m.p, dst.p, C.int(mt))
+ return
+}
+
+// Total returns the total number of array elements.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d3/d63/classcv_1_1Mat.html#aa4d317d43fb0cba9c2503f3c61b866c8
+//
+func (m *Mat) Total() int {
+ return int(C.Mat_Total(m.p))
+}
+
+// Size returns an array with one element for each dimension containing the size of that dimension for the Mat.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d3/d63/classcv_1_1Mat.html#aa4d317d43fb0cba9c2503f3c61b866c8
+//
+func (m *Mat) Size() (dims []int) {
+ cdims := C.IntVector{}
+ C.Mat_Size(m.p, &cdims)
+
+ h := &reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(cdims.val)),
+ Len: int(cdims.length),
+ Cap: int(cdims.length),
+ }
+ pdims := *(*[]C.int)(unsafe.Pointer(h))
+
+ for i := 0; i < int(cdims.length); i++ {
+ dims = append(dims, int(pdims[i]))
+ }
+ return
+}
+
+// ToBytes copies the underlying Mat data to a byte array.
+//
+// For further details, please see:
+// https://docs.opencv.org/3.3.1/d3/d63/classcv_1_1Mat.html#a4d33bed1c850265370d2af0ff02e1564
+func (m *Mat) ToBytes() []byte {
+ b := C.Mat_DataPtr(m.p)
+ return toGoBytes(b)
+}
+
+// DataPtrUint8 returns a slice that references the OpenCV allocated data.
+//
+// The data is no longer valid once the Mat has been closed. Any data that
+// needs to be accessed after the Mat is closed must be copied into Go memory.
+func (m *Mat) DataPtrUint8() []uint8 {
+ p := C.Mat_DataPtr(m.p)
+ h := &reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(p.data)),
+ Len: int(p.length),
+ Cap: int(p.length),
+ }
+ return *(*[]uint8)(unsafe.Pointer(h))
+}
+
+// DataPtrInt8 returns a slice that references the OpenCV allocated data.
+//
+// The data is no longer valid once the Mat has been closed. Any data that
+// needs to be accessed after the Mat is closed must be copied into Go memory.
+func (m *Mat) DataPtrInt8() []int8 {
+ p := C.Mat_DataPtr(m.p)
+ h := &reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(p.data)),
+ Len: int(p.length),
+ Cap: int(p.length),
+ }
+ return *(*[]int8)(unsafe.Pointer(h))
+}
+
+// DataPtrUint16 returns a slice that references the OpenCV allocated data.
+//
+// The data is no longer valid once the Mat has been closed. Any data that
+// needs to be accessed after the Mat is closed must be copied into Go memory.
+func (m *Mat) DataPtrUint16() ([]uint16, error) {
+ if m.Type()&MatTypeCV16U != MatTypeCV16U {
+ return nil, errors.New("DataPtrUint16 only supports MatTypeCV16U")
+ }
+
+ p := C.Mat_DataPtr(m.p)
+ h := &reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(p.data)),
+ Len: int(p.length) / 2,
+ Cap: int(p.length) / 2,
+ }
+ return *(*[]uint16)(unsafe.Pointer(h)), nil
+}
+
+// DataPtrInt16 returns a slice that references the OpenCV allocated data.
+//
+// The data is no longer valid once the Mat has been closed. Any data that
+// needs to be accessed after the Mat is closed must be copied into Go memory.
+func (m *Mat) DataPtrInt16() ([]int16, error) {
+ if m.Type()&MatTypeCV16S != MatTypeCV16S {
+ return nil, errors.New("DataPtrInt16 only supports MatTypeCV16S")
+ }
+
+ p := C.Mat_DataPtr(m.p)
+ h := &reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(p.data)),
+ Len: int(p.length) / 2,
+ Cap: int(p.length) / 2,
+ }
+ return *(*[]int16)(unsafe.Pointer(h)), nil
+}
+
+// DataPtrFloat32 returns a slice that references the OpenCV allocated data.
+//
+// The data is no longer valid once the Mat has been closed. Any data that
+// needs to be accessed after the Mat is closed must be copied into Go memory.
+func (m *Mat) DataPtrFloat32() ([]float32, error) {
+ if m.Type()&MatTypeCV32F != MatTypeCV32F {
+ return nil, errors.New("DataPtrFloat32 only supports MatTypeCV32F")
+ }
+
+ p := C.Mat_DataPtr(m.p)
+ h := &reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(p.data)),
+ Len: int(p.length) / 4,
+ Cap: int(p.length) / 4,
+ }
+ return *(*[]float32)(unsafe.Pointer(h)), nil
+}
+
+// DataPtrFloat64 returns a slice that references the OpenCV allocated data.
+//
+// The data is no longer valid once the Mat has been closed. Any data that
+// needs to be accessed after the Mat is closed must be copied into Go memory.
+func (m *Mat) DataPtrFloat64() ([]float64, error) {
+ if m.Type()&MatTypeCV64F != MatTypeCV64F {
+ return nil, errors.New("DataPtrFloat64 only supports MatTypeCV64F")
+ }
+
+ p := C.Mat_DataPtr(m.p)
+ h := &reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(p.data)),
+ Len: int(p.length) / 8,
+ Cap: int(p.length) / 8,
+ }
+ return *(*[]float64)(unsafe.Pointer(h)), nil
+}
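The caveat repeated in the DataPtr* comments matters in practice: the returned slices alias memory owned by OpenCV, so any values that must outlive the Mat have to be copied into Go memory first. A minimal sketch of that pattern (snapshotFloats is an illustrative helper, not part of gocv):

package example

import "gocv.io/x/gocv"

// snapshotFloats copies a CV_32F Mat's data into Go-owned memory so the
// values stay valid after the Mat is closed.
func snapshotFloats(m gocv.Mat) ([]float32, error) {
	view, err := m.DataPtrFloat32() // slice aliasing OpenCV's buffer
	if err != nil {
		return nil, err
	}
	out := make([]float32, len(view))
	copy(out, view) // out remains usable after m.Close()
	return out, nil
}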
+
+// Region returns a new Mat that points to a region of this Mat. Changes made to the
+// region Mat will affect the original Mat, since they are pointers to the underlying
+// OpenCV Mat object.
+func (m *Mat) Region(rio image.Rectangle) Mat {
+ cRect := C.struct_Rect{
+ x: C.int(rio.Min.X),
+ y: C.int(rio.Min.Y),
+ width: C.int(rio.Size().X),
+ height: C.int(rio.Size().Y),
+ }
+
+ return newMat(C.Mat_Region(m.p, cRect))
+}
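A short sketch of the aliasing behavior described above; writes through the region Mat are visible in the parent because both share the same underlying OpenCV storage:

package main

import (
	"fmt"
	"image"

	"gocv.io/x/gocv"
)

func main() {
	m := gocv.NewMatWithSize(4, 4, gocv.MatTypeCV8UC1)
	defer m.Close()

	roi := m.Region(image.Rect(1, 1, 3, 3)) // shares storage with m
	defer roi.Close()
	roi.SetUCharAt(0, 0, 42)

	fmt.Println(m.GetUCharAt(1, 1)) // 42: the write is visible in the parent
}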
+
+// Reshape changes the shape and/or the number of channels of a 2D matrix without copying the data.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d3/d63/classcv_1_1Mat.html#a4eb96e3251417fa88b78e2abd6cfd7d8
+//
+func (m *Mat) Reshape(cn int, rows int) Mat {
+ return newMat(C.Mat_Reshape(m.p, C.int(cn), C.int(rows)))
+}
+
+// ConvertFp16 converts a Mat to half-precision floating point.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga9c25d9ef44a2a48ecc3774b30cb80082
+//
+func (m *Mat) ConvertFp16() Mat {
+ return newMat(C.Mat_ConvertFp16(m.p))
+}
+
+// Mean calculates the mean value M of array elements, independently for each channel, and returns it as a Scalar.
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga191389f8a0e58180bb13a727782cd461
+//
+func (m *Mat) Mean() Scalar {
+ s := C.Mat_Mean(m.p)
+ return NewScalar(float64(s.val1), float64(s.val2), float64(s.val3), float64(s.val4))
+}
+
+// MeanWithMask calculates the mean value M of array elements, independently for each channel,
+// and returns it as a Scalar vector while applying the mask.
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga191389f8a0e58180bb13a727782cd461
+//
+func (m *Mat) MeanWithMask(mask Mat) Scalar {
+ s := C.Mat_MeanWithMask(m.p, mask.p)
+ return NewScalar(float64(s.val1), float64(s.val2), float64(s.val3), float64(s.val4))
+}
+
+// Sqrt calculates a square root of array elements.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga186222c3919657890f88df5a1f64a7d7
+//
+func (m *Mat) Sqrt() Mat {
+ return newMat(C.Mat_Sqrt(m.p))
+}
+
+// Sum calculates the per-channel pixel sum of an image.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga716e10a2dd9e228e4d3c95818f106722
+//
+func (m *Mat) Sum() Scalar {
+ s := C.Mat_Sum(m.p)
+ return NewScalar(float64(s.val1), float64(s.val2), float64(s.val3), float64(s.val4))
+}
+
+// PatchNaNs converts NaNs to zeros.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga62286befb7cde3568ff8c7d14d5079da
+//
+func (m *Mat) PatchNaNs() {
+ C.Mat_PatchNaNs(m.p)
+}
+
+// LUT performs a look-up table transform of an array.
+//
+// The function LUT fills the output array with values from the look-up table.
+// Indices of the entries are taken from the input array.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#gab55b8d062b7f5587720ede032d34156f
+func LUT(src, wbLUT Mat, dst *Mat) {
+ C.LUT(src.p, wbLUT.p, dst.p)
+}
+
+// Rows returns the number of rows for this Mat.
+func (m *Mat) Rows() int {
+ return int(C.Mat_Rows(m.p))
+}
+
+// Cols returns the number of columns for this Mat.
+func (m *Mat) Cols() int {
+ return int(C.Mat_Cols(m.p))
+}
+
+// Channels returns the number of channels for this Mat.
+func (m *Mat) Channels() int {
+ return int(C.Mat_Channels(m.p))
+}
+
+// Type returns the type for this Mat.
+func (m *Mat) Type() MatType {
+ return MatType(C.Mat_Type(m.p))
+}
+
+// Step returns the number of bytes each matrix row occupies.
+func (m *Mat) Step() int {
+ return int(C.Mat_Step(m.p))
+}
+
+// GetUCharAt returns a value from a specific row/col
+// in this Mat expecting it to be of type uchar aka CV_8U.
+func (m *Mat) GetUCharAt(row int, col int) uint8 {
+ return uint8(C.Mat_GetUChar(m.p, C.int(row), C.int(col)))
+}
+
+// GetUCharAt3 returns a value from a specific x, y, z coordinate location
+// in this Mat expecting it to be of type uchar aka CV_8U.
+func (m *Mat) GetUCharAt3(x, y, z int) uint8 {
+ return uint8(C.Mat_GetUChar3(m.p, C.int(x), C.int(y), C.int(z)))
+}
+
+// GetSCharAt returns a value from a specific row/col
+// in this Mat expecting it to be of type schar aka CV_8S.
+func (m *Mat) GetSCharAt(row int, col int) int8 {
+ return int8(C.Mat_GetSChar(m.p, C.int(row), C.int(col)))
+}
+
+// GetSCharAt3 returns a value from a specific x, y, z coordinate location
+// in this Mat expecting it to be of type schar aka CV_8S.
+func (m *Mat) GetSCharAt3(x, y, z int) int8 {
+ return int8(C.Mat_GetSChar3(m.p, C.int(x), C.int(y), C.int(z)))
+}
+
+// GetShortAt returns a value from a specific row/col
+// in this Mat expecting it to be of type short aka CV_16S.
+func (m *Mat) GetShortAt(row int, col int) int16 {
+ return int16(C.Mat_GetShort(m.p, C.int(row), C.int(col)))
+}
+
+// GetShortAt3 returns a value from a specific x, y, z coordinate location
+// in this Mat expecting it to be of type short aka CV_16S.
+func (m *Mat) GetShortAt3(x, y, z int) int16 {
+ return int16(C.Mat_GetShort3(m.p, C.int(x), C.int(y), C.int(z)))
+}
+
+// GetIntAt returns a value from a specific row/col
+// in this Mat expecting it to be of type int aka CV_32S.
+func (m *Mat) GetIntAt(row int, col int) int32 {
+ return int32(C.Mat_GetInt(m.p, C.int(row), C.int(col)))
+}
+
+// GetIntAt3 returns a value from a specific x, y, z coordinate location
+// in this Mat expecting it to be of type int aka CV_32S.
+func (m *Mat) GetIntAt3(x, y, z int) int32 {
+ return int32(C.Mat_GetInt3(m.p, C.int(x), C.int(y), C.int(z)))
+}
+
+// GetFloatAt returns a value from a specific row/col
+// in this Mat expecting it to be of type float aka CV_32F.
+func (m *Mat) GetFloatAt(row int, col int) float32 {
+ return float32(C.Mat_GetFloat(m.p, C.int(row), C.int(col)))
+}
+
+// GetFloatAt3 returns a value from a specific x, y, z coordinate location
+// in this Mat expecting it to be of type float aka CV_32F.
+func (m *Mat) GetFloatAt3(x, y, z int) float32 {
+ return float32(C.Mat_GetFloat3(m.p, C.int(x), C.int(y), C.int(z)))
+}
+
+// GetDoubleAt returns a value from a specific row/col
+// in this Mat expecting it to be of type double aka CV_64F.
+func (m *Mat) GetDoubleAt(row int, col int) float64 {
+ return float64(C.Mat_GetDouble(m.p, C.int(row), C.int(col)))
+}
+
+// GetDoubleAt3 returns a value from a specific x, y, z coordinate location
+// in this Mat expecting it to be of type double aka CV_64F.
+func (m *Mat) GetDoubleAt3(x, y, z int) float64 {
+ return float64(C.Mat_GetDouble3(m.p, C.int(x), C.int(y), C.int(z)))
+}
+
+// SetTo sets all or some of the array elements to the specified scalar value.
+func (m *Mat) SetTo(s Scalar) {
+ sVal := C.struct_Scalar{
+ val1: C.double(s.Val1),
+ val2: C.double(s.Val2),
+ val3: C.double(s.Val3),
+ val4: C.double(s.Val4),
+ }
+
+ C.Mat_SetTo(m.p, sVal)
+}
+
+// SetUCharAt sets a value at a specific row/col
+// in this Mat expecting it to be of type uchar aka CV_8U.
+func (m *Mat) SetUCharAt(row int, col int, val uint8) {
+ C.Mat_SetUChar(m.p, C.int(row), C.int(col), C.uint8_t(val))
+}
+
+// SetUCharAt3 sets a value at a specific x, y, z coordinate location
+// in this Mat expecting it to be of type uchar aka CV_8U.
+func (m *Mat) SetUCharAt3(x, y, z int, val uint8) {
+ C.Mat_SetUChar3(m.p, C.int(x), C.int(y), C.int(z), C.uint8_t(val))
+}
+
+// SetSCharAt sets a value at a specific row/col
+// in this Mat expecting it to be of type schar aka CV_8S.
+func (m *Mat) SetSCharAt(row int, col int, val int8) {
+ C.Mat_SetSChar(m.p, C.int(row), C.int(col), C.int8_t(val))
+}
+
+// SetSCharAt3 sets a value at a specific x, y, z coordinate location
+// in this Mat expecting it to be of type schar aka CV_8S.
+func (m *Mat) SetSCharAt3(x, y, z int, val int8) {
+ C.Mat_SetSChar3(m.p, C.int(x), C.int(y), C.int(z), C.int8_t(val))
+}
+
+// SetShortAt sets a value at a specific row/col
+// in this Mat expecting it to be of type short aka CV_16S.
+func (m *Mat) SetShortAt(row int, col int, val int16) {
+ C.Mat_SetShort(m.p, C.int(row), C.int(col), C.int16_t(val))
+}
+
+// SetShortAt3 sets a value at a specific x, y, z coordinate location
+// in this Mat expecting it to be of type short aka CV_16S.
+func (m *Mat) SetShortAt3(x, y, z int, val int16) {
+ C.Mat_SetShort3(m.p, C.int(x), C.int(y), C.int(z), C.int16_t(val))
+}
+
+// SetIntAt sets a value at a specific row/col
+// in this Mat expecting it to be of type int aka CV_32S.
+func (m *Mat) SetIntAt(row int, col int, val int32) {
+ C.Mat_SetInt(m.p, C.int(row), C.int(col), C.int32_t(val))
+}
+
+// SetIntAt3 sets a value at a specific x, y, z coordinate location
+// in this Mat expecting it to be of type int aka CV_32S.
+func (m *Mat) SetIntAt3(x, y, z int, val int32) {
+ C.Mat_SetInt3(m.p, C.int(x), C.int(y), C.int(z), C.int32_t(val))
+}
+
+// SetFloatAt sets a value at a specific row/col
+// in this Mat expecting it to be of type float aka CV_32F.
+func (m *Mat) SetFloatAt(row int, col int, val float32) {
+ C.Mat_SetFloat(m.p, C.int(row), C.int(col), C.float(val))
+}
+
+// SetFloatAt3 sets a value at a specific x, y, z coordinate location
+// in this Mat expecting it to be of type float aka CV_32F.
+func (m *Mat) SetFloatAt3(x, y, z int, val float32) {
+ C.Mat_SetFloat3(m.p, C.int(x), C.int(y), C.int(z), C.float(val))
+}
+
+// SetDoubleAt sets a value at a specific row/col
+// in this Mat expecting it to be of type double aka CV_64F.
+func (m *Mat) SetDoubleAt(row int, col int, val float64) {
+ C.Mat_SetDouble(m.p, C.int(row), C.int(col), C.double(val))
+}
+
+// SetDoubleAt3 sets a value at a specific x, y, z coordinate location
+// in this Mat expecting it to be of type double aka CV_64F.
+func (m *Mat) SetDoubleAt3(x, y, z int, val float64) {
+ C.Mat_SetDouble3(m.p, C.int(x), C.int(y), C.int(z), C.double(val))
+}
+
+// AddUChar adds a uchar value to each element in the Mat. Performs a
+// mat += val operation.
+func (m *Mat) AddUChar(val uint8) {
+ C.Mat_AddUChar(m.p, C.uint8_t(val))
+}
+
+// SubtractUChar subtracts a uchar value from each element in the Mat. Performs a
+// mat -= val operation.
+func (m *Mat) SubtractUChar(val uint8) {
+ C.Mat_SubtractUChar(m.p, C.uint8_t(val))
+}
+
+// MultiplyUChar multiplies each element in the Mat by a uint value. Performs a
+// mat *= val operation.
+func (m *Mat) MultiplyUChar(val uint8) {
+ C.Mat_MultiplyUChar(m.p, C.uint8_t(val))
+}
+
+// DivideUChar divides each element in the Mat by a uint value. Performs a
+// mat /= val operation.
+func (m *Mat) DivideUChar(val uint8) {
+ C.Mat_DivideUChar(m.p, C.uint8_t(val))
+}
+
+// AddFloat adds a float value to each element in the Mat. Performs a
+// mat += val operation.
+func (m *Mat) AddFloat(val float32) {
+ C.Mat_AddFloat(m.p, C.float(val))
+}
+
+// SubtractFloat subtracts a float value from each element in the Mat. Performs a
+// mat -= val operation.
+func (m *Mat) SubtractFloat(val float32) {
+ C.Mat_SubtractFloat(m.p, C.float(val))
+}
+
+// MultiplyFloat multiplies each element in the Mat by a float value. Performs a
+// mat *= val operation.
+func (m *Mat) MultiplyFloat(val float32) {
+ C.Mat_MultiplyFloat(m.p, C.float(val))
+}
+
+// DivideFloat divides each element in the Mat by a float value. Performs a
+// mat /= val operation.
+func (m *Mat) DivideFloat(val float32) {
+ C.Mat_DivideFloat(m.p, C.float(val))
+}
+
+// MultiplyMatrix performs the matrix multiplication m*x.
+func (m *Mat) MultiplyMatrix(x Mat) Mat {
+ return newMat(C.Mat_MultiplyMatrix(m.p, x.p))
+}
+
+// T transposes the matrix.
+// https://docs.opencv.org/4.1.2/d3/d63/classcv_1_1Mat.html#aaa428c60ccb6d8ea5de18f63dfac8e11
+func (m *Mat) T() Mat {
+ return newMat(C.Mat_T(m.p))
+}
+
+// ToImage converts a Mat to a image.Image.
+func (m *Mat) ToImage() (image.Image, error) {
+ t := m.Type()
+ if t != MatTypeCV8UC1 && t != MatTypeCV8UC3 && t != MatTypeCV8UC4 {
+ return nil, errors.New("ToImage supports only MatType CV8UC1, CV8UC3 and CV8UC4")
+ }
+
+ width := m.Cols()
+ height := m.Rows()
+ step := m.Step()
+ data := m.ToBytes()
+ channels := m.Channels()
+
+ if t == MatTypeCV8UC1 {
+ img := image.NewGray(image.Rect(0, 0, width, height))
+ c := color.Gray{Y: uint8(0)}
+
+ for y := 0; y < height; y++ {
+ for x := 0; x < width; x++ {
+ c.Y = uint8(data[y*step+x])
+ img.SetGray(x, y, c)
+ }
+ }
+
+ return img, nil
+ }
+
+ img := image.NewRGBA(image.Rect(0, 0, width, height))
+ c := color.RGBA{
+ R: uint8(0),
+ G: uint8(0),
+ B: uint8(0),
+ A: uint8(255),
+ }
+
+ for y := 0; y < height; y++ {
+ for x := 0; x < step; x = x + channels {
+ c.B = uint8(data[y*step+x])
+ c.G = uint8(data[y*step+x+1])
+ c.R = uint8(data[y*step+x+2])
+ if channels == 4 {
+ c.A = uint8(data[y*step+x+3])
+ }
+ img.SetRGBA(int(x/channels), y, c)
+ }
+ }
+
+ return img, nil
+}
+
+// ImageToMatRGBA converts an image.Image to a gocv.Mat that stores an RGBA
+// image with 8 bits per component (bytes packed in OpenCV's B, G, R, A order).
+// The type of the Mat is gocv.MatTypeCV8UC4.
+func ImageToMatRGBA(img image.Image) (Mat, error) {
+ bounds := img.Bounds()
+ x := bounds.Dx()
+ y := bounds.Dy()
+ data := make([]byte, 0, x*y*4)
+ for j := bounds.Min.Y; j < bounds.Max.Y; j++ {
+ for i := bounds.Min.X; i < bounds.Max.X; i++ {
+ r, g, b, a := img.At(i, j).RGBA()
+ data = append(data, byte(b>>8), byte(g>>8), byte(r>>8), byte(a>>8))
+ }
+ }
+ return NewMatFromBytes(y, x, MatTypeCV8UC4, data)
+}
+
+// ImageToMatRGB converts an image.Image to a gocv.Mat that stores an RGB
+// image with 8 bits per component (bytes packed in OpenCV's B, G, R order).
+// The type of the Mat is gocv.MatTypeCV8UC3.
+func ImageToMatRGB(img image.Image) (Mat, error) {
+ bounds := img.Bounds()
+ x := bounds.Dx()
+ y := bounds.Dy()
+ data := make([]byte, 0, x*y*3)
+ for j := bounds.Min.Y; j < bounds.Max.Y; j++ {
+ for i := bounds.Min.X; i < bounds.Max.X; i++ {
+ r, g, b, _ := img.At(i, j).RGBA()
+ data = append(data, byte(b>>8), byte(g>>8), byte(r>>8))
+ }
+ }
+ return NewMatFromBytes(y, x, MatTypeCV8UC3, data)
+}
+
+// ImageGrayToMatGray converts an image.Gray to a gocv.Mat that stores an
+// 8-bit grayscale image.
+// The type of the Mat is gocv.MatTypeCV8UC1.
+func ImageGrayToMatGray(img *image.Gray) (Mat, error) {
+ bounds := img.Bounds()
+ x := bounds.Dx()
+ y := bounds.Dy()
+ data := make([]byte, 0, x*y)
+ for j := bounds.Min.Y; j < bounds.Max.Y; j++ {
+ for i := bounds.Min.X; i < bounds.Max.X; i++ {
+ data = append(data, img.GrayAt(i, j).Y)
+ }
+ }
+ return NewMatFromBytes(y, x, MatTypeCV8UC1, data)
+}
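These conversion helpers pair naturally with ToImage for round trips between Go's image package and gocv. A minimal sketch using the grayscale path:

package main

import (
	"fmt"
	"image"
	"image/color"

	"gocv.io/x/gocv"
)

func main() {
	// Build a tiny grayscale image, convert it to a Mat and back.
	src := image.NewGray(image.Rect(0, 0, 2, 2))
	src.SetGray(0, 0, color.Gray{Y: 200})

	m, err := gocv.ImageGrayToMatGray(src)
	if err != nil {
		panic(err)
	}
	defer m.Close()

	img, err := m.ToImage() // CV_8UC1 comes back as *image.Gray
	if err != nil {
		panic(err)
	}
	gray := img.(*image.Gray)
	fmt.Println(gray.GrayAt(0, 0).Y) // 200: the value round-trips
}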
+
+// AbsDiff calculates the per-element absolute difference between two arrays
+// or between an array and a scalar.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga6fef31bc8c4071cbc114a758a2b79c14
+//
+func AbsDiff(src1, src2 Mat, dst *Mat) {
+ C.Mat_AbsDiff(src1.p, src2.p, dst.p)
+}
+
+// Add calculates the per-element sum of two arrays or an array and a scalar.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga10ac1bfb180e2cfda1701d06c24fdbd6
+//
+func Add(src1, src2 Mat, dst *Mat) {
+ C.Mat_Add(src1.p, src2.p, dst.p)
+}
+
+// AddWeighted calculates the weighted sum of two arrays.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#gafafb2513349db3bcff51f54ee5592a19
+//
+func AddWeighted(src1 Mat, alpha float64, src2 Mat, beta float64, gamma float64, dst *Mat) {
+ C.Mat_AddWeighted(src1.p, C.double(alpha),
+ src2.p, C.double(beta), C.double(gamma), dst.p)
+}
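A common use of AddWeighted is plain alpha blending of two images of the same size and type. A minimal sketch (blend is an illustrative helper, not part of gocv):

package example

import "gocv.io/x/gocv"

// blend returns dst = 0.7*a + 0.3*b; a and b must share size and type.
func blend(a, b gocv.Mat) gocv.Mat {
	dst := gocv.NewMat()
	gocv.AddWeighted(a, 0.7, b, 0.3, 0.0, &dst)
	return dst
}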
+
+// BitwiseAnd computes bitwise conjunction of the two arrays (dst = src1 & src2).
+// Calculates the per-element bit-wise conjunction of two arrays
+// or an array and a scalar.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga60b4d04b251ba5eb1392c34425497e14
+//
+func BitwiseAnd(src1 Mat, src2 Mat, dst *Mat) {
+ C.Mat_BitwiseAnd(src1.p, src2.p, dst.p)
+}
+
+// BitwiseAndWithMask computes bitwise conjunction of the two arrays (dst = src1 & src2).
+// Calculates the per-element bit-wise conjunction of two arrays
+// or an array and a scalar. It has an additional parameter for a mask.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga60b4d04b251ba5eb1392c34425497e14
+//
+func BitwiseAndWithMask(src1 Mat, src2 Mat, dst *Mat, mask Mat) {
+ C.Mat_BitwiseAndWithMask(src1.p, src2.p, dst.p, mask.p)
+}
+
+// BitwiseNot inverts every bit of an array.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga0002cf8b418479f4cb49a75442baee2f
+//
+func BitwiseNot(src1 Mat, dst *Mat) {
+ C.Mat_BitwiseNot(src1.p, dst.p)
+}
+
+// BitwiseNotWithMask inverts every bit of an array. It has an additional parameter for a mask.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga0002cf8b418479f4cb49a75442baee2f
+//
+func BitwiseNotWithMask(src1 Mat, dst *Mat, mask Mat) {
+ C.Mat_BitwiseNotWithMask(src1.p, dst.p, mask.p)
+}
+
+// BitwiseOr calculates the per-element bit-wise disjunction of two arrays
+// or an array and a scalar.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#gab85523db362a4e26ff0c703793a719b4
+//
+func BitwiseOr(src1 Mat, src2 Mat, dst *Mat) {
+ C.Mat_BitwiseOr(src1.p, src2.p, dst.p)
+}
+
+// BitwiseOrWithMask calculates the per-element bit-wise disjunction of two arrays
+// or an array and a scalar. It has an additional parameter for a mask.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#gab85523db362a4e26ff0c703793a719b4
+//
+func BitwiseOrWithMask(src1 Mat, src2 Mat, dst *Mat, mask Mat) {
+ C.Mat_BitwiseOrWithMask(src1.p, src2.p, dst.p, mask.p)
+}
+
+// BitwiseXor calculates the per-element bit-wise "exclusive or" operation
+// on two arrays or an array and a scalar.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga84b2d8188ce506593dcc3f8cd00e8e2c
+//
+func BitwiseXor(src1 Mat, src2 Mat, dst *Mat) {
+ C.Mat_BitwiseXor(src1.p, src2.p, dst.p)
+}
+
+// BitwiseXorWithMask calculates the per-element bit-wise "exclusive or" operation
+// on two arrays or an array and a scalar. It has an additional parameter for a mask.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga84b2d8188ce506593dcc3f8cd00e8e2c
+//
+func BitwiseXorWithMask(src1 Mat, src2 Mat, dst *Mat, mask Mat) {
+ C.Mat_BitwiseXorWithMask(src1.p, src2.p, dst.p, mask.p)
+}
+
+// BatchDistance is a naive nearest neighbor finder.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga4ba778a1c57f83233b1d851c83f5a622
+//
+func BatchDistance(src1 Mat, src2 Mat, dist Mat, dtype int, nidx Mat, normType int, K int, mask Mat, update int, crosscheck bool) {
+ C.Mat_BatchDistance(src1.p, src2.p, dist.p, C.int(dtype), nidx.p, C.int(normType), C.int(K), mask.p, C.int(update), C.bool(crosscheck))
+}
+
+// BorderInterpolate computes the source location of an extrapolated pixel.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga247f571aa6244827d3d798f13892da58
+//
+func BorderInterpolate(p int, len int, borderType CovarFlags) int {
+ ret := C.Mat_BorderInterpolate(C.int(p), C.int(len), C.int(borderType))
+ return int(ret)
+}
+
+// CovarFlags are the covariation flags used by functions such as BorderInterpolate.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d0/de1/group__core.html#ga719ebd4a73f30f4fab258ab7616d0f0f
+//
+type CovarFlags int
+
+const (
+ // CovarScrambled indicates to scramble the results.
+ CovarScrambled CovarFlags = 0
+
+ // CovarNormal indicates to use normal covariation.
+ CovarNormal = 1
+
+ // CovarUseAvg indicates to use average covariation.
+ CovarUseAvg = 2
+
+ // CovarScale indicates to use scaled covariation.
+ CovarScale = 4
+
+ // CovarRows indicates to use covariation on rows.
+ CovarRows = 8
+
+ // CovarCols indicates to use covariation on columns.
+ CovarCols = 16
+)
+
+// CalcCovarMatrix calculates the covariance matrix of a set of vectors.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga017122d912af19d7d0d2cccc2d63819f
+//
+func CalcCovarMatrix(samples Mat, covar *Mat, mean *Mat, flags CovarFlags, ctype int) {
+ C.Mat_CalcCovarMatrix(samples.p, covar.p, mean.p, C.int(flags), C.int(ctype))
+}
+
+// CartToPolar calculates the magnitude and angle of 2D vectors.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#gac5f92f48ec32cacf5275969c33ee837d
+//
+func CartToPolar(x Mat, y Mat, magnitude *Mat, angle *Mat, angleInDegrees bool) {
+ C.Mat_CartToPolar(x.p, y.p, magnitude.p, angle.p, C.bool(angleInDegrees))
+}
+
+// CheckRange checks every element of an input array for invalid values.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga2bd19d89cae59361416736f87e3c7a64
+//
+func CheckRange(src Mat) bool {
+ return bool(C.Mat_CheckRange(src.p))
+}
+
+// Compare performs the per-element comparison of two arrays
+// or an array and scalar value.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga303cfb72acf8cbb36d884650c09a3a97
+//
+func Compare(src1 Mat, src2 Mat, dst *Mat, ct CompareType) {
+ C.Mat_Compare(src1.p, src2.p, dst.p, C.int(ct))
+}
+
+// CountNonZero counts non-zero array elements.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#gaa4b89393263bb4d604e0fe5986723914
+//
+func CountNonZero(src Mat) int {
+ return int(C.Mat_CountNonZero(src.p))
+}
+
+// CompleteSymm copies the lower or the upper half of a square matrix to its another half.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#gaa9d88dcd0e54b6d1af38d41f2a3e3d25
+//
+func CompleteSymm(m Mat, lowerToUpper bool) {
+ C.Mat_CompleteSymm(m.p, C.bool(lowerToUpper))
+}
+
+// ConvertScaleAbs scales, calculates absolute values, and converts the result to 8-bit.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga3460e9c9f37b563ab9dd550c4d8c4e7d
+//
+func ConvertScaleAbs(src Mat, dst *Mat, alpha float64, beta float64) {
+ C.Mat_ConvertScaleAbs(src.p, dst.p, C.double(alpha), C.double(beta))
+}
+
+// CopyMakeBorder forms a border around an image (applies padding).
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga2ac1049c2c3dd25c2b41bffe17658a36
+//
+func CopyMakeBorder(src Mat, dst *Mat, top int, bottom int, left int, right int, bt BorderType, value color.RGBA) {
+
+ cValue := C.struct_Scalar{
+ val1: C.double(value.B),
+ val2: C.double(value.G),
+ val3: C.double(value.R),
+ val4: C.double(value.A),
+ }
+
+ C.Mat_CopyMakeBorder(src.p, dst.p, C.int(top), C.int(bottom), C.int(left), C.int(right), C.int(bt), cValue)
+}
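+
+// A minimal usage sketch: pad an image with a 10-pixel constant black border.
+// The input file name is hypothetical, and BorderConstant is assumed to be the
+// desired border mode.
+//
+//   src := IMRead("input.jpg", IMReadColor)
+//   defer src.Close()
+//   dst := NewMat()
+//   defer dst.Close()
+//   CopyMakeBorder(src, &dst, 10, 10, 10, 10, BorderConstant, color.RGBA{0, 0, 0, 0})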
+
+// DftFlags represents a DFT or DCT flag.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#gaf4dde112b483b38175621befedda1f1c
+//
+type DftFlags int
+
+const (
+ // DftForward performs forward 1D or 2D dft or dct.
+ DftForward DftFlags = 0
+
+ // DftInverse performs an inverse 1D or 2D transform.
+ DftInverse = 1
+
+ // DftScale scales the result: divide it by the number of array elements. Normally, it is combined with DFT_INVERSE.
+ DftScale = 2
+
+ // DftRows performs a forward or inverse transform of every individual row of the input matrix.
+ DftRows = 4
+
+ // DftComplexOutput performs a forward transformation of 1D or 2D real array; the result, though being a complex array, has complex-conjugate symmetry
+ DftComplexOutput = 16
+
+ // DftRealOutput performs an inverse transformation of a 1D or 2D complex array; the result is normally a complex array of the same size,
+ // however, if the input array has conjugate-complex symmetry (for example, it is a result of forward transformation with DFT_COMPLEX_OUTPUT flag),
+ // the output is a real array.
+ DftRealOutput = 32
+
+ // DftComplexInput specifies that input is complex input. If this flag is set, the input must have 2 channels.
+ DftComplexInput = 64
+
+ // DctInverse performs an inverse 1D or 2D dct transform.
+ DctInverse = DftInverse
+
+ // DctRows performs a forward or inverse dct transform of every individual row of the input matrix.
+ DctRows = DftRows
+)
+
+// DCT performs a forward or inverse discrete Cosine transform of 1D or 2D array.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga85aad4d668c01fbd64825f589e3696d4
+//
+func DCT(src Mat, dst *Mat, flags DftFlags) {
+ C.Mat_DCT(src.p, dst.p, C.int(flags))
+}
+
+// Determinant returns the determinant of a square floating-point matrix.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#gaf802bd9ca3e07b8b6170645ef0611d0c
+//
+func Determinant(src Mat) float64 {
+ return float64(C.Mat_Determinant(src.p))
+}
+
+// DFT performs a forward or inverse Discrete Fourier Transform (DFT)
+// of a 1D or 2D floating-point array.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#gadd6cf9baf2b8b704a11b5f04aaf4f39d
+//
+func DFT(src Mat, dst *Mat, flags DftFlags) {
+ C.Mat_DFT(src.p, dst.p, C.int(flags))
+}
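+
+// A minimal usage sketch: run a forward DFT on a single-channel float matrix,
+// using GetOptimalDFTSize to pick an efficient transform size. The dimensions
+// are arbitrary example values.
+//
+//   rows := GetOptimalDFTSize(480)
+//   cols := GetOptimalDFTSize(640)
+//   src := NewMatWithSize(rows, cols, MatTypeCV32F)
+//   defer src.Close()
+//   dst := NewMat()
+//   defer dst.Close()
+//   DFT(src, &dst, DftComplexOutput)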
+
+// Divide performs the per-element division
+// on two arrays or an array and a scalar.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga6db555d30115642fedae0cda05604874
+//
+func Divide(src1 Mat, src2 Mat, dst *Mat) {
+ C.Mat_Divide(src1.p, src2.p, dst.p)
+}
+
+// Eigen calculates eigenvalues and eigenvectors of a symmetric matrix.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga9fa0d58657f60eaa6c71f6fbb40456e3
+//
+func Eigen(src Mat, eigenvalues *Mat, eigenvectors *Mat) bool {
+ ret := C.Mat_Eigen(src.p, eigenvalues.p, eigenvectors.p)
+ return bool(ret)
+}
+
+// EigenNonSymmetric calculates eigenvalues and eigenvectors of a non-symmetric matrix (real eigenvalues only).
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#gaf51987e03cac8d171fbd2b327cf966f6
+//
+func EigenNonSymmetric(src Mat, eigenvalues *Mat, eigenvectors *Mat) {
+ C.Mat_EigenNonSymmetric(src.p, eigenvalues.p, eigenvectors.p)
+}
+
+// Exp calculates the exponent of every array element.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga3e10108e2162c338f1b848af619f39e5
+//
+func Exp(src Mat, dst *Mat) {
+ C.Mat_Exp(src.p, dst.p)
+}
+
+// ExtractChannel extracts a single channel from src (coi is 0-based index).
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#gacc6158574aa1f0281878c955bcf35642
+//
+func ExtractChannel(src Mat, dst *Mat, coi int) {
+ C.Mat_ExtractChannel(src.p, dst.p, C.int(coi))
+}
+
+// FindNonZero returns the list of locations of non-zero pixels.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#gaed7df59a3539b4cc0fe5c9c8d7586190
+//
+func FindNonZero(src Mat, idx *Mat) {
+ C.Mat_FindNonZero(src.p, idx.p)
+}
+
+// Flip flips a 2D array around horizontal(0), vertical(1), or both axes(-1).
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#gaca7be533e3dac7feb70fc60635adf441
+//
+func Flip(src Mat, dst *Mat, flipCode int) {
+ C.Mat_Flip(src.p, dst.p, C.int(flipCode))
+}
+
+// Gemm performs generalized matrix multiplication.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#gacb6e64071dffe36434e1e7ee79e7cb35
+//
+func Gemm(src1, src2 Mat, alpha float64, src3 Mat, beta float64, dst *Mat, flags int) {
+ C.Mat_Gemm(src1.p, src2.p, C.double(alpha), src3.p, C.double(beta), dst.p, C.int(flags))
+}
+
+// GetOptimalDFTSize returns the optimal Discrete Fourier Transform (DFT) size
+// for a given vector size.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga6577a2e59968936ae02eb2edde5de299
+//
+func GetOptimalDFTSize(vecsize int) int {
+ return int(C.Mat_GetOptimalDFTSize(C.int(vecsize)))
+}
+
+// Hconcat applies horizontal concatenation to given matrices.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#gaab5ceee39e0580f879df645a872c6bf7
+//
+func Hconcat(src1, src2 Mat, dst *Mat) {
+ C.Mat_Hconcat(src1.p, src2.p, dst.p)
+}
+
+// Vconcat applies vertical concatenation to given matrices.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#gaab5ceee39e0580f879df645a872c6bf7
+//
+func Vconcat(src1, src2 Mat, dst *Mat) {
+ C.Mat_Vconcat(src1.p, src2.p, dst.p)
+}
+
+// RotateFlag for image rotation.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga6f45d55c0b1cc9d97f5353a7c8a7aac2
+//
+type RotateFlag int
+
+const (
+ // Rotate90Clockwise rotates the image 90 degrees clockwise.
+ Rotate90Clockwise RotateFlag = 0
+ // Rotate180Clockwise rotates the image 180 degrees.
+ Rotate180Clockwise = 1
+ // Rotate90CounterClockwise rotates the image 90 degrees counter-clockwise (i.e. 270 degrees clockwise).
+ Rotate90CounterClockwise = 2
+)
+
+// Rotate rotates a 2D array in multiples of 90 degrees
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga4ad01c0978b0ce64baa246811deeac24
+func Rotate(src Mat, dst *Mat, code RotateFlag) {
+ C.Rotate(src.p, dst.p, C.int(code))
+}
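+
+// A minimal usage sketch, assuming a hypothetical input file:
+//
+//   src := IMRead("input.jpg", IMReadColor)
+//   defer src.Close()
+//   dst := NewMat()
+//   defer dst.Close()
+//   Rotate(src, &dst, Rotate90Clockwise)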
+
+// IDCT calculates the inverse Discrete Cosine Transform of a 1D or 2D array.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga77b168d84e564c50228b69730a227ef2
+//
+func IDCT(src Mat, dst *Mat, flags int) {
+ C.Mat_Idct(src.p, dst.p, C.int(flags))
+}
+
+// IDFT calculates the inverse Discrete Fourier Transform of a 1D or 2D array.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#gaa708aa2d2e57a508f968eb0f69aa5ff1
+//
+func IDFT(src Mat, dst *Mat, flags, nonzeroRows int) {
+ C.Mat_Idft(src.p, dst.p, C.int(flags), C.int(nonzeroRows))
+}
+
+// InRange checks if array elements lie between the elements of two Mat arrays.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga48af0ab51e36436c5d04340e036ce981
+//
+func InRange(src, lb, ub Mat, dst *Mat) {
+ C.Mat_InRange(src.p, lb.p, ub.p, dst.p)
+}
+
+// InRangeWithScalar checks if array elements lie between the elements of two Scalars
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga48af0ab51e36436c5d04340e036ce981
+//
+func InRangeWithScalar(src Mat, lb, ub Scalar, dst *Mat) {
+ lbVal := C.struct_Scalar{
+ val1: C.double(lb.Val1),
+ val2: C.double(lb.Val2),
+ val3: C.double(lb.Val3),
+ val4: C.double(lb.Val4),
+ }
+
+ ubVal := C.struct_Scalar{
+ val1: C.double(ub.Val1),
+ val2: C.double(ub.Val2),
+ val3: C.double(ub.Val3),
+ val4: C.double(ub.Val4),
+ }
+
+ C.Mat_InRangeWithScalar(src.p, lbVal, ubVal, dst.p)
+}
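+
+// A minimal usage sketch: build a binary mask of the pixels whose channels all
+// fall between a lower and an upper bound. The bounds are arbitrary example
+// values and hsv stands for an existing 3-channel Mat.
+//
+//   mask := NewMat()
+//   defer mask.Close()
+//   lb := NewScalar(20, 100, 100, 0)
+//   ub := NewScalar(30, 255, 255, 0)
+//   InRangeWithScalar(hsv, lb, ub, &mask)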
+
+// InsertChannel inserts a single channel to dst (coi is 0-based index)
+// (it replaces channel i with another in dst).
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga1d4bd886d35b00ec0b764cb4ce6eb515
+//
+func InsertChannel(src Mat, dst *Mat, coi int) {
+ C.Mat_InsertChannel(src.p, dst.p, C.int(coi))
+}
+
+// Invert finds the inverse or pseudo-inverse of a matrix.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#gad278044679d4ecf20f7622cc151aaaa2
+//
+func Invert(src Mat, dst *Mat, flags int) float64 {
+ ret := C.Mat_Invert(src.p, dst.p, C.int(flags))
+ return float64(ret)
+}
+
+// KMeansFlags for kmeans center selection
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d0/de1/group__core.html#ga276000efe55ee2756e0c471c7b270949
+type KMeansFlags int
+
+const (
+ // KMeansRandomCenters selects random initial centers in each attempt.
+ KMeansRandomCenters KMeansFlags = 0
+ // KMeansPPCenters uses kmeans++ center initialization by Arthur and Vassilvitskii [Arthur2007].
+ KMeansPPCenters = 1
+ // KMeansUseInitialLabels uses the user-supplied labels during the first (and possibly the only) attempt
+ // instead of computing them from the initial centers. For the second and further attempts, it uses random or
+ // semi-random centers. Use one of the KMEANS_*_CENTERS flags to specify the exact method.
+ KMeansUseInitialLabels = 2
+)
+
+// KMeans finds centers of clusters and groups input samples around the clusters.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d5/d38/group__core__cluster.html#ga9a34dc06c6ec9460e90860f15bcd2f88
+//
+func KMeans(data Mat, k int, bestLabels *Mat, criteria TermCriteria, attempts int, flags KMeansFlags, centers *Mat) float64 {
+ ret := C.KMeans(data.p, C.int(k), bestLabels.p, criteria.p, C.int(attempts), C.int(flags), centers.p)
+ return float64(ret)
+}
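+
+// A minimal usage sketch: cluster the rows of an existing CV32F Mat named
+// samples (one sample per row) into 3 groups. The criteria and attempt count
+// are arbitrary example settings.
+//
+//   labels := NewMat()
+//   defer labels.Close()
+//   centers := NewMat()
+//   defer centers.Close()
+//   criteria := NewTermCriteria(Count|EPS, 10, 1.0)
+//   compactness := KMeans(samples, 3, &labels, criteria, 5, KMeansRandomCenters, &centers)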
+
+// KMeansPoints finds centers of clusters and groups input samples around the clusters.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d5/d38/group__core__cluster.html#ga9a34dc06c6ec9460e90860f15bcd2f88
+//
+func KMeansPoints(points []image.Point, k int, bestLabels *Mat, criteria TermCriteria, attempts int, flags KMeansFlags, centers *Mat) float64 {
+ cPoints := toCPoints(points)
+ ret := C.KMeansPoints(cPoints, C.int(k), bestLabels.p, criteria.p, C.int(attempts), C.int(flags), centers.p)
+ return float64(ret)
+}
+
+// Log calculates the natural logarithm of every array element.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga937ecdce4679a77168730830a955bea7
+//
+func Log(src Mat, dst *Mat) {
+ C.Mat_Log(src.p, dst.p)
+}
+
+// Magnitude calculates the magnitude of 2D vectors.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga6d3b097586bca4409873d64a90fe64c3
+//
+func Magnitude(x, y Mat, magnitude *Mat) {
+ C.Mat_Magnitude(x.p, y.p, magnitude.p)
+}
+
+// Max calculates per-element maximum of two arrays or an array and a scalar.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#gacc40fa15eac0fb83f8ca70b7cc0b588d
+//
+func Max(src1, src2 Mat, dst *Mat) {
+ C.Mat_Max(src1.p, src2.p, dst.p)
+}
+
+// MeanStdDev calculates a mean and standard deviation of array elements.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga846c858f4004d59493d7c6a4354b301d
+//
+func MeanStdDev(src Mat, dst *Mat, dstStdDev *Mat) {
+ C.Mat_MeanStdDev(src.p, dst.p, dstStdDev.p)
+}
+
+// Merge creates one multi-channel array out of several single-channel ones.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga7d7b4d6c6ee504b30a20b1680029c7b4
+//
+func Merge(mv []Mat, dst *Mat) {
+ cMatArray := make([]C.Mat, len(mv))
+ for i, r := range mv {
+ cMatArray[i] = r.p
+ }
+ cMats := C.struct_Mats{
+ mats: (*C.Mat)(&cMatArray[0]),
+ length: C.int(len(mv)),
+ }
+
+ C.Mat_Merge(cMats, dst.p)
+}
+
+// Min calculates per-element minimum of two arrays or an array and a scalar.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga9af368f182ee76d0463d0d8d5330b764
+//
+func Min(src1, src2 Mat, dst *Mat) {
+ C.Mat_Min(src1.p, src2.p, dst.p)
+}
+
+// MinMaxIdx finds the global minimum and maximum in an array.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga7622c466c628a75d9ed008b42250a73f
+//
+func MinMaxIdx(input Mat) (minVal, maxVal float32, minIdx, maxIdx int) {
+ var cMinVal C.double
+ var cMaxVal C.double
+ var cMinIdx C.int
+ var cMaxIdx C.int
+
+ C.Mat_MinMaxIdx(input.p, &cMinVal, &cMaxVal, &cMinIdx, &cMaxIdx)
+
+ return float32(cMinVal), float32(cMaxVal), int(cMinIdx), int(cMaxIdx)
+}
+
+// MinMaxLoc finds the global minimum and maximum in an array.
+//
+// For further details, please see:
+// https://docs.opencv.org/trunk/d2/de8/group__core__array.html#gab473bf2eb6d14ff97e89b355dac20707
+//
+func MinMaxLoc(input Mat) (minVal, maxVal float32, minLoc, maxLoc image.Point) {
+ var cMinVal C.double
+ var cMaxVal C.double
+ var cMinLoc C.struct_Point
+ var cMaxLoc C.struct_Point
+
+ C.Mat_MinMaxLoc(input.p, &cMinVal, &cMaxVal, &cMinLoc, &cMaxLoc)
+
+ minLoc = image.Pt(int(cMinLoc.x), int(cMinLoc.y))
+ maxLoc = image.Pt(int(cMaxLoc.x), int(cMaxLoc.y))
+
+ return float32(cMinVal), float32(cMaxVal), minLoc, maxLoc
+}
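+
+// A minimal usage sketch: locate the darkest and brightest pixels of an
+// existing single-channel Mat named gray.
+//
+//   minVal, maxVal, minLoc, maxLoc := MinMaxLoc(gray)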
+
+// MulSpectrums performs the per-element multiplication of two Fourier spectrums.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga3ab38646463c59bf0ce962a9d51db64f
+//
+func MulSpectrums(a Mat, b Mat, dst *Mat, flags DftFlags) {
+ C.Mat_MulSpectrums(a.p, b.p, dst.p, C.int(flags))
+}
+
+// Multiply calculates the per-element scaled product of two arrays.
+// Both input arrays must be of the same size and the same type.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga979d898a58d7f61c53003e162e7ad89f
+//
+func Multiply(src1 Mat, src2 Mat, dst *Mat) {
+ C.Mat_Multiply(src1.p, src2.p, dst.p)
+}
+
+// NormType for normalization operations.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#gad12cefbcb5291cf958a85b4b67b6149f
+//
+type NormType int
+
+const (
+ // NormInf indicates use infinite normalization.
+ NormInf NormType = 1
+
+ // NormL1 indicates use L1 normalization.
+ NormL1 = 2
+
+ // NormL2 indicates use L2 normalization.
+ NormL2 = 4
+
+ // NormL2Sqr indicates use L2 squared normalization.
+ NormL2Sqr = 5
+
+ // NormHamming indicates use Hamming normalization.
+ NormHamming = 6
+
+ // NormHamming2 indicates use Hamming 2-bit normalization.
+ NormHamming2 = 7
+
+ // NormTypeMask indicates use type mask for normalization.
+ NormTypeMask = 7
+
+ // NormRelative indicates use relative normalization.
+ NormRelative = 8
+
+ // NormMinMax indicates use min/max normalization.
+ NormMinMax = 32
+)
+
+// Normalize normalizes the norm or value range of an array.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga87eef7ee3970f86906d69a92cbf064bd
+//
+func Normalize(src Mat, dst *Mat, alpha float64, beta float64, typ NormType) {
+ C.Mat_Normalize(src.p, dst.p, C.double(alpha), C.double(beta), C.int(typ))
+}
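+
+// A minimal usage sketch: stretch the value range of an existing Mat named src
+// to 0..255.
+//
+//   dst := NewMat()
+//   defer dst.Close()
+//   Normalize(src, &dst, 0, 255, NormMinMax)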
+
+// Norm calculates the absolute norm of an array.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga7c331fb8dd951707e184ef4e3f21dd33
+//
+func Norm(src1 Mat, normType NormType) float64 {
+ return float64(C.Norm(src1.p, C.int(normType)))
+}
+
+// PerspectiveTransform performs the perspective matrix transformation of vectors.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#gad327659ac03e5fd6894b90025e6900a7
+//
+func PerspectiveTransform(src Mat, dst *Mat, tm Mat) {
+ C.Mat_PerspectiveTransform(src.p, dst.p, tm.p)
+}
+
+// TermCriteriaType for TermCriteria.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d9/d5d/classcv_1_1TermCriteria.html#a56fecdc291ccaba8aad27d67ccf72c57
+//
+type TermCriteriaType int
+
+const (
+ // Count is the maximum number of iterations or elements to compute.
+ Count TermCriteriaType = 1
+
+ // MaxIter is the maximum number of iterations or elements to compute.
+ MaxIter = 1
+
+ // EPS is the desired accuracy or change in parameters at which the
+ // iterative algorithm stops.
+ EPS = 2
+)
+
+type SolveDecompositionFlags int
+
+const (
+ // Gaussian elimination with the optimal pivot element chosen.
+ SolveDecompositionLu = 0
+
+ // Singular value decomposition (SVD) method. The system can be over-defined and/or the matrix src1 can be singular.
+ SolveDecompositionSvd = 1
+
+ // Eigenvalue decomposition. The matrix src1 must be symmetrical.
+ SolveDecompositionEing = 2
+
+ // Cholesky LL^T factorization. The matrix src1 must be symmetric and positive-definite.
+ SolveDecompositionCholesky = 3
+
+ // QR factorization. The system can be over-defined and/or the matrix src1 can be singular.
+ SolveDecompositionQr = 4
+
+ // While all the previous flags are mutually exclusive, this flag can be used together with any of the previous.
+ // It means that the normal equations 𝚜𝚛𝚌𝟷^T⋅𝚜𝚛𝚌𝟷⋅𝚍𝚜𝚝=𝚜𝚛𝚌𝟷^T𝚜𝚛𝚌𝟸 are solved instead of the original system
+ // 𝚜𝚛𝚌𝟷⋅𝚍𝚜𝚝=𝚜𝚛𝚌𝟸.
+ SolveDecompositionNormal = 5
+)
+
+// Solve solves one or more linear systems or least-squares problems.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga12b43690dbd31fed96f213eefead2373
+//
+func Solve(src1 Mat, src2 Mat, dst *Mat, flags SolveDecompositionFlags) bool {
+ return bool(C.Mat_Solve(src1.p, src2.p, dst.p, C.int(flags)))
+}
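+
+// A minimal usage sketch: solve the 2x2 system a*x = b (3x+y=9, x+2y=8) with
+// LU decomposition; x should come back as (2, 3).
+//
+//   a := NewMatWithSize(2, 2, MatTypeCV32F)
+//   defer a.Close()
+//   a.SetFloatAt(0, 0, 3)
+//   a.SetFloatAt(0, 1, 1)
+//   a.SetFloatAt(1, 0, 1)
+//   a.SetFloatAt(1, 1, 2)
+//   b := NewMatWithSize(2, 1, MatTypeCV32F)
+//   defer b.Close()
+//   b.SetFloatAt(0, 0, 9)
+//   b.SetFloatAt(1, 0, 8)
+//   x := NewMat()
+//   defer x.Close()
+//   ok := Solve(a, b, &x, SolveDecompositionLu)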
+
+// SolveCubic finds the real roots of a cubic equation.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga1c3b0b925b085b6e96931ee309e6a1da
+//
+func SolveCubic(coeffs Mat, roots *Mat) int {
+ return int(C.Mat_SolveCubic(coeffs.p, roots.p))
+}
+
+// SolvePoly finds the real or complex roots of a polynomial equation.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#gac2f5e953016fabcdf793d762f4ec5dce
+//
+func SolvePoly(coeffs Mat, roots *Mat, maxIters int) float64 {
+ return float64(C.Mat_SolvePoly(coeffs.p, roots.p, C.int(maxIters)))
+}
+
+type ReduceTypes int
+
+const (
+ // The output is the sum of all rows/columns of the matrix.
+ ReduceSum ReduceTypes = 0
+
+ // The output is the mean vector of all rows/columns of the matrix.
+ ReduceAvg ReduceTypes = 1
+
+ // The output is the maximum (column/row-wise) of all rows/columns of the matrix.
+ ReduceMax ReduceTypes = 2
+
+ // The output is the minimum (column/row-wise) of all rows/columns of the matrix.
+ ReduceMin ReduceTypes = 3
+)
+
+// Reduce reduces a matrix to a vector.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga4b78072a303f29d9031d56e5638da78e
+//
+func Reduce(src Mat, dst *Mat, dim int, rType ReduceTypes, dType int) {
+ C.Mat_Reduce(src.p, dst.p, C.int(dim), C.int(rType), C.int(dType))
+}
+
+// Repeat fills the output array with repeated copies of the input array.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga496c3860f3ac44c40b48811333cfda2d
+//
+func Repeat(src Mat, nY int, nX int, dst *Mat) {
+ C.Mat_Repeat(src.p, C.int(nY), C.int(nX), dst.p)
+}
+
+// ScaleAdd calculates the sum of a scaled array and another array.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga9e0845db4135f55dcf20227402f00d98
+//
+func ScaleAdd(src1 Mat, alpha float64, src2 Mat, dst *Mat) {
+ C.Mat_ScaleAdd(src1.p, C.double(alpha), src2.p, dst.p)
+}
+
+// SetIdentity initializes a scaled identity matrix.
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga388d7575224a4a277ceb98ccaa327c99
+//
+func SetIdentity(src Mat, scalar float64) {
+ C.Mat_SetIdentity(src.p, C.double(scalar))
+}
+
+type SortFlags int
+
+const (
+ // Each matrix row is sorted independently
+ SortEveryRow SortFlags = 0
+
+ // Each matrix column is sorted independently; this flag and the previous one are mutually exclusive.
+ SortEveryColumn SortFlags = 1
+
+ // Each matrix row is sorted in the ascending order.
+ SortAscending SortFlags = 0
+
+ // Each matrix row is sorted in the descending order; this flag and the previous one are also mutually exclusive.
+ SortDescending SortFlags = 16
+)
+
+// Sort sorts each row or each column of a matrix.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga45dd56da289494ce874be2324856898f
+//
+func Sort(src Mat, dst *Mat, flags SortFlags) {
+ C.Mat_Sort(src.p, dst.p, C.int(flags))
+}
+
+// SortIdx sorts each row or each column of a matrix.
+// Instead of reordering the elements themselves, it stores the indices of sorted elements in the output array
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#gadf35157cbf97f3cb85a545380e383506
+//
+func SortIdx(src Mat, dst *Mat, flags SortFlags) {
+ C.Mat_SortIdx(src.p, dst.p, C.int(flags))
+}
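+
+// A minimal usage sketch: sort each row of an existing Mat named src in
+// descending order, then obtain the ascending permutation indices instead of
+// the values.
+//
+//   sorted := NewMat()
+//   defer sorted.Close()
+//   Sort(src, &sorted, SortEveryRow|SortDescending)
+//   idx := NewMat()
+//   defer idx.Close()
+//   SortIdx(src, &idx, SortEveryRow|SortAscending)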
+
+// Split creates an array of single channel images from a multi-channel image
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga0547c7fed86152d7e9d0096029c8518a
+//
+func Split(src Mat) (mv []Mat) {
+ cMats := C.struct_Mats{}
+ C.Mat_Split(src.p, &(cMats))
+ mv = make([]Mat, cMats.length)
+ for i := C.int(0); i < cMats.length; i++ {
+ mv[i].p = C.Mats_get(cMats, i)
+ }
+ return
+}
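+
+// A minimal usage sketch: split an existing 3-channel Mat named img into its
+// channels and merge them back together.
+//
+//   channels := Split(img)
+//   merged := NewMat()
+//   defer merged.Close()
+//   Merge(channels, &merged)
+//   for i := range channels {
+//       channels[i].Close()
+//   }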
+
+// Subtract calculates the per-element subtraction of two arrays or an array and a scalar.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#gaa0f00d98b4b5edeaeb7b8333b2de353b
+//
+func Subtract(src1 Mat, src2 Mat, dst *Mat) {
+ C.Mat_Subtract(src1.p, src2.p, dst.p)
+}
+
+// Trace returns the trace of a matrix.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga3419ac19c7dcd2be4bd552a23e147dd8
+//
+func Trace(src Mat) Scalar {
+ s := C.Mat_Trace(src.p)
+ return NewScalar(float64(s.val1), float64(s.val2), float64(s.val3), float64(s.val4))
+}
+
+// Transform performs the matrix transformation of every array element.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga393164aa54bb9169ce0a8cc44e08ff22
+//
+func Transform(src Mat, dst *Mat, tm Mat) {
+ C.Mat_Transform(src.p, dst.p, tm.p)
+}
+
+// Transpose transposes a matrix.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga46630ed6c0ea6254a35f447289bd7404
+//
+func Transpose(src Mat, dst *Mat) {
+ C.Mat_Transpose(src.p, dst.p)
+}
+
+// Pow raises every array element to a power.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#gaf0d056b5bd1dc92500d6f6cf6bac41ef
+//
+func Pow(src Mat, power float64, dst *Mat) {
+ C.Mat_Pow(src.p, C.double(power), dst.p)
+}
+
+// PolarToCart calculates x and y coordinates of 2D vectors from their magnitude and angle.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga581ff9d44201de2dd1b40a50db93d665
+//
+func PolarToCart(magnitude Mat, degree Mat, x *Mat, y *Mat, angleInDegrees bool) {
+ C.Mat_PolarToCart(magnitude.p, degree.p, x.p, y.p, C.bool(angleInDegrees))
+}
+
+// Phase calculates the rotation angle of 2D vectors.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga9db9ca9b4d81c3bde5677b8f64dc0137
+//
+func Phase(x, y Mat, angle *Mat, angleInDegrees bool) {
+ C.Mat_Phase(x.p, y.p, angle.p, C.bool(angleInDegrees))
+}
+
+// TermCriteria is the criteria for iterative algorithms.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d9/d5d/classcv_1_1TermCriteria.html
+//
+type TermCriteria struct {
+ p C.TermCriteria
+}
+
+// NewTermCriteria returns a new TermCriteria.
+func NewTermCriteria(typ TermCriteriaType, maxCount int, epsilon float64) TermCriteria {
+ return TermCriteria{p: C.TermCriteria_New(C.int(typ), C.int(maxCount), C.double(epsilon))}
+}
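+
+// A minimal usage sketch: stop an iterative algorithm after at most 30
+// iterations or once the change drops below 0.001, whichever comes first.
+//
+//   criteria := NewTermCriteria(Count|EPS, 30, 0.001)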
+
+// Scalar is a 4-element vector widely used in OpenCV to pass pixel values.
+//
+// For further details, please see:
+// http://docs.opencv.org/master/d1/da0/classcv_1_1Scalar__.html
+//
+type Scalar struct {
+ Val1 float64
+ Val2 float64
+ Val3 float64
+ Val4 float64
+}
+
+// NewScalar returns a new Scalar. These are usually color values, typically in BGR order.
+func NewScalar(v1 float64, v2 float64, v3 float64, v4 float64) Scalar {
+ s := Scalar{Val1: v1, Val2: v2, Val3: v3, Val4: v4}
+ return s
+}
+
+// KeyPoint is the data structure for salient point detectors.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d2/d29/classcv_1_1KeyPoint.html
+//
+type KeyPoint struct {
+ X, Y float64
+ Size, Angle, Response float64
+ Octave, ClassID int
+}
+
+// DMatch is the data structure for matching keypoint descriptors.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d4/de0/classcv_1_1DMatch.html#a546ddb9a87898f06e510e015a6de596e
+//
+type DMatch struct {
+ QueryIdx int
+ TrainIdx int
+ ImgIdx int
+ Distance float64
+}
+
+// Vecf is a generic vector of floats.
+type Vecf []float32
+
+// GetVecfAt returns a vector of floats. Its size corresponds to the number of
+// channels of the Mat.
+func (m *Mat) GetVecfAt(row int, col int) Vecf {
+ ch := m.Channels()
+ v := make(Vecf, ch)
+
+ for c := 0; c < ch; c++ {
+ v[c] = m.GetFloatAt(row, col*ch+c)
+ }
+
+ return v
+}
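+
+// A minimal usage sketch: read every channel value of the pixel at row 10,
+// column 20 of an existing 3-channel float Mat named m.
+//
+//   vec := m.GetVecfAt(10, 20)
+//   b, g, r := vec[0], vec[1], vec[2]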
+
+// Veci is a generic vector of integers.
+type Veci []int32
+
+// GetVeciAt returns a vector of integers. Its size corresponds to the number
+// of channels of the Mat.
+func (m *Mat) GetVeciAt(row int, col int) Veci {
+ ch := m.Channels()
+ v := make(Veci, ch)
+
+ for c := 0; c < ch; c++ {
+ v[c] = m.GetIntAt(row, col*ch+c)
+ }
+
+ return v
+}
+
+// GetTickCount returns the number of ticks.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/db/de0/group__core__utils.html#gae73f58000611a1af25dd36d496bf4487
+//
+func GetTickCount() float64 {
+ return float64(C.GetCVTickCount())
+}
+
+// GetTickFrequency returns the number of ticks per second.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/db/de0/group__core__utils.html#ga705441a9ef01f47acdc55d87fbe5090c
+//
+func GetTickFrequency() float64 {
+ return float64(C.GetTickFrequency())
+}
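+
+// A minimal usage sketch: measure elapsed time in seconds around a block of
+// work.
+//
+//   start := GetTickCount()
+//   // ... do some work ...
+//   seconds := (GetTickCount() - start) / GetTickFrequency()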
+
+func toByteArray(b []byte) (*C.struct_ByteArray, error) {
+ if len(b) == 0 {
+ return nil, ErrEmptyByteSlice
+ }
+ return &C.struct_ByteArray{
+ data: (*C.char)(unsafe.Pointer(&b[0])),
+ length: C.int(len(b)),
+ }, nil
+}
+
+func toGoBytes(b C.struct_ByteArray) []byte {
+ return C.GoBytes(unsafe.Pointer(b.data), b.length)
+}
+
+func toRectangles(ret C.Rects) []image.Rectangle {
+ cArray := ret.rects
+ length := int(ret.length)
+ hdr := reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(cArray)),
+ Len: length,
+ Cap: length,
+ }
+ s := *(*[]C.Rect)(unsafe.Pointer(&hdr))
+
+ rects := make([]image.Rectangle, length)
+ for i, r := range s {
+ rects[i] = image.Rect(int(r.x), int(r.y), int(r.x+r.width), int(r.y+r.height))
+ }
+ return rects
+}
+
+func toRect(rect C.Rect) image.Rectangle {
+ return image.Rect(int(rect.x), int(rect.y), int(rect.x+rect.width), int(rect.y+rect.height))
+}
+
+func toCPoints(points []image.Point) C.struct_Points {
+ cPointSlice := make([]C.struct_Point, len(points))
+ for i, point := range points {
+ cPointSlice[i] = C.struct_Point{
+ x: C.int(point.X),
+ y: C.int(point.Y),
+ }
+ }
+
+ return C.struct_Points{
+ points: (*C.Point)(&cPointSlice[0]),
+ length: C.int(len(points)),
+ }
+}
+
+func toCStrings(strs []string) C.struct_CStrings {
+ cStringsSlice := make([]*C.char, len(strs))
+ for i, s := range strs {
+ cStringsSlice[i] = C.CString(s)
+ }
+
+ return C.struct_CStrings{
+ strs: (**C.char)(&cStringsSlice[0]),
+ length: C.int(len(strs)),
+ }
+}
+
+// RowRange creates a matrix header for the specified row span.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d3/d63/classcv_1_1Mat.html#aa6542193430356ad631a9beabc624107
+//
+func (m *Mat) RowRange(start, end int) Mat {
+ return newMat(C.Mat_rowRange(m.p, C.int(start), C.int(end)))
+}
+
+// ColRange creates a matrix header for the specified column span.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d3/d63/classcv_1_1Mat.html#aadc8f9210fe4dec50513746c246fa8d9
+//
+func (m *Mat) ColRange(start, end int) Mat {
+ return newMat(C.Mat_colRange(m.p, C.int(start), C.int(end)))
+}
diff --git a/vendor/gocv.io/x/gocv/core.h b/vendor/gocv.io/x/gocv/core.h
new file mode 100644
index 0000000..0a8a50f
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/core.h
@@ -0,0 +1,385 @@
+#ifndef _OPENCV3_CORE_H_
+#define _OPENCV3_CORE_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+
+// Wrapper for std::vector<std::string>
+typedef struct CStrings {
+ const char** strs;
+ int length;
+} CStrings;
+
+typedef struct ByteArray {
+ char* data;
+ int length;
+} ByteArray;
+
+// Wrapper for std::vector<int>
+typedef struct IntVector {
+ int* val;
+ int length;
+} IntVector;
+
+// Wrapper for std::vector<float>
+typedef struct FloatVector {
+ float* val;
+ int length;
+} FloatVector;
+
+#ifdef __cplusplus
+#include <opencv2/opencv.hpp>
+extern "C" {
+#endif
+
+typedef struct RawData {
+ int width;
+ int height;
+ struct ByteArray data;
+} RawData;
+
+// Wrapper for an individual cv::Point2f
+typedef struct Point2f {
+ float x;
+ float y;
+} Point2f;
+
+// Wrapper for an individual cv::cvPoint
+typedef struct Point {
+ int x;
+ int y;
+} Point;
+
+// Wrapper for the vector of Point structs aka std::vector<Point>
+typedef struct Points {
+ Point* points;
+ int length;
+} Points;
+
+// Contour is alias for Points
+typedef Points Contour;
+
+// Wrapper for the vector of Points vectors aka std::vector< std::vector<Point> >
+typedef struct Contours {
+ Contour* contours;
+ int length;
+} Contours;
+
+// Wrapper for an individual cv::cvRect
+typedef struct Rect {
+ int x;
+ int y;
+ int width;
+ int height;
+} Rect;
+
+// Wrapper for the vector of Rect struct aka std::vector<Rect>
+typedef struct Rects {
+ Rect* rects;
+ int length;
+} Rects;
+
+// Wrapper for an individual cv::cvSize
+typedef struct Size {
+ int width;
+ int height;
+} Size;
+
+// Wrapper for an individual cv::RotatedRect
+typedef struct RotatedRect {
+ Contour pts;
+ Rect boundingRect;
+ Point center;
+ Size size;
+ double angle;
+} RotatedRect;
+
+// Wrapper for an individual cv::cvScalar
+typedef struct Scalar {
+ double val1;
+ double val2;
+ double val3;
+ double val4;
+} Scalar;
+
+// Wrapper for a individual cv::KeyPoint
+typedef struct KeyPoint {
+ double x;
+ double y;
+ double size;
+ double angle;
+ double response;
+ int octave;
+ int classID;
+} KeyPoint;
+
+// Wrapper for the vector of KeyPoint struct aka std::vector<KeyPoint>
+typedef struct KeyPoints {
+ KeyPoint* keypoints;
+ int length;
+} KeyPoints;
+
+// Wrapper for SimpleBlobDetectorParams aka SimpleBlobDetector::Params
+typedef struct SimpleBlobDetectorParams {
+ unsigned char blobColor;
+ bool filterByArea;
+ bool filterByCircularity;
+ bool filterByColor;
+ bool filterByConvexity;
+ bool filterByInertia;
+ float maxArea;
+ float maxCircularity;
+ float maxConvexity;
+ float maxInertiaRatio;
+ float maxThreshold;
+ float minArea;
+ float minCircularity;
+ float minConvexity;
+ float minDistBetweenBlobs;
+ float minInertiaRatio;
+ size_t minRepeatability;
+ float minThreshold;
+ float thresholdStep;
+} SimpleBlobDetectorParams;
+
+// Wrapper for an individual cv::DMatch
+typedef struct DMatch {
+ int queryIdx;
+ int trainIdx;
+ int imgIdx;
+ float distance;
+} DMatch;
+
+// Wrapper for the vector of DMatch struct aka std::vector<DMatch>
+typedef struct DMatches {
+ DMatch* dmatches;
+ int length;
+} DMatches;
+
+// Wrapper for the vector of vectors of DMatch struct aka std::vector< std::vector<DMatch> >
+typedef struct MultiDMatches {
+ DMatches* dmatches;
+ int length;
+} MultiDMatches;
+
+// Wrapper for an individual cv::Moment
+typedef struct Moment {
+ double m00;
+ double m10;
+ double m01;
+ double m20;
+ double m11;
+ double m02;
+ double m30;
+ double m21;
+ double m12;
+ double m03;
+
+ double mu20;
+ double mu11;
+ double mu02;
+ double mu30;
+ double mu21;
+ double mu12;
+ double mu03;
+
+ double nu20;
+ double nu11;
+ double nu02;
+ double nu30;
+ double nu21;
+ double nu12;
+ double nu03;
+} Moment;
+
+#ifdef __cplusplus
+typedef cv::Mat* Mat;
+typedef cv::TermCriteria* TermCriteria;
+#else
+typedef void* Mat;
+typedef void* TermCriteria;
+#endif
+
+// Wrapper for the vector of Mat aka std::vector<Mat>
+typedef struct Mats {
+ Mat* mats;
+ int length;
+} Mats;
+
+Mat Mats_get(struct Mats mats, int i);
+struct DMatches MultiDMatches_get(struct MultiDMatches mds, int index);
+
+struct ByteArray toByteArray(const char* buf, int len);
+void ByteArray_Release(struct ByteArray buf);
+
+void Contours_Close(struct Contours cs);
+void KeyPoints_Close(struct KeyPoints ks);
+void Rects_Close(struct Rects rs);
+void Mats_Close(struct Mats mats);
+void Point_Close(struct Point p);
+void Points_Close(struct Points ps);
+void DMatches_Close(struct DMatches ds);
+void MultiDMatches_Close(struct MultiDMatches mds);
+
+Mat Mat_New();
+Mat Mat_NewWithSize(int rows, int cols, int type);
+Mat Mat_NewFromScalar(const Scalar ar, int type);
+Mat Mat_NewWithSizeFromScalar(const Scalar ar, int rows, int cols, int type);
+Mat Mat_NewFromBytes(int rows, int cols, int type, struct ByteArray buf);
+Mat Mat_FromPtr(Mat m, int rows, int cols, int type, int prows, int pcols);
+void Mat_Close(Mat m);
+int Mat_Empty(Mat m);
+Mat Mat_Clone(Mat m);
+void Mat_CopyTo(Mat m, Mat dst);
+int Mat_Total(Mat m);
+void Mat_Size(Mat m, IntVector* res);
+void Mat_CopyToWithMask(Mat m, Mat dst, Mat mask);
+void Mat_ConvertTo(Mat m, Mat dst, int type);
+struct ByteArray Mat_ToBytes(Mat m);
+struct ByteArray Mat_DataPtr(Mat m);
+Mat Mat_Region(Mat m, Rect r);
+Mat Mat_Reshape(Mat m, int cn, int rows);
+void Mat_PatchNaNs(Mat m);
+Mat Mat_ConvertFp16(Mat m);
+Scalar Mat_Mean(Mat m);
+Scalar Mat_MeanWithMask(Mat m, Mat mask);
+Mat Mat_Sqrt(Mat m);
+int Mat_Rows(Mat m);
+int Mat_Cols(Mat m);
+int Mat_Channels(Mat m);
+int Mat_Type(Mat m);
+int Mat_Step(Mat m);
+
+uint8_t Mat_GetUChar(Mat m, int row, int col);
+uint8_t Mat_GetUChar3(Mat m, int x, int y, int z);
+int8_t Mat_GetSChar(Mat m, int row, int col);
+int8_t Mat_GetSChar3(Mat m, int x, int y, int z);
+int16_t Mat_GetShort(Mat m, int row, int col);
+int16_t Mat_GetShort3(Mat m, int x, int y, int z);
+int32_t Mat_GetInt(Mat m, int row, int col);
+int32_t Mat_GetInt3(Mat m, int x, int y, int z);
+float Mat_GetFloat(Mat m, int row, int col);
+float Mat_GetFloat3(Mat m, int x, int y, int z);
+double Mat_GetDouble(Mat m, int row, int col);
+double Mat_GetDouble3(Mat m, int x, int y, int z);
+
+void Mat_SetTo(Mat m, Scalar value);
+void Mat_SetUChar(Mat m, int row, int col, uint8_t val);
+void Mat_SetUChar3(Mat m, int x, int y, int z, uint8_t val);
+void Mat_SetSChar(Mat m, int row, int col, int8_t val);
+void Mat_SetSChar3(Mat m, int x, int y, int z, int8_t val);
+void Mat_SetShort(Mat m, int row, int col, int16_t val);
+void Mat_SetShort3(Mat m, int x, int y, int z, int16_t val);
+void Mat_SetInt(Mat m, int row, int col, int32_t val);
+void Mat_SetInt3(Mat m, int x, int y, int z, int32_t val);
+void Mat_SetFloat(Mat m, int row, int col, float val);
+void Mat_SetFloat3(Mat m, int x, int y, int z, float val);
+void Mat_SetDouble(Mat m, int row, int col, double val);
+void Mat_SetDouble3(Mat m, int x, int y, int z, double val);
+
+void Mat_AddUChar(Mat m, uint8_t val);
+void Mat_SubtractUChar(Mat m, uint8_t val);
+void Mat_MultiplyUChar(Mat m, uint8_t val);
+void Mat_DivideUChar(Mat m, uint8_t val);
+void Mat_AddFloat(Mat m, float val);
+void Mat_SubtractFloat(Mat m, float val);
+void Mat_MultiplyFloat(Mat m, float val);
+void Mat_DivideFloat(Mat m, float val);
+Mat Mat_MultiplyMatrix(Mat x, Mat y);
+
+Mat Mat_T(Mat x);
+
+void LUT(Mat src, Mat lut, Mat dst);
+
+void Mat_AbsDiff(Mat src1, Mat src2, Mat dst);
+void Mat_Add(Mat src1, Mat src2, Mat dst);
+void Mat_AddWeighted(Mat src1, double alpha, Mat src2, double beta, double gamma, Mat dst);
+void Mat_BitwiseAnd(Mat src1, Mat src2, Mat dst);
+void Mat_BitwiseAndWithMask(Mat src1, Mat src2, Mat dst, Mat mask);
+void Mat_BitwiseNot(Mat src1, Mat dst);
+void Mat_BitwiseNotWithMask(Mat src1, Mat dst, Mat mask);
+void Mat_BitwiseOr(Mat src1, Mat src2, Mat dst);
+void Mat_BitwiseOrWithMask(Mat src1, Mat src2, Mat dst, Mat mask);
+void Mat_BitwiseXor(Mat src1, Mat src2, Mat dst);
+void Mat_BitwiseXorWithMask(Mat src1, Mat src2, Mat dst, Mat mask);
+void Mat_Compare(Mat src1, Mat src2, Mat dst, int ct);
+void Mat_BatchDistance(Mat src1, Mat src2, Mat dist, int dtype, Mat nidx, int normType, int K,
+ Mat mask, int update, bool crosscheck);
+int Mat_BorderInterpolate(int p, int len, int borderType);
+void Mat_CalcCovarMatrix(Mat samples, Mat covar, Mat mean, int flags, int ctype);
+void Mat_CartToPolar(Mat x, Mat y, Mat magnitude, Mat angle, bool angleInDegrees);
+bool Mat_CheckRange(Mat m);
+void Mat_CompleteSymm(Mat m, bool lowerToUpper);
+void Mat_ConvertScaleAbs(Mat src, Mat dst, double alpha, double beta);
+void Mat_CopyMakeBorder(Mat src, Mat dst, int top, int bottom, int left, int right, int borderType,
+ Scalar value);
+int Mat_CountNonZero(Mat src);
+void Mat_DCT(Mat src, Mat dst, int flags);
+double Mat_Determinant(Mat m);
+void Mat_DFT(Mat m, Mat dst, int flags);
+void Mat_Divide(Mat src1, Mat src2, Mat dst);
+bool Mat_Eigen(Mat src, Mat eigenvalues, Mat eigenvectors);
+void Mat_EigenNonSymmetric(Mat src, Mat eigenvalues, Mat eigenvectors);
+void Mat_Exp(Mat src, Mat dst);
+void Mat_ExtractChannel(Mat src, Mat dst, int coi);
+void Mat_FindNonZero(Mat src, Mat idx);
+void Mat_Flip(Mat src, Mat dst, int flipCode);
+void Mat_Gemm(Mat src1, Mat src2, double alpha, Mat src3, double beta, Mat dst, int flags);
+int Mat_GetOptimalDFTSize(int vecsize);
+void Mat_Hconcat(Mat src1, Mat src2, Mat dst);
+void Mat_Vconcat(Mat src1, Mat src2, Mat dst);
+void Rotate(Mat src, Mat dst, int rotationCode);
+void Mat_Idct(Mat src, Mat dst, int flags);
+void Mat_Idft(Mat src, Mat dst, int flags, int nonzeroRows);
+void Mat_InRange(Mat src, Mat lowerb, Mat upperb, Mat dst);
+void Mat_InRangeWithScalar(Mat src, const Scalar lowerb, const Scalar upperb, Mat dst);
+void Mat_InsertChannel(Mat src, Mat dst, int coi);
+double Mat_Invert(Mat src, Mat dst, int flags);
+double KMeans(Mat data, int k, Mat bestLabels, TermCriteria criteria, int attempts, int flags, Mat centers);
+double KMeansPoints(Contour points, int k, Mat bestLabels, TermCriteria criteria, int attempts, int flags, Mat centers);
+void Mat_Log(Mat src, Mat dst);
+void Mat_Magnitude(Mat x, Mat y, Mat magnitude);
+void Mat_Max(Mat src1, Mat src2, Mat dst);
+void Mat_MeanStdDev(Mat src, Mat dstMean, Mat dstStdDev);
+void Mat_Merge(struct Mats mats, Mat dst);
+void Mat_Min(Mat src1, Mat src2, Mat dst);
+void Mat_MinMaxIdx(Mat m, double* minVal, double* maxVal, int* minIdx, int* maxIdx);
+void Mat_MinMaxLoc(Mat m, double* minVal, double* maxVal, Point* minLoc, Point* maxLoc);
+void Mat_MulSpectrums(Mat a, Mat b, Mat c, int flags);
+void Mat_Multiply(Mat src1, Mat src2, Mat dst);
+void Mat_Subtract(Mat src1, Mat src2, Mat dst);
+void Mat_Normalize(Mat src, Mat dst, double alpha, double beta, int typ);
+double Norm(Mat src1, int normType);
+void Mat_PerspectiveTransform(Mat src, Mat dst, Mat tm);
+bool Mat_Solve(Mat src1, Mat src2, Mat dst, int flags);
+int Mat_SolveCubic(Mat coeffs, Mat roots);
+double Mat_SolvePoly(Mat coeffs, Mat roots, int maxIters);
+void Mat_Reduce(Mat src, Mat dst, int dim, int rType, int dType);
+void Mat_Repeat(Mat src, int nY, int nX, Mat dst);
+void Mat_ScaleAdd(Mat src1, double alpha, Mat src2, Mat dst);
+void Mat_SetIdentity(Mat src, double scalar);
+void Mat_Sort(Mat src, Mat dst, int flags);
+void Mat_SortIdx(Mat src, Mat dst, int flags);
+void Mat_Split(Mat src, struct Mats* mats);
+void Mat_Subtract(Mat src1, Mat src2, Mat dst);
+Scalar Mat_Trace(Mat src);
+void Mat_Transform(Mat src, Mat dst, Mat tm);
+void Mat_Transpose(Mat src, Mat dst);
+void Mat_PolarToCart(Mat magnitude, Mat degree, Mat x, Mat y, bool angleInDegrees);
+void Mat_Pow(Mat src, double power, Mat dst);
+void Mat_Phase(Mat x, Mat y, Mat angle, bool angleInDegrees);
+Scalar Mat_Sum(Mat src1);
+
+TermCriteria TermCriteria_New(int typ, int maxCount, double epsilon);
+
+int64_t GetCVTickCount();
+double GetTickFrequency();
+
+Mat Mat_rowRange(Mat m,int startrow,int endrow);
+Mat Mat_colRange(Mat m,int startrow,int endrow);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //_OPENCV3_CORE_H_
diff --git a/vendor/gocv.io/x/gocv/core_string.go b/vendor/gocv.io/x/gocv/core_string.go
new file mode 100644
index 0000000..39d5d22
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/core_string.go
@@ -0,0 +1,211 @@
+package gocv
+
+func (c MatType) String() string {
+ switch c {
+ case MatTypeCV8U:
+ return "CV8U"
+ case MatTypeCV8UC2:
+ return "CV8UC2"
+ case MatTypeCV8UC3:
+ return "CV8UC3"
+ case MatTypeCV8UC4:
+ return "CV8UC4"
+ case MatTypeCV16U:
+ return "CV16U"
+ case MatTypeCV16UC2:
+ return "CV16UC2"
+ case MatTypeCV16UC3:
+ return "CV16UC3"
+ case MatTypeCV16UC4:
+ return "CV16UC4"
+ case MatTypeCV16S:
+ return "CV16S"
+ case MatTypeCV16SC2:
+ return "CV16SC2"
+ case MatTypeCV16SC3:
+ return "CV16SC3"
+ case MatTypeCV16SC4:
+ return "CV16SC4"
+ case MatTypeCV32S:
+ return "CV32S"
+ case MatTypeCV32SC2:
+ return "CV32SC2"
+ case MatTypeCV32SC3:
+ return "CV32SC3"
+ case MatTypeCV32SC4:
+ return "CV32SC4"
+ case MatTypeCV32F:
+ return "CV32F"
+ case MatTypeCV32FC2:
+ return "CV32FC2"
+ case MatTypeCV32FC3:
+ return "CV32FC3"
+ case MatTypeCV32FC4:
+ return "CV32FC4"
+ case MatTypeCV64F:
+ return "CV64F"
+ case MatTypeCV64FC2:
+ return "CV64FC2"
+ case MatTypeCV64FC3:
+ return "CV64FC3"
+ case MatTypeCV64FC4:
+ return "CV64FC4"
+ }
+ return ""
+}
+
+func (c CompareType) String() string {
+ switch c {
+ case CompareEQ:
+ return "eq"
+ case CompareGT:
+ return "gt"
+ case CompareGE:
+ return "ge"
+ case CompareLT:
+ return "lt"
+ case CompareLE:
+ return "le"
+ case CompareNE:
+ return "ne"
+ }
+ return ""
+}
+
+func (c CovarFlags) String() string {
+ switch c {
+ case CovarScrambled:
+ return "covar-scrambled"
+ case CovarNormal:
+ return "covar-normal"
+ case CovarUseAvg:
+ return "covar-use-avg"
+ case CovarScale:
+ return "covar-scale"
+ case CovarRows:
+ return "covar-rows"
+ case CovarCols:
+ return "covar-cols"
+ }
+ return ""
+}
+
+func (c DftFlags) String() string {
+ switch c {
+ case DftForward:
+ return "dft-forward"
+ case DftInverse:
+ return "dft-inverse"
+ case DftScale:
+ return "dft-scale"
+ case DftRows:
+ return "dft-rows"
+ case DftComplexOutput:
+ return "dft-complex-output"
+ case DftRealOutput:
+ return "dft-real-output"
+ case DftComplexInput:
+ return "dft-complex-input"
+ }
+ return ""
+}
+
+func (c RotateFlag) String() string {
+ switch c {
+ case Rotate90Clockwise:
+ return "rotate-90-clockwise"
+ case Rotate180Clockwise:
+ return "rotate-180-clockwise"
+ case Rotate90CounterClockwise:
+ return "rotate-90-counter-clockwise"
+ }
+ return ""
+}
+
+func (c KMeansFlags) String() string {
+ switch c {
+ case KMeansRandomCenters:
+ return "kmeans-random-centers"
+ case KMeansPPCenters:
+ return "kmeans-pp-centers"
+ case KMeansUseInitialLabels:
+ return "kmeans-use-initial-labels"
+ }
+ return ""
+}
+
+func (c NormType) String() string {
+ switch c {
+ case NormInf:
+ return "norm-inf"
+ case NormL1:
+ return "norm-l1"
+ case NormL2:
+ return "norm-l2"
+ case NormL2Sqr:
+ return "norm-l2-sqr"
+ case NormHamming:
+ return "norm-hamming"
+ case NormHamming2:
+ return "norm-hamming2"
+ case NormRelative:
+ return "norm-relative"
+ case NormMinMax:
+ return "norm-minmax"
+ }
+ return ""
+}
+
+func (c TermCriteriaType) String() string {
+ switch c {
+ case Count:
+ return "count"
+ case EPS:
+ return "eps"
+ }
+ return ""
+}
+
+func (c SolveDecompositionFlags) String() string {
+ switch c {
+ case SolveDecompositionLu:
+ return "solve-decomposition-lu"
+ case SolveDecompositionSvd:
+ return "solve-decomposition-svd"
+ case SolveDecompositionEing:
+ return "solve-decomposition-eing"
+ case SolveDecompositionCholesky:
+ return "solve-decomposition-cholesky"
+ case SolveDecompositionQr:
+ return "solve-decomposition-qr"
+ case SolveDecompositionNormal:
+ return "solve-decomposition-normal"
+ }
+ return ""
+}
+
+func (c ReduceTypes) String() string {
+ switch c {
+ case ReduceSum:
+ return "reduce-sum"
+ case ReduceAvg:
+ return "reduce-avg"
+ case ReduceMax:
+ return "reduce-max"
+ case ReduceMin:
+ return "reduce-min"
+ }
+ return ""
+}
+
+func (c SortFlags) String() string {
+ switch c {
+ case SortEveryRow:
+ return "sort-every-row"
+ case SortEveryColumn:
+ return "sort-every-column"
+ case SortDescending:
+ return "sort-descending"
+ }
+ return ""
+}
diff --git a/vendor/gocv.io/x/gocv/dnn.cpp b/vendor/gocv.io/x/gocv/dnn.cpp
new file mode 100644
index 0000000..3885546
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/dnn.cpp
@@ -0,0 +1,189 @@
+#include "dnn.h"
+
+Net Net_ReadNet(const char* model, const char* config) {
+ Net n = new cv::dnn::Net(cv::dnn::readNet(model, config));
+ return n;
+}
+
+Net Net_ReadNetBytes(const char* framework, struct ByteArray model, struct ByteArray config) {
+ std::vector<uchar> modelv(model.data, model.data + model.length);
+ std::vector<uchar> configv(config.data, config.data + config.length);
+ Net n = new cv::dnn::Net(cv::dnn::readNet(framework, modelv, configv));
+ return n;
+}
+
+Net Net_ReadNetFromCaffe(const char* prototxt, const char* caffeModel) {
+ Net n = new cv::dnn::Net(cv::dnn::readNetFromCaffe(prototxt, caffeModel));
+ return n;
+}
+
+Net Net_ReadNetFromCaffeBytes(struct ByteArray prototxt, struct ByteArray caffeModel) {
+ Net n = new cv::dnn::Net(cv::dnn::readNetFromCaffe(prototxt.data, prototxt.length,
+ caffeModel.data, caffeModel.length));
+ return n;
+}
+
+Net Net_ReadNetFromTensorflow(const char* model) {
+ Net n = new cv::dnn::Net(cv::dnn::readNetFromTensorflow(model));
+ return n;
+}
+
+Net Net_ReadNetFromTensorflowBytes(struct ByteArray model) {
+ Net n = new cv::dnn::Net(cv::dnn::readNetFromTensorflow(model.data, model.length));
+ return n;
+}
+
+void Net_Close(Net net) {
+ delete net;
+}
+
+bool Net_Empty(Net net) {
+ return net->empty();
+}
+
+void Net_SetInput(Net net, Mat blob, const char* name) {
+ net->setInput(*blob, name);
+}
+
+Mat Net_Forward(Net net, const char* outputName) {
+ return new cv::Mat(net->forward(outputName));
+}
+
+void Net_ForwardLayers(Net net, struct Mats* outputBlobs, struct CStrings outBlobNames) {
+ std::vector< cv::Mat > blobs;
+
+ std::vector< cv::String > names;
+ for (int i = 0; i < outBlobNames.length; ++i) {
+ names.push_back(cv::String(outBlobNames.strs[i]));
+ }
+ net->forward(blobs, names);
+
+ // copy blobs into outputBlobs
+ outputBlobs->mats = new Mat[blobs.size()];
+
+ for (size_t i = 0; i < blobs.size(); ++i) {
+ outputBlobs->mats[i] = new cv::Mat(blobs[i]);
+ }
+
+ outputBlobs->length = (int)blobs.size();
+}
+
+void Net_SetPreferableBackend(Net net, int backend) {
+ net->setPreferableBackend(backend);
+}
+
+void Net_SetPreferableTarget(Net net, int target) {
+ net->setPreferableTarget(target);
+}
+
+int64_t Net_GetPerfProfile(Net net) {
+ std::vector<double> layersTimes;
+ return net->getPerfProfile(layersTimes);
+}
+
+void Net_GetUnconnectedOutLayers(Net net, IntVector* res) {
+ std::vector< int > cids(net->getUnconnectedOutLayers());
+ int* ids = new int[cids.size()];
+
+ for (size_t i = 0; i < cids.size(); ++i) {
+ ids[i] = cids[i];
+ }
+
+ res->length = cids.size();
+ res->val = ids;
+ return;
+}
+
+void Net_GetLayerNames(Net net, CStrings* names) {
+ std::vector< cv::String > cstrs(net->getLayerNames());
+ const char **strs = new const char*[cstrs.size()];
+
+ for (size_t i = 0; i < cstrs.size(); ++i) {
+ strs[i] = cstrs[i].c_str();
+ }
+
+ names->length = cstrs.size();
+ names->strs = strs;
+ return;
+}
+
+Mat Net_BlobFromImage(Mat image, double scalefactor, Size size, Scalar mean, bool swapRB,
+ bool crop) {
+ cv::Size sz(size.width, size.height);
+
+ // set the output ddepth to the input image depth
+ int ddepth = image->depth();
+ if (ddepth == CV_8U)
+ {
+ // no scalar mean adjustment allowed, so ignore
+ return new cv::Mat(cv::dnn::blobFromImage(*image, scalefactor, sz, NULL, swapRB, crop, ddepth));
+ }
+
+ cv::Scalar cm(mean.val1, mean.val2, mean.val3, mean.val4);
+ return new cv::Mat(cv::dnn::blobFromImage(*image, scalefactor, sz, cm, swapRB, crop, ddepth));
+}
+
+void Net_BlobFromImages(struct Mats images, Mat blob, double scalefactor, Size size,
+ Scalar mean, bool swapRB, bool crop, int ddepth) {
+ std::vector<cv::Mat> imgs;
+
+ for (int i = 0; i < images.length; ++i) {
+ imgs.push_back(*images.mats[i]);
+ }
+
+ cv::Size sz(size.width, size.height);
+ cv::Scalar cm = cv::Scalar(mean.val1, mean.val2, mean.val3, mean.val4);
+
+ // TODO: handle different version signatures of this function v2 vs v3.
+ cv::dnn::blobFromImages(imgs, *blob, scalefactor, sz, cm, swapRB, crop, ddepth);
+}
+
+void Net_ImagesFromBlob(Mat blob_, struct Mats* images_) {
+ std::vector<cv::Mat> imgs;
+ cv::dnn::imagesFromBlob(*blob_, imgs);
+ images_->mats = new Mat[imgs.size()];
+
+ for (size_t i = 0; i < imgs.size(); ++i) {
+ images_->mats[i] = new cv::Mat(imgs[i]);
+ }
+ images_->length = (int) imgs.size();
+}
+
+Mat Net_GetBlobChannel(Mat blob, int imgidx, int chnidx) {
+ size_t w = blob->size[3];
+ size_t h = blob->size[2];
+ return new cv::Mat(h, w, CV_32F, blob->ptr<float>(imgidx, chnidx));
+}
+
+Scalar Net_GetBlobSize(Mat blob) {
+ Scalar scal = Scalar();
+ scal.val1 = blob->size[0];
+ scal.val2 = blob->size[1];
+ scal.val3 = blob->size[2];
+ scal.val4 = blob->size[3];
+ return scal;
+}
+
+Layer Net_GetLayer(Net net, int layerid) {
+ return new cv::Ptr<cv::dnn::Layer>(net->getLayer(layerid));
+}
+
+void Layer_Close(Layer layer) {
+ delete layer;
+}
+
+int Layer_InputNameToIndex(Layer layer, const char* name) {
+ return (*layer)->inputNameToIndex(name);
+}
+
+int Layer_OutputNameToIndex(Layer layer, const char* name) {
+ return (*layer)->outputNameToIndex(name);
+}
+
+const char* Layer_GetName(Layer layer) {
+ return (*layer)->name.c_str();
+}
+
+const char* Layer_GetType(Layer layer) {
+ return (*layer)->type.c_str();
+}
diff --git a/vendor/gocv.io/x/gocv/dnn.go b/vendor/gocv.io/x/gocv/dnn.go
new file mode 100644
index 0000000..97374d9
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/dnn.go
@@ -0,0 +1,472 @@
+package gocv
+
+/*
+#include <stdlib.h>
+#include "dnn.h"
+*/
+import "C"
+import (
+ "image"
+ "reflect"
+ "unsafe"
+)
+
+// Net allows you to create and manipulate comprehensive artificial neural networks.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/db/d30/classcv_1_1dnn_1_1Net.html
+//
+type Net struct {
+ // C.Net
+ p unsafe.Pointer
+}
+
+// NetBackendType is the type for the various different kinds of DNN backends.
+type NetBackendType int
+
+const (
+ // NetBackendDefault is the default backend.
+ NetBackendDefault NetBackendType = 0
+
+ // NetBackendHalide is the Halide backend.
+ NetBackendHalide NetBackendType = 1
+
+ // NetBackendOpenVINO is the OpenVINO backend.
+ NetBackendOpenVINO NetBackendType = 2
+
+ // NetBackendOpenCV is the OpenCV backend.
+ NetBackendOpenCV NetBackendType = 3
+
+ // NetBackendVKCOM is the Vulkan backend.
+ NetBackendVKCOM NetBackendType = 4
+)
+
+// ParseNetBackend returns a valid NetBackendType given a string. Valid values are:
+// - halide
+// - openvino
+// - opencv
+// - vulkan
+// - default
+func ParseNetBackend(backend string) NetBackendType {
+ switch backend {
+ case "halide":
+ return NetBackendHalide
+ case "openvino":
+ return NetBackendOpenVINO
+ case "opencv":
+ return NetBackendOpenCV
+ case "vulkan":
+ return NetBackendVKCOM
+ default:
+ return NetBackendDefault
+ }
+}
+
+// NetTargetType is the type for the various different kinds of DNN device targets.
+type NetTargetType int
+
+const (
+ // NetTargetCPU is the default CPU device target.
+ NetTargetCPU NetTargetType = 0
+
+ // NetTargetFP32 is the 32-bit OpenCL target.
+ NetTargetFP32 NetTargetType = 1
+
+ // NetTargetFP16 is the 16-bit OpenCL target.
+ NetTargetFP16 NetTargetType = 2
+
+ // NetTargetVPU is the Movidius VPU target.
+ NetTargetVPU NetTargetType = 3
+
+ // NetTargetVulkan is the Vulkan target.
+ NetTargetVulkan NetTargetType = 4
+
+ // NetTargetFPGA is the FPGA target.
+ NetTargetFPGA NetTargetType = 5
+)
+
+// ParseNetTarget returns a valid NetTargetType given a string. Valid values are:
+// - cpu
+// - fp32
+// - fp16
+// - vpu
+// - vulkan
+// - fpga
+func ParseNetTarget(target string) NetTargetType {
+ switch target {
+ case "cpu":
+ return NetTargetCPU
+ case "fp32":
+ return NetTargetFP32
+ case "fp16":
+ return NetTargetFP16
+ case "vpu":
+ return NetTargetVPU
+ case "vulkan":
+ return NetTargetVulkan
+ case "fpga":
+ return NetTargetFPGA
+ default:
+ return NetTargetCPU
+ }
+}
+
+// Close Net
+func (net *Net) Close() error {
+ C.Net_Close((C.Net)(net.p))
+ net.p = nil
+ return nil
+}
+
+// Empty returns true if there are no layers in the network.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/db/d30/classcv_1_1dnn_1_1Net.html#a6a5778787d5b8770deab5eda6968e66c
+//
+func (net *Net) Empty() bool {
+ return bool(C.Net_Empty((C.Net)(net.p)))
+}
+
+// SetInput sets the new value for the layer output blob.
+//
+// For further details, please see:
+// https://docs.opencv.org/trunk/db/d30/classcv_1_1dnn_1_1Net.html#a672a08ae76444d75d05d7bfea3e4a328
+//
+func (net *Net) SetInput(blob Mat, name string) {
+ cName := C.CString(name)
+ defer C.free(unsafe.Pointer(cName))
+
+ C.Net_SetInput((C.Net)(net.p), blob.p, cName)
+}
+
+// Forward runs forward pass to compute output of layer with name outputName.
+//
+// For further details, please see:
+// https://docs.opencv.org/trunk/db/d30/classcv_1_1dnn_1_1Net.html#a98ed94cb6ef7063d3697259566da310b
+//
+func (net *Net) Forward(outputName string) Mat {
+ cName := C.CString(outputName)
+ defer C.free(unsafe.Pointer(cName))
+
+ return newMat(C.Net_Forward((C.Net)(net.p), cName))
+}
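+
+// A minimal usage sketch: load a network, feed it a blob built with the
+// BlobFromImage helper declared later in this file, and run a forward pass.
+// The file names, input size, and mean values are hypothetical and depend on
+// the model in use.
+//
+//   net := ReadNet("model.pb", "config.pbtxt")
+//   defer net.Close()
+//   img := IMRead("input.jpg", IMReadColor)
+//   defer img.Close()
+//   blob := BlobFromImage(img, 1.0, image.Pt(300, 300), NewScalar(104, 177, 123, 0), false, false)
+//   defer blob.Close()
+//   net.SetInput(blob, "")
+//   out := net.Forward("")
+//   defer out.Close()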
+
+// ForwardLayers runs a forward pass to compute the outputs of the layers listed in outBlobNames.
+//
+// For further details, please see:
+// https://docs.opencv.org/3.4.1/db/d30/classcv_1_1dnn_1_1Net.html#adb34d7650e555264c7da3b47d967311b
+//
+func (net *Net) ForwardLayers(outBlobNames []string) (blobs []Mat) {
+ cMats := C.struct_Mats{}
+ C.Net_ForwardLayers((C.Net)(net.p), &(cMats), toCStrings(outBlobNames))
+ blobs = make([]Mat, cMats.length)
+ for i := C.int(0); i < cMats.length; i++ {
+ blobs[i].p = C.Mats_get(cMats, i)
+ }
+ return
+}
+
+// SetPreferableBackend asks the network to use a specific computation backend.
+//
+// For further details, please see:
+// https://docs.opencv.org/3.4/db/d30/classcv_1_1dnn_1_1Net.html#a7f767df11386d39374db49cd8df8f59e
+//
+func (net *Net) SetPreferableBackend(backend NetBackendType) error {
+ C.Net_SetPreferableBackend((C.Net)(net.p), C.int(backend))
+ return nil
+}
+
+// SetPreferableTarget asks the network to make computations on a specific target device.
+//
+// For further details, please see:
+// https://docs.opencv.org/3.4/db/d30/classcv_1_1dnn_1_1Net.html#a9dddbefbc7f3defbe3eeb5dc3d3483f4
+//
+func (net *Net) SetPreferableTarget(target NetTargetType) error {
+ C.Net_SetPreferableTarget((C.Net)(net.p), C.int(target))
+ return nil
+}
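+
+// Illustrative sketch, not part of the original source: from a caller's point of
+// view, the backend/target pair is typically derived from user-supplied strings
+// via ParseNetBackend and ParseNetTarget. The "openvino"/"fp16" values and the
+// model file names below are arbitrary placeholders.
+//
+//  net := gocv.ReadNet("model.bin", "model.xml")
+//  defer net.Close()
+//  net.SetPreferableBackend(gocv.ParseNetBackend("openvino"))
+//  net.SetPreferableTarget(gocv.ParseNetTarget("fp16"))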
+
+// ReadNet reads a deep learning network represented in one of the supported formats.
+//
+// For further details, please see:
+// https://docs.opencv.org/3.4/d6/d0f/group__dnn.html#ga3b34fe7a29494a6a4295c169a7d32422
+//
+func ReadNet(model string, config string) Net {
+ cModel := C.CString(model)
+ defer C.free(unsafe.Pointer(cModel))
+
+ cConfig := C.CString(config)
+ defer C.free(unsafe.Pointer(cConfig))
+ return Net{p: unsafe.Pointer(C.Net_ReadNet(cModel, cConfig))}
+}
+
+// ReadNetBytes reads a deep learning network represented in one of the supported formats.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d6/d0f/group__dnn.html#ga138439da76f26266fdefec9723f6c5cd
+//
+func ReadNetBytes(framework string, model []byte, config []byte) (Net, error) {
+ cFramework := C.CString(framework)
+ defer C.free(unsafe.Pointer(cFramework))
+ bModel, err := toByteArray(model)
+ if err != nil {
+ return Net{}, err
+ }
+ bConfig, err := toByteArray(config)
+ if err != nil {
+ return Net{}, err
+ }
+ return Net{p: unsafe.Pointer(C.Net_ReadNetBytes(cFramework, *bModel, *bConfig))}, nil
+}
+
+// ReadNetFromCaffe reads a network model stored in Caffe framework's format.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d6/d0f/group__dnn.html#ga29d0ea5e52b1d1a6c2681e3f7d68473a
+//
+func ReadNetFromCaffe(prototxt string, caffeModel string) Net {
+ cprototxt := C.CString(prototxt)
+ defer C.free(unsafe.Pointer(cprototxt))
+
+ cmodel := C.CString(caffeModel)
+ defer C.free(unsafe.Pointer(cmodel))
+ return Net{p: unsafe.Pointer(C.Net_ReadNetFromCaffe(cprototxt, cmodel))}
+}
+
+// ReadNetFromCaffeBytes reads a network model stored in Caffe model in memory.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d6/d0f/group__dnn.html#ga946b342af1355185a7107640f868b64a
+//
+func ReadNetFromCaffeBytes(prototxt []byte, caffeModel []byte) (Net, error) {
+ bPrototxt, err := toByteArray(prototxt)
+ if err != nil {
+ return Net{}, err
+ }
+ bCaffeModel, err := toByteArray(caffeModel)
+ if err != nil {
+ return Net{}, err
+ }
+ return Net{p: unsafe.Pointer(C.Net_ReadNetFromCaffeBytes(*bPrototxt, *bCaffeModel))}, nil
+}
+
+// ReadNetFromTensorflow reads a network model stored in Tensorflow framework's format.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d6/d0f/group__dnn.html#gad820b280978d06773234ba6841e77e8d
+//
+func ReadNetFromTensorflow(model string) Net {
+ cmodel := C.CString(model)
+ defer C.free(unsafe.Pointer(cmodel))
+ return Net{p: unsafe.Pointer(C.Net_ReadNetFromTensorflow(cmodel))}
+}
+
+// ReadNetFromTensorflowBytes reads a network model stored in Tensorflow framework's format.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d6/d0f/group__dnn.html#gacdba30a7c20db2788efbf5bb16a7884d
+//
+func ReadNetFromTensorflowBytes(model []byte) (Net, error) {
+ bModel, err := toByteArray(model)
+ if err != nil {
+ return Net{}, err
+ }
+ return Net{p: unsafe.Pointer(C.Net_ReadNetFromTensorflowBytes(*bModel))}, nil
+}
+
+// BlobFromImage creates a 4-dimensional blob from an image. Optionally resizes and crops
+// the image from the center, subtracts mean values, scales values by scalefactor,
+// and swaps the Blue and Red channels.
+//
+// For further details, please see:
+// https://docs.opencv.org/trunk/d6/d0f/group__dnn.html#ga152367f253c81b53fe6862b299f5c5cd
+//
+func BlobFromImage(img Mat, scaleFactor float64, size image.Point, mean Scalar,
+ swapRB bool, crop bool) Mat {
+
+ sz := C.struct_Size{
+ width: C.int(size.X),
+ height: C.int(size.Y),
+ }
+
+ sMean := C.struct_Scalar{
+ val1: C.double(mean.Val1),
+ val2: C.double(mean.Val2),
+ val3: C.double(mean.Val3),
+ val4: C.double(mean.Val4),
+ }
+
+ return newMat(C.Net_BlobFromImage(img.p, C.double(scaleFactor), sz, sMean, C.bool(swapRB), C.bool(crop)))
+}
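+
+// Illustrative sketch, not part of the original source: a minimal inference
+// pipeline from a caller's point of view, assuming a Caffe SSD-style model.
+// The file names, input size and mean values are placeholders, and img is an
+// assumed BGR input Mat.
+//
+//  net := gocv.ReadNetFromCaffe("deploy.prototxt", "model.caffemodel")
+//  defer net.Close()
+//  blob := gocv.BlobFromImage(img, 1.0, image.Pt(300, 300),
+//      gocv.NewScalar(104, 177, 123, 0), false, false)
+//  defer blob.Close()
+//  net.SetInput(blob, "")
+//  prob := net.Forward("")
+//  defer prob.Close()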
+
+// BlobFromImages creates a 4-dimensional blob from a series of images.
+// Optionally resizes and crops the images from the center, subtracts mean values,
+// scales values by scalefactor, and swaps the Blue and Red channels.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d6/d0f/group__dnn.html#ga2b89ed84432e4395f5a1412c2926293c
+//
+func BlobFromImages(imgs []Mat, blob *Mat, scaleFactor float64, size image.Point, mean Scalar,
+ swapRB bool, crop bool, ddepth int) {
+
+ cMatArray := make([]C.Mat, len(imgs))
+ for i, r := range imgs {
+ cMatArray[i] = r.p
+ }
+
+ cMats := C.struct_Mats{
+ mats: (*C.Mat)(&cMatArray[0]),
+ length: C.int(len(imgs)),
+ }
+
+ sz := C.struct_Size{
+ width: C.int(size.X),
+ height: C.int(size.Y),
+ }
+
+ sMean := C.struct_Scalar{
+ val1: C.double(mean.Val1),
+ val2: C.double(mean.Val2),
+ val3: C.double(mean.Val3),
+ val4: C.double(mean.Val4),
+ }
+
+ C.Net_BlobFromImages(cMats, blob.p, C.double(scaleFactor), sz, sMean, C.bool(swapRB), C.bool(crop), C.int(ddepth))
+}
+
+// ImagesFromBlob parses a 4D blob and outputs the images it contains as
+// 2D arrays through a simpler data structure (std::vector<cv::Mat>).
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d6/d0f/group__dnn.html#ga4051b5fa2ed5f54b76c059a8625df9f5
+//
+func ImagesFromBlob(blob Mat, imgs []Mat) {
+ cMats := C.struct_Mats{}
+ C.Net_ImagesFromBlob(blob.p, &(cMats))
+ // mv = make([]Mat, cMats.length)
+ for i := C.int(0); i < cMats.length; i++ {
+ imgs[i].p = C.Mats_get(cMats, i)
+ }
+}
+
+// GetBlobChannel extracts a single (2D) channel from a 4-dimensional blob structure
+// (this might e.g. contain the results of an SSD or YOLO detection,
+// a bones structure from pose detection, or a color plane from colorization).
+//
+func GetBlobChannel(blob Mat, imgidx int, chnidx int) Mat {
+ return newMat(C.Net_GetBlobChannel(blob.p, C.int(imgidx), C.int(chnidx)))
+}
+
+// GetBlobSize retrieves the 4 dimensional size information in (N,C,H,W) order
+//
+func GetBlobSize(blob Mat) Scalar {
+ s := C.Net_GetBlobSize(blob.p)
+ return NewScalar(float64(s.val1), float64(s.val2), float64(s.val3), float64(s.val4))
+}
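+
+// Illustrative sketch, not part of the original source: inspecting the NCHW
+// shape of a blob (for example the prob Mat returned by Forward) and extracting
+// one channel of the first image in the batch.
+//
+//  sz := gocv.GetBlobSize(blob) // Val1=N, Val2=C, Val3=H, Val4=W
+//  ch := gocv.GetBlobChannel(blob, 0, 0)
+//  defer ch.Close()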
+
+// Layer is a wrapper around the cv::dnn::Layer algorithm.
+type Layer struct {
+ // C.Layer
+ p unsafe.Pointer
+}
+
+// GetLayer returns pointer to layer with specified id from the network.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/db/d30/classcv_1_1dnn_1_1Net.html#a70aec7f768f38c32b1ee25f3a56526df
+//
+func (net *Net) GetLayer(layer int) Layer {
+ return Layer{p: unsafe.Pointer(C.Net_GetLayer((C.Net)(net.p), C.int(layer)))}
+}
+
+// GetPerfProfile returns overall time for inference and timings (in ticks) for layers
+//
+// For further details, please see:
+// https://docs.opencv.org/master/db/d30/classcv_1_1dnn_1_1Net.html#a06ce946f675f75d1c020c5ddbc78aedc
+//
+func (net *Net) GetPerfProfile() float64 {
+ return float64(C.Net_GetPerfProfile((C.Net)(net.p)))
+}
+
+// GetUnconnectedOutLayers returns indexes of layers with unconnected outputs.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/db/d30/classcv_1_1dnn_1_1Net.html#ae62a73984f62c49fd3e8e689405b056a
+//
+func (net *Net) GetUnconnectedOutLayers() (ids []int) {
+ cids := C.IntVector{}
+ C.Net_GetUnconnectedOutLayers((C.Net)(net.p), &cids)
+
+ h := &reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(cids.val)),
+ Len: int(cids.length),
+ Cap: int(cids.length),
+ }
+ pcids := *(*[]C.int)(unsafe.Pointer(h)) // view the C array with its true element type (C.int), not Go's wider int
+
+ for i := 0; i < int(cids.length); i++ {
+ ids = append(ids, int(pcids[i]))
+ }
+ return
+}
+
+// GetLayerNames returns all layer names.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/db/d30/classcv_1_1dnn_1_1Net.html#ae8be9806024a0d1d41aba687cce99e6b
+//
+func (net *Net) GetLayerNames() (names []string) {
+ cstrs := C.CStrings{}
+ C.Net_GetLayerNames((C.Net)(net.p), &cstrs)
+
+ h := &reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(cstrs.strs)),
+ Len: int(cstrs.length),
+ Cap: int(cstrs.length),
+ }
+ pcstrs := *(*[]*C.char)(unsafe.Pointer(h)) // the C side hands back an array of C strings
+
+ for i := 0; i < int(cstrs.length); i++ {
+ names = append(names, C.GoString(pcstrs[i]))
+ }
+ return
+}
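+
+// Illustrative sketch, not part of the original source: resolving the names of
+// the unconnected output layers (the usual pattern for YOLO-style models, where
+// the layer ids returned by OpenCV are 1-based) and running a forward pass over them.
+//
+//  layerNames := net.GetLayerNames()
+//  var outNames []string
+//  for _, id := range net.GetUnconnectedOutLayers() {
+//      outNames = append(outNames, layerNames[id-1])
+//  }
+//  outputs := net.ForwardLayers(outNames)
+//  // ... postprocess outputs, then Close each Mat.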
+
+// Close Layer
+func (l *Layer) Close() error {
+ C.Layer_Close((C.Layer)(l.p))
+ l.p = nil
+ return nil
+}
+
+// GetName returns name for this layer.
+func (l *Layer) GetName() string {
+ return C.GoString(C.Layer_GetName((C.Layer)(l.p)))
+}
+
+// GetType returns type for this layer.
+func (l *Layer) GetType() string {
+ return C.GoString(C.Layer_GetType((C.Layer)(l.p)))
+}
+
+// InputNameToIndex returns index of input blob in input array.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d3/d6c/classcv_1_1dnn_1_1Layer.html#a60ffc8238f3fa26cd3f49daa7ac0884b
+//
+func (l *Layer) InputNameToIndex(name string) int {
+ cName := C.CString(name)
+ defer C.free(unsafe.Pointer(cName))
+ return int(C.Layer_InputNameToIndex((C.Layer)(l.p), cName))
+}
+
+// OutputNameToIndex returns index of output blob in output array.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d3/d6c/classcv_1_1dnn_1_1Layer.html#a60ffc8238f3fa26cd3f49daa7ac0884b
+//
+func (l *Layer) OutputNameToIndex(name string) int {
+ cName := C.CString(name)
+ defer C.free(unsafe.Pointer(cName))
+ return int(C.Layer_OutputNameToIndex((C.Layer)(l.p), cName))
+}
diff --git a/vendor/gocv.io/x/gocv/dnn.h b/vendor/gocv.io/x/gocv/dnn.h
new file mode 100644
index 0000000..7f85842
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/dnn.h
@@ -0,0 +1,58 @@
+#ifndef _OPENCV3_DNN_H_
+#define _OPENCV3_DNN_H_
+
+#include <stdbool.h>
+
+#ifdef __cplusplus
+#include <opencv2/opencv.hpp>
+#include <opencv2/dnn.hpp>
+extern "C" {
+#endif
+
+#include "core.h"
+
+#ifdef __cplusplus
+typedef cv::dnn::Net* Net;
+typedef cv::Ptr<cv::dnn::Layer>* Layer;
+#else
+typedef void* Net;
+typedef void* Layer;
+#endif
+
+Net Net_ReadNet(const char* model, const char* config);
+Net Net_ReadNetBytes(const char* framework, struct ByteArray model, struct ByteArray config);
+Net Net_ReadNetFromCaffe(const char* prototxt, const char* caffeModel);
+Net Net_ReadNetFromCaffeBytes(struct ByteArray prototxt, struct ByteArray caffeModel);
+Net Net_ReadNetFromTensorflow(const char* model);
+Net Net_ReadNetFromTensorflowBytes(struct ByteArray model);
+Mat Net_BlobFromImage(Mat image, double scalefactor, Size size, Scalar mean, bool swapRB,
+ bool crop);
+void Net_BlobFromImages(struct Mats images, Mat blob, double scalefactor, Size size,
+ Scalar mean, bool swapRB, bool crop, int ddepth);
+void Net_ImagesFromBlob(Mat blob_, struct Mats* images_);
+void Net_Close(Net net);
+bool Net_Empty(Net net);
+void Net_SetInput(Net net, Mat blob, const char* name);
+Mat Net_Forward(Net net, const char* outputName);
+void Net_ForwardLayers(Net net, struct Mats* outputBlobs, struct CStrings outBlobNames);
+void Net_SetPreferableBackend(Net net, int backend);
+void Net_SetPreferableTarget(Net net, int target);
+int64_t Net_GetPerfProfile(Net net);
+void Net_GetUnconnectedOutLayers(Net net, IntVector* res);
+void Net_GetLayerNames(Net net, CStrings* names);
+
+Mat Net_GetBlobChannel(Mat blob, int imgidx, int chnidx);
+Scalar Net_GetBlobSize(Mat blob);
+
+Layer Net_GetLayer(Net net, int layerid);
+void Layer_Close(Layer layer);
+int Layer_InputNameToIndex(Layer layer, const char* name);
+int Layer_OutputNameToIndex(Layer layer, const char* name);
+const char* Layer_GetName(Layer layer);
+const char* Layer_GetType(Layer layer);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //_OPENCV3_DNN_H_
diff --git a/vendor/gocv.io/x/gocv/dnn_async_openvino.go b/vendor/gocv.io/x/gocv/dnn_async_openvino.go
new file mode 100644
index 0000000..0425d99
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/dnn_async_openvino.go
@@ -0,0 +1,26 @@
+// +build openvino
+
+package gocv
+
+import (
+ "unsafe"
+)
+
+/*
+#include <stdlib.h>
+#include "dnn.h"
+#include "asyncarray.h"
+*/
+import "C"
+
+// ForwardAsync runs forward pass to compute output of layer with name outputName.
+//
+// For further details, please see:
+// https://docs.opencv.org/trunk/db/d30/classcv_1_1dnn_1_1Net.html#a814890154ea9e10b132fec00b6f6ba30
+//
+func (net *Net) ForwardAsync(outputName string) AsyncArray {
+ cName := C.CString(outputName)
+ defer C.free(unsafe.Pointer(cName))
+
+ return newAsyncArray(C.Net_forwardAsync((C.Net)(net.p), cName))
+}
diff --git a/vendor/gocv.io/x/gocv/dnn_ext.go b/vendor/gocv.io/x/gocv/dnn_ext.go
new file mode 100644
index 0000000..9ac2517
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/dnn_ext.go
@@ -0,0 +1,67 @@
+package gocv
+
+import (
+ "image"
+)
+
+// FP16BlobFromImage is an extended helper function to convert an Image to a half-float blob, as used by
+// the Movidius Neural Compute Stick.
+func FP16BlobFromImage(img Mat, scaleFactor float32, size image.Point, mean float32,
+ swapRB bool, crop bool) []byte {
+
+ // resizes image so it maintains aspect ratio
+ width := float32(img.Cols())
+ height := float32(img.Rows())
+
+ square := NewMatWithSize(size.Y, size.X, img.Type())
+ defer square.Close()
+
+ maxDim := height
+ var scale float32 = 1.0
+ if width > height {
+ maxDim = width
+ scale = float32(size.X) / float32(maxDim)
+ }
+ if width < height {
+ scale = float32(size.Y) / float32(maxDim)
+ }
+
+ var roi image.Rectangle
+ if width >= height {
+ roi.Min.X = 0
+ roi.Min.Y = int(float32(size.Y)-height*scale) / 2
+ roi.Max.X = size.X
+ roi.Max.Y = int(height * scale)
+ } else {
+ roi.Min.X = int(float32(size.X)-width*scale) / 2
+ roi.Min.Y = 0
+ roi.Max.X = int(width * scale)
+ roi.Max.Y = size.Y
+ }
+
+ Resize(img, &square, roi.Max, 0, 0, InterpolationDefault)
+
+ if swapRB {
+ CvtColor(square, &square, ColorBGRToRGB)
+ }
+
+ fp32Image := NewMat()
+ defer fp32Image.Close()
+
+ square.ConvertTo(&fp32Image, MatTypeCV32F)
+
+ if mean != 0 {
+ // subtract mean
+ fp32Image.SubtractFloat(mean)
+ }
+
+ if scaleFactor != 1.0 {
+ // multiply by scale factor
+ fp32Image.MultiplyFloat(scaleFactor)
+ }
+
+ fp16Blob := fp32Image.ConvertFp16()
+ defer fp16Blob.Close()
+
+ return fp16Blob.ToBytes()
+}
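+
+// Illustrative usage sketch, not part of the original source; the 224x224 size,
+// mean of 128 and scale factor are placeholder values for a hypothetical graph
+// expecting half-float input, and img is an assumed BGR input Mat.
+//
+//  blob := gocv.FP16BlobFromImage(img, 1.0/128.0, image.Pt(224, 224), 128.0, true, false)
+//  // blob is a []byte ready to be written to the device.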
diff --git a/vendor/gocv.io/x/gocv/dnn_string.go b/vendor/gocv.io/x/gocv/dnn_string.go
new file mode 100644
index 0000000..ac42d8d
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/dnn_string.go
@@ -0,0 +1,35 @@
+package gocv
+
+func (c NetBackendType) String() string {
+ switch c {
+ case NetBackendDefault:
+ return ""
+ case NetBackendHalide:
+ return "halide"
+ case NetBackendOpenVINO:
+ return "openvino"
+ case NetBackendOpenCV:
+ return "opencv"
+ case NetBackendVKCOM:
+ return "vulkan"
+ }
+ return ""
+}
+
+func (c NetTargetType) String() string {
+ switch c {
+ case NetTargetCPU:
+ return "cpu"
+ case NetTargetFP32:
+ return "fp32"
+ case NetTargetFP16:
+ return "fp16"
+ case NetTargetVPU:
+ return "vpu"
+ case NetTargetVulkan:
+ return "vulkan"
+ case NetTargetFPGA:
+ return "fpga"
+ }
+ return ""
+}
diff --git a/vendor/gocv.io/x/gocv/env.cmd b/vendor/gocv.io/x/gocv/env.cmd
new file mode 100644
index 0000000..02babfd
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/env.cmd
@@ -0,0 +1,2 @@
+ECHO This script is no longer necessary and has been deprecated.
+ECHO See the Custom Environment section of the README if you need to customize your environment.
diff --git a/vendor/gocv.io/x/gocv/env.sh b/vendor/gocv.io/x/gocv/env.sh
new file mode 100644
index 0000000..343148f
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/env.sh
@@ -0,0 +1,2 @@
+echo "This script is no longer necessary and has been deprecated."
+echo "See the Custom Environment section of the README if you need to customize your environment."
diff --git a/vendor/gocv.io/x/gocv/features2d.cpp b/vendor/gocv.io/x/gocv/features2d.cpp
new file mode 100644
index 0000000..8e98c28
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/features2d.cpp
@@ -0,0 +1,430 @@
+#include "features2d.h"
+
+AKAZE AKAZE_Create() {
+ // TODO: params
+ return new cv::Ptr<cv::AKAZE>(cv::AKAZE::create());
+}
+
+void AKAZE_Close(AKAZE a) {
+ delete a;
+}
+
+struct KeyPoints AKAZE_Detect(AKAZE a, Mat src) {
+ std::vector<cv::KeyPoint> detected;
+ (*a)->detect(*src, detected);
+
+ KeyPoint* kps = new KeyPoint[detected.size()];
+
+ for (size_t i = 0; i < detected.size(); ++i) {
+ KeyPoint k = {detected[i].pt.x, detected[i].pt.y, detected[i].size, detected[i].angle,
+ detected[i].response, detected[i].octave, detected[i].class_id
+ };
+ kps[i] = k;
+ }
+
+ KeyPoints ret = {kps, (int)detected.size()};
+ return ret;
+}
+
+struct KeyPoints AKAZE_DetectAndCompute(AKAZE a, Mat src, Mat mask, Mat desc) {
+ std::vector<cv::KeyPoint> detected;
+ (*a)->detectAndCompute(*src, *mask, detected, *desc);
+
+ KeyPoint* kps = new KeyPoint[detected.size()];
+
+ for (size_t i = 0; i < detected.size(); ++i) {
+ KeyPoint k = {detected[i].pt.x, detected[i].pt.y, detected[i].size, detected[i].angle,
+ detected[i].response, detected[i].octave, detected[i].class_id
+ };
+ kps[i] = k;
+ }
+
+ KeyPoints ret = {kps, (int)detected.size()};
+ return ret;
+}
+
+AgastFeatureDetector AgastFeatureDetector_Create() {
+ // TODO: params
+ return new cv::Ptr<cv::AgastFeatureDetector>(cv::AgastFeatureDetector::create());
+}
+
+void AgastFeatureDetector_Close(AgastFeatureDetector a) {
+ delete a;
+}
+
+struct KeyPoints AgastFeatureDetector_Detect(AgastFeatureDetector a, Mat src) {
+ std::vector<cv::KeyPoint> detected;
+ (*a)->detect(*src, detected);
+
+ KeyPoint* kps = new KeyPoint[detected.size()];
+
+ for (size_t i = 0; i < detected.size(); ++i) {
+ KeyPoint k = {detected[i].pt.x, detected[i].pt.y, detected[i].size, detected[i].angle,
+ detected[i].response, detected[i].octave, detected[i].class_id
+ };
+ kps[i] = k;
+ }
+
+ KeyPoints ret = {kps, (int)detected.size()};
+ return ret;
+}
+
+BRISK BRISK_Create() {
+ // TODO: params
+ return new cv::Ptr<cv::BRISK>(cv::BRISK::create());
+}
+
+void BRISK_Close(BRISK b) {
+ delete b;
+}
+
+struct KeyPoints BRISK_Detect(BRISK b, Mat src) {
+ std::vector<cv::KeyPoint> detected;
+ (*b)->detect(*src, detected);
+
+ KeyPoint* kps = new KeyPoint[detected.size()];
+
+ for (size_t i = 0; i < detected.size(); ++i) {
+ KeyPoint k = {detected[i].pt.x, detected[i].pt.y, detected[i].size, detected[i].angle,
+ detected[i].response, detected[i].octave, detected[i].class_id
+ };
+ kps[i] = k;
+ }
+
+ KeyPoints ret = {kps, (int)detected.size()};
+ return ret;
+}
+
+struct KeyPoints BRISK_DetectAndCompute(BRISK b, Mat src, Mat mask, Mat desc) {
+ std::vector<cv::KeyPoint> detected;
+ (*b)->detectAndCompute(*src, *mask, detected, *desc);
+
+ KeyPoint* kps = new KeyPoint[detected.size()];
+
+ for (size_t i = 0; i < detected.size(); ++i) {
+ KeyPoint k = {detected[i].pt.x, detected[i].pt.y, detected[i].size, detected[i].angle,
+ detected[i].response, detected[i].octave, detected[i].class_id
+ };
+ kps[i] = k;
+ }
+
+ KeyPoints ret = {kps, (int)detected.size()};
+ return ret;
+}
+
+GFTTDetector GFTTDetector_Create() {
+ // TODO: params
+ return new cv::Ptr<cv::GFTTDetector>(cv::GFTTDetector::create());
+}
+
+void GFTTDetector_Close(GFTTDetector a) {
+ delete a;
+}
+
+struct KeyPoints GFTTDetector_Detect(GFTTDetector a, Mat src) {
+ std::vector<cv::KeyPoint> detected;
+ (*a)->detect(*src, detected);
+
+ KeyPoint* kps = new KeyPoint[detected.size()];
+
+ for (size_t i = 0; i < detected.size(); ++i) {
+ KeyPoint k = {detected[i].pt.x, detected[i].pt.y, detected[i].size, detected[i].angle,
+ detected[i].response, detected[i].octave, detected[i].class_id
+ };
+ kps[i] = k;
+ }
+
+ KeyPoints ret = {kps, (int)detected.size()};
+ return ret;
+}
+
+KAZE KAZE_Create() {
+ // TODO: params
+ return new cv::Ptr<cv::KAZE>(cv::KAZE::create());
+}
+
+void KAZE_Close(KAZE a) {
+ delete a;
+}
+
+struct KeyPoints KAZE_Detect(KAZE a, Mat src) {
+ std::vector<cv::KeyPoint> detected;
+ (*a)->detect(*src, detected);
+
+ KeyPoint* kps = new KeyPoint[detected.size()];
+
+ for (size_t i = 0; i < detected.size(); ++i) {
+ KeyPoint k = {detected[i].pt.x, detected[i].pt.y, detected[i].size, detected[i].angle,
+ detected[i].response, detected[i].octave, detected[i].class_id
+ };
+ kps[i] = k;
+ }
+
+ KeyPoints ret = {kps, (int)detected.size()};
+ return ret;
+}
+
+struct KeyPoints KAZE_DetectAndCompute(KAZE a, Mat src, Mat mask, Mat desc) {
+ std::vector<cv::KeyPoint> detected;
+ (*a)->detectAndCompute(*src, *mask, detected, *desc);
+
+ KeyPoint* kps = new KeyPoint[detected.size()];
+
+ for (size_t i = 0; i < detected.size(); ++i) {
+ KeyPoint k = {detected[i].pt.x, detected[i].pt.y, detected[i].size, detected[i].angle,
+ detected[i].response, detected[i].octave, detected[i].class_id
+ };
+ kps[i] = k;
+ }
+
+ KeyPoints ret = {kps, (int)detected.size()};
+ return ret;
+}
+
+MSER MSER_Create() {
+ // TODO: params
+ return new cv::Ptr<cv::MSER>(cv::MSER::create());
+}
+
+void MSER_Close(MSER a) {
+ delete a;
+}
+
+struct KeyPoints MSER_Detect(MSER a, Mat src) {
+ std::vector<cv::KeyPoint> detected;
+ (*a)->detect(*src, detected);
+
+ KeyPoint* kps = new KeyPoint[detected.size()];
+
+ for (size_t i = 0; i < detected.size(); ++i) {
+ KeyPoint k = {detected[i].pt.x, detected[i].pt.y, detected[i].size, detected[i].angle,
+ detected[i].response, detected[i].octave, detected[i].class_id
+ };
+ kps[i] = k;
+ }
+
+ KeyPoints ret = {kps, (int)detected.size()};
+ return ret;
+}
+
+FastFeatureDetector FastFeatureDetector_Create() {
+ return new cv::Ptr<cv::FastFeatureDetector>(cv::FastFeatureDetector::create());
+}
+
+void FastFeatureDetector_Close(FastFeatureDetector f) {
+ delete f;
+}
+
+FastFeatureDetector FastFeatureDetector_CreateWithParams(int threshold, bool nonmaxSuppression, int type) {
+ return new cv::Ptr<cv::FastFeatureDetector>(cv::FastFeatureDetector::create(threshold, nonmaxSuppression, static_cast<cv::FastFeatureDetector::DetectorType>(type)));
+}
+
+struct KeyPoints FastFeatureDetector_Detect(FastFeatureDetector f, Mat src) {
+ std::vector<cv::KeyPoint> detected;
+ (*f)->detect(*src, detected);
+
+ KeyPoint* kps = new KeyPoint[detected.size()];
+
+ for (size_t i = 0; i < detected.size(); ++i) {
+ KeyPoint k = {detected[i].pt.x, detected[i].pt.y, detected[i].size, detected[i].angle,
+ detected[i].response, detected[i].octave, detected[i].class_id
+ };
+ kps[i] = k;
+ }
+
+ KeyPoints ret = {kps, (int)detected.size()};
+ return ret;
+}
+
+ORB ORB_Create() {
+ // TODO: params
+ return new cv::Ptr<cv::ORB>(cv::ORB::create());
+}
+
+void ORB_Close(ORB o) {
+ delete o;
+}
+
+struct KeyPoints ORB_Detect(ORB o, Mat src) {
+ std::vector<cv::KeyPoint> detected;
+ (*o)->detect(*src, detected);
+
+ KeyPoint* kps = new KeyPoint[detected.size()];
+
+ for (size_t i = 0; i < detected.size(); ++i) {
+ KeyPoint k = {detected[i].pt.x, detected[i].pt.y, detected[i].size, detected[i].angle,
+ detected[i].response, detected[i].octave, detected[i].class_id
+ };
+ kps[i] = k;
+ }
+
+ KeyPoints ret = {kps, (int)detected.size()};
+ return ret;
+}
+
+struct KeyPoints ORB_DetectAndCompute(ORB o, Mat src, Mat mask, Mat desc) {
+ std::vector<cv::KeyPoint> detected;
+ (*o)->detectAndCompute(*src, *mask, detected, *desc);
+
+ KeyPoint* kps = new KeyPoint[detected.size()];
+
+ for (size_t i = 0; i < detected.size(); ++i) {
+ KeyPoint k = {detected[i].pt.x, detected[i].pt.y, detected[i].size, detected[i].angle,
+ detected[i].response, detected[i].octave, detected[i].class_id
+ };
+ kps[i] = k;
+ }
+
+ KeyPoints ret = {kps, (int)detected.size()};
+ return ret;
+}
+
+cv::SimpleBlobDetector::Params ConvertCParamsToCPPParams(SimpleBlobDetectorParams params) {
+ cv::SimpleBlobDetector::Params converted;
+
+ converted.blobColor = params.blobColor;
+ converted.filterByArea = params.filterByArea;
+ converted.filterByCircularity = params.filterByCircularity;
+ converted.filterByColor = params.filterByColor;
+ converted.filterByConvexity = params.filterByConvexity;
+ converted.filterByInertia = params.filterByInertia;
+ converted.maxArea = params.maxArea;
+ converted.maxCircularity = params.maxCircularity;
+ converted.maxConvexity = params.maxConvexity;
+ converted.maxInertiaRatio = params.maxInertiaRatio;
+ converted.maxThreshold = params.maxThreshold;
+ converted.minArea = params.minArea;
+ converted.minCircularity = params.minCircularity;
+ converted.minConvexity = params.minConvexity;
+ converted.minDistBetweenBlobs = params.minDistBetweenBlobs;
+ converted.minInertiaRatio = params.minInertiaRatio;
+ converted.minRepeatability = params.minRepeatability;
+ converted.minThreshold = params.minThreshold;
+ converted.thresholdStep = params.thresholdStep;
+
+ return converted;
+}
+
+SimpleBlobDetectorParams ConvertCPPParamsToCParams(cv::SimpleBlobDetector::Params params) {
+ SimpleBlobDetectorParams converted;
+
+ converted.blobColor = params.blobColor;
+ converted.filterByArea = params.filterByArea;
+ converted.filterByCircularity = params.filterByCircularity;
+ converted.filterByColor = params.filterByColor;
+ converted.filterByConvexity = params.filterByConvexity;
+ converted.filterByInertia = params.filterByInertia;
+ converted.maxArea = params.maxArea;
+ converted.maxCircularity = params.maxCircularity;
+ converted.maxConvexity = params.maxConvexity;
+ converted.maxInertiaRatio = params.maxInertiaRatio;
+ converted.maxThreshold = params.maxThreshold;
+ converted.minArea = params.minArea;
+ converted.minCircularity = params.minCircularity;
+ converted.minConvexity = params.minConvexity;
+ converted.minDistBetweenBlobs = params.minDistBetweenBlobs;
+ converted.minInertiaRatio = params.minInertiaRatio;
+ converted.minRepeatability = params.minRepeatability;
+ converted.minThreshold = params.minThreshold;
+ converted.thresholdStep = params.thresholdStep;
+
+ return converted;
+}
+
+SimpleBlobDetector SimpleBlobDetector_Create_WithParams(SimpleBlobDetectorParams params){
+ cv::SimpleBlobDetector::Params actualParams;
+ return new cv::Ptr<cv::SimpleBlobDetector>(cv::SimpleBlobDetector::create(ConvertCParamsToCPPParams(params)));
+}
+
+SimpleBlobDetector SimpleBlobDetector_Create() {
+ return new cv::Ptr<cv::SimpleBlobDetector>(cv::SimpleBlobDetector::create());
+}
+
+SimpleBlobDetectorParams SimpleBlobDetectorParams_Create() {
+ return ConvertCPPParamsToCParams(cv::SimpleBlobDetector::Params());
+}
+
+void SimpleBlobDetector_Close(SimpleBlobDetector b) {
+ delete b;
+}
+
+struct KeyPoints SimpleBlobDetector_Detect(SimpleBlobDetector b, Mat src) {
+ std::vector<cv::KeyPoint> detected;
+ (*b)->detect(*src, detected);
+
+ KeyPoint* kps = new KeyPoint[detected.size()];
+
+ for (size_t i = 0; i < detected.size(); ++i) {
+ KeyPoint k = {detected[i].pt.x, detected[i].pt.y, detected[i].size, detected[i].angle,
+ detected[i].response, detected[i].octave, detected[i].class_id
+ };
+ kps[i] = k;
+ }
+
+ KeyPoints ret = {kps, (int)detected.size()};
+ return ret;
+}
+
+BFMatcher BFMatcher_Create() {
+ return new cv::Ptr<cv::BFMatcher>(cv::BFMatcher::create());
+}
+
+BFMatcher BFMatcher_CreateWithParams(int normType, bool crossCheck) {
+ return new cv::Ptr<cv::BFMatcher>(cv::BFMatcher::create(normType, crossCheck));
+}
+
+void BFMatcher_Close(BFMatcher b) {
+ delete b;
+}
+
+struct MultiDMatches BFMatcher_KnnMatch(BFMatcher b, Mat query, Mat train, int k) {
+ std::vector< std::vector<cv::DMatch> > matches;
+ (*b)->knnMatch(*query, *train, matches, k);
+
+ DMatches *dms = new DMatches[matches.size()];
+ for (size_t i = 0; i < matches.size(); ++i) {
+ DMatch *dmatches = new DMatch[matches[i].size()];
+ for (size_t j = 0; j < matches[i].size(); ++j) {
+ DMatch dmatch = {matches[i][j].queryIdx, matches[i][j].trainIdx, matches[i][j].imgIdx,
+ matches[i][j].distance};
+ dmatches[j] = dmatch;
+ }
+ dms[i] = {dmatches, (int) matches[i].size()};
+ }
+ MultiDMatches ret = {dms, (int) matches.size()};
+ return ret;
+}
+
+struct MultiDMatches BFMatcher_KnnMatchWithParams(BFMatcher b, Mat query, Mat train, int k, Mat mask, bool compactResult) {
+ std::vector< std::vector<cv::DMatch> > matches;
+ (*b)->knnMatch(*query, *train, matches, k, *mask, compactResult);
+
+ DMatches *dms = new DMatches[matches.size()];
+ for (size_t i = 0; i < matches.size(); ++i) {
+ DMatch *dmatches = new DMatch[matches[i].size()];
+ for (size_t j = 0; j < matches[i].size(); ++j) {
+ DMatch dmatch = {matches[i][j].queryIdx, matches[i][j].trainIdx, matches[i][j].imgIdx,
+ matches[i][j].distance};
+ dmatches[j] = dmatch;
+ }
+ dms[i] = {dmatches, (int) matches[i].size()};
+ }
+ MultiDMatches ret = {dms, (int) matches.size()};
+ return ret;
+}
+
+void DrawKeyPoints(Mat src, struct KeyPoints kp, Mat dst, Scalar s, int flags) {
+ std::vector<cv::KeyPoint> keypts;
+ cv::KeyPoint keypt;
+
+ for (int i = 0; i < kp.length; ++i) {
+ keypt = cv::KeyPoint(kp.keypoints[i].x, kp.keypoints[i].y,
+ kp.keypoints[i].size, kp.keypoints[i].angle, kp.keypoints[i].response,
+ kp.keypoints[i].octave, kp.keypoints[i].classID);
+ keypts.push_back(keypt);
+ }
+
+ cv::Scalar color = cv::Scalar(s.val1, s.val2, s.val3, s.val4);
+
+ cv::drawKeypoints(*src, keypts, *dst, color, static_cast<cv::DrawMatchesFlags>(flags));
+}
diff --git a/vendor/gocv.io/x/gocv/features2d.go b/vendor/gocv.io/x/gocv/features2d.go
new file mode 100644
index 0000000..f36d16e
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/features2d.go
@@ -0,0 +1,750 @@
+package gocv
+
+/*
+#include <stdlib.h>
+#include "features2d.h"
+*/
+import "C"
+import (
+ "image/color"
+ "reflect"
+ "unsafe"
+)
+
+// AKAZE is a wrapper around the cv::AKAZE algorithm.
+type AKAZE struct {
+ // C.AKAZE
+ p unsafe.Pointer
+}
+
+// NewAKAZE returns a new AKAZE algorithm
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d8/d30/classcv_1_1AKAZE.html
+//
+func NewAKAZE() AKAZE {
+ return AKAZE{p: unsafe.Pointer(C.AKAZE_Create())}
+}
+
+// Close AKAZE.
+func (a *AKAZE) Close() error {
+ C.AKAZE_Close((C.AKAZE)(a.p))
+ a.p = nil
+ return nil
+}
+
+// Detect keypoints in an image using AKAZE.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887
+//
+func (a *AKAZE) Detect(src Mat) []KeyPoint {
+ ret := C.AKAZE_Detect((C.AKAZE)(a.p), src.p)
+ defer C.KeyPoints_Close(ret)
+
+ return getKeyPoints(ret)
+}
+
+// DetectAndCompute keypoints and compute in an image using AKAZE.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#a8be0d1c20b08eb867184b8d74c15a677
+//
+func (a *AKAZE) DetectAndCompute(src Mat, mask Mat) ([]KeyPoint, Mat) {
+ desc := NewMat()
+ ret := C.AKAZE_DetectAndCompute((C.AKAZE)(a.p), src.p, mask.p, desc.p)
+ defer C.KeyPoints_Close(ret)
+
+ return getKeyPoints(ret), desc
+}
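+
+// Illustrative sketch, not part of the original source: detecting AKAZE keypoints
+// and descriptors on a grayscale image loaded with gocv.IMRead; the file name is
+// a placeholder and the empty mask means "use the whole image".
+//
+//  img := gocv.IMRead("scene.jpg", gocv.IMReadGrayScale)
+//  defer img.Close()
+//  ak := gocv.NewAKAZE()
+//  defer ak.Close()
+//  mask := gocv.NewMat()
+//  defer mask.Close()
+//  kps, desc := ak.DetectAndCompute(img, mask)
+//  defer desc.Close()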
+
+// AgastFeatureDetector is a wrapper around the cv::AgastFeatureDetector.
+type AgastFeatureDetector struct {
+ // C.AgastFeatureDetector
+ p unsafe.Pointer
+}
+
+// NewAgastFeatureDetector returns a new AgastFeatureDetector algorithm
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d7/d19/classcv_1_1AgastFeatureDetector.html
+//
+func NewAgastFeatureDetector() AgastFeatureDetector {
+ return AgastFeatureDetector{p: unsafe.Pointer(C.AgastFeatureDetector_Create())}
+}
+
+// Close AgastFeatureDetector.
+func (a *AgastFeatureDetector) Close() error {
+ C.AgastFeatureDetector_Close((C.AgastFeatureDetector)(a.p))
+ a.p = nil
+ return nil
+}
+
+// Detect keypoints in an image using AgastFeatureDetector.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887
+//
+func (a *AgastFeatureDetector) Detect(src Mat) []KeyPoint {
+ ret := C.AgastFeatureDetector_Detect((C.AgastFeatureDetector)(a.p), src.p)
+ defer C.KeyPoints_Close(ret)
+
+ return getKeyPoints(ret)
+}
+
+// BRISK is a wrapper around the cv::BRISK algorithm.
+type BRISK struct {
+ // C.BRISK
+ p unsafe.Pointer
+}
+
+// NewBRISK returns a new BRISK algorithm
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d8/d30/classcv_1_1AKAZE.html
+//
+func NewBRISK() BRISK {
+ return BRISK{p: unsafe.Pointer(C.BRISK_Create())}
+}
+
+// Close BRISK.
+func (b *BRISK) Close() error {
+ C.BRISK_Close((C.BRISK)(b.p))
+ b.p = nil
+ return nil
+}
+
+// Detect keypoints in an image using BRISK.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887
+//
+func (b *BRISK) Detect(src Mat) []KeyPoint {
+ ret := C.BRISK_Detect((C.BRISK)(b.p), src.p)
+ defer C.KeyPoints_Close(ret)
+
+ return getKeyPoints(ret)
+}
+
+// DetectAndCompute keypoints and compute in an image using BRISK.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#a8be0d1c20b08eb867184b8d74c15a677
+//
+func (b *BRISK) DetectAndCompute(src Mat, mask Mat) ([]KeyPoint, Mat) {
+ desc := NewMat()
+ ret := C.BRISK_DetectAndCompute((C.BRISK)(b.p), src.p, mask.p, desc.p)
+ defer C.KeyPoints_Close(ret)
+
+ return getKeyPoints(ret), desc
+}
+
+// FastFeatureDetectorType defines the detector type
+//
+// For further details, please see:
+// https://docs.opencv.org/master/df/d74/classcv_1_1FastFeatureDetector.html#a4654f6fb0aa4b8e9123b223bfa0e2a08
+type FastFeatureDetectorType int
+
+const (
+ //FastFeatureDetectorType58 is an alias of FastFeatureDetector::TYPE_5_8
+ FastFeatureDetectorType58 FastFeatureDetectorType = 0
+ //FastFeatureDetectorType712 is an alias of FastFeatureDetector::TYPE_7_12
+ FastFeatureDetectorType712 = 1
+ //FastFeatureDetectorType916 is an alias of FastFeatureDetector::TYPE_9_16
+ FastFeatureDetectorType916 = 2
+)
+
+// FastFeatureDetector is a wrapper around the cv::FastFeatureDetector.
+type FastFeatureDetector struct {
+ // C.FastFeatureDetector
+ p unsafe.Pointer
+}
+
+// NewFastFeatureDetector returns a new FastFeatureDetector algorithm
+//
+// For further details, please see:
+// https://docs.opencv.org/master/df/d74/classcv_1_1FastFeatureDetector.html
+//
+func NewFastFeatureDetector() FastFeatureDetector {
+ return FastFeatureDetector{p: unsafe.Pointer(C.FastFeatureDetector_Create())}
+}
+
+// NewFastFeatureDetectorWithParams returns a new FastFeatureDetector algorithm with parameters
+//
+// For further details, please see:
+// https://docs.opencv.org/master/df/d74/classcv_1_1FastFeatureDetector.html#ab986f2ff8f8778aab1707e2642bc7f8e
+//
+func NewFastFeatureDetectorWithParams(threshold int, nonmaxSuppression bool, typ FastFeatureDetectorType) FastFeatureDetector {
+ return FastFeatureDetector{p: unsafe.Pointer(C.FastFeatureDetector_CreateWithParams(C.int(threshold), C.bool(nonmaxSuppression), C.int(typ)))}
+}
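+
+// Illustrative sketch, not part of the original source: a FAST detector with a
+// raised threshold, non-maximum suppression enabled and the 9_16 pixel pattern;
+// the threshold of 40 is an arbitrary example and img is an assumed input Mat.
+//
+//  fast := gocv.NewFastFeatureDetectorWithParams(40, true, gocv.FastFeatureDetectorType916)
+//  defer fast.Close()
+//  kps := fast.Detect(img)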
+
+// Close FastFeatureDetector.
+func (f *FastFeatureDetector) Close() error {
+ C.FastFeatureDetector_Close((C.FastFeatureDetector)(f.p))
+ f.p = nil
+ return nil
+}
+
+// Detect keypoints in an image using FastFeatureDetector.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887
+//
+func (f *FastFeatureDetector) Detect(src Mat) []KeyPoint {
+ ret := C.FastFeatureDetector_Detect((C.FastFeatureDetector)(f.p), src.p)
+ defer C.KeyPoints_Close(ret)
+
+ return getKeyPoints(ret)
+}
+
+// GFTTDetector is a wrapper around the cv::GFTTDetector algorithm.
+type GFTTDetector struct {
+ // C.GFTTDetector
+ p unsafe.Pointer
+}
+
+// NewGFTTDetector returns a new GFTTDetector algorithm
+//
+// For further details, please see:
+// https://docs.opencv.org/master/df/d21/classcv_1_1GFTTDetector.html
+//
+func NewGFTTDetector() GFTTDetector {
+ return GFTTDetector{p: unsafe.Pointer(C.GFTTDetector_Create())}
+}
+
+// Close GFTTDetector.
+func (a *GFTTDetector) Close() error {
+ C.GFTTDetector_Close((C.GFTTDetector)(a.p))
+ a.p = nil
+ return nil
+}
+
+// Detect keypoints in an image using GFTTDetector.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887
+//
+func (a *GFTTDetector) Detect(src Mat) []KeyPoint {
+ ret := C.GFTTDetector_Detect((C.GFTTDetector)(a.p), src.p)
+ defer C.KeyPoints_Close(ret)
+
+ return getKeyPoints(ret)
+}
+
+// KAZE is a wrapper around the cv::KAZE algorithm.
+type KAZE struct {
+ // C.KAZE
+ p unsafe.Pointer
+}
+
+// NewKAZE returns a new KAZE algorithm
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d3/d61/classcv_1_1KAZE.html
+//
+func NewKAZE() KAZE {
+ return KAZE{p: unsafe.Pointer(C.KAZE_Create())}
+}
+
+// Close KAZE.
+func (a *KAZE) Close() error {
+ C.KAZE_Close((C.KAZE)(a.p))
+ a.p = nil
+ return nil
+}
+
+// Detect keypoints in an image using KAZE.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887
+//
+func (a *KAZE) Detect(src Mat) []KeyPoint {
+ ret := C.KAZE_Detect((C.KAZE)(a.p), src.p)
+ defer C.KeyPoints_Close(ret)
+
+ return getKeyPoints(ret)
+}
+
+// DetectAndCompute keypoints and compute in an image using KAZE.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#a8be0d1c20b08eb867184b8d74c15a677
+//
+func (a *KAZE) DetectAndCompute(src Mat, mask Mat) ([]KeyPoint, Mat) {
+ desc := NewMat()
+ ret := C.KAZE_DetectAndCompute((C.KAZE)(a.p), src.p, mask.p, desc.p)
+ defer C.KeyPoints_Close(ret)
+
+ return getKeyPoints(ret), desc
+}
+
+// MSER is a wrapper around the cv::MSER algorithm.
+type MSER struct {
+ // C.MSER
+ p unsafe.Pointer
+}
+
+// NewMSER returns a new MSER algorithm
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d3/d28/classcv_1_1MSER.html
+//
+func NewMSER() MSER {
+ return MSER{p: unsafe.Pointer(C.MSER_Create())}
+}
+
+// Close MSER.
+func (a *MSER) Close() error {
+ C.MSER_Close((C.MSER)(a.p))
+ a.p = nil
+ return nil
+}
+
+// Detect keypoints in an image using MSER.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887
+//
+func (a *MSER) Detect(src Mat) []KeyPoint {
+ ret := C.MSER_Detect((C.MSER)(a.p), src.p)
+ defer C.KeyPoints_Close(ret)
+
+ return getKeyPoints(ret)
+}
+
+// ORB is a wrapper around the cv::ORB.
+type ORB struct {
+ // C.ORB
+ p unsafe.Pointer
+}
+
+// NewORB returns a new ORB algorithm
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d7/d19/classcv_1_1AgastFeatureDetector.html
+//
+func NewORB() ORB {
+ return ORB{p: unsafe.Pointer(C.ORB_Create())}
+}
+
+// Close ORB.
+func (o *ORB) Close() error {
+ C.ORB_Close((C.ORB)(o.p))
+ o.p = nil
+ return nil
+}
+
+// Detect keypoints in an image using ORB.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887
+//
+func (o *ORB) Detect(src Mat) []KeyPoint {
+ ret := C.ORB_Detect((C.ORB)(o.p), src.p)
+ defer C.KeyPoints_Close(ret)
+
+ return getKeyPoints(ret)
+}
+
+// DetectAndCompute detects keypoints and computes from an image using ORB.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#a8be0d1c20b08eb867184b8d74c15a677
+//
+func (o *ORB) DetectAndCompute(src Mat, mask Mat) ([]KeyPoint, Mat) {
+ desc := NewMat()
+ ret := C.ORB_DetectAndCompute((C.ORB)(o.p), src.p, mask.p, desc.p)
+ defer C.KeyPoints_Close(ret)
+
+ return getKeyPoints(ret), desc
+}
+
+// SimpleBlobDetector is a wrapper around the cv::SimpleBlobDetector.
+type SimpleBlobDetector struct {
+ // C.SimpleBlobDetector
+ p unsafe.Pointer
+}
+
+// SimpleBlobDetectorParams is a wrapper around the cv::SimpleBlobDetector::Params
+type SimpleBlobDetectorParams struct {
+ p C.SimpleBlobDetectorParams
+}
+
+// NewSimpleBlobDetector returns a new SimpleBlobDetector algorithm
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d0/d7a/classcv_1_1SimpleBlobDetector.html
+//
+func NewSimpleBlobDetector() SimpleBlobDetector {
+ return SimpleBlobDetector{p: unsafe.Pointer(C.SimpleBlobDetector_Create())}
+}
+
+// NewSimpleBlobDetectorWithParams returns a new SimpleBlobDetector with custom parameters
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d0/d7a/classcv_1_1SimpleBlobDetector.html
+//
+func NewSimpleBlobDetectorWithParams(params SimpleBlobDetectorParams) SimpleBlobDetector {
+ return SimpleBlobDetector{p: unsafe.Pointer(C.SimpleBlobDetector_Create_WithParams(params.p))}
+}
+
+// Close SimpleBlobDetector.
+func (b *SimpleBlobDetector) Close() error {
+ C.SimpleBlobDetector_Close((C.SimpleBlobDetector)(b.p))
+ b.p = nil
+ return nil
+}
+
+// NewSimpleBlobDetectorParams returns the default parameters for the SimpleBlobDetector
+func NewSimpleBlobDetectorParams() SimpleBlobDetectorParams {
+ return SimpleBlobDetectorParams{p: C.SimpleBlobDetectorParams_Create()}
+}
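+
+// Illustrative sketch, not part of the original source: customizing the default
+// parameters before building the detector (the setters are defined below); the
+// area bounds are arbitrary example values.
+//
+//  params := gocv.NewSimpleBlobDetectorParams()
+//  params.SetFilterByArea(true)
+//  params.SetMinArea(20)
+//  params.SetMaxArea(500)
+//  bd := gocv.NewSimpleBlobDetectorWithParams(params)
+//  defer bd.Close()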
+
+// SetBlobColor sets the blobColor field
+func (p *SimpleBlobDetectorParams) SetBlobColor(blobColor int) {
+ p.p.blobColor = C.uchar(blobColor)
+}
+
+// GetBlobColor gets the blobColor field
+func (p *SimpleBlobDetectorParams) GetBlobColor() int {
+ return int(p.p.blobColor)
+}
+
+// SetFilterByArea sets the filterByArea field
+func (p *SimpleBlobDetectorParams) SetFilterByArea(filterByArea bool) {
+ p.p.filterByArea = C.bool(filterByArea)
+}
+
+// GetFilterByArea gets the filterByArea field
+func (p *SimpleBlobDetectorParams) GetFilterByArea() bool {
+ return bool(p.p.filterByArea)
+}
+
+// SetFilterByCircularity sets the filterByCircularity field
+func (p *SimpleBlobDetectorParams) SetFilterByCircularity(filterByCircularity bool) {
+ p.p.filterByCircularity = C.bool(filterByCircularity)
+}
+
+// GetFilterByCircularity gets the filterByCircularity field
+func (p *SimpleBlobDetectorParams) GetFilterByCircularity() bool {
+ return bool(p.p.filterByCircularity)
+}
+
+// SetFilterByColor sets the filterByColor field
+func (p *SimpleBlobDetectorParams) SetFilterByColor(filterByColor bool) {
+ p.p.filterByColor = C.bool(filterByColor)
+}
+
+// GetFilterByColor gets the filterByColor field
+func (p *SimpleBlobDetectorParams) GetFilterByColor() bool {
+ return bool(p.p.filterByColor)
+}
+
+// SetFilterByConvexity sets the filterByConvexity field
+func (p *SimpleBlobDetectorParams) SetFilterByConvexity(filterByConvexity bool) {
+ p.p.filterByConvexity = C.bool(filterByConvexity)
+}
+
+// GetFilterByConvexity gets the filterByConvexity field
+func (p *SimpleBlobDetectorParams) GetFilterByConvexity() bool {
+ return bool(p.p.filterByConvexity)
+}
+
+// SetFilterByInertia sets the filterByInertia field
+func (p *SimpleBlobDetectorParams) SetFilterByInertia(filterByInertia bool) {
+ p.p.filterByInertia = C.bool(filterByInertia)
+}
+
+// GetFilterByInertia gets the filterByInertia field
+func (p *SimpleBlobDetectorParams) GetFilterByInertia() bool {
+ return bool(p.p.filterByInertia)
+}
+
+// SetMaxArea sets the maxArea parameter for SimpleBlobDetector_Params
+func (p *SimpleBlobDetectorParams) SetMaxArea(maxArea float64) {
+ p.p.maxArea = C.float(maxArea)
+}
+
+// GetMaxArea gets the maxArea parameter for SimpleBlobDetector_Params
+func (p *SimpleBlobDetectorParams) GetMaxArea() float64 {
+ return float64(p.p.maxArea)
+}
+
+// SetMaxCircularity sets the maxCircularity parameter for SimpleBlobDetector_Params
+func (p *SimpleBlobDetectorParams) SetMaxCircularity(maxCircularity float64) {
+ p.p.maxCircularity = C.float(maxCircularity)
+}
+
+// GetMaxCircularity gets the maxCircularity parameter for SimpleBlobDetector_Params
+func (p *SimpleBlobDetectorParams) GetMaxCircularity() float64 {
+ return float64(p.p.maxCircularity)
+}
+
+// SetMaxConvexity sets the maxConvexity parameter for SimpleBlobDetector_Params
+func (p *SimpleBlobDetectorParams) SetMaxConvexity(maxConvexity float64) {
+ p.p.maxConvexity = C.float(maxConvexity)
+}
+
+// GetMaxConvexity gets the maxConvexity parameter for SimpleBlobDetector_Params
+func (p *SimpleBlobDetectorParams) GetMaxConvexity() float64 {
+ return float64(p.p.maxConvexity)
+}
+
+// SetMaxInertiaRatio sets the maxInertiaRatio parameter for SimpleBlobDetector_Params
+func (p *SimpleBlobDetectorParams) SetMaxInertiaRatio(maxInertiaRatio float64) {
+ p.p.maxInertiaRatio = C.float(maxInertiaRatio)
+}
+
+// GetMaxInertiaRatio gets the maxInertiaRatio parameter for SimpleBlobDetector_Params
+func (p *SimpleBlobDetectorParams) GetMaxInertiaRatio() float64 {
+ return float64(p.p.maxInertiaRatio)
+}
+
+// SetMaxThreshold sets the maxThreshold parameter for SimpleBlobDetector_Params
+func (p *SimpleBlobDetectorParams) SetMaxThreshold(maxThreshold float64) {
+ p.p.maxThreshold = C.float(maxThreshold)
+}
+
+// GetMaxThreshold gets the maxThreshold parameter for SimpleBlobDetector_Params
+func (p *SimpleBlobDetectorParams) GetMaxThreshold() float64 {
+ return float64(p.p.maxThreshold)
+}
+
+// SetMinArea sets the minArea parameter for SimpleBlobDetector_Params
+func (p *SimpleBlobDetectorParams) SetMinArea(minArea float64) {
+ p.p.minArea = C.float(minArea)
+}
+
+// GetMinArea gets the minArea parameter for SimpleBlobDetector_Params
+func (p *SimpleBlobDetectorParams) GetMinArea() float64 {
+ return float64(p.p.minArea)
+}
+
+// SetMinCircularity sets the minCircularity parameter for SimpleBlobDetector_Params
+func (p *SimpleBlobDetectorParams) SetMinCircularity(minCircularity float64) {
+ p.p.minCircularity = C.float(minCircularity)
+}
+
+// GetMinCircularity gets the minCircularity parameter for SimpleBlobDetector_Params
+func (p *SimpleBlobDetectorParams) GetMinCircularity() float64 {
+ return float64(p.p.minCircularity)
+}
+
+// SetMinConvexity sets the minConvexity parameter for SimpleBlobDetector_Params
+func (p *SimpleBlobDetectorParams) SetMinConvexity(minConvexity float64) {
+ p.p.minConvexity = C.float(minConvexity)
+}
+
+// GetMinConvexity gets the minConvexity parameter for SimpleBlobDetector_Params
+func (p *SimpleBlobDetectorParams) GetMinConvexity() float64 {
+ return float64(p.p.minConvexity)
+}
+
+// SetMinDistBetweenBlobs sets the minDistBetweenBlobs parameter for SimpleBlobDetector_Params
+func (p *SimpleBlobDetectorParams) SetMinDistBetweenBlobs(minDistBetweenBlobs float64) {
+ p.p.minDistBetweenBlobs = C.float(minDistBetweenBlobs)
+}
+
+// GetMinDistBetweenBlobs gets the minDistBetweenBlobs parameter for SimpleBlobDetector_Params
+func (p *SimpleBlobDetectorParams) GetMinDistBetweenBlobs() float64 {
+ return float64(p.p.minDistBetweenBlobs)
+}
+
+// SetMinInertiaRatio sets the minInertiaRatio parameter for SimpleBlobDetector_Params
+func (p *SimpleBlobDetectorParams) SetMinInertiaRatio(minInertiaRatio float64) {
+ p.p.minInertiaRatio = C.float(minInertiaRatio)
+}
+
+// GetMinInertiaRatio gets the minInertiaRatio parameter for SimpleBlobDetector_Params
+func (p *SimpleBlobDetectorParams) GetMinInertiaRatio() float64 {
+ return float64(p.p.minInertiaRatio)
+}
+
+// SetMinRepeatability sets the minRepeatability parameter for SimpleBlobDetector_Params
+func (p *SimpleBlobDetectorParams) SetMinRepeatability(minRepeatability int) {
+ p.p.minRepeatability = C.size_t(minRepeatability)
+}
+
+// GetMinRepeatability gets the minRepeatability parameter for SimpleBlobDetector_Params
+func (p *SimpleBlobDetectorParams) GetMinRepeatability() int {
+ return int(p.p.minRepeatability)
+}
+
+// SetMinThreshold sets the minThreshold parameter for SimpleBlobDetector_Params
+func (p *SimpleBlobDetectorParams) SetMinThreshold(minThreshold float64) {
+ p.p.minThreshold = C.float(minThreshold)
+}
+
+// GetMinThreshold gets the minThreshold parameter for SimpleBlobDetector_Params
+func (p *SimpleBlobDetectorParams) GetMinThreshold() float64 {
+ return float64(p.p.minThreshold)
+}
+
+// SetThresholdStep sets the thresholdStep parameter for SimpleBlobDetector_Params
+func (p *SimpleBlobDetectorParams) SetThresholdStep(thresholdStep float64) {
+ p.p.thresholdStep = C.float(thresholdStep)
+}
+
+// GetThresholdStep gets the thresholdStep parameter for SimpleBlobDetector_Params
+func (p *SimpleBlobDetectorParams) GetThresholdStep() float64 {
+ return float64(p.p.thresholdStep)
+}
+
+// Detect keypoints in an image using SimpleBlobDetector.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887
+//
+func (b *SimpleBlobDetector) Detect(src Mat) []KeyPoint {
+ ret := C.SimpleBlobDetector_Detect((C.SimpleBlobDetector)(b.p), src.p)
+ defer C.KeyPoints_Close(ret)
+
+ return getKeyPoints(ret)
+}
+
+// getKeyPoints returns a slice of KeyPoint given a pointer to a C.KeyPoints
+func getKeyPoints(ret C.KeyPoints) []KeyPoint {
+ cArray := ret.keypoints
+ length := int(ret.length)
+ hdr := reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(cArray)),
+ Len: length,
+ Cap: length,
+ }
+ s := *(*[]C.KeyPoint)(unsafe.Pointer(&hdr))
+
+ keys := make([]KeyPoint, length)
+ for i, r := range s {
+ keys[i] = KeyPoint{float64(r.x), float64(r.y), float64(r.size), float64(r.angle), float64(r.response),
+ int(r.octave), int(r.classID)}
+ }
+ return keys
+}
+
+// BFMatcher is a wrapper around the cv::BFMatcher algorithm
+type BFMatcher struct {
+ // C.BFMatcher
+ p unsafe.Pointer
+}
+
+// NewBFMatcher returns a new BFMatcher
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d3/da1/classcv_1_1BFMatcher.html#abe0bb11749b30d97f60d6ade665617bd
+//
+func NewBFMatcher() BFMatcher {
+ return BFMatcher{p: unsafe.Pointer(C.BFMatcher_Create())}
+}
+
+// NewBFMatcherWithParams creates a new BFMatcher but allows setting parameters
+// to values other than just the defaults.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d3/da1/classcv_1_1BFMatcher.html#abe0bb11749b30d97f60d6ade665617bd
+//
+func NewBFMatcherWithParams(normType NormType, crossCheck bool) BFMatcher {
+ return BFMatcher{p: unsafe.Pointer(C.BFMatcher_CreateWithParams(C.int(normType), C.bool(crossCheck)))}
+}
+
+// Close BFMatcher
+func (b *BFMatcher) Close() error {
+ C.BFMatcher_Close((C.BFMatcher)(b.p))
+ b.p = nil
+ return nil
+}
+
+// KnnMatch Finds the k best matches for each descriptor from a query set.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/db/d39/classcv_1_1DescriptorMatcher.html#aa880f9353cdf185ccf3013e08210483a
+//
+func (b *BFMatcher) KnnMatch(query, train Mat, k int) [][]DMatch {
+ ret := C.BFMatcher_KnnMatch((C.BFMatcher)(b.p), query.p, train.p, C.int(k))
+ defer C.MultiDMatches_Close(ret)
+
+ return getMultiDMatches(ret)
+}
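+
+// Illustrative sketch, not part of the original source: a Lowe-style ratio test
+// over KnnMatch results for two binary descriptor Mats desc1 and desc2 (assumed
+// to come from ORB/AKAZE/BRISK); gocv's NormHamming constant and the 0.75 ratio
+// are conventional choices, not requirements.
+//
+//  bf := gocv.NewBFMatcherWithParams(gocv.NormHamming, false)
+//  defer bf.Close()
+//  var good []gocv.DMatch
+//  for _, pair := range bf.KnnMatch(desc1, desc2, 2) {
+//      if len(pair) == 2 && pair[0].Distance < 0.75*pair[1].Distance {
+//          good = append(good, pair[0])
+//      }
+//  }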
+
+func getMultiDMatches(ret C.MultiDMatches) [][]DMatch {
+ cArray := ret.dmatches
+ length := int(ret.length)
+ hdr := reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(cArray)),
+ Len: length,
+ Cap: length,
+ }
+ s := *(*[]C.DMatches)(unsafe.Pointer(&hdr))
+
+ keys := make([][]DMatch, length)
+ for i := range s {
+ keys[i] = getDMatches(C.MultiDMatches_get(ret, C.int(i)))
+ }
+ return keys
+}
+
+func getDMatches(ret C.DMatches) []DMatch {
+ cArray := ret.dmatches
+ length := int(ret.length)
+ hdr := reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(cArray)),
+ Len: length,
+ Cap: length,
+ }
+ s := *(*[]C.DMatch)(unsafe.Pointer(&hdr))
+
+ keys := make([]DMatch, length)
+ for i, r := range s {
+ keys[i] = DMatch{int(r.queryIdx), int(r.trainIdx), int(r.imgIdx),
+ float64(r.distance)}
+ }
+ return keys
+}
+
+// DrawMatchesFlag are the flags setting drawing feature
+//
+// For further details please see:
+// https://docs.opencv.org/master/de/d30/structcv_1_1DrawMatchesFlags.html
+type DrawMatchesFlag int
+
+const (
+ // DrawDefault creates new image and for each keypoint only the center point will be drawn
+ DrawDefault DrawMatchesFlag = 0
+ // DrawOverOutImg draws matches on existing content of image
+ DrawOverOutImg = 1
+ // NotDrawSinglePoints will not draw single points
+ NotDrawSinglePoints = 2
+ // DrawRichKeyPoints draws the circle around each keypoint with keypoint size and orientation
+ DrawRichKeyPoints = 3
+)
+
+// DrawKeyPoints draws keypoints
+//
+// For further details please see:
+// https://docs.opencv.org/master/d4/d5d/group__features2d__draw.html#gab958f8900dd10f14316521c149a60433
+func DrawKeyPoints(src Mat, keyPoints []KeyPoint, dst *Mat, color color.RGBA, flag DrawMatchesFlag) {
+ cKeyPointArray := make([]C.struct_KeyPoint, len(keyPoints))
+
+ for i, kp := range keyPoints {
+ cKeyPointArray[i].x = C.double(kp.X)
+ cKeyPointArray[i].y = C.double(kp.Y)
+ cKeyPointArray[i].size = C.double(kp.Size)
+ cKeyPointArray[i].angle = C.double(kp.Angle)
+ cKeyPointArray[i].response = C.double(kp.Response)
+ cKeyPointArray[i].octave = C.int(kp.Octave)
+ cKeyPointArray[i].classID = C.int(kp.ClassID)
+ }
+
+ cKeyPoints := C.struct_KeyPoints{
+ keypoints: (*C.struct_KeyPoint)(&cKeyPointArray[0]),
+ length: (C.int)(len(keyPoints)),
+ }
+
+ scalar := C.struct_Scalar{
+ val1: C.double(color.R),
+ val2: C.double(color.G),
+ val3: C.double(color.B),
+ val4: C.double(color.A),
+ }
+
+ C.DrawKeyPoints(src.p, cKeyPoints, dst.p, scalar, C.int(flag))
+}
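+
+// Illustrative sketch, not part of the original source: drawing previously
+// detected keypoints (kps) from an input Mat img onto an output image in green.
+//
+//  out := gocv.NewMat()
+//  defer out.Close()
+//  gocv.DrawKeyPoints(img, kps, &out, color.RGBA{0, 255, 0, 0}, gocv.DrawDefault)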
diff --git a/vendor/gocv.io/x/gocv/features2d.h b/vendor/gocv.io/x/gocv/features2d.h
new file mode 100644
index 0000000..8c68f93
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/features2d.h
@@ -0,0 +1,89 @@
+#ifndef _OPENCV3_FEATURES2D_H_
+#define _OPENCV3_FEATURES2D_H_
+
+#ifdef __cplusplus
+#include <opencv2/opencv.hpp>
+extern "C" {
+#endif
+
+#include "core.h"
+
+#ifdef __cplusplus
+typedef cv::Ptr<cv::AKAZE>* AKAZE;
+typedef cv::Ptr<cv::AgastFeatureDetector>* AgastFeatureDetector;
+typedef cv::Ptr<cv::BRISK>* BRISK;
+typedef cv::Ptr<cv::FastFeatureDetector>* FastFeatureDetector;
+typedef cv::Ptr<cv::GFTTDetector>* GFTTDetector;
+typedef cv::Ptr<cv::KAZE>* KAZE;
+typedef cv::Ptr<cv::MSER>* MSER;
+typedef cv::Ptr<cv::ORB>* ORB;
+typedef cv::Ptr<cv::SimpleBlobDetector>* SimpleBlobDetector;
+typedef cv::Ptr<cv::BFMatcher>* BFMatcher;
+#else
+typedef void* AKAZE;
+typedef void* AgastFeatureDetector;
+typedef void* BRISK;
+typedef void* FastFeatureDetector;
+typedef void* GFTTDetector;
+typedef void* KAZE;
+typedef void* MSER;
+typedef void* ORB;
+typedef void* SimpleBlobDetector;
+typedef void* BFMatcher;
+#endif
+
+AKAZE AKAZE_Create();
+void AKAZE_Close(AKAZE a);
+struct KeyPoints AKAZE_Detect(AKAZE a, Mat src);
+struct KeyPoints AKAZE_DetectAndCompute(AKAZE a, Mat src, Mat mask, Mat desc);
+
+AgastFeatureDetector AgastFeatureDetector_Create();
+void AgastFeatureDetector_Close(AgastFeatureDetector a);
+struct KeyPoints AgastFeatureDetector_Detect(AgastFeatureDetector a, Mat src);
+
+BRISK BRISK_Create();
+void BRISK_Close(BRISK b);
+struct KeyPoints BRISK_Detect(BRISK b, Mat src);
+struct KeyPoints BRISK_DetectAndCompute(BRISK b, Mat src, Mat mask, Mat desc);
+
+FastFeatureDetector FastFeatureDetector_Create();
+FastFeatureDetector FastFeatureDetector_CreateWithParams(int threshold, bool nonmaxSuppression, int type);
+void FastFeatureDetector_Close(FastFeatureDetector f);
+struct KeyPoints FastFeatureDetector_Detect(FastFeatureDetector f, Mat src);
+
+GFTTDetector GFTTDetector_Create();
+void GFTTDetector_Close(GFTTDetector a);
+struct KeyPoints GFTTDetector_Detect(GFTTDetector a, Mat src);
+
+KAZE KAZE_Create();
+void KAZE_Close(KAZE a);
+struct KeyPoints KAZE_Detect(KAZE a, Mat src);
+struct KeyPoints KAZE_DetectAndCompute(KAZE a, Mat src, Mat mask, Mat desc);
+
+MSER MSER_Create();
+void MSER_Close(MSER a);
+struct KeyPoints MSER_Detect(MSER a, Mat src);
+
+ORB ORB_Create();
+void ORB_Close(ORB o);
+struct KeyPoints ORB_Detect(ORB o, Mat src);
+struct KeyPoints ORB_DetectAndCompute(ORB o, Mat src, Mat mask, Mat desc);
+
+SimpleBlobDetector SimpleBlobDetector_Create();
+SimpleBlobDetector SimpleBlobDetector_Create_WithParams(SimpleBlobDetectorParams params);
+void SimpleBlobDetector_Close(SimpleBlobDetector b);
+struct KeyPoints SimpleBlobDetector_Detect(SimpleBlobDetector b, Mat src);
+SimpleBlobDetectorParams SimpleBlobDetectorParams_Create();
+
+BFMatcher BFMatcher_Create();
+BFMatcher BFMatcher_CreateWithParams(int normType, bool crossCheck);
+void BFMatcher_Close(BFMatcher b);
+struct MultiDMatches BFMatcher_KnnMatch(BFMatcher b, Mat query, Mat train, int k);
+
+void DrawKeyPoints(Mat src, struct KeyPoints kp, Mat dst, const Scalar s, int flags);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //_OPENCV3_FEATURES2D_H_
diff --git a/vendor/gocv.io/x/gocv/features2d_string.go b/vendor/gocv.io/x/gocv/features2d_string.go
new file mode 100644
index 0000000..21637c6
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/features2d_string.go
@@ -0,0 +1,33 @@
+package gocv
+
+/*
+#include <stdlib.h>
+#include "features2d.h"
+*/
+import "C"
+
+func (c FastFeatureDetectorType) String() string {
+ switch c {
+ case FastFeatureDetectorType58:
+ return "fast-feature-detector-type-58"
+ case FastFeatureDetectorType712:
+ return "fast-feature-detector-type-712"
+ case FastFeatureDetectorType916:
+ return "fast-feature-detector-type-916"
+ }
+ return ""
+}
+
+func (c DrawMatchesFlag) String() string {
+ switch c {
+ case DrawDefault:
+ return "draw-default"
+ case DrawOverOutImg:
+ return "draw-over-out-imt"
+ case NotDrawSinglePoints:
+ return "draw-single-points"
+ case DrawRichKeyPoints:
+ return "draw-rich-key-points"
+ }
+ return ""
+}
diff --git a/vendor/gocv.io/x/gocv/go.mod b/vendor/gocv.io/x/gocv/go.mod
new file mode 100644
index 0000000..c00a082
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/go.mod
@@ -0,0 +1,3 @@
+module gocv.io/x/gocv
+
+go 1.13
\ No newline at end of file
diff --git a/vendor/gocv.io/x/gocv/gocv.go b/vendor/gocv.io/x/gocv/gocv.go
new file mode 100644
index 0000000..c3b0126
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/gocv.go
@@ -0,0 +1,11 @@
+// Package gocv is a wrapper around the OpenCV 4.x computer vision library.
+// It provides a Go language interface to the latest version of OpenCV.
+//
+// OpenCV (Open Source Computer Vision Library: http://opencv.org) is an
+// open-source BSD-licensed library that includes several hundreds of
+// computer vision algorithms.
+//
+// For further details, please see:
+// http://docs.opencv.org/master/d1/dfb/intro.html
+//
+package gocv // import "gocv.io/x/gocv"
diff --git a/vendor/gocv.io/x/gocv/highgui.cpp b/vendor/gocv.io/x/gocv/highgui.cpp
new file mode 100644
index 0000000..db31181
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/highgui.cpp
@@ -0,0 +1,79 @@
+#include "highgui_gocv.h"
+
+// Window
+void Window_New(const char* winname, int flags) {
+ cv::namedWindow(winname, flags);
+}
+
+void Window_Close(const char* winname) {
+ cv::destroyWindow(winname);
+}
+
+void Window_IMShow(const char* winname, Mat mat) {
+ cv::imshow(winname, *mat);
+}
+
+double Window_GetProperty(const char* winname, int flag) {
+ return cv::getWindowProperty(winname, flag);
+}
+
+void Window_SetProperty(const char* winname, int flag, double value) {
+ cv::setWindowProperty(winname, flag, value);
+}
+
+void Window_SetTitle(const char* winname, const char* title) {
+ cv::setWindowTitle(winname, title);
+}
+
+int Window_WaitKey(int delay = 0) {
+ return cv::waitKey(delay);
+}
+
+void Window_Move(const char* winname, int x, int y) {
+ cv::moveWindow(winname, x, y);
+}
+
+void Window_Resize(const char* winname, int width, int height) {
+ cv::resizeWindow(winname, width, height);
+}
+
+struct Rect Window_SelectROI(const char* winname, Mat img) {
+ cv::Rect bRect = cv::selectROI(winname, *img);
+ Rect r = {bRect.x, bRect.y, bRect.width, bRect.height};
+ return r;
+}
+
+struct Rects Window_SelectROIs(const char* winname, Mat img) {
+ std::vector<cv::Rect> rois;
+ cv::selectROIs(winname, *img, rois);
+ Rect* rects = new Rect[rois.size()];
+
+ for (size_t i = 0; i < rois.size(); ++i) {
+ Rect r = {rois[i].x, rois[i].y, rois[i].width, rois[i].height};
+ rects[i] = r;
+ }
+
+ Rects ret = {rects, (int)rois.size()};
+ return ret;
+}
+
+// Trackbar
+void Trackbar_Create(const char* winname, const char* trackname, int max) {
+ cv::createTrackbar(trackname, winname, NULL, max);
+}
+
+int Trackbar_GetPos(const char* winname, const char* trackname) {
+ return cv::getTrackbarPos(trackname, winname);
+}
+
+void Trackbar_SetPos(const char* winname, const char* trackname, int pos) {
+ cv::setTrackbarPos(trackname, winname, pos);
+}
+
+void Trackbar_SetMin(const char* winname, const char* trackname, int pos) {
+ cv::setTrackbarMin(trackname, winname, pos);
+}
+
+void Trackbar_SetMax(const char* winname, const char* trackname, int pos) {
+ cv::setTrackbarMax(trackname, winname, pos);
+}
diff --git a/vendor/gocv.io/x/gocv/highgui.go b/vendor/gocv.io/x/gocv/highgui.go
new file mode 100644
index 0000000..84b8f6c
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/highgui.go
@@ -0,0 +1,323 @@
+package gocv
+
+/*
+#include <stdlib.h>
+#include "highgui_gocv.h"
+*/
+import "C"
+import (
+ "image"
+ "runtime"
+ "unsafe"
+)
+
+// Window is a wrapper around OpenCV's "HighGUI" named windows.
+// While OpenCV was designed for use in full-scale applications and can be used
+// within functionally rich UI frameworks (such as Qt*, WinForms*, or Cocoa*)
+// or without any UI at all, it is sometimes necessary to try functionality
+// quickly and visualize the results. This is what the HighGUI module has been designed for.
+//
+// For further details, please see:
+// http://docs.opencv.org/master/d7/dfc/group__highgui.html
+//
+type Window struct {
+ name string
+ open bool
+}
+
+// NewWindow creates a new named OpenCV window
+//
+// For further details, please see:
+// http://docs.opencv.org/master/d7/dfc/group__highgui.html#ga5afdf8410934fd099df85c75b2e0888b
+//
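+// Usage example (a minimal sketch of the typical show/wait/close flow; img is
+// assumed to be an already-loaded Mat):
+//
+//   window := gocv.NewWindow("preview")
+//   defer window.Close()
+//   window.IMShow(img)
+//   window.WaitKey(0)
+//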
+func NewWindow(name string) *Window {
+ runtime.LockOSThread()
+
+ cName := C.CString(name)
+ defer C.free(unsafe.Pointer(cName))
+
+ C.Window_New(cName, 0)
+
+ return &Window{name: name, open: true}
+}
+
+// Close closes and deletes a named OpenCV Window.
+//
+// For further details, please see:
+// http://docs.opencv.org/master/d7/dfc/group__highgui.html#ga851ccdd6961022d1d5b4c4f255dbab34
+//
+func (w *Window) Close() error {
+ cName := C.CString(w.name)
+ defer C.free(unsafe.Pointer(cName))
+
+ C.Window_Close(cName)
+ w.open = false
+
+ runtime.UnlockOSThread()
+ return nil
+}
+
+// IsOpen checks to see if the Window seems to be open.
+func (w *Window) IsOpen() bool {
+ return w.open
+}
+
+// WindowFlag value for SetWindowProperty / GetWindowProperty.
+type WindowFlag float32
+
+const (
+ // WindowNormal indicates a normal window.
+ WindowNormal WindowFlag = 0
+
+ // WindowFullscreen indicates a full-screen window.
+ WindowFullscreen = 1
+
+ // WindowAutosize indicates a window sized based on the contents.
+ WindowAutosize = 1
+
+ // WindowFreeRatio allows the user to resize the window without maintaining its aspect ratio.
+ WindowFreeRatio = 0x00000100
+
+ // WindowKeepRatio always maintains an aspect ratio that matches the contents.
+ WindowKeepRatio = 0
+)
+
+// WindowPropertyFlag flags for SetWindowProperty / GetWindowProperty.
+type WindowPropertyFlag int
+
+const (
+ // WindowPropertyFullscreen fullscreen property
+ // (can be WINDOW_NORMAL or WINDOW_FULLSCREEN).
+ WindowPropertyFullscreen WindowPropertyFlag = 0
+
+ // WindowPropertyAutosize is autosize property
+ // (can be WINDOW_NORMAL or WINDOW_AUTOSIZE).
+ WindowPropertyAutosize = 1
+
+ // WindowPropertyAspectRatio is the window's aspect ratio
+ // (can be set to WINDOW_FREERATIO or WINDOW_KEEPRATIO).
+ WindowPropertyAspectRatio = 2
+
+ // WindowPropertyOpenGL opengl support.
+ WindowPropertyOpenGL = 3
+
+ // WindowPropertyVisible indicates whether the window is visible.
+ WindowPropertyVisible = 4
+)
+
+// GetWindowProperty returns properties of a window.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d7/dfc/group__highgui.html#gaaf9504b8f9cf19024d9d44a14e461656
+//
+func (w *Window) GetWindowProperty(flag WindowPropertyFlag) float64 {
+ cName := C.CString(w.name)
+ defer C.free(unsafe.Pointer(cName))
+
+ return float64(C.Window_GetProperty(cName, C.int(flag)))
+}
+
+// SetWindowProperty changes parameters of a window dynamically.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d7/dfc/group__highgui.html#ga66e4a6db4d4e06148bcdfe0d70a5df27
+//
+func (w *Window) SetWindowProperty(flag WindowPropertyFlag, value WindowFlag) {
+ cName := C.CString(w.name)
+ defer C.free(unsafe.Pointer(cName))
+
+ C.Window_SetProperty(cName, C.int(flag), C.double(value))
+}
+
+// SetWindowTitle updates window title.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d7/dfc/group__highgui.html#ga56f8849295fd10d0c319724ddb773d96
+//
+func (w *Window) SetWindowTitle(title string) {
+ cName := C.CString(w.name)
+ defer C.free(unsafe.Pointer(cName))
+
+ cTitle := C.CString(title)
+ defer C.free(unsafe.Pointer(cTitle))
+
+ C.Window_SetTitle(cName, cTitle)
+}
+
+// IMShow displays an image Mat in the specified window.
+// This function should be followed by the WaitKey function which displays
+// the image for specified milliseconds. Otherwise, it won't display the image.
+//
+// For further details, please see:
+// http://docs.opencv.org/master/d7/dfc/group__highgui.html#ga453d42fe4cb60e5723281a89973ee563
+//
+func (w *Window) IMShow(img Mat) {
+ cName := C.CString(w.name)
+ defer C.free(unsafe.Pointer(cName))
+
+ C.Window_IMShow(cName, img.p)
+}
+
+// WaitKey waits for a pressed key.
+// This function is the only method in OpenCV's HighGUI that can fetch
+// and handle events, so it needs to be called periodically
+// for normal event processing.
+//
+// For further details, please see:
+// http://docs.opencv.org/master/d7/dfc/group__highgui.html#ga5628525ad33f52eab17feebcfba38bd7
+//
+func (w *Window) WaitKey(delay int) int {
+ return int(C.Window_WaitKey(C.int(delay)))
+}
+
+// MoveWindow moves window to the specified position.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d7/dfc/group__highgui.html#ga8d86b207f7211250dbe6e28f76307ffb
+//
+func (w *Window) MoveWindow(x, y int) {
+ cName := C.CString(w.name)
+ defer C.free(unsafe.Pointer(cName))
+
+ C.Window_Move(cName, C.int(x), C.int(y))
+}
+
+// ResizeWindow resizes window to the specified size.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d7/dfc/group__highgui.html#ga9e80e080f7ef33f897e415358aee7f7e
+//
+func (w *Window) ResizeWindow(width, height int) {
+ cName := C.CString(w.name)
+ defer C.free(unsafe.Pointer(cName))
+
+ C.Window_Resize(cName, C.int(width), C.int(height))
+}
+
+// SelectROI selects a Region Of Interest (ROI) on the given image.
+// It creates a window and allows the user to select a ROI using the mouse.
+//
+// Controls:
+// use space or enter to finish selection,
+// use key c to cancel selection (function will return a zero Rect).
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d7/dfc/group__highgui.html#ga8daf4730d3adf7035b6de9be4c469af5
+//
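+// Usage example (a sketch; img is an already-loaded Mat, and Region is the Mat
+// method defined elsewhere in this package):
+//
+//   roi := gocv.SelectROI("select", img)
+//   cropped := img.Region(roi)
+//   defer cropped.Close()
+//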
+func SelectROI(name string, img Mat) image.Rectangle {
+ cName := C.CString(name)
+ defer C.free(unsafe.Pointer(cName))
+
+ r := C.Window_SelectROI(cName, img.p)
+ rect := image.Rect(int(r.x), int(r.y), int(r.x+r.width), int(r.y+r.height))
+ return rect
+}
+
+// SelectROIs selects multiple Regions Of Interest (ROI) on the given image.
+// It creates a window and allows the user to select ROIs using the mouse.
+//
+// Controls:
+// use space or enter to finish current selection and start a new one
+// use esc to terminate multiple ROI selection process
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d7/dfc/group__highgui.html#ga0f11fad74a6432b8055fb21621a0f893
+//
+func SelectROIs(name string, img Mat) []image.Rectangle {
+ cName := C.CString(name)
+ defer C.free(unsafe.Pointer(cName))
+
+ ret := C.Window_SelectROIs(cName, img.p)
+ defer C.Rects_Close(ret)
+
+ return toRectangles(ret)
+}
+
+// WaitKey waits for a pressed key, without being attached to a specific Window.
+// Only use this when no Window exists in your application, e.g. a command line app.
+//
+func WaitKey(delay int) int {
+ return int(C.Window_WaitKey(C.int(delay)))
+}
+
+// Trackbar is a wrapper around OpenCV's "HighGUI" window Trackbars.
+type Trackbar struct {
+ name string
+ parent *Window
+}
+
+// CreateTrackbar creates a trackbar and attaches it to the specified window.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d7/dfc/group__highgui.html#gaf78d2155d30b728fc413803745b67a9b
+//
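+// Usage example (a sketch; window is a *Window created with NewWindow, and the
+// trackbar name and range are illustrative):
+//
+//   bar := window.CreateTrackbar("threshold", 255)
+//   bar.SetPos(128)
+//   value := bar.GetPos()
+//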
+func (w *Window) CreateTrackbar(name string, max int) *Trackbar {
+ cName := C.CString(w.name)
+ defer C.free(unsafe.Pointer(cName))
+
+ tName := C.CString(name)
+ defer C.free(unsafe.Pointer(tName))
+
+ C.Trackbar_Create(cName, tName, C.int(max))
+ return &Trackbar{name: name, parent: w}
+}
+
+// GetPos returns the trackbar position.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d7/dfc/group__highgui.html#ga122632e9e91b9ec06943472c55d9cda8
+//
+func (t *Trackbar) GetPos() int {
+ cName := C.CString(t.parent.name)
+ defer C.free(unsafe.Pointer(cName))
+
+ tName := C.CString(t.name)
+ defer C.free(unsafe.Pointer(tName))
+
+ return int(C.Trackbar_GetPos(cName, tName))
+}
+
+// SetPos sets the trackbar position.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d7/dfc/group__highgui.html#ga67d73c4c9430f13481fd58410d01bd8d
+//
+func (t *Trackbar) SetPos(pos int) {
+ cName := C.CString(t.parent.name)
+ defer C.free(unsafe.Pointer(cName))
+
+ tName := C.CString(t.name)
+ defer C.free(unsafe.Pointer(tName))
+
+ C.Trackbar_SetPos(cName, tName, C.int(pos))
+}
+
+// SetMin sets the trackbar minimum position.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d7/dfc/group__highgui.html#gabe26ffe8d2b60cc678895595a581b7aa
+//
+func (t *Trackbar) SetMin(pos int) {
+ cName := C.CString(t.parent.name)
+ defer C.free(unsafe.Pointer(cName))
+
+ tName := C.CString(t.name)
+ defer C.free(unsafe.Pointer(tName))
+
+ C.Trackbar_SetMin(cName, tName, C.int(pos))
+}
+
+// SetMax sets the trackbar maximum position.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d7/dfc/group__highgui.html#ga7e5437ccba37f1154b65210902fc4480
+//
+func (t *Trackbar) SetMax(pos int) {
+ cName := C.CString(t.parent.name)
+ defer C.free(unsafe.Pointer(cName))
+
+ tName := C.CString(t.name)
+ defer C.free(unsafe.Pointer(tName))
+
+ C.Trackbar_SetMax(cName, tName, C.int(pos))
+}
diff --git a/vendor/gocv.io/x/gocv/highgui_gocv.h b/vendor/gocv.io/x/gocv/highgui_gocv.h
new file mode 100644
index 0000000..58d9726
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/highgui_gocv.h
@@ -0,0 +1,35 @@
+#ifndef _OPENCV3_HIGHGUI_H_
+#define _OPENCV3_HIGHGUI_H_
+
+#ifdef __cplusplus
+#include <opencv2/opencv.hpp>
+extern "C" {
+#endif
+
+#include "core.h"
+
+// Window
+void Window_New(const char* winname, int flags);
+void Window_Close(const char* winname);
+void Window_IMShow(const char* winname, Mat mat);
+double Window_GetProperty(const char* winname, int flag);
+void Window_SetProperty(const char* winname, int flag, double value);
+void Window_SetTitle(const char* winname, const char* title);
+int Window_WaitKey(int);
+void Window_Move(const char* winname, int x, int y);
+void Window_Resize(const char* winname, int width, int height);
+struct Rect Window_SelectROI(const char* winname, Mat img);
+struct Rects Window_SelectROIs(const char* winname, Mat img);
+
+// Trackbar
+void Trackbar_Create(const char* winname, const char* trackname, int max);
+int Trackbar_GetPos(const char* winname, const char* trackname);
+void Trackbar_SetPos(const char* winname, const char* trackname, int pos);
+void Trackbar_SetMin(const char* winname, const char* trackname, int pos);
+void Trackbar_SetMax(const char* winname, const char* trackname, int pos);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //_OPENCV3_HIGHGUI_H_
diff --git a/vendor/gocv.io/x/gocv/highgui_string.go b/vendor/gocv.io/x/gocv/highgui_string.go
new file mode 100644
index 0000000..d073bbe
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/highgui_string.go
@@ -0,0 +1,35 @@
+package gocv
+
+/*
+#include <stdlib.h>
+#include "highgui_gocv.h"
+*/
+import "C"
+
+func (c WindowFlag) String() string {
+ switch c {
+ case WindowNormal:
+ return "window-normal"
+ case WindowFullscreen:
+ return "window-fullscreen"
+ case WindowFreeRatio:
+ return "window-free-ratio"
+ }
+ return ""
+}
+
+func (c WindowPropertyFlag) String() string {
+ switch c {
+ case WindowPropertyFullscreen:
+ return "window-property-fullscreen"
+ case WindowPropertyAutosize:
+ return "window-property-autosize"
+ case WindowPropertyAspectRatio:
+ return "window-property-aspect-ratio"
+ case WindowPropertyOpenGL:
+ return "window-property-opengl"
+ case WindowPropertyVisible:
+ return "window-property-visible"
+ }
+ return ""
+}
diff --git a/vendor/gocv.io/x/gocv/imgcodecs.cpp b/vendor/gocv.io/x/gocv/imgcodecs.cpp
new file mode 100644
index 0000000..1f3afe5
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/imgcodecs.cpp
@@ -0,0 +1,46 @@
+#include "imgcodecs.h"
+
+// Image
+Mat Image_IMRead(const char* filename, int flags) {
+ cv::Mat img = cv::imread(filename, flags);
+ return new cv::Mat(img);
+}
+
+
+bool Image_IMWrite(const char* filename, Mat img) {
+ return cv::imwrite(filename, *img);
+}
+
+bool Image_IMWrite_WithParams(const char* filename, Mat img, IntVector params) {
+ std::vector<int> compression_params;
+
+ for (int i = 0, *v = params.val; i < params.length; ++v, ++i) {
+ compression_params.push_back(*v);
+ }
+
+ return cv::imwrite(filename, *img, compression_params);
+}
+
+struct ByteArray Image_IMEncode(const char* fileExt, Mat img) {
+ std::vector<uchar> data;
+ cv::imencode(fileExt, *img, data);
+ return toByteArray(reinterpret_cast<const char*>(&data[0]), data.size());
+}
+
+struct ByteArray Image_IMEncode_WithParams(const char* fileExt, Mat img, IntVector params) {
+ std::vector<uchar> data;
+ std::vector<int> compression_params;
+
+ for (int i = 0, *v = params.val; i < params.length; ++v, ++i) {
+ compression_params.push_back(*v);
+ }
+
+ cv::imencode(fileExt, *img, data, compression_params);
+ return toByteArray(reinterpret_cast<const char*>(&data[0]), data.size());
+}
+
+Mat Image_IMDecode(ByteArray buf, int flags) {
+ std::vector<char> data(buf.data, buf.data + buf.length);
+ cv::Mat img = cv::imdecode(data, flags);
+ return new cv::Mat(img);
+}
diff --git a/vendor/gocv.io/x/gocv/imgcodecs.go b/vendor/gocv.io/x/gocv/imgcodecs.go
new file mode 100644
index 0000000..dea467f
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/imgcodecs.go
@@ -0,0 +1,248 @@
+package gocv
+
+/*
+#include <stdlib.h>
+#include "imgcodecs.h"
+*/
+import "C"
+import (
+ "unsafe"
+)
+
+// IMReadFlag is one of the valid flags to use for the IMRead function.
+type IMReadFlag int
+
+const (
+ // IMReadUnchanged returns the loaded image as-is (with alpha channel;
+ // otherwise it gets cropped).
+ IMReadUnchanged IMReadFlag = -1
+
+ // IMReadGrayScale always converts the image to a single-channel
+ // grayscale image.
+ IMReadGrayScale = 0
+
+ // IMReadColor always converts image to the 3 channel BGR color image.
+ IMReadColor = 1
+
+ // IMReadAnyDepth returns 16-bit/32-bit image when the input has the corresponding
+ // depth, otherwise convert it to 8-bit.
+ IMReadAnyDepth = 2
+
+ // IMReadAnyColor the image is read in any possible color format.
+ IMReadAnyColor = 4
+
+ // IMReadLoadGDAL uses the gdal driver for loading the image.
+ IMReadLoadGDAL = 8
+
+ // IMReadReducedGrayscale2 always converts image to the single channel grayscale image
+ // and the image size reduced 1/2.
+ IMReadReducedGrayscale2 = 16
+
+ // IMReadReducedColor2 always converts image to the 3 channel BGR color image and the
+ // image size reduced 1/2.
+ IMReadReducedColor2 = 17
+
+ // IMReadReducedGrayscale4 always converts image to the single channel grayscale image and
+ // the image size reduced 1/4.
+ IMReadReducedGrayscale4 = 32
+
+ // IMReadReducedColor4 always converts image to the 3 channel BGR color image and
+ // the image size reduced 1/4.
+ IMReadReducedColor4 = 33
+
+ // IMReadReducedGrayscale8 always converts the image to the single channel grayscale image and
+ // the image size reduced 1/8.
+ IMReadReducedGrayscale8 = 64
+
+ // IMReadReducedColor8 always converts the image to the 3 channel BGR color image and the
+ // image size reduced 1/8.
+ IMReadReducedColor8 = 65
+
+ // IMReadIgnoreOrientation do not rotate the image according to EXIF's orientation flag.
+ IMReadIgnoreOrientation = 128
+
+ // IMWriteJpegQuality is the quality from 0 to 100 for JPEG (the higher the better). Default value is 95.
+ IMWriteJpegQuality = 1
+
+ // IMWriteJpegProgressive enables JPEG progressive feature, 0 or 1, default is False.
+ IMWriteJpegProgressive = 2
+
+ // IMWriteJpegOptimize enables JPEG optimization, 0 or 1, default is False.
+ IMWriteJpegOptimize = 3
+
+ // IMWriteJpegRstInterval is the JPEG restart interval, 0 - 65535, default is 0 - no restart.
+ IMWriteJpegRstInterval = 4
+
+ // IMWriteJpegLumaQuality separates luma quality level, 0 - 100, default is 0 - don't use.
+ IMWriteJpegLumaQuality = 5
+
+ // IMWriteJpegChromaQuality separates chroma quality level, 0 - 100, default is 0 - don't use.
+ IMWriteJpegChromaQuality = 6
+
+ // IMWritePngCompression is the compression level from 0 to 9 for PNG. A
+ // higher value means a smaller size and longer compression time.
+ // If specified, strategy is changed to IMWRITE_PNG_STRATEGY_DEFAULT (Z_DEFAULT_STRATEGY).
+ // Default value is 1 (best speed setting).
+ IMWritePngCompression = 16
+
+ // IMWritePngStrategy is one of cv::IMWritePNGFlags, default is IMWRITE_PNG_STRATEGY_RLE.
+ IMWritePngStrategy = 17
+
+ // IMWritePngBilevel is the binary level PNG, 0 or 1, default is 0.
+ IMWritePngBilevel = 18
+
+ // IMWritePxmBinary for PPM, PGM, or PBM can be a binary format flag, 0 or 1. Default value is 1.
+ IMWritePxmBinary = 32
+
+ // IMWriteWebpQuality is the quality from 1 to 100 for WEBP (the higher
+ // the better). By default (without any parameter), and for quality above
+ // 100, lossless compression is used.
+ IMWriteWebpQuality = 64
+
+ // IMWritePamTupletype sets the TUPLETYPE field to the corresponding string
+ // value that is defined for the format.
+ IMWritePamTupletype = 128
+
+ // IMWritePngStrategyDefault is the value to use for normal data.
+ IMWritePngStrategyDefault = 0
+
+ // IMWritePngStrategyFiltered is the value to use for data produced by a
+ // filter (or predictor). Filtered data consists mostly of small values
+ // with a somewhat random distribution. In this case, the compression
+ // algorithm is tuned to compress them better.
+ IMWritePngStrategyFiltered = 1
+
+ // IMWritePngStrategyHuffmanOnly forces Huffman encoding only (no string match).
+ IMWritePngStrategyHuffmanOnly = 2
+
+ // IMWritePngStrategyRle is the value to use to limit match distances to
+ // one (run-length encoding).
+ IMWritePngStrategyRle = 3
+
+ // IMWritePngStrategyFixed is the value to prevent the use of dynamic
+ // Huffman codes, allowing for a simpler decoder for special applications.
+ IMWritePngStrategyFixed = 4
+)
+
+// IMRead reads an image from a file into a Mat.
+// The flags param is one of the IMReadFlag flags.
+// If the image cannot be read (because of missing file, improper permissions,
+// unsupported or invalid format), the function returns an empty Mat.
+//
+// For further details, please see:
+// http://docs.opencv.org/master/d4/da8/group__imgcodecs.html#ga288b8b3da0892bd651fce07b3bbd3a56
+//
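+// Usage example (a minimal sketch; "image.jpg" is an assumed path):
+//
+//   img := gocv.IMRead("image.jpg", gocv.IMReadColor)
+//   if img.Empty() {
+//       // handle a missing or unreadable file
+//   }
+//   defer img.Close()
+//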
+func IMRead(name string, flags IMReadFlag) Mat {
+ cName := C.CString(name)
+ defer C.free(unsafe.Pointer(cName))
+
+ return newMat(C.Image_IMRead(cName, C.int(flags)))
+}
+
+// IMWrite writes a Mat to an image file.
+//
+// For further details, please see:
+// http://docs.opencv.org/master/d4/da8/group__imgcodecs.html#gabbc7ef1aa2edfaa87772f1202d67e0ce
+//
+func IMWrite(name string, img Mat) bool {
+ cName := C.CString(name)
+ defer C.free(unsafe.Pointer(cName))
+
+ return bool(C.Image_IMWrite(cName, img.p))
+}
+
+// IMWriteWithParams writes a Mat to an image file. With this function you can
+// pass compression parameters.
+//
+// For further details, please see:
+// http://docs.opencv.org/master/d4/da8/group__imgcodecs.html#gabbc7ef1aa2edfaa87772f1202d67e0ce
+//
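+// Usage example (a sketch; the output path and compression level are illustrative):
+//
+//   ok := gocv.IMWriteWithParams("out.png", img, []int{gocv.IMWritePngCompression, 9})
+//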
+func IMWriteWithParams(name string, img Mat, params []int) bool {
+ cName := C.CString(name)
+ defer C.free(unsafe.Pointer(cName))
+
+ cparams := []C.int{}
+
+ for _, v := range params {
+ cparams = append(cparams, C.int(v))
+ }
+
+ paramsVector := C.struct_IntVector{}
+ paramsVector.val = (*C.int)(&cparams[0])
+ paramsVector.length = (C.int)(len(cparams))
+
+ return bool(C.Image_IMWrite_WithParams(cName, img.p, paramsVector))
+}
+
+// FileExt represents a file extension.
+type FileExt string
+
+const (
+ // PNGFileExt is the file extension for PNG.
+ PNGFileExt FileExt = ".png"
+ // JPEGFileExt is the file extension for JPEG.
+ JPEGFileExt FileExt = ".jpg"
+ // GIFFileExt is the file extension for GIF.
+ GIFFileExt FileExt = ".gif"
+)
+
+// IMEncode encodes an image Mat into a memory buffer.
+// This function compresses the image and stores it in the returned memory buffer,
+// using the image format passed in as a file extension string.
+//
+// For further details, please see:
+// http://docs.opencv.org/master/d4/da8/group__imgcodecs.html#ga461f9ac09887e47797a54567df3b8b63
+//
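+// Usage example (a sketch; img is an already-loaded Mat):
+//
+//   buf, err := gocv.IMEncode(gocv.JPEGFileExt, img)
+//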
+func IMEncode(fileExt FileExt, img Mat) (buf []byte, err error) {
+ cfileExt := C.CString(string(fileExt))
+ defer C.free(unsafe.Pointer(cfileExt))
+
+ b := C.Image_IMEncode(cfileExt, img.Ptr())
+ defer C.ByteArray_Release(b)
+ return toGoBytes(b), nil
+}
+
+// IMEncodeWithParams encodes an image Mat into a memory buffer.
+// This function compresses the image and stores it in the returned memory buffer,
+// using the image format passed in as a file extension string.
+//
+// Usage example:
+// buffer, err := gocv.IMEncodeWithParams(gocv.JPEGFileExt, img, []int{gocv.IMWriteJpegQuality, quality})
+//
+// For further details, please see:
+// http://docs.opencv.org/master/d4/da8/group__imgcodecs.html#ga461f9ac09887e47797a54567df3b8b63
+//
+func IMEncodeWithParams(fileExt FileExt, img Mat, params []int) (buf []byte, err error) {
+ cfileExt := C.CString(string(fileExt))
+ defer C.free(unsafe.Pointer(cfileExt))
+
+ cparams := []C.int{}
+
+ for _, v := range params {
+ cparams = append(cparams, C.int(v))
+ }
+
+ paramsVector := C.struct_IntVector{}
+ paramsVector.val = (*C.int)(&cparams[0])
+ paramsVector.length = (C.int)(len(cparams))
+
+ b := C.Image_IMEncode_WithParams(cfileExt, img.Ptr(), paramsVector)
+ defer C.ByteArray_Release(b)
+ return toGoBytes(b), nil
+}
+
+// IMDecode reads an image from a buffer in memory.
+// The function IMDecode reads an image from the specified buffer in memory.
+// If the buffer is too short or contains invalid data, the function
+// returns an empty matrix.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d4/da8/group__imgcodecs.html#ga26a67788faa58ade337f8d28ba0eb19e
+//
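+// Usage example (a sketch; buf is assumed to hold encoded image bytes, e.g. the
+// output of IMEncode):
+//
+//   img, err := gocv.IMDecode(buf, gocv.IMReadColor)
+//   if err != nil {
+//       // handle the decode error
+//   }
+//   defer img.Close()
+//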
+func IMDecode(buf []byte, flags IMReadFlag) (Mat, error) {
+ data, err := toByteArray(buf)
+ if err != nil {
+ return Mat{}, err
+ }
+ return newMat(C.Image_IMDecode(*data, C.int(flags))), nil
+}
diff --git a/vendor/gocv.io/x/gocv/imgcodecs.h b/vendor/gocv.io/x/gocv/imgcodecs.h
new file mode 100644
index 0000000..ac4ad11
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/imgcodecs.h
@@ -0,0 +1,24 @@
+#ifndef _OPENCV3_IMGCODECS_H_
+#define _OPENCV3_IMGCODECS_H_
+
+#include <stdbool.h>
+
+#ifdef __cplusplus
+#include <opencv2/opencv.hpp>
+extern "C" {
+#endif
+
+#include "core.h"
+
+Mat Image_IMRead(const char* filename, int flags);
+bool Image_IMWrite(const char* filename, Mat img);
+bool Image_IMWrite_WithParams(const char* filename, Mat img, IntVector params);
+struct ByteArray Image_IMEncode(const char* fileExt, Mat img);
+struct ByteArray Image_IMEncode_WithParams(const char* fileExt, Mat img, IntVector params);
+Mat Image_IMDecode(ByteArray buf, int flags);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //_OPENCV3_IMGCODECS_H_
diff --git a/vendor/gocv.io/x/gocv/imgproc.cpp b/vendor/gocv.io/x/gocv/imgproc.cpp
new file mode 100644
index 0000000..77b1697
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/imgproc.cpp
@@ -0,0 +1,627 @@
+#include "imgproc.h"
+
+double ArcLength(Contour curve, bool is_closed) {
+ std::vector<cv::Point> pts;
+
+ for (size_t i = 0; i < curve.length; i++) {
+ pts.push_back(cv::Point(curve.points[i].x, curve.points[i].y));
+ }
+
+ return cv::arcLength(pts, is_closed);
+}
+
+Contour ApproxPolyDP(Contour curve, double epsilon, bool closed) {
+ std::vector<cv::Point> curvePts;
+
+ for (size_t i = 0; i < curve.length; i++) {
+ curvePts.push_back(cv::Point(curve.points[i].x, curve.points[i].y));
+ }
+
+ std::vector<cv::Point> approxCurvePts;
+ cv::approxPolyDP(curvePts, approxCurvePts, epsilon, closed);
+
+ int length = approxCurvePts.size();
+ Point* points = new Point[length];
+
+ for (size_t i = 0; i < length; i++) {
+ points[i] = (Point){approxCurvePts[i].x, approxCurvePts[i].y};
+ }
+
+ return (Contour){points, length};
+}
+
+void CvtColor(Mat src, Mat dst, int code) {
+ cv::cvtColor(*src, *dst, code);
+}
+
+void EqualizeHist(Mat src, Mat dst) {
+ cv::equalizeHist(*src, *dst);
+}
+
+void CalcHist(struct Mats mats, IntVector chans, Mat mask, Mat hist, IntVector sz, FloatVector rng, bool acc) {
+ std::vector<cv::Mat> images;
+
+ for (int i = 0; i < mats.length; ++i) {
+ images.push_back(*mats.mats[i]);
+ }
+
+ std::vector<int> channels;
+
+ for (int i = 0, *v = chans.val; i < chans.length; ++v, ++i) {
+ channels.push_back(*v);
+ }
+
+ std::vector<int> histSize;
+
+ for (int i = 0, *v = sz.val; i < sz.length; ++v, ++i) {
+ histSize.push_back(*v);
+ }
+
+ std::vector<float> ranges;
+
+ float* f;
+ int i;
+ for (i = 0, f = rng.val; i < rng.length; ++f, ++i) {
+ ranges.push_back(*f);
+ }
+
+ cv::calcHist(images, channels, *mask, *hist, histSize, ranges, acc);
+}
+
+void CalcBackProject(struct Mats mats, IntVector chans, Mat hist, Mat backProject, FloatVector rng, bool uniform){
+ std::vector<cv::Mat> images;
+
+ for (int i = 0; i < mats.length; ++i) {
+ images.push_back(*mats.mats[i]);
+ }
+
+ std::vector<int> channels;
+ for (int i = 0, *v = chans.val; i < chans.length; ++v, ++i) {
+ channels.push_back(*v);
+ }
+
+ std::vector<float> ranges;
+
+ float* f;
+ int i;
+ for (i = 0, f = rng.val; i < rng.length; ++f, ++i) {
+ ranges.push_back(*f);
+ }
+
+ cv::calcBackProject(images, channels, *hist, *backProject, ranges, uniform);
+}
+
+double CompareHist(Mat hist1, Mat hist2, int method) {
+ return cv::compareHist(*hist1, *hist2, method);
+}
+
+struct RotatedRect FitEllipse(Points points)
+{
+ Point *rpts = new Point[points.length];
+ std::vector<cv::Point> pts;
+
+ for (size_t i = 0; i < points.length; i++)
+ {
+ pts.push_back(cv::Point(points.points[i].x, points.points[i].y));
+ Point pt = {points.points[i].x, points.points[i].y};
+ rpts[i] = pt;
+ }
+
+ cv::RotatedRect bRect = cv::fitEllipse(pts);
+
+ Rect r = {bRect.boundingRect().x, bRect.boundingRect().y, bRect.boundingRect().width, bRect.boundingRect().height};
+ Point centrpt = {int(lroundf(bRect.center.x)), int(lroundf(bRect.center.y))};
+ Size szsz = {int(lroundf(bRect.size.width)), int(lroundf(bRect.size.height))};
+
+ RotatedRect rotRect = {(Contour){rpts, 4}, r, centrpt, szsz, bRect.angle};
+ return rotRect;
+}
+
+void ConvexHull(Contour points, Mat hull, bool clockwise, bool returnPoints) {
+ std::vector<cv::Point> pts;
+
+ for (size_t i = 0; i < points.length; i++) {
+ pts.push_back(cv::Point(points.points[i].x, points.points[i].y));
+ }
+
+ cv::convexHull(pts, *hull, clockwise, returnPoints);
+}
+
+void ConvexityDefects(Contour points, Mat hull, Mat result) {
+ std::vector<cv::Point> pts;
+
+ for (size_t i = 0; i < points.length; i++) {
+ pts.push_back(cv::Point(points.points[i].x, points.points[i].y));
+ }
+
+ cv::convexityDefects(pts, *hull, *result);
+}
+
+void BilateralFilter(Mat src, Mat dst, int d, double sc, double ss) {
+ cv::bilateralFilter(*src, *dst, d, sc, ss);
+}
+
+void Blur(Mat src, Mat dst, Size ps) {
+ cv::Size sz(ps.width, ps.height);
+ cv::blur(*src, *dst, sz);
+}
+
+void BoxFilter(Mat src, Mat dst, int ddepth, Size ps) {
+ cv::Size sz(ps.width, ps.height);
+ cv::boxFilter(*src, *dst, ddepth, sz);
+}
+
+void SqBoxFilter(Mat src, Mat dst, int ddepth, Size ps) {
+ cv::Size sz(ps.width, ps.height);
+ cv::sqrBoxFilter(*src, *dst, ddepth, sz);
+}
+
+void Dilate(Mat src, Mat dst, Mat kernel) {
+ cv::dilate(*src, *dst, *kernel);
+}
+
+void DistanceTransform(Mat src, Mat dst, Mat labels, int distanceType, int maskSize, int labelType) {
+ cv::distanceTransform(*src, *dst, *labels, distanceType, maskSize, labelType);
+}
+
+void Erode(Mat src, Mat dst, Mat kernel) {
+ cv::erode(*src, *dst, *kernel);
+}
+
+void MatchTemplate(Mat image, Mat templ, Mat result, int method, Mat mask) {
+ cv::matchTemplate(*image, *templ, *result, method, *mask);
+}
+
+struct Moment Moments(Mat src, bool binaryImage) {
+ cv::Moments m = cv::moments(*src, binaryImage);
+ Moment mom = {m.m00, m.m10, m.m01, m.m20, m.m11, m.m02, m.m30, m.m21, m.m12, m.m03,
+ m.mu20, m.mu11, m.mu02, m.mu30, m.mu21, m.mu12, m.mu03,
+ m.nu20, m.nu11, m.nu02, m.nu30, m.nu21, m.nu12, m.nu03
+ };
+ return mom;
+}
+
+void PyrDown(Mat src, Mat dst, Size size, int borderType) {
+ cv::Size cvSize(size.width, size.height);
+ cv::pyrDown(*src, *dst, cvSize, borderType);
+}
+
+void PyrUp(Mat src, Mat dst, Size size, int borderType) {
+ cv::Size cvSize(size.width, size.height);
+ cv::pyrUp(*src, *dst, cvSize, borderType);
+}
+
+struct Rect BoundingRect(Contour con) {
+ std::vector<cv::Point> pts;
+
+ for (size_t i = 0; i < con.length; i++) {
+ pts.push_back(cv::Point(con.points[i].x, con.points[i].y));
+ }
+
+ cv::Rect bRect = cv::boundingRect(pts);
+ Rect r = {bRect.x, bRect.y, bRect.width, bRect.height};
+ return r;
+}
+
+void BoxPoints(RotatedRect rect, Mat boxPts){
+ cv::Point2f centerPt(rect.center.x , rect.center.y);
+ cv::Size2f rSize(rect.size.width, rect.size.height);
+ cv::RotatedRect rotatedRectangle(centerPt, rSize, rect.angle);
+ cv::boxPoints(rotatedRectangle, *boxPts);
+}
+
+double ContourArea(Contour con) {
+ std::vector<cv::Point> pts;
+
+ for (size_t i = 0; i < con.length; i++) {
+ pts.push_back(cv::Point(con.points[i].x, con.points[i].y));
+ }
+
+ return cv::contourArea(pts);
+}
+
+struct RotatedRect MinAreaRect(Points points){
+ std::vector<cv::Point> pts;
+
+ for (size_t i = 0; i < points.length; i++) {
+ pts.push_back(cv::Point(points.points[i].x, points.points[i].y));
+ }
+
+ cv::RotatedRect cvrect = cv::minAreaRect(pts);
+
+ Point* rpts = new Point[4];
+ cv::Point2f* pts4 = new cv::Point2f[4];
+ cvrect.points(pts4);
+
+ for (size_t j = 0; j < 4; j++) {
+ Point pt = {int(lroundf(pts4[j].x)), int(lroundf(pts4[j].y))};
+ rpts[j] = pt;
+ }
+
+ delete[] pts4;
+
+ cv::Rect bRect = cvrect.boundingRect();
+ Rect r = {bRect.x, bRect.y, bRect.width, bRect.height};
+ Point centrpt = {int(lroundf(cvrect.center.x)), int(lroundf(cvrect.center.y))};
+ Size szsz = {int(lroundf(cvrect.size.width)), int(lroundf(cvrect.size.height))};
+
+ RotatedRect retrect = {(Contour){rpts, 4}, r, centrpt, szsz, cvrect.angle};
+ return retrect;
+}
+
+void MinEnclosingCircle(Points points, Point2f* center, float* radius){
+ std::vector<cv::Point> pts;
+
+ for (size_t i = 0; i < points.length; i++) {
+ pts.push_back(cv::Point(points.points[i].x, points.points[i].y));
+ }
+
+ cv::Point2f center2f;
+ cv::minEnclosingCircle(pts, center2f, *radius);
+ center->x = center2f.x;
+ center->y = center2f.y;
+}
+
+struct Contours FindContours(Mat src, int mode, int method) {
+ std::vector<std::vector<cv::Point> > contours;
+ cv::findContours(*src, contours, mode, method);
+
+ Contour* points = new Contour[contours.size()];
+
+ for (size_t i = 0; i < contours.size(); i++) {
+ Point* pts = new Point[contours[i].size()];
+
+ for (size_t j = 0; j < contours[i].size(); j++) {
+ Point pt = {contours[i][j].x, contours[i][j].y};
+ pts[j] = pt;
+ }
+
+ points[i] = (Contour){pts, (int)contours[i].size()};
+ }
+
+ Contours cons = {points, (int)contours.size()};
+ return cons;
+}
+
+int ConnectedComponents(Mat src, Mat labels, int connectivity, int ltype, int ccltype){
+ return cv::connectedComponents(*src, *labels, connectivity, ltype, ccltype);
+}
+
+
+int ConnectedComponentsWithStats(Mat src, Mat labels, Mat stats, Mat centroids,
+ int connectivity, int ltype, int ccltype){
+ return cv::connectedComponentsWithStats(*src, *labels, *stats, *centroids, connectivity, ltype, ccltype);
+}
+
+Mat GetStructuringElement(int shape, Size ksize) {
+ cv::Size sz(ksize.width, ksize.height);
+ return new cv::Mat(cv::getStructuringElement(shape, sz));
+}
+
+Scalar MorphologyDefaultBorderValue(){
+ cv::Scalar cs = cv::morphologyDefaultBorderValue();
+ return (Scalar){cs[0],cs[1],cs[2],cs[3]};
+}
+
+void MorphologyEx(Mat src, Mat dst, int op, Mat kernel) {
+ cv::morphologyEx(*src, *dst, op, *kernel);
+}
+
+void MorphologyExWithParams(Mat src, Mat dst, int op, Mat kernel, Point pt, int iterations, int borderType) {
+ cv::Point pt1(pt.x, pt.y);
+ cv::morphologyEx(*src, *dst, op, *kernel, pt1, iterations, borderType);
+}
+
+void GaussianBlur(Mat src, Mat dst, Size ps, double sX, double sY, int bt) {
+ cv::Size sz(ps.width, ps.height);
+ cv::GaussianBlur(*src, *dst, sz, sX, sY, bt);
+}
+
+void Laplacian(Mat src, Mat dst, int dDepth, int kSize, double scale, double delta,
+ int borderType) {
+ cv::Laplacian(*src, *dst, dDepth, kSize, scale, delta, borderType);
+}
+
+void Scharr(Mat src, Mat dst, int dDepth, int dx, int dy, double scale, double delta,
+ int borderType) {
+ cv::Scharr(*src, *dst, dDepth, dx, dy, scale, delta, borderType);
+}
+
+void MedianBlur(Mat src, Mat dst, int ksize) {
+ cv::medianBlur(*src, *dst, ksize);
+}
+
+void Canny(Mat src, Mat edges, double t1, double t2) {
+ cv::Canny(*src, *edges, t1, t2);
+}
+
+void CornerSubPix(Mat img, Mat corners, Size winSize, Size zeroZone, TermCriteria criteria) {
+ cv::Size wsz(winSize.width, winSize.height);
+ cv::Size zsz(zeroZone.width, zeroZone.height);
+ cv::cornerSubPix(*img, *corners, wsz, zsz, *criteria);
+}
+
+void GoodFeaturesToTrack(Mat img, Mat corners, int maxCorners, double quality, double minDist) {
+ cv::goodFeaturesToTrack(*img, *corners, maxCorners, quality, minDist);
+}
+
+void GrabCut(Mat img, Mat mask, Rect r, Mat bgdModel, Mat fgdModel, int iterCount, int mode) {
+ cv::Rect cvRect = cv::Rect(r.x, r.y, r.width, r.height);
+ cv::grabCut(*img, *mask, cvRect, *bgdModel, *fgdModel, iterCount, mode);
+}
+
+void HoughCircles(Mat src, Mat circles, int method, double dp, double minDist) {
+ cv::HoughCircles(*src, *circles, method, dp, minDist);
+}
+
+void HoughCirclesWithParams(Mat src, Mat circles, int method, double dp, double minDist,
+ double param1, double param2, int minRadius, int maxRadius) {
+ cv::HoughCircles(*src, *circles, method, dp, minDist, param1, param2, minRadius, maxRadius);
+}
+
+void HoughLines(Mat src, Mat lines, double rho, double theta, int threshold) {
+ cv::HoughLines(*src, *lines, rho, theta, threshold);
+}
+
+void HoughLinesP(Mat src, Mat lines, double rho, double theta, int threshold) {
+ cv::HoughLinesP(*src, *lines, rho, theta, threshold);
+}
+
+void HoughLinesPWithParams(Mat src, Mat lines, double rho, double theta, int threshold, double minLineLength, double maxLineGap) {
+ cv::HoughLinesP(*src, *lines, rho, theta, threshold, minLineLength, maxLineGap);
+}
+
+void HoughLinesPointSet(Mat points, Mat lines, int linesMax, int threshold,
+ double minRho, double maxRho, double rhoStep,
+ double minTheta, double maxTheta, double thetaStep) {
+ cv::HoughLinesPointSet(*points, *lines, linesMax, threshold,
+ minRho, maxRho, rhoStep, minTheta, maxTheta, thetaStep );
+}
+
+void Integral(Mat src, Mat sum, Mat sqsum, Mat tilted) {
+ cv::integral(*src, *sum, *sqsum, *tilted);
+}
+
+void Threshold(Mat src, Mat dst, double thresh, double maxvalue, int typ) {
+ cv::threshold(*src, *dst, thresh, maxvalue, typ);
+}
+
+void AdaptiveThreshold(Mat src, Mat dst, double maxValue, int adaptiveMethod, int thresholdType,
+ int blockSize, double c) {
+ cv::adaptiveThreshold(*src, *dst, maxValue, adaptiveMethod, thresholdType, blockSize, c);
+}
+
+void ArrowedLine(Mat img, Point pt1, Point pt2, Scalar color, int thickness) {
+ cv::Point p1(pt1.x, pt1.y);
+ cv::Point p2(pt2.x, pt2.y);
+ cv::Scalar c = cv::Scalar(color.val1, color.val2, color.val3, color.val4);
+
+ cv::arrowedLine(*img, p1, p2, c, thickness);
+}
+
+bool ClipLine(Size imgSize, Point pt1, Point pt2) {
+ cv::Size sz(imgSize.width, imgSize.height);
+ cv::Point p1(pt1.x, pt1.y);
+ cv::Point p2(pt2.x, pt2.y);
+
+ return cv::clipLine(sz, p1, p2);
+}
+
+void Circle(Mat img, Point center, int radius, Scalar color, int thickness) {
+ cv::Point p1(center.x, center.y);
+ cv::Scalar c = cv::Scalar(color.val1, color.val2, color.val3, color.val4);
+
+ cv::circle(*img, p1, radius, c, thickness);
+}
+
+void Ellipse(Mat img, Point center, Point axes, double angle, double
+ startAngle, double endAngle, Scalar color, int thickness) {
+ cv::Point p1(center.x, center.y);
+ cv::Point p2(axes.x, axes.y);
+ cv::Scalar c = cv::Scalar(color.val1, color.val2, color.val3, color.val4);
+
+ cv::ellipse(*img, p1, p2, angle, startAngle, endAngle, c, thickness);
+}
+
+void Line(Mat img, Point pt1, Point pt2, Scalar color, int thickness) {
+ cv::Point p1(pt1.x, pt1.y);
+ cv::Point p2(pt2.x, pt2.y);
+ cv::Scalar c = cv::Scalar(color.val1, color.val2, color.val3, color.val4);
+
+ cv::line(*img, p1, p2, c, thickness);
+}
+
+void Rectangle(Mat img, Rect r, Scalar color, int thickness) {
+ cv::Scalar c = cv::Scalar(color.val1, color.val2, color.val3, color.val4);
+ cv::rectangle(
+ *img,
+ cv::Point(r.x, r.y),
+ cv::Point(r.x + r.width, r.y + r.height),
+ c,
+ thickness,
+ cv::LINE_AA
+ );
+}
+
+void FillPoly(Mat img, Contours points, Scalar color) {
+ std::vector<std::vector<cv::Point> > pts;
+
+ for (size_t i = 0; i < points.length; i++) {
+ Contour contour = points.contours[i];
+
+ std::vector<cv::Point> cntr;
+
+ for (size_t i = 0; i < contour.length; i++) {
+ cntr.push_back(cv::Point(contour.points[i].x, contour.points[i].y));
+ }
+
+ pts.push_back(cntr);
+ }
+
+ cv::Scalar c = cv::Scalar(color.val1, color.val2, color.val3, color.val4);
+
+ cv::fillPoly(*img, pts, c);
+}
+
+struct Size GetTextSize(const char* text, int fontFace, double fontScale, int thickness) {
+ cv::Size sz = cv::getTextSize(text, fontFace, fontScale, thickness, NULL);
+ Size size = {sz.width, sz.height};
+ return size;
+}
+
+void PutText(Mat img, const char* text, Point org, int fontFace, double fontScale,
+ Scalar color, int thickness) {
+ cv::Point pt(org.x, org.y);
+ cv::Scalar c = cv::Scalar(color.val1, color.val2, color.val3, color.val4);
+ cv::putText(*img, text, pt, fontFace, fontScale, c, thickness);
+}
+
+void PutTextWithParams(Mat img, const char* text, Point org, int fontFace, double fontScale,
+ Scalar color, int thickness, int lineType, bool bottomLeftOrigin) {
+ cv::Point pt(org.x, org.y);
+ cv::Scalar c = cv::Scalar(color.val1, color.val2, color.val3, color.val4);
+ cv::putText(*img, text, pt, fontFace, fontScale, c, thickness, lineType, bottomLeftOrigin);
+}
+
+void Resize(Mat src, Mat dst, Size dsize, double fx, double fy, int interp) {
+ cv::Size sz(dsize.width, dsize.height);
+ cv::resize(*src, *dst, sz, fx, fy, interp);
+}
+
+void GetRectSubPix(Mat src, Size patchSize, Point center, Mat dst) {
+ cv::Size sz(patchSize.width, patchSize.height);
+ cv::Point pt(center.x, center.y);
+ cv::getRectSubPix(*src, sz, pt, *dst);
+}
+
+Mat GetRotationMatrix2D(Point center, double angle, double scale) {
+ cv::Point pt(center.x, center.y);
+ return new cv::Mat(cv::getRotationMatrix2D(pt, angle, scale));
+}
+
+void WarpAffine(Mat src, Mat dst, Mat m, Size dsize) {
+ cv::Size sz(dsize.width, dsize.height);
+ cv::warpAffine(*src, *dst, *m, sz);
+}
+
+void WarpAffineWithParams(Mat src, Mat dst, Mat rot_mat, Size dsize, int flags, int borderMode,
+ Scalar borderValue) {
+ cv::Size sz(dsize.width, dsize.height);
+ cv::Scalar c = cv::Scalar(borderValue.val1, borderValue.val2, borderValue.val3, borderValue.val4);
+ cv::warpAffine(*src, *dst, *rot_mat, sz, flags, borderMode, c);
+}
+
+void WarpPerspective(Mat src, Mat dst, Mat m, Size dsize) {
+ cv::Size sz(dsize.width, dsize.height);
+ cv::warpPerspective(*src, *dst, *m, sz);
+}
+
+void Watershed(Mat image, Mat markers) {
+ cv::watershed(*image, *markers);
+}
+
+void ApplyColorMap(Mat src, Mat dst, int colormap) {
+ cv::applyColorMap(*src, *dst, colormap);
+}
+
+void ApplyCustomColorMap(Mat src, Mat dst, Mat colormap) {
+ cv::applyColorMap(*src, *dst, *colormap);
+}
+
+Mat GetPerspectiveTransform(Contour src, Contour dst) {
+ std::vector<cv::Point2f> src_pts;
+
+ for (size_t i = 0; i < src.length; i++) {
+ src_pts.push_back(cv::Point2f(src.points[i].x, src.points[i].y));
+ }
+
+ std::vector<cv::Point2f> dst_pts;
+
+ for (size_t i = 0; i < dst.length; i++) {
+ dst_pts.push_back(cv::Point2f(dst.points[i].x, dst.points[i].y));
+ }
+
+ return new cv::Mat(cv::getPerspectiveTransform(src_pts, dst_pts));
+}
+
+void DrawContours(Mat src, Contours contours, int contourIdx, Scalar color, int thickness) {
+ std::vector<std::vector<cv::Point> > cntrs;
+
+ for (size_t i = 0; i < contours.length; i++) {
+ Contour contour = contours.contours[i];
+
+ std::vector<cv::Point> cntr;
+
+ for (size_t i = 0; i < contour.length; i++) {
+ cntr.push_back(cv::Point(contour.points[i].x, contour.points[i].y));
+ }
+
+ cntrs.push_back(cntr);
+ }
+
+ cv::Scalar c = cv::Scalar(color.val1, color.val2, color.val3, color.val4);
+ cv::drawContours(*src, cntrs, contourIdx, c, thickness);
+}
+
+void Sobel(Mat src, Mat dst, int ddepth, int dx, int dy, int ksize, double scale, double delta, int borderType) {
+ cv::Sobel(*src, *dst, ddepth, dx, dy, ksize, scale, delta, borderType);
+}
+
+void SpatialGradient(Mat src, Mat dx, Mat dy, int ksize, int borderType) {
+ cv::spatialGradient(*src, *dx, *dy, ksize, borderType);
+}
+
+
+void Remap(Mat src, Mat dst, Mat map1, Mat map2, int interpolation, int borderMode, Scalar borderValue) {
+ cv::Scalar c = cv::Scalar(borderValue.val1, borderValue.val2, borderValue.val3, borderValue.val4);
+ cv::remap(*src, *dst, *map1, *map2, interpolation, borderMode, c);
+}
+
+void Filter2D(Mat src, Mat dst, int ddepth, Mat kernel, Point anchor, double delta, int borderType) {
+ cv::Point anchorPt(anchor.x, anchor.y);
+ cv::filter2D(*src, *dst, ddepth, *kernel, anchorPt, delta, borderType);
+}
+
+void SepFilter2D(Mat src, Mat dst, int ddepth, Mat kernelX, Mat kernelY, Point anchor, double delta, int borderType) {
+ cv::Point anchorPt(anchor.x, anchor.y);
+ cv::sepFilter2D(*src, *dst, ddepth, *kernelX, *kernelY, anchorPt, delta, borderType);
+}
+
+void LogPolar(Mat src, Mat dst, Point center, double m, int flags) {
+ cv::Point2f centerPt(center.x, center.y);
+ cv::logPolar(*src, *dst, centerPt, m, flags);
+}
+
+void FitLine(Contour points, Mat line, int distType, double param, double reps, double aeps) {
+ std::vector<cv::Point> pts;
+ for (size_t i = 0; i < points.length; i++) {
+ pts.push_back(cv::Point(points.points[i].x, points.points[i].y));
+ }
+ cv::fitLine(pts, *line, distType, param, reps, aeps);
+}
+
+void LinearPolar(Mat src, Mat dst, Point center, double maxRadius, int flags) {
+ cv::Point2f centerPt(center.x, center.y);
+ cv::linearPolar(*src, *dst, centerPt, maxRadius, flags);
+}
+
+CLAHE CLAHE_Create() {
+ return new cv::Ptr<cv::CLAHE>(cv::createCLAHE());
+}
+
+CLAHE CLAHE_CreateWithParams(double clipLimit, Size tileGridSize) {
+ cv::Size sz(tileGridSize.width, tileGridSize.height);
+ return new cv::Ptr<cv::CLAHE>(cv::createCLAHE(clipLimit, sz));
+}
+
+void CLAHE_Close(CLAHE c) {
+ delete c;
+}
+
+void CLAHE_Apply(CLAHE c, Mat src, Mat dst) {
+ (*c)->apply(*src, *dst);
+}
+
+void InvertAffineTransform(Mat src, Mat dst) {
+ cv::invertAffineTransform(*src, *dst);
+}
diff --git a/vendor/gocv.io/x/gocv/imgproc.go b/vendor/gocv.io/x/gocv/imgproc.go
new file mode 100644
index 0000000..7102ab1
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/imgproc.go
@@ -0,0 +1,1790 @@
+package gocv
+
+/*
+#include <stdlib.h>
+#include "imgproc.h"
+*/
+import "C"
+import (
+ "image"
+ "image/color"
+ "reflect"
+ "unsafe"
+)
+
+func getPoints(pts *C.Point, l int) []C.Point {
+ h := &reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(pts)),
+ Len: l,
+ Cap: l,
+ }
+ return *(*[]C.Point)(unsafe.Pointer(h))
+}
+
+// ArcLength calculates a contour perimeter or a curve length.
+//
+// For further details, please see:
+//
+// https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#ga8d26483c636be6b35c3ec6335798a47c
+//
+func ArcLength(curve []image.Point, isClosed bool) float64 {
+ cPoints := toCPoints(curve)
+ arcLength := C.ArcLength(cPoints, C.bool(isClosed))
+ return float64(arcLength)
+}
+
+// ApproxPolyDP approximates a polygonal curve(s) with the specified precision.
+//
+// For further details, please see:
+//
+// https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#ga0012a5fdaea70b8a9970165d98722b4c
+//
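+// Usage example (a sketch; contour is a []image.Point, e.g. one contour
+// returned by FindContours, and the 1% epsilon is illustrative):
+//
+//   epsilon := 0.01 * gocv.ArcLength(contour, true)
+//   simplified := gocv.ApproxPolyDP(contour, epsilon, true)
+//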
+func ApproxPolyDP(curve []image.Point, epsilon float64, closed bool) (approxCurve []image.Point) {
+ cCurve := toCPoints(curve)
+
+ cApproxCurve := C.ApproxPolyDP(cCurve, C.double(epsilon), C.bool(closed))
+ defer C.Points_Close(cApproxCurve)
+
+ cApproxCurvePoints := getPoints(cApproxCurve.points, int(cApproxCurve.length))
+
+ approxCurve = make([]image.Point, cApproxCurve.length)
+ for i, cPoint := range cApproxCurvePoints {
+ approxCurve[i] = image.Pt(int(cPoint.x), int(cPoint.y))
+ }
+ return approxCurve
+}
+
+// ConvexHull finds the convex hull of a point set.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#ga014b28e56cb8854c0de4a211cb2be656
+//
+func ConvexHull(points []image.Point, hull *Mat, clockwise bool, returnPoints bool) {
+ cPoints := toCPoints(points)
+ C.ConvexHull(cPoints, hull.p, C.bool(clockwise), C.bool(returnPoints))
+}
+
+// ConvexityDefects finds the convexity defects of a contour.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#gada4437098113fd8683c932e0567f47ba
+//
+func ConvexityDefects(contour []image.Point, hull Mat, result *Mat) {
+ cPoints := toCPoints(contour)
+ C.ConvexityDefects(cPoints, hull.p, result.p)
+}
+
+// CvtColor converts an image from one color space to another.
+// It converts the src Mat image to the dst Mat using the
+// code param containing the desired ColorConversionCode color space.
+//
+// For further details, please see:
+// http://docs.opencv.org/master/d7/d1b/group__imgproc__misc.html#ga4e0972be5de079fed4e3a10e24ef5ef0
+//
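+// Usage example (a sketch converting BGR to grayscale; ColorBGRToGray is the
+// conversion code defined elsewhere in this package):
+//
+//   gray := gocv.NewMat()
+//   defer gray.Close()
+//   gocv.CvtColor(img, &gray, gocv.ColorBGRToGray)
+//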
+func CvtColor(src Mat, dst *Mat, code ColorConversionCode) {
+ C.CvtColor(src.p, dst.p, C.int(code))
+}
+
+// EqualizeHist normalizes the brightness and increases the contrast of the image.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d6/dc7/group__imgproc__hist.html#ga7e54091f0c937d49bf84152a16f76d6e
+func EqualizeHist(src Mat, dst *Mat) {
+ C.EqualizeHist(src.p, dst.p)
+}
+
+// CalcHist calculates a histogram of a set of images.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d6/dc7/group__imgproc__hist.html#ga6ca1876785483836f72a77ced8ea759a
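+//
+// Usage example (a sketch computing a 256-bin histogram of a single-channel
+// Mat named gray; the empty mask means "use all pixels"):
+//
+//   hist := gocv.NewMat()
+//   defer hist.Close()
+//   mask := gocv.NewMat()
+//   defer mask.Close()
+//   gocv.CalcHist([]gocv.Mat{gray}, []int{0}, mask, &hist, []int{256}, []float64{0, 256}, false)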
+func CalcHist(src []Mat, channels []int, mask Mat, hist *Mat, size []int, ranges []float64, acc bool) {
+ cMatArray := make([]C.Mat, len(src))
+ for i, r := range src {
+ cMatArray[i] = r.p
+ }
+
+ cMats := C.struct_Mats{
+ mats: (*C.Mat)(&cMatArray[0]),
+ length: C.int(len(src)),
+ }
+
+ chansInts := []C.int{}
+ for _, v := range channels {
+ chansInts = append(chansInts, C.int(v))
+ }
+ chansVector := C.struct_IntVector{}
+ chansVector.val = (*C.int)(&chansInts[0])
+ chansVector.length = (C.int)(len(chansInts))
+
+ sizeInts := []C.int{}
+ for _, v := range size {
+ sizeInts = append(sizeInts, C.int(v))
+ }
+ sizeVector := C.struct_IntVector{}
+ sizeVector.val = (*C.int)(&sizeInts[0])
+ sizeVector.length = (C.int)(len(sizeInts))
+
+ rangeFloats := []C.float{}
+ for _, v := range ranges {
+ rangeFloats = append(rangeFloats, C.float(v))
+ }
+ rangeVector := C.struct_FloatVector{}
+ rangeVector.val = (*C.float)(&rangeFloats[0])
+ rangeVector.length = (C.int)(len(rangeFloats))
+
+ C.CalcHist(cMats, chansVector, mask.p, hist.p, sizeVector, rangeVector, C.bool(acc))
+}
+
+// CalcBackProject calculates the back projection of a histogram.
+//
+// For further details, please see:
+// https://docs.opencv.org/3.4/d6/dc7/group__imgproc__hist.html#ga3a0af640716b456c3d14af8aee12e3ca
+func CalcBackProject(src []Mat, channels []int, hist Mat, backProject *Mat, ranges []float64, uniform bool) {
+ cMatArray := make([]C.Mat, len(src))
+ for i, r := range src {
+ cMatArray[i] = r.p
+ }
+
+ cMats := C.struct_Mats{
+ mats: (*C.Mat)(&cMatArray[0]),
+ length: C.int(len(src)),
+ }
+
+ chansInts := []C.int{}
+ for _, v := range channels {
+ chansInts = append(chansInts, C.int(v))
+ }
+ chansVector := C.struct_IntVector{}
+ chansVector.val = (*C.int)(&chansInts[0])
+ chansVector.length = (C.int)(len(chansInts))
+
+ rangeFloats := []C.float{}
+ for _, v := range ranges {
+ rangeFloats = append(rangeFloats, C.float(v))
+ }
+ rangeVector := C.struct_FloatVector{}
+ rangeVector.val = (*C.float)(&rangeFloats[0])
+ rangeVector.length = (C.int)(len(rangeFloats))
+
+ C.CalcBackProject(cMats, chansVector, hist.p, backProject.p, rangeVector, C.bool(uniform))
+}
+
+// HistCompMethod is the method for Histogram comparison
+// For more information, see https://docs.opencv.org/master/d6/dc7/group__imgproc__hist.html#ga994f53817d621e2e4228fc646342d386
+type HistCompMethod int
+
+const (
+ // HistCmpCorrel calculates the Correlation
+ HistCmpCorrel HistCompMethod = 0
+
+ // HistCmpChiSqr calculates the Chi-Square
+ HistCmpChiSqr = 1
+
+ // HistCmpIntersect calculates the Intersection
+ HistCmpIntersect = 2
+
+ // HistCmpBhattacharya applies the HistCmpBhattacharya by calculating the Bhattacharya distance.
+ HistCmpBhattacharya = 3
+
+ // HistCmpHellinger applies the HistCmpBhattacharya comparison. It is a synonym to HistCmpBhattacharya.
+ HistCmpHellinger = HistCmpBhattacharya
+
+ // HistCmpChiSqrAlt applies the Alternative Chi-Square (regularly used for texture comparison).
+ HistCmpChiSqrAlt = 4
+
+ // HistCmpKlDiv applies the Kullback-Leibler divergence comparison.
+ HistCmpKlDiv = 5
+)
+
+// CompareHist compares two histograms.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d6/dc7/group__imgproc__hist.html#gaf4190090efa5c47cb367cf97a9a519bd
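+//
+// Usage example (a sketch; hist1 and hist2 are histograms produced by CalcHist):
+//
+//   similarity := gocv.CompareHist(hist1, hist2, gocv.HistCmpCorrel)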
+func CompareHist(hist1 Mat, hist2 Mat, method HistCompMethod) float32 {
+ return float32(C.CompareHist(hist1.p, hist2.p, C.int(method)))
+}
+
+// ClipLine clips the line against the image rectangle.
+// For further details, please see:
+// https://docs.opencv.org/master/d6/d6e/group__imgproc__draw.html#gaf483cb46ad6b049bc35ec67052ef1c2c
+//
+func ClipLine(imgSize image.Point, pt1 image.Point, pt2 image.Point) bool {
+ pSize := C.struct_Size{
+ width: C.int(imgSize.X),
+ height: C.int(imgSize.Y),
+ }
+
+ rPt1 := C.struct_Point{
+ x: C.int(pt1.X),
+ y: C.int(pt1.Y),
+ }
+
+ rPt2 := C.struct_Point{
+ x: C.int(pt2.X),
+ y: C.int(pt2.Y),
+ }
+
+ return bool(C.ClipLine(pSize, rPt1, rPt2))
+}
+
+// BilateralFilter applies a bilateral filter to an image.
+//
+// Bilateral filtering is described here:
+// http://www.dai.ed.ac.uk/CVonline/LOCAL_COPIES/MANDUCHI1/Bilateral_Filtering.html
+//
+// BilateralFilter can reduce unwanted noise very well while keeping edges
+// fairly sharp. However, it is very slow compared to most filters.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#ga9d7064d478c95d60003cf839430737ed
+//
+func BilateralFilter(src Mat, dst *Mat, diameter int, sigmaColor float64, sigmaSpace float64) {
+ C.BilateralFilter(src.p, dst.p, C.int(diameter), C.double(sigmaColor), C.double(sigmaSpace))
+}
+
+// Blur blurs an image Mat using a normalized box filter.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#ga8c45db9afe636703801b0b2e440fce37
+//
+func Blur(src Mat, dst *Mat, ksize image.Point) {
+ pSize := C.struct_Size{
+ width: C.int(ksize.X),
+ height: C.int(ksize.Y),
+ }
+
+ C.Blur(src.p, dst.p, pSize)
+}
+
+// BoxFilter blurs an image using the box filter.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#gad533230ebf2d42509547d514f7d3fbc3
+//
+func BoxFilter(src Mat, dst *Mat, depth int, ksize image.Point) {
+ pSize := C.struct_Size{
+ height: C.int(ksize.X),
+ width: C.int(ksize.Y),
+ }
+ C.BoxFilter(src.p, dst.p, C.int(depth), pSize)
+}
+
+// SqBoxFilter calculates the normalized sum of squares of the pixel values overlapping the filter.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#ga045028184a9ef65d7d2579e5c4bff6c0
+//
+func SqBoxFilter(src Mat, dst *Mat, depth int, ksize image.Point) {
+ pSize := C.struct_Size{
+ height: C.int(ksize.X),
+ width: C.int(ksize.Y),
+ }
+ C.SqBoxFilter(src.p, dst.p, C.int(depth), pSize)
+}
+
+// Dilate dilates an image by using a specific structuring element.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#ga4ff0f3318642c4f469d0e11f242f3b6c
+//
+func Dilate(src Mat, dst *Mat, kernel Mat) {
+ C.Dilate(src.p, dst.p, kernel.p)
+}
+
+// DistanceTransformLabelTypes are the types of the DistanceTransform algorithm flag
+type DistanceTransformLabelTypes int
+
+const (
+ // DistanceLabelCComp assigns the same label to each connected component of zeros in the source image
+ // (as well as all the non-zero pixels closest to the connected component).
+ DistanceLabelCComp DistanceTransformLabelTypes = 0
+
+ // DistanceLabelPixel assigns its own label to each zero pixel (and all the non-zero pixels closest to it).
+ DistanceLabelPixel
+)
+
+// DistanceTransformMasks are the mask sizes for distance transform
+type DistanceTransformMasks int
+
+const (
+ // DistanceMask3 is a mask of size 3
+ DistanceMask3 DistanceTransformMasks = 0
+
+ // DistanceMask5 is a mask of size 5
+ DistanceMask5
+
+ // DistanceMaskPrecise is not currently supported
+ DistanceMaskPrecise
+)
+
+// DistanceTransform Calculates the distance to the closest zero pixel for each pixel of the source image.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d7/d1b/group__imgproc__misc.html#ga8a0b7fdfcb7a13dde018988ba3a43042
+//
+func DistanceTransform(src Mat, dst *Mat, labels *Mat, distType DistanceTypes, maskSize DistanceTransformMasks, labelType DistanceTransformLabelTypes) {
+ C.DistanceTransform(src.p, dst.p, labels.p, C.int(distType), C.int(maskSize), C.int(labelType))
+}
+
+// Erode erodes an image by using a specific structuring element.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#gaeb1e0c1033e3f6b891a25d0511362aeb
+//
+func Erode(src Mat, dst *Mat, kernel Mat) {
+ C.Erode(src.p, dst.p, kernel.p)
+}
+
+// RetrievalMode is the mode of the contour retrieval algorithm.
+type RetrievalMode int
+
+const (
+ // RetrievalExternal retrieves only the extreme outer contours.
+ // It sets `hierarchy[i][2]=hierarchy[i][3]=-1` for all the contours.
+ RetrievalExternal RetrievalMode = 0
+
+ // RetrievalList retrieves all of the contours without establishing
+ // any hierarchical relationships.
+ RetrievalList = 1
+
+ // RetrievalCComp retrieves all of the contours and organizes them into
+ // a two-level hierarchy. At the top level, there are external boundaries
+ // of the components. At the second level, there are boundaries of the holes.
+ // If there is another contour inside a hole of a connected component, it
+ // is still put at the top level.
+ RetrievalCComp = 2
+
+ // RetrievalTree retrieves all of the contours and reconstructs a full
+ // hierarchy of nested contours.
+ RetrievalTree = 3
+
+ // RetrievalFloodfill lacks a description in the original header.
+ RetrievalFloodfill = 4
+)
+
+// ContourApproximationMode is the mode of the contour approximation algorithm.
+type ContourApproximationMode int
+
+const (
+ // ChainApproxNone stores absolutely all the contour points. That is,
+ // any 2 subsequent points (x1,y1) and (x2,y2) of the contour will be
+ // either horizontal, vertical or diagonal neighbors, that is,
+ // max(abs(x1-x2),abs(y2-y1))==1.
+ ChainApproxNone ContourApproximationMode = 1
+
+ // ChainApproxSimple compresses horizontal, vertical, and diagonal segments
+ // and leaves only their end points.
+ // For example, an up-right rectangular contour is encoded with 4 points.
+ ChainApproxSimple = 2
+
+ // ChainApproxTC89L1 applies one of the flavors of the Teh-Chin chain
+ // approximation algorithms.
+ ChainApproxTC89L1 = 3
+
+ // ChainApproxTC89KCOS applies one of the flavors of the Teh-Chin chain
+ // approximation algorithms.
+ ChainApproxTC89KCOS = 4
+)
+
+// BoundingRect calculates the up-right bounding rectangle of a point set.
+//
+// For further details, please see:
+// https://docs.opencv.org/3.3.0/d3/dc0/group__imgproc__shape.html#gacb413ddce8e48ff3ca61ed7cf626a366
+//
+func BoundingRect(contour []image.Point) image.Rectangle {
+ cContour := toCPoints(contour)
+ r := C.BoundingRect(cContour)
+ rect := image.Rect(int(r.x), int(r.y), int(r.x+r.width), int(r.y+r.height))
+ return rect
+}
+
+// BoxPoints finds the four vertices of a rotated rect. Useful to draw the rotated rectangle.
+//
+// For further details, please see:
+// https://docs.opencv.org/3.3.0/d3/dc0/group__imgproc__shape.html#gaf78d467e024b4d7936cf9397185d2f5c
+//
+func BoxPoints(rect RotatedRect, pts *Mat) {
+
+ rPoints := toCPoints(rect.Contour)
+
+ rRect := C.struct_Rect{
+ x: C.int(rect.BoundingRect.Min.X),
+ y: C.int(rect.BoundingRect.Min.Y),
+ width: C.int(rect.BoundingRect.Max.X - rect.BoundingRect.Min.X),
+ height: C.int(rect.BoundingRect.Max.Y - rect.BoundingRect.Min.Y),
+ }
+
+ rCenter := C.struct_Point{
+ x: C.int(rect.Center.X),
+ y: C.int(rect.Center.Y),
+ }
+
+ rSize := C.struct_Size{
+ width: C.int(rect.Width),
+ height: C.int(rect.Height),
+ }
+
+ r := C.struct_RotatedRect{
+ pts: rPoints,
+ boundingRect: rRect,
+ center: rCenter,
+ size: rSize,
+ angle: C.double(rect.Angle),
+ }
+
+ C.BoxPoints(r, pts.p)
+}
+
+// ContourArea calculates a contour area.
+//
+// For further details, please see:
+// https://docs.opencv.org/3.3.0/d3/dc0/group__imgproc__shape.html#ga2c759ed9f497d4a618048a2f56dc97f1
+//
+func ContourArea(contour []image.Point) float64 {
+ cContour := toCPoints(contour)
+ result := C.ContourArea(cContour)
+ return float64(result)
+}
+
+type RotatedRect struct {
+ Contour []image.Point
+ BoundingRect image.Rectangle
+ Center image.Point
+ Width int
+ Height int
+ Angle float64
+}
+
+// toPoints converts a C.Contour to a []image.Point slice
+//
+func toPoints(points C.Contour) []image.Point {
+ pArray := points.points
+ pLength := int(points.length)
+
+ pHdr := reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(pArray)),
+ Len: pLength,
+ Cap: pLength,
+ }
+ sPoints := *(*[]C.Point)(unsafe.Pointer(&pHdr))
+
+ points4 := make([]image.Point, pLength)
+ for j, pt := range sPoints {
+ points4[j] = image.Pt(int(pt.x), int(pt.y))
+ }
+ return points4
+}
+
+// MinAreaRect finds a rotated rectangle of the minimum area enclosing the input 2D point set.
+//
+// For further details, please see:
+// https://docs.opencv.org/3.3.0/d3/dc0/group__imgproc__shape.html#ga3d476a3417130ae5154aea421ca7ead9
+//
+func MinAreaRect(points []image.Point) RotatedRect {
+ cPoints := toCPoints(points)
+ result := C.MinAreaRect(cPoints)
+
+ defer C.Points_Close(result.pts)
+ return RotatedRect{
+ Contour: toPoints(result.pts),
+ BoundingRect: image.Rect(int(result.boundingRect.x), int(result.boundingRect.y), int(result.boundingRect.x)+int(result.boundingRect.width), int(result.boundingRect.y)+int(result.boundingRect.height)),
+ Center: image.Pt(int(result.center.x), int(result.center.y)),
+ Width: int(result.size.width),
+ Height: int(result.size.height),
+ Angle: float64(result.angle),
+ }
+}
+
+// FitEllipse fits an ellipse around a set of 2D points.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#gaf259efaad93098103d6c27b9e4900ffa
+//
+func FitEllipse(points []image.Point) RotatedRect {
+ cPoints := toCPoints(points)
+ cRect := C.FitEllipse(cPoints)
+ defer C.Points_Close(cRect.pts)
+
+ return RotatedRect{
+ Contour: toPoints(cRect.pts),
+ BoundingRect: image.Rect(int(cRect.boundingRect.x), int(cRect.boundingRect.y), int(cRect.boundingRect.x)+int(cRect.boundingRect.width), int(cRect.boundingRect.y)+int(cRect.boundingRect.height)),
+ Center: image.Pt(int(cRect.center.x), int(cRect.center.y)),
+ Width: int(cRect.size.width),
+ Height: int(cRect.size.height),
+ Angle: float64(cRect.angle),
+ }
+
+}
+
+// MinEnclosingCircle finds a circle of the minimum area enclosing the input 2D point set.
+//
+// For further details, please see:
+// https://docs.opencv.org/3.4/d3/dc0/group__imgproc__shape.html#ga8ce13c24081bbc7151e9326f412190f1
+func MinEnclosingCircle(points []image.Point) (x, y, radius float32) {
+ cPoints := toCPoints(points)
+ cCenterPoint := C.struct_Point2f{}
+ var cRadius C.float
+ C.MinEnclosingCircle(cPoints, &cCenterPoint, &cRadius)
+ x, y = float32(cCenterPoint.x), float32(cCenterPoint.y)
+ radius = float32(cRadius)
+ return x, y, radius
+}
+
+// FindContours finds contours in a binary image.
+//
+// For further details, please see:
+// https://docs.opencv.org/3.3.0/d3/dc0/group__imgproc__shape.html#ga17ed9f5d79ae97bd4c7cf18403e1689a
+//
+func FindContours(src Mat, mode RetrievalMode, method ContourApproximationMode) [][]image.Point {
+ ret := C.FindContours(src.p, C.int(mode), C.int(method))
+ defer C.Contours_Close(ret)
+
+ cArray := ret.contours
+ cLength := int(ret.length)
+ cHdr := reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(cArray)),
+ Len: cLength,
+ Cap: cLength,
+ }
+ sContours := *(*[]C.Points)(unsafe.Pointer(&cHdr))
+
+ contours := make([][]image.Point, cLength)
+ for i, pts := range sContours {
+ pArray := pts.points
+ pLength := int(pts.length)
+ pHdr := reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(pArray)),
+ Len: pLength,
+ Cap: pLength,
+ }
+ sPoints := *(*[]C.Point)(unsafe.Pointer(&pHdr))
+
+ points := make([]image.Point, pLength)
+ for j, pt := range sPoints {
+ points[j] = image.Pt(int(pt.x), int(pt.y))
+ }
+ contours[i] = points
+ }
+
+ return contours
+}
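+
+// A usage sketch for this vendored version, which returns [][]image.Point:
+// find the external contours of a binary mask (assumed to exist) and keep
+// the bounding rectangle of the largest one.
+//
+//  contours := gocv.FindContours(mask, gocv.RetrievalExternal, gocv.ChainApproxSimple)
+//  best, bestArea := image.Rectangle{}, 0.0
+//  for _, c := range contours {
+//      if area := gocv.ContourArea(c); area > bestArea {
+//          best, bestArea = gocv.BoundingRect(c), area
+//      }
+//  }
+//  _, _ = best, bestArea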
+
+// ConnectedComponentsAlgorithmType specifies the algorithm used by ConnectedComponents.
+type ConnectedComponentsAlgorithmType int
+
+const (
+ // SAUF algorithm for 8-way connectivity, SAUF algorithm for 4-way connectivity.
+ CCL_WU ConnectedComponentsAlgorithmType = 0
+
+ // BBDT algorithm for 8-way connectivity, SAUF algorithm for 4-way connectivity.
+ CCL_DEFAULT = 1
+
+ // BBDT algorithm for 8-way connectivity, SAUF algorithm for 4-way connectivity
+ CCL_GRANA = 2
+)
+
+// ConnectedComponents computes the connected components labeled image of boolean image.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#gaedef8c7340499ca391d459122e51bef5
+//
+func ConnectedComponents(src Mat, labels *Mat) int {
+ return int(C.ConnectedComponents(src.p, labels.p, C.int(8), C.int(MatTypeCV32S), C.int(CCL_DEFAULT)))
+}
+
+// ConnectedComponentsWithParams computes the connected components labeled image
+// of a boolean image, with explicit connectivity, label type, and algorithm.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#gaedef8c7340499ca391d459122e51bef5
+//
+func ConnectedComponentsWithParams(src Mat, labels *Mat, conn int, ltype MatType,
+ ccltype ConnectedComponentsAlgorithmType) int {
+ return int(C.ConnectedComponents(src.p, labels.p, C.int(conn), C.int(ltype), C.int(ccltype)))
+}
+
+// ConnectedComponentsTypes are the connected components algorithm output formats
+type ConnectedComponentsTypes int
+
+const (
+ // The leftmost (x) coordinate which is the inclusive start of the bounding box in the horizontal direction.
+ CC_STAT_LEFT = 0
+
+ // The topmost (y) coordinate which is the inclusive start of the bounding box in the vertical direction.
+ CC_STAT_TOP = 1
+
+ // The horizontal size of the bounding box.
+ CC_STAT_WIDTH = 2
+
+ // The vertical size of the bounding box.
+ CC_STAT_HEIGHT = 3
+
+ // The total area (in pixels) of the connected component.
+ CC_STAT_AREA = 4
+
+ CC_STAT_MAX = 5
+)
+
+// ConnectedComponentsWithStats computes the connected components labeled image of boolean
+// image and also produces a statistics output for each label.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#ga107a78bf7cd25dec05fb4dfc5c9e765f
+//
+func ConnectedComponentsWithStats(src Mat, labels *Mat, stats *Mat, centroids *Mat) int {
+ return int(C.ConnectedComponentsWithStats(src.p, labels.p, stats.p, centroids.p,
+ C.int(8), C.int(MatTypeCV32S), C.int(CCL_DEFAULT)))
+}
+
+// ConnectedComponentsWithStatsWithParams computes the connected components labeled image of boolean
+// image and also produces a statistics output for each label.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#ga107a78bf7cd25dec05fb4dfc5c9e765f
+//
+func ConnectedComponentsWithStatsWithParams(src Mat, labels *Mat, stats *Mat, centroids *Mat,
+ conn int, ltype MatType, ccltype ConnectedComponentsAlgorithmType) int {
+ return int(C.ConnectedComponentsWithStats(src.p, labels.p, stats.p, centroids.p, C.int(conn),
+ C.int(ltype), C.int(ccltype)))
+}
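+
+// A usage sketch: label a binary mask (assumed to exist) and read the area of
+// each component from the stats Mat, which holds one row per label with the
+// columns indexed by the CC_STAT_* constants above. GetIntAt comes from the
+// core Mat bindings.
+//
+//  labels, stats, centroids := gocv.NewMat(), gocv.NewMat(), gocv.NewMat()
+//  defer labels.Close()
+//  defer stats.Close()
+//  defer centroids.Close()
+//  n := gocv.ConnectedComponentsWithStats(mask, &labels, &stats, &centroids)
+//  for label := 1; label < n; label++ { // label 0 is the background
+//      area := stats.GetIntAt(label, gocv.CC_STAT_AREA)
+//      _ = area
+//  }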
+
+// TemplateMatchMode is the type of the template matching operation.
+type TemplateMatchMode int
+
+const (
+ // TmSqdiff maps to TM_SQDIFF
+ TmSqdiff TemplateMatchMode = 0
+ // TmSqdiffNormed maps to TM_SQDIFF_NORMED
+ TmSqdiffNormed = 1
+ // TmCcorr maps to TM_CCORR
+ TmCcorr = 2
+ // TmCcorrNormed maps to TM_CCORR_NORMED
+ TmCcorrNormed = 3
+ // TmCcoeff maps to TM_CCOEFF
+ TmCcoeff = 4
+ // TmCcoeffNormed maps to TM_CCOEFF_NORMED
+ TmCcoeffNormed = 5
+)
+
+// MatchTemplate compares a template against overlapped image regions.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/df/dfb/group__imgproc__object.html#ga586ebfb0a7fb604b35a23d85391329be
+//
+func MatchTemplate(image Mat, templ Mat, result *Mat, method TemplateMatchMode, mask Mat) {
+ C.MatchTemplate(image.p, templ.p, result.p, C.int(method), mask.p)
+}
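+
+// A usage sketch: find the best match of a template tpl in img (both assumed
+// to exist) and read its location with MinMaxLoc from the core bindings; with
+// TmCcoeffNormed the best match is at the maximum.
+//
+//  result := gocv.NewMat()
+//  defer result.Close()
+//  mask := gocv.NewMat() // empty mask means "match everywhere"
+//  defer mask.Close()
+//  gocv.MatchTemplate(img, tpl, &result, gocv.TmCcoeffNormed, mask)
+//  _, maxVal, _, maxLoc := gocv.MinMaxLoc(result)
+//  _, _ = maxVal, maxLoc // match score and top-left corner of the best match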
+
+// Moments calculates all of the moments up to the third order of a polygon
+// or rasterized shape.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#ga556a180f43cab22649c23ada36a8a139
+//
+func Moments(src Mat, binaryImage bool) map[string]float64 {
+ r := C.Moments(src.p, C.bool(binaryImage))
+
+ result := make(map[string]float64)
+ result["m00"] = float64(r.m00)
+ result["m10"] = float64(r.m10)
+ result["m01"] = float64(r.m01)
+ result["m20"] = float64(r.m20)
+ result["m11"] = float64(r.m11)
+ result["m02"] = float64(r.m02)
+ result["m30"] = float64(r.m30)
+ result["m21"] = float64(r.m21)
+ result["m12"] = float64(r.m12)
+ result["m03"] = float64(r.m03)
+ result["mu20"] = float64(r.mu20)
+ result["mu11"] = float64(r.mu11)
+ result["mu02"] = float64(r.mu02)
+ result["mu30"] = float64(r.mu30)
+ result["mu21"] = float64(r.mu21)
+ result["mu12"] = float64(r.mu12)
+ result["mu03"] = float64(r.mu03)
+ result["nu20"] = float64(r.nu20)
+ result["nu11"] = float64(r.nu11)
+ result["nu02"] = float64(r.nu02)
+ result["nu30"] = float64(r.nu30)
+ result["nu21"] = float64(r.nu21)
+ result["nu12"] = float64(r.nu12)
+ result["nu03"] = float64(r.nu03)
+
+ return result
+}
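+
+// A usage sketch: the centroid of a binary shape follows directly from the
+// raw moments, cx = m10/m00 and cy = m01/m00 (guarding against an empty mask,
+// which is assumed to exist).
+//
+//  m := gocv.Moments(mask, true)
+//  if m["m00"] != 0 {
+//      cx, cy := m["m10"]/m["m00"], m["m01"]/m["m00"]
+//      _, _ = cx, cy
+//  }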
+
+// PyrDown blurs an image and downsamples it.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#gaf9bba239dfca11654cb7f50f889fc2ff
+//
+func PyrDown(src Mat, dst *Mat, ksize image.Point, borderType BorderType) {
+ pSize := C.struct_Size{
+ height: C.int(ksize.X),
+ width: C.int(ksize.Y),
+ }
+ C.PyrDown(src.p, dst.p, pSize, C.int(borderType))
+}
+
+// PyrUp upsamples an image and then blurs it.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#gada75b59bdaaca411ed6fee10085eb784
+//
+func PyrUp(src Mat, dst *Mat, ksize image.Point, borderType BorderType) {
+ pSize := C.struct_Size{
+ height: C.int(ksize.X),
+ width: C.int(ksize.Y),
+ }
+ C.PyrUp(src.p, dst.p, pSize, C.int(borderType))
+}
+
+// MorphologyDefaultBorderValue returns the "magic" border value for erosion and dilation.
+// It is automatically transformed to Scalar::all(-DBL_MAX) for dilation.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#ga94756fad83d9d24d29c9bf478558c40a
+//
+func MorphologyDefaultBorderValue() Scalar {
+ var scalar C.Scalar = C.MorphologyDefaultBorderValue()
+ return NewScalar(float64(scalar.val1), float64(scalar.val2), float64(scalar.val3), float64(scalar.val4))
+}
+
+// MorphologyEx performs advanced morphological transformations.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#ga67493776e3ad1a3df63883829375201f
+//
+func MorphologyEx(src Mat, dst *Mat, op MorphType, kernel Mat) {
+ C.MorphologyEx(src.p, dst.p, C.int(op), kernel.p)
+}
+
+// MorphologyExWithParams performs advanced morphological transformations.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#ga67493776e3ad1a3df63883829375201f
+//
+func MorphologyExWithParams(src Mat, dst *Mat, op MorphType, kernel Mat, iterations int, borderType BorderType) {
+ pt := C.struct_Point{
+ x: C.int(-1),
+ y: C.int(-1),
+ }
+ C.MorphologyExWithParams(src.p, dst.p, C.int(op), kernel.p, pt, C.int(iterations), C.int(borderType))
+}
+
+// MorphShape is the shape of the structuring element used for Morphing operations.
+type MorphShape int
+
+const (
+ // MorphRect is the rectangular morph shape.
+ MorphRect MorphShape = 0
+
+ // MorphCross is the cross morph shape.
+ MorphCross = 1
+
+ // MorphEllipse is the ellipse morph shape.
+ MorphEllipse = 2
+)
+
+// GetStructuringElement returns a structuring element of the specified size
+// and shape for morphological operations.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#gac342a1bb6eabf6f55c803b09268e36dc
+//
+func GetStructuringElement(shape MorphShape, ksize image.Point) Mat {
+ sz := C.struct_Size{
+ width: C.int(ksize.X),
+ height: C.int(ksize.Y),
+ }
+
+ return newMat(C.GetStructuringElement(C.int(shape), sz))
+}
+
+// MorphType type of morphological operation.
+type MorphType int
+
+const (
+ // MorphErode operation
+ MorphErode MorphType = 0
+
+ // MorphDilate operation
+ MorphDilate = 1
+
+ // MorphOpen operation
+ MorphOpen = 2
+
+ // MorphClose operation
+ MorphClose = 3
+
+ // MorphGradient operation
+ MorphGradient = 4
+
+ // MorphTophat operation
+ MorphTophat = 5
+
+ // MorphBlackhat operation
+ MorphBlackhat = 6
+
+ // MorphHitmiss operation
+ MorphHitmiss = 7
+)
+
+// BorderType type of border.
+type BorderType int
+
+const (
+ // BorderConstant border type
+ BorderConstant BorderType = 0
+
+ // BorderReplicate border type
+ BorderReplicate = 1
+
+ // BorderReflect border type
+ BorderReflect = 2
+
+ // BorderWrap border type
+ BorderWrap = 3
+
+ // BorderReflect101 border type
+ BorderReflect101 = 4
+
+ // BorderTransparent border type
+ BorderTransparent = 5
+
+ // BorderDefault border type
+ BorderDefault = BorderReflect101
+
+ // BorderIsolated border type
+ BorderIsolated = 16
+)
+
+// GaussianBlur blurs an image Mat using a Gaussian filter.
+// The function convolves the src Mat image into the dst Mat using
+// the specified Gaussian kernel params.
+//
+// For further details, please see:
+// http://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#gaabe8c836e97159a9193fb0b11ac52cf1
+//
+func GaussianBlur(src Mat, dst *Mat, ksize image.Point, sigmaX float64,
+ sigmaY float64, borderType BorderType) {
+ pSize := C.struct_Size{
+ width: C.int(ksize.X),
+ height: C.int(ksize.Y),
+ }
+
+ C.GaussianBlur(src.p, dst.p, pSize, C.double(sigmaX), C.double(sigmaY), C.int(borderType))
+}
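+
+// A usage sketch: a 5x5 Gaussian with sigmaX = sigmaY = 0, which lets OpenCV
+// derive the sigmas from the kernel size; commonly used to denoise a frame
+// (src, assumed to exist) before edge detection.
+//
+//  blurred := gocv.NewMat()
+//  defer blurred.Close()
+//  gocv.GaussianBlur(src, &blurred, image.Pt(5, 5), 0, 0, gocv.BorderDefault)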
+
+// Sobel calculates the first, second, third, or mixed image derivatives using an extended Sobel operator.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#gacea54f142e81b6758cb6f375ce782c8d
+//
+func Sobel(src Mat, dst *Mat, ddepth, dx, dy, ksize int, scale, delta float64, borderType BorderType) {
+ C.Sobel(src.p, dst.p, C.int(ddepth), C.int(dx), C.int(dy), C.int(ksize), C.double(scale), C.double(delta), C.int(borderType))
+}
+
+// SpatialGradient calculates the first order image derivative in both x and y using a Sobel operator.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#ga405d03b20c782b65a4daf54d233239a2
+//
+func SpatialGradient(src Mat, dx, dy *Mat, ksize int, borderType BorderType) {
+ C.SpatialGradient(src.p, dx.p, dy.p, C.int(ksize), C.int(borderType))
+}
+
+// Laplacian calculates the Laplacian of an image.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#gad78703e4c8fe703d479c1860d76429e6
+//
+func Laplacian(src Mat, dst *Mat, dDepth int, size int, scale float64,
+ delta float64, borderType BorderType) {
+ C.Laplacian(src.p, dst.p, C.int(dDepth), C.int(size), C.double(scale), C.double(delta), C.int(borderType))
+}
+
+// Scharr calculates the first x- or y- image derivative using Scharr operator.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#gaa13106761eedf14798f37aa2d60404c9
+//
+func Scharr(src Mat, dst *Mat, dDepth int, dx int, dy int, scale float64,
+ delta float64, borderType BorderType) {
+ C.Scharr(src.p, dst.p, C.int(dDepth), C.int(dx), C.int(dy), C.double(scale), C.double(delta), C.int(borderType))
+}
+
+// MedianBlur blurs an image using the median filter.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#ga564869aa33e58769b4469101aac458f9
+//
+func MedianBlur(src Mat, dst *Mat, ksize int) {
+ C.MedianBlur(src.p, dst.p, C.int(ksize))
+}
+
+// Canny finds edges in an image using the Canny algorithm.
+// The function finds edges in the input image image and marks
+// them in the output map edges using the Canny algorithm.
+// The smallest value between threshold1 and threshold2 is used
+// for edge linking. The largest value is used to
+// find initial segments of strong edges.
+// See http://en.wikipedia.org/wiki/Canny_edge_detector
+//
+// For further details, please see:
+// http://docs.opencv.org/master/dd/d1a/group__imgproc__feature.html#ga04723e007ed888ddf11d9ba04e2232de
+//
+func Canny(src Mat, edges *Mat, t1 float32, t2 float32) {
+ C.Canny(src.p, edges.p, C.double(t1), C.double(t2))
+}
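+
+// A usage sketch: the usual pipeline converts to grayscale first (CvtColor is
+// bound earlier in this package, the color codes in imgproc_colorcodes.go) and
+// keeps threshold1:threshold2 roughly between 1:2 and 1:3.
+//
+//  gray := gocv.NewMat()
+//  defer gray.Close()
+//  gocv.CvtColor(src, &gray, gocv.ColorBGRToGray)
+//  edges := gocv.NewMat()
+//  defer edges.Close()
+//  gocv.Canny(gray, &edges, 50, 150)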
+
+// CornerSubPix refines the corner locations. The function iterates to find
+// the sub-pixel accurate location of corners or radial saddle points.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/dd/d1a/group__imgproc__feature.html#ga354e0d7c86d0d9da75de9b9701a9a87e
+//
+func CornerSubPix(img Mat, corners *Mat, winSize image.Point, zeroZone image.Point, criteria TermCriteria) {
+ winSz := C.struct_Size{
+ width: C.int(winSize.X),
+ height: C.int(winSize.Y),
+ }
+
+ zeroSz := C.struct_Size{
+ width: C.int(zeroZone.X),
+ height: C.int(zeroZone.Y),
+ }
+
+ C.CornerSubPix(img.p, corners.p, winSz, zeroSz, criteria.p)
+ return
+}
+
+// GoodFeaturesToTrack determines strong corners on an image. The function
+// finds the most prominent corners in the image or in the specified image region.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/dd/d1a/group__imgproc__feature.html#ga1d6bb77486c8f92d79c8793ad995d541
+//
+func GoodFeaturesToTrack(img Mat, corners *Mat, maxCorners int, quality float64, minDist float64) {
+ C.GoodFeaturesToTrack(img.p, corners.p, C.int(maxCorners), C.double(quality), C.double(minDist))
+}
+
+// GrabCutMode is the flag for GrabCut algorithm.
+type GrabCutMode int
+
+const (
+ // GCInitWithRect makes the function initialize the state and the mask using the provided rectangle.
+ // After that it runs the itercount iterations of the algorithm.
+ GCInitWithRect GrabCutMode = 0
+ // GCInitWithMask makes the function initialize the state using the provided mask.
+ // GCInitWithMask and GCInitWithRect can be combined.
+ // Then all the pixels outside of the ROI are automatically initialized with GC_BGD.
+ GCInitWithMask = 1
+ // GCEval means that the algorithm should just resume.
+ GCEval = 2
+ // GCEvalFreezeModel means that the algorithm should just run a single iteration of the GrabCut algorithm
+ // with the fixed model
+ GCEvalFreezeModel = 3
+)
+
+// GrabCut runs the GrabCut algorithm.
+// The function implements the GrabCut image segmentation algorithm.
+// For further details, please see:
+// https://docs.opencv.org/master/d7/d1b/group__imgproc__misc.html#ga909c1dda50efcbeaa3ce126be862b37f
+//
+func GrabCut(img Mat, mask *Mat, r image.Rectangle, bgdModel *Mat, fgdModel *Mat, iterCount int, mode GrabCutMode) {
+ cRect := C.struct_Rect{
+ x: C.int(r.Min.X),
+ y: C.int(r.Min.Y),
+ width: C.int(r.Size().X),
+ height: C.int(r.Size().Y),
+ }
+
+ C.GrabCut(img.p, mask.p, cRect, bgdModel.p, fgdModel.p, C.int(iterCount), C.int(mode))
+}
+
+// HoughMode is the type for Hough transform variants.
+type HoughMode int
+
+const (
+ // HoughStandard is the classical or standard Hough transform.
+ HoughStandard HoughMode = 0
+ // HoughProbabilistic is the probabilistic Hough transform (more efficient
+ // in case if the picture contains a few long linear segments).
+ HoughProbabilistic = 1
+ // HoughMultiScale is the multi-scale variant of the classical Hough
+ // transform.
+ HoughMultiScale = 2
+ // HoughGradient is basically 21HT, described in: HK Yuen, John Princen,
+ // John Illingworth, and Josef Kittler. Comparative study of hough
+ // transform methods for circle finding. Image and Vision Computing,
+ // 8(1):71–77, 1990.
+ HoughGradient = 3
+)
+
+// HoughCircles finds circles in a grayscale image using the Hough transform.
+// The only "method" currently supported is HoughGradient. If you want to pass
+// more parameters, please see `HoughCirclesWithParams`.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/dd/d1a/group__imgproc__feature.html#ga47849c3be0d0406ad3ca45db65a25d2d
+//
+func HoughCircles(src Mat, circles *Mat, method HoughMode, dp, minDist float64) {
+ C.HoughCircles(src.p, circles.p, C.int(method), C.double(dp), C.double(minDist))
+}
+
+// HoughCirclesWithParams finds circles in a grayscale image using the Hough
+// transform. The only "method" currently supported is HoughGradient.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/dd/d1a/group__imgproc__feature.html#ga47849c3be0d0406ad3ca45db65a25d2d
+//
+func HoughCirclesWithParams(src Mat, circles *Mat, method HoughMode, dp, minDist, param1, param2 float64, minRadius, maxRadius int) {
+ C.HoughCirclesWithParams(src.p, circles.p, C.int(method), C.double(dp), C.double(minDist), C.double(param1), C.double(param2), C.int(minRadius), C.int(maxRadius))
+}
+
+// HoughLines implements the standard or multi-scale Hough transform
+// algorithm for line detection. For a good explanation of Hough transform, see:
+// http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm
+//
+// For further details, please see:
+// http://docs.opencv.org/master/dd/d1a/group__imgproc__feature.html#ga46b4e588934f6c8dfd509cc6e0e4545a
+//
+func HoughLines(src Mat, lines *Mat, rho float32, theta float32, threshold int) {
+ C.HoughLines(src.p, lines.p, C.double(rho), C.double(theta), C.int(threshold))
+}
+
+// HoughLinesP implements the probabilistic Hough transform
+// algorithm for line detection. For a good explanation of Hough transform, see:
+// http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm
+//
+// For further details, please see:
+// http://docs.opencv.org/master/dd/d1a/group__imgproc__feature.html#ga8618180a5948286384e3b7ca02f6feeb
+//
+func HoughLinesP(src Mat, lines *Mat, rho float32, theta float32, threshold int) {
+ C.HoughLinesP(src.p, lines.p, C.double(rho), C.double(theta), C.int(threshold))
+}
+
+// HoughLinesPWithParams implements the probabilistic Hough transform for line
+// detection, with additional control over the minimum accepted line length and
+// the maximum allowed gap between points on the same line.
+//
+// For further details, please see:
+// http://docs.opencv.org/master/dd/d1a/group__imgproc__feature.html#ga8618180a5948286384e3b7ca02f6feeb
+//
+func HoughLinesPWithParams(src Mat, lines *Mat, rho float32, theta float32, threshold int, minLineLength float32, maxLineGap float32) {
+ C.HoughLinesPWithParams(src.p, lines.p, C.double(rho), C.double(theta), C.int(threshold), C.double(minLineLength), C.double(maxLineGap))
+}
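+
+// A usage sketch: detect line segments on a Canny edge map (edges, assumed to
+// exist) and read them out of the result Mat, one 4-vector (x1, y1, x2, y2)
+// per row. GetVeciAt comes from the core Mat bindings and math.Pi from the
+// standard library.
+//
+//  lines := gocv.NewMat()
+//  defer lines.Close()
+//  gocv.HoughLinesPWithParams(edges, &lines, 1, math.Pi/180, 50, 30, 10)
+//  for i := 0; i < lines.Rows(); i++ {
+//      v := lines.GetVeciAt(i, 0)
+//      p1, p2 := image.Pt(int(v[0]), int(v[1])), image.Pt(int(v[2]), int(v[3]))
+//      _, _ = p1, p2
+//  }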
+
+// HoughLinesPointSet implements the Hough transform algorithm for line
+// detection on a set of points. For a good explanation of Hough transform, see:
+// http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm
+//
+// For further details, please see:
+// https://docs.opencv.org/master/dd/d1a/group__imgproc__feature.html#ga2858ef61b4e47d1919facac2152a160e
+//
+func HoughLinesPointSet(points Mat, lines *Mat, linesMax int, threshold int,
+ minRho float32, maxRho float32, rhoStep float32,
+ minTheta float32, maxTheta float32, thetaStep float32) {
+ C.HoughLinesPointSet(points.p, lines.p, C.int(linesMax), C.int(threshold),
+ C.double(minRho), C.double(maxRho), C.double(rhoStep),
+ C.double(minTheta), C.double(maxTheta), C.double(thetaStep))
+}
+
+// Integral calculates one or more integral images for the source image.
+// For further details, please see:
+// https://docs.opencv.org/master/d7/d1b/group__imgproc__misc.html#ga97b87bec26908237e8ba0f6e96d23e28
+//
+func Integral(src Mat, sum *Mat, sqsum *Mat, tilted *Mat) {
+ C.Integral(src.p, sum.p, sqsum.p, tilted.p)
+}
+
+// ThresholdType type of threshold operation.
+type ThresholdType int
+
+const (
+ // ThresholdBinary threshold type
+ ThresholdBinary ThresholdType = 0
+
+ // ThresholdBinaryInv threshold type
+ ThresholdBinaryInv = 1
+
+ // ThresholdTrunc threshold type
+ ThresholdTrunc = 2
+
+ // ThresholdToZero threshold type
+ ThresholdToZero = 3
+
+ // ThresholdToZeroInv threshold type
+ ThresholdToZeroInv = 4
+
+ // ThresholdMask threshold type
+ ThresholdMask = 7
+
+ // ThresholdOtsu threshold type
+ ThresholdOtsu = 8
+
+ // ThresholdTriangle threshold type
+ ThresholdTriangle = 16
+)
+
+// Threshold applies a fixed-level threshold to each array element.
+//
+// For further details, please see:
+// https://docs.opencv.org/3.3.0/d7/d1b/group__imgproc__misc.html#gae8a4a146d1ca78c626a53577199e9c57
+//
+func Threshold(src Mat, dst *Mat, thresh float32, maxvalue float32, typ ThresholdType) {
+ C.Threshold(src.p, dst.p, C.double(thresh), C.double(maxvalue), C.int(typ))
+}
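+
+// A usage sketch: binarize a grayscale Mat (gray, assumed to exist). Adding
+// ThresholdOtsu to ThresholdBinary makes OpenCV pick the threshold itself, so
+// the thresh argument is ignored.
+//
+//  bin := gocv.NewMat()
+//  defer bin.Close()
+//  gocv.Threshold(gray, &bin, 0, 255, gocv.ThresholdBinary+gocv.ThresholdOtsu)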
+
+// AdaptiveThresholdType type of adaptive threshold operation.
+type AdaptiveThresholdType int
+
+const (
+ // AdaptiveThresholdMean threshold type
+ AdaptiveThresholdMean AdaptiveThresholdType = 0
+
+ // AdaptiveThresholdGaussian threshold type
+ AdaptiveThresholdGaussian = 1
+)
+
+// AdaptiveThreshold applies an adaptive threshold to an array, computing the
+// threshold for each pixel from its local neighborhood.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d7/d1b/group__imgproc__misc.html#ga72b913f352e4a1b1b397736707afcde3
+//
+func AdaptiveThreshold(src Mat, dst *Mat, maxValue float32, adaptiveTyp AdaptiveThresholdType, typ ThresholdType, blockSize int, c float32) {
+ C.AdaptiveThreshold(src.p, dst.p, C.double(maxValue), C.int(adaptiveTyp), C.int(typ), C.int(blockSize), C.double(c))
+}
+
+// ArrowedLine draws an arrow segment pointing from the first point
+// to the second one.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d6/d6e/group__imgproc__draw.html#ga0a165a3ca093fd488ac709fdf10c05b2
+//
+func ArrowedLine(img *Mat, pt1 image.Point, pt2 image.Point, c color.RGBA, thickness int) {
+ sp1 := C.struct_Point{
+ x: C.int(pt1.X),
+ y: C.int(pt1.Y),
+ }
+
+ sp2 := C.struct_Point{
+ x: C.int(pt2.X),
+ y: C.int(pt2.Y),
+ }
+
+ sColor := C.struct_Scalar{
+ val1: C.double(c.B),
+ val2: C.double(c.G),
+ val3: C.double(c.R),
+ val4: C.double(c.A),
+ }
+
+ C.ArrowedLine(img.p, sp1, sp2, sColor, C.int(thickness))
+}
+
+// Circle draws a circle.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d6/d6e/group__imgproc__draw.html#gaf10604b069374903dbd0f0488cb43670
+//
+func Circle(img *Mat, center image.Point, radius int, c color.RGBA, thickness int) {
+ pc := C.struct_Point{
+ x: C.int(center.X),
+ y: C.int(center.Y),
+ }
+
+ sColor := C.struct_Scalar{
+ val1: C.double(c.B),
+ val2: C.double(c.G),
+ val3: C.double(c.R),
+ val4: C.double(c.A),
+ }
+
+ C.Circle(img.p, pc, C.int(radius), sColor, C.int(thickness))
+}
+
+// Ellipse draws a simple or thick elliptic arc or fills an ellipse sector.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d6/d6e/group__imgproc__draw.html#ga28b2267d35786f5f890ca167236cbc69
+//
+func Ellipse(img *Mat, center, axes image.Point, angle, startAngle, endAngle float64, c color.RGBA, thickness int) {
+ pc := C.struct_Point{
+ x: C.int(center.X),
+ y: C.int(center.Y),
+ }
+ pa := C.struct_Point{
+ x: C.int(axes.X),
+ y: C.int(axes.Y),
+ }
+
+ sColor := C.struct_Scalar{
+ val1: C.double(c.B),
+ val2: C.double(c.G),
+ val3: C.double(c.R),
+ val4: C.double(c.A),
+ }
+
+ C.Ellipse(img.p, pc, pa, C.double(angle), C.double(startAngle), C.double(endAngle), sColor, C.int(thickness))
+}
+
+// Line draws a line segment connecting two points.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d6/d6e/group__imgproc__draw.html#ga7078a9fae8c7e7d13d24dac2520ae4a2
+//
+func Line(img *Mat, pt1 image.Point, pt2 image.Point, c color.RGBA, thickness int) {
+ sp1 := C.struct_Point{
+ x: C.int(pt1.X),
+ y: C.int(pt1.Y),
+ }
+
+ sp2 := C.struct_Point{
+ x: C.int(pt2.X),
+ y: C.int(pt2.Y),
+ }
+
+ sColor := C.struct_Scalar{
+ val1: C.double(c.B),
+ val2: C.double(c.G),
+ val3: C.double(c.R),
+ val4: C.double(c.A),
+ }
+
+ C.Line(img.p, sp1, sp2, sColor, C.int(thickness))
+}
+
+// Rectangle draws a simple, thick, or filled up-right rectangle.
+// It renders a rectangle with the desired characteristics to the target Mat image.
+//
+// For further details, please see:
+// http://docs.opencv.org/master/d6/d6e/group__imgproc__draw.html#ga346ac30b5c74e9b5137576c9ee9e0e8c
+//
+func Rectangle(img *Mat, r image.Rectangle, c color.RGBA, thickness int) {
+ cRect := C.struct_Rect{
+ x: C.int(r.Min.X),
+ y: C.int(r.Min.Y),
+ width: C.int(r.Size().X),
+ height: C.int(r.Size().Y),
+ }
+
+ sColor := C.struct_Scalar{
+ val1: C.double(c.B),
+ val2: C.double(c.G),
+ val3: C.double(c.R),
+ val4: C.double(c.A),
+ }
+
+ C.Rectangle(img.p, cRect, sColor, C.int(thickness))
+}
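+
+// A usage sketch: draw a detection box on img (assumed to exist). The RGBA
+// color is repacked into OpenCV's BGR-ordered Scalar as shown above, and a
+// negative thickness fills the rectangle.
+//
+//  green := color.RGBA{R: 0, G: 255, B: 0, A: 0}
+//  gocv.Rectangle(&img, image.Rect(80, 60, 240, 180), green, 2)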
+
+// FillPoly fills the area bounded by one or more polygons.
+//
+// For more information, see:
+// https://docs.opencv.org/master/d6/d6e/group__imgproc__draw.html#gaf30888828337aa4c6b56782b5dfbd4b7
+func FillPoly(img *Mat, pts [][]image.Point, c color.RGBA) {
+ points := make([]C.struct_Points, len(pts))
+
+ for i, pt := range pts {
+ p := (*C.struct_Point)(C.malloc(C.size_t(C.sizeof_struct_Point * len(pt))))
+ defer C.free(unsafe.Pointer(p))
+
+ pa := getPoints(p, len(pt))
+
+ for j, point := range pt {
+ pa[j] = C.struct_Point{
+ x: C.int(point.X),
+ y: C.int(point.Y),
+ }
+ }
+
+ points[i] = C.struct_Points{
+ points: (*C.Point)(p),
+ length: C.int(len(pt)),
+ }
+ }
+
+ cPoints := C.struct_Contours{
+ contours: (*C.struct_Points)(&points[0]),
+ length: C.int(len(pts)),
+ }
+
+ sColor := C.struct_Scalar{
+ val1: C.double(c.B),
+ val2: C.double(c.G),
+ val3: C.double(c.R),
+ val4: C.double(c.A),
+ }
+
+ C.FillPoly(img.p, cPoints, sColor)
+}
+
+// HersheyFont are the font libraries included in OpenCV.
+// Only a subset of the available Hershey fonts is supported by OpenCV.
+//
+// For more information, see:
+// http://sources.isc.org/utils/misc/hershey-font.txt
+//
+type HersheyFont int
+
+const (
+ // FontHersheySimplex is normal size sans-serif font.
+ FontHersheySimplex HersheyFont = 0
+ // FontHersheyPlain is a small size sans-serif font.
+ FontHersheyPlain = 1
+ // FontHersheyDuplex is a normal size sans-serif font
+ // (more complex than FontHersheySimplex).
+ FontHersheyDuplex = 2
+ // FontHersheyComplex is a normal size serif font.
+ FontHersheyComplex = 3
+ // FontHersheyTriplex is a normal size serif font
+ // (more complex than FontHersheyComplex).
+ FontHersheyTriplex = 4
+ // FontHersheyComplexSmall is a smaller version of FontHersheyComplex.
+ FontHersheyComplexSmall = 5
+ // FontHersheyScriptSimplex is a hand-writing style font.
+ FontHersheyScriptSimplex = 6
+ // FontHersheyScriptComplex is a more complex variant of FontHersheyScriptSimplex.
+ FontHersheyScriptComplex = 7
+ // FontItalic is the flag for italic font.
+ FontItalic = 16
+)
+
+// LineType are the line libraries included in OpenCV.
+//
+// For more information, see:
+// https://vovkos.github.io/doxyrest-showcase/opencv/sphinx_rtd_theme/enum_cv_LineTypes.html
+//
+type LineType int
+
+const (
+ // Filled line
+ Filled LineType = -1
+ // Line4 4-connected line
+ Line4 = 4
+ // Line8 8-connected line
+ Line8 = 8
+ // LineAA antialiased line
+ LineAA = 16
+)
+
+// GetTextSize calculates the width and height of a text string.
+// It returns an image.Point with the size required to draw text using
+// a specific font face, scale, and thickness.
+//
+// For further details, please see:
+// http://docs.opencv.org/master/d6/d6e/group__imgproc__draw.html#ga3d2abfcb995fd2db908c8288199dba82
+//
+func GetTextSize(text string, fontFace HersheyFont, fontScale float64, thickness int) image.Point {
+ cText := C.CString(text)
+ defer C.free(unsafe.Pointer(cText))
+
+ sz := C.GetTextSize(cText, C.int(fontFace), C.double(fontScale), C.int(thickness))
+ return image.Pt(int(sz.width), int(sz.height))
+}
+
+// PutText draws a text string.
+// It renders the specified text string into the img Mat at the location
+// passed in the "org" param, using the desired font face, font scale,
+// color, and line thickness.
+//
+// For further details, please see:
+// http://docs.opencv.org/master/d6/d6e/group__imgproc__draw.html#ga5126f47f883d730f633d74f07456c576
+//
+func PutText(img *Mat, text string, org image.Point, fontFace HersheyFont, fontScale float64, c color.RGBA, thickness int) {
+ cText := C.CString(text)
+ defer C.free(unsafe.Pointer(cText))
+
+ pOrg := C.struct_Point{
+ x: C.int(org.X),
+ y: C.int(org.Y),
+ }
+
+ sColor := C.struct_Scalar{
+ val1: C.double(c.B),
+ val2: C.double(c.G),
+ val3: C.double(c.R),
+ val4: C.double(c.A),
+ }
+
+ C.PutText(img.p, cText, pOrg, C.int(fontFace), C.double(fontScale), sColor, C.int(thickness))
+ return
+}
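+
+// A usage sketch: measure a label with GetTextSize so it can be placed just
+// inside the top-left corner of a bounding box; box is an assumed
+// image.Rectangle and the label text is a placeholder.
+//
+//  label := "person 0.87"
+//  sz := gocv.GetTextSize(label, gocv.FontHersheyPlain, 1.2, 2)
+//  org := image.Pt(box.Min.X, box.Min.Y+sz.Y)
+//  white := color.RGBA{R: 255, G: 255, B: 255, A: 0}
+//  gocv.PutText(&img, label, org, gocv.FontHersheyPlain, 1.2, white, 2)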
+
+// PutTextWithParams draws a text string.
+// It renders the specified text string into the img Mat at the location
+// passed in the "org" param, using the desired font face, font scale,
+// color, and line thickness.
+//
+// For further details, please see:
+// http://docs.opencv.org/master/d6/d6e/group__imgproc__draw.html#ga5126f47f883d730f633d74f07456c576
+//
+func PutTextWithParams(img *Mat, text string, org image.Point, fontFace HersheyFont, fontScale float64, c color.RGBA, thickness int, lineType LineType, bottomLeftOrigin bool) {
+ cText := C.CString(text)
+ defer C.free(unsafe.Pointer(cText))
+
+ pOrg := C.struct_Point{
+ x: C.int(org.X),
+ y: C.int(org.Y),
+ }
+
+ sColor := C.struct_Scalar{
+ val1: C.double(c.B),
+ val2: C.double(c.G),
+ val3: C.double(c.R),
+ val4: C.double(c.A),
+ }
+
+ C.PutTextWithParams(img.p, cText, pOrg, C.int(fontFace), C.double(fontScale), sColor, C.int(thickness), C.int(lineType), C.bool(bottomLeftOrigin))
+ return
+}
+
+// InterpolationFlags are bit flags that control the interpolation algorithm
+// that is used.
+type InterpolationFlags int
+
+const (
+ // InterpolationNearestNeighbor is nearest-neighbor interpolation (fast but low quality).
+ InterpolationNearestNeighbor InterpolationFlags = 0
+
+ // InterpolationLinear is bilinear interpolation.
+ InterpolationLinear = 1
+
+ // InterpolationCubic is bicubic interpolation.
+ InterpolationCubic = 2
+
+ // InterpolationArea uses pixel area relation. It is preferred for image
+ // decimation as it gives moire-free results.
+ InterpolationArea = 3
+
+ // InterpolationLanczos4 is Lanczos interpolation over 8x8 neighborhood.
+ InterpolationLanczos4 = 4
+
+ // InterpolationDefault is an alias for InterpolationLinear.
+ InterpolationDefault = InterpolationLinear
+
+ // InterpolationMax indicates use maximum interpolation.
+ InterpolationMax = 7
+)
+
+// Resize resizes an image.
+// It resizes the image src down to or up to the specified size, storing the
+// result in dst. Note that src and dst may be the same image. If you wish to
+// scale by factor, an empty sz may be passed and non-zero fx and fy. Likewise,
+// if you wish to scale to an explicit size, a non-empty sz may be passed with
+// zero for both fx and fy.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#ga47a974309e9102f5f08231edc7e7529d
+func Resize(src Mat, dst *Mat, sz image.Point, fx, fy float64, interp InterpolationFlags) {
+ pSize := C.struct_Size{
+ width: C.int(sz.X),
+ height: C.int(sz.Y),
+ }
+
+ C.Resize(src.p, dst.p, pSize, C.double(fx), C.double(fy), C.int(interp))
+ return
+}
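+
+// A usage sketch of the two calling conventions described above, resizing a
+// source Mat (src, assumed to exist) to an explicit size and then by a factor.
+//
+//  small := gocv.NewMat()
+//  defer small.Close()
+//  gocv.Resize(src, &small, image.Pt(320, 240), 0, 0, gocv.InterpolationArea)   // explicit size
+//  gocv.Resize(src, &small, image.Pt(0, 0), 0.5, 0.5, gocv.InterpolationLinear) // scale by factor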
+
+// GetRectSubPix retrieves a pixel rectangle from an image with sub-pixel accuracy.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#ga77576d06075c1a4b6ba1a608850cd614
+func GetRectSubPix(src Mat, patchSize image.Point, center image.Point, dst *Mat) {
+ sz := C.struct_Size{
+ width: C.int(patchSize.X),
+ height: C.int(patchSize.Y),
+ }
+ pt := C.struct_Point{
+ x: C.int(center.X),
+ y: C.int(center.Y),
+ }
+ C.GetRectSubPix(src.p, sz, pt, dst.p)
+}
+
+// GetRotationMatrix2D calculates an affine matrix of 2D rotation.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#gafbbc470ce83812914a70abfb604f4326
+func GetRotationMatrix2D(center image.Point, angle, scale float64) Mat {
+ pc := C.struct_Point{
+ x: C.int(center.X),
+ y: C.int(center.Y),
+ }
+ return newMat(C.GetRotationMatrix2D(pc, C.double(angle), C.double(scale)))
+}
+
+// WarpAffine applies an affine transformation to an image. For more parameters please check WarpAffineWithParams
+//
+// For further details, please see:
+// https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#ga0203d9ee5fcd28d40dbc4a1ea4451983
+func WarpAffine(src Mat, dst *Mat, m Mat, sz image.Point) {
+ pSize := C.struct_Size{
+ width: C.int(sz.X),
+ height: C.int(sz.Y),
+ }
+
+ C.WarpAffine(src.p, dst.p, m.p, pSize)
+}
+
+// WarpAffineWithParams applies an affine transformation to an image.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#ga0203d9ee5fcd28d40dbc4a1ea4451983
+func WarpAffineWithParams(src Mat, dst *Mat, m Mat, sz image.Point, flags InterpolationFlags, borderType BorderType, borderValue color.RGBA) {
+ pSize := C.struct_Size{
+ width: C.int(sz.X),
+ height: C.int(sz.Y),
+ }
+ bv := C.struct_Scalar{
+ val1: C.double(borderValue.B),
+ val2: C.double(borderValue.G),
+ val3: C.double(borderValue.R),
+ val4: C.double(borderValue.A),
+ }
+ C.WarpAffineWithParams(src.p, dst.p, m.p, pSize, C.int(flags), C.int(borderType), bv)
+}
+
+// WarpPerspective applies a perspective transformation to an image.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#gaf73673a7e8e18ec6963e3774e6a94b87
+func WarpPerspective(src Mat, dst *Mat, m Mat, sz image.Point) {
+ pSize := C.struct_Size{
+ width: C.int(sz.X),
+ height: C.int(sz.Y),
+ }
+
+ C.WarpPerspective(src.p, dst.p, m.p, pSize)
+}
+
+// Watershed performs a marker-based image segmentation using the watershed algorithm.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d7/d1b/group__imgproc__misc.html#ga3267243e4d3f95165d55a618c65ac6e1
+func Watershed(image Mat, markers *Mat) {
+ C.Watershed(image.p, markers.p)
+}
+
+// ColormapTypes are the GNU Octave/MATLAB equivalent colormaps.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d3/d50/group__imgproc__colormap.html
+type ColormapTypes int
+
+// List of the available color maps
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d3/d50/group__imgproc__colormap.html#ga9a805d8262bcbe273f16be9ea2055a65
+const (
+ ColormapAutumn ColormapTypes = 0
+ ColormapBone = 1
+ ColormapJet = 2
+ ColormapWinter = 3
+ ColormapRainbow = 4
+ ColormapOcean = 5
+ ColormapSummer = 6
+ ColormapSpring = 7
+ ColormapCool = 8
+ ColormapHsv = 9
+ ColormapPink = 10
+ ColormapHot = 11
+ ColormapParula = 12
+)
+
+// ApplyColorMap applies a GNU Octave/MATLAB equivalent colormap on a given image.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d3/d50/group__imgproc__colormap.html#gadf478a5e5ff49d8aa24e726ea6f65d15
+func ApplyColorMap(src Mat, dst *Mat, colormapType ColormapTypes) {
+ C.ApplyColorMap(src.p, dst.p, C.int(colormapType))
+}
+
+// ApplyCustomColorMap applies a custom defined colormap on a given image.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d3/d50/group__imgproc__colormap.html#gacb22288ddccc55f9bd9e6d492b409cae
+func ApplyCustomColorMap(src Mat, dst *Mat, customColormap Mat) {
+ C.ApplyCustomColorMap(src.p, dst.p, customColormap.p)
+}
+
+// GetPerspectiveTransform returns 3x3 perspective transformation for the
+// corresponding 4 point pairs.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#ga8c1ae0e3589a9d77fffc962c49b22043
+func GetPerspectiveTransform(src, dst []image.Point) Mat {
+ srcPoints := toCPoints(src)
+ dstPoints := toCPoints(dst)
+ return newMat(C.GetPerspectiveTransform(srcPoints, dstPoints))
+}
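+
+// A usage sketch: compute a homography from four source corners to a
+// rectangle and warp with it, e.g. to obtain a bird's-eye view of a road
+// region in src (assumed to exist). The corner coordinates are placeholders.
+//
+//  srcPts := []image.Point{{69, 119}, {227, 119}, {0, 165}, {295, 165}}
+//  dstPts := []image.Point{{0, 0}, {320, 0}, {0, 240}, {320, 240}}
+//  m := gocv.GetPerspectiveTransform(srcPts, dstPts)
+//  defer m.Close()
+//  warped := gocv.NewMat()
+//  defer warped.Close()
+//  gocv.WarpPerspective(src, &warped, m, image.Pt(320, 240))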
+
+// DrawContours draws contours outlines or filled contours.
+//
+// For further details, please see:
+// https://docs.opencv.org/3.3.1/d6/d6e/group__imgproc__draw.html#ga746c0625f1781f1ffc9056259103edbc
+func DrawContours(img *Mat, contours [][]image.Point, contourIdx int, c color.RGBA, thickness int) {
+ cntrs := make([]C.struct_Points, len(contours))
+
+ for i, contour := range contours {
+ p := (*C.struct_Point)(C.malloc(C.size_t(C.sizeof_struct_Point * len(contour))))
+ defer C.free(unsafe.Pointer(p))
+
+ pa := getPoints(p, len(contour))
+
+ for j, point := range contour {
+ pa[j] = C.struct_Point{
+ x: C.int(point.X),
+ y: C.int(point.Y),
+ }
+ }
+
+ cntrs[i] = C.struct_Points{
+ points: (*C.Point)(p),
+ length: C.int(len(contour)),
+ }
+ }
+
+ cContours := C.struct_Contours{
+ contours: (*C.struct_Points)(&cntrs[0]),
+ length: C.int(len(contours)),
+ }
+
+ sColor := C.struct_Scalar{
+ val1: C.double(c.B),
+ val2: C.double(c.G),
+ val3: C.double(c.R),
+ val4: C.double(c.A),
+ }
+
+ C.DrawContours(img.p, cContours, C.int(contourIdx), sColor, C.int(thickness))
+}
+
+// Remap applies a generic geometrical transformation to an image.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#gab75ef31ce5cdfb5c44b6da5f3b908ea4
+func Remap(src Mat, dst, map1, map2 *Mat, interpolation InterpolationFlags, borderMode BorderType, borderValue color.RGBA) {
+ bv := C.struct_Scalar{
+ val1: C.double(borderValue.B),
+ val2: C.double(borderValue.G),
+ val3: C.double(borderValue.R),
+ val4: C.double(borderValue.A),
+ }
+ C.Remap(src.p, dst.p, map1.p, map2.p, C.int(interpolation), C.int(borderMode), bv)
+}
+
+// Filter2D applies an arbitrary linear filter to an image.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#ga27c049795ce870216ddfb366086b5a04
+func Filter2D(src Mat, dst *Mat, ddepth int, kernel Mat, anchor image.Point, delta float64, borderType BorderType) {
+ anchorP := C.struct_Point{
+ x: C.int(anchor.X),
+ y: C.int(anchor.Y),
+ }
+ C.Filter2D(src.p, dst.p, C.int(ddepth), kernel.p, anchorP, C.double(delta), C.int(borderType))
+}
+
+// SepFilter2D applies a separable linear filter to the image.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#ga910e29ff7d7b105057d1625a4bf6318d
+func SepFilter2D(src Mat, dst *Mat, ddepth int, kernelX, kernelY Mat, anchor image.Point, delta float64, borderType BorderType) {
+ anchorP := C.struct_Point{
+ x: C.int(anchor.X),
+ y: C.int(anchor.Y),
+ }
+ C.SepFilter2D(src.p, dst.p, C.int(ddepth), kernelX.p, kernelY.p, anchorP, C.double(delta), C.int(borderType))
+}
+
+// LogPolar remaps an image to semilog-polar coordinates space.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#gaec3a0b126a85b5ca2c667b16e0ae022d
+func LogPolar(src Mat, dst *Mat, center image.Point, m float64, flags InterpolationFlags) {
+ centerP := C.struct_Point{
+ x: C.int(center.X),
+ y: C.int(center.Y),
+ }
+ C.LogPolar(src.p, dst.p, centerP, C.double(m), C.int(flags))
+}
+
+// LinearPolar remaps an image to polar coordinates space.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/da/d54/group__imgproc__transform.html#gaa38a6884ac8b6e0b9bed47939b5362f3
+func LinearPolar(src Mat, dst *Mat, center image.Point, maxRadius float64, flags InterpolationFlags) {
+ centerP := C.struct_Point{
+ x: C.int(center.X),
+ y: C.int(center.Y),
+ }
+ C.LinearPolar(src.p, dst.p, centerP, C.double(maxRadius), C.int(flags))
+}
+
+// DistanceTypes are the distance types for the distance transform and M-estimators
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d7/d1b/group__imgproc__misc.html#gaa2bfbebbc5c320526897996aafa1d8eb
+type DistanceTypes int
+
+const (
+ DistUser DistanceTypes = 0
+ DistL1 = 1
+ DistL2 = 2
+ DistC = 3
+ DistL12 = 4
+ DistFair = 5
+ DistWelsch = 6
+ DistHuber = 7
+)
+
+// FitLine fits a line to a 2D or 3D point set.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#gaf849da1fdafa67ee84b1e9a23b93f91f
+func FitLine(pts []image.Point, line *Mat, distType DistanceTypes, param, reps, aeps float64) {
+ cPoints := toCPoints(pts)
+ C.FitLine(cPoints, line.p, C.int(distType), C.double(param), C.double(reps), C.double(aeps))
+}
+
+// CLAHE is a wrapper around the cv::CLAHE algorithm.
+type CLAHE struct {
+ // C.CLAHE
+ p unsafe.Pointer
+}
+
+// NewCLAHE returns a new CLAHE algorithm
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d6/db6/classcv_1_1CLAHE.html
+//
+func NewCLAHE() CLAHE {
+ return CLAHE{p: unsafe.Pointer(C.CLAHE_Create())}
+}
+
+// NewCLAHEWithParams returns a new CLAHE algorithm
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d6/db6/classcv_1_1CLAHE.html
+//
+func NewCLAHEWithParams(clipLimit float64, tileGridSize image.Point) CLAHE {
+ pSize := C.struct_Size{
+ width: C.int(tileGridSize.X),
+ height: C.int(tileGridSize.Y),
+ }
+ return CLAHE{p: unsafe.Pointer(C.CLAHE_CreateWithParams(C.double(clipLimit), pSize))}
+}
+
+// Close CLAHE.
+func (c *CLAHE) Close() error {
+ C.CLAHE_Close((C.CLAHE)(c.p))
+ c.p = nil
+ return nil
+}
+
+// Apply CLAHE.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d6/db6/classcv_1_1CLAHE.html#a4e92e0e427de21be8d1fae8dcd862c5e
+//
+func (c *CLAHE) Apply(src Mat, dst *Mat) {
+ C.CLAHE_Apply((C.CLAHE)(c.p), src.p, dst.p)
+}
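+
+// A usage sketch: CLAHE operates on a single-channel image (gray, assumed to
+// exist); this uses the default clip limit and tile grid.
+//
+//  clahe := gocv.NewCLAHE()
+//  defer clahe.Close()
+//  equalized := gocv.NewMat()
+//  defer equalized.Close()
+//  clahe.Apply(gray, &equalized)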
+
+func InvertAffineTransform(src Mat, dst *Mat) {
+ C.InvertAffineTransform(src.p, dst.p)
+}
diff --git a/vendor/gocv.io/x/gocv/imgproc.h b/vendor/gocv.io/x/gocv/imgproc.h
new file mode 100644
index 0000000..adacc9b
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/imgproc.h
@@ -0,0 +1,120 @@
+#ifndef _OPENCV3_IMGPROC_H_
+#define _OPENCV3_IMGPROC_H_
+
+#include <stdbool.h>
+
+#ifdef __cplusplus
+#include <opencv2/opencv.hpp>
+extern "C" {
+#endif
+
+#ifdef __cplusplus
+typedef cv::Ptr<cv::CLAHE>* CLAHE;
+#else
+typedef void* CLAHE;
+#endif
+
+#include "core.h"
+
+double ArcLength(Contour curve, bool is_closed);
+Contour ApproxPolyDP(Contour curve, double epsilon, bool closed);
+void CvtColor(Mat src, Mat dst, int code);
+void EqualizeHist(Mat src, Mat dst);
+void CalcHist(struct Mats mats, IntVector chans, Mat mask, Mat hist, IntVector sz, FloatVector rng, bool acc);
+void CalcBackProject(struct Mats mats, IntVector chans, Mat hist, Mat backProject, FloatVector rng, bool uniform);
+double CompareHist(Mat hist1, Mat hist2, int method);
+void ConvexHull(Contour points, Mat hull, bool clockwise, bool returnPoints);
+void ConvexityDefects(Contour points, Mat hull, Mat result);
+void BilateralFilter(Mat src, Mat dst, int d, double sc, double ss);
+void Blur(Mat src, Mat dst, Size ps);
+void BoxFilter(Mat src, Mat dst, int ddepth, Size ps);
+void SqBoxFilter(Mat src, Mat dst, int ddepth, Size ps);
+void Dilate(Mat src, Mat dst, Mat kernel);
+void DistanceTransform(Mat src, Mat dst, Mat labels, int distanceType, int maskSize, int labelType);
+void Erode(Mat src, Mat dst, Mat kernel);
+void MatchTemplate(Mat image, Mat templ, Mat result, int method, Mat mask);
+struct Moment Moments(Mat src, bool binaryImage);
+void PyrDown(Mat src, Mat dst, Size dstsize, int borderType);
+void PyrUp(Mat src, Mat dst, Size dstsize, int borderType);
+struct Rect BoundingRect(Contour con);
+void BoxPoints(RotatedRect rect, Mat boxPts);
+double ContourArea(Contour con);
+struct RotatedRect MinAreaRect(Points points);
+struct RotatedRect FitEllipse(Points points);
+void MinEnclosingCircle(Points points, Point2f* center, float* radius);
+struct Contours FindContours(Mat src, int mode, int method);
+int ConnectedComponents(Mat src, Mat dst, int connectivity, int ltype, int ccltype);
+int ConnectedComponentsWithStats(Mat src, Mat labels, Mat stats, Mat centroids, int connectivity, int ltype, int ccltype);
+
+void GaussianBlur(Mat src, Mat dst, Size ps, double sX, double sY, int bt);
+void Laplacian(Mat src, Mat dst, int dDepth, int kSize, double scale, double delta, int borderType);
+void Scharr(Mat src, Mat dst, int dDepth, int dx, int dy, double scale, double delta,
+ int borderType);
+Mat GetStructuringElement(int shape, Size ksize);
+Scalar MorphologyDefaultBorderValue();
+void MorphologyEx(Mat src, Mat dst, int op, Mat kernel);
+void MorphologyExWithParams(Mat src, Mat dst, int op, Mat kernel, Point pt, int iterations, int borderType);
+void MedianBlur(Mat src, Mat dst, int ksize);
+
+void Canny(Mat src, Mat edges, double t1, double t2);
+void CornerSubPix(Mat img, Mat corners, Size winSize, Size zeroZone, TermCriteria criteria);
+void GoodFeaturesToTrack(Mat img, Mat corners, int maxCorners, double quality, double minDist);
+void GrabCut(Mat img, Mat mask, Rect rect, Mat bgdModel, Mat fgdModel, int iterCount, int mode);
+void HoughCircles(Mat src, Mat circles, int method, double dp, double minDist);
+void HoughCirclesWithParams(Mat src, Mat circles, int method, double dp, double minDist,
+ double param1, double param2, int minRadius, int maxRadius);
+void HoughLines(Mat src, Mat lines, double rho, double theta, int threshold);
+void HoughLinesP(Mat src, Mat lines, double rho, double theta, int threshold);
+void HoughLinesPWithParams(Mat src, Mat lines, double rho, double theta, int threshold, double minLineLength, double maxLineGap);
+void HoughLinesPointSet(Mat points, Mat lines, int lines_max, int threshold,
+ double min_rho, double max_rho, double rho_step,
+ double min_theta, double max_theta, double theta_step);
+void Integral(Mat src, Mat sum, Mat sqsum, Mat tilted);
+void Threshold(Mat src, Mat dst, double thresh, double maxvalue, int typ);
+void AdaptiveThreshold(Mat src, Mat dst, double maxValue, int adaptiveTyp, int typ, int blockSize,
+ double c);
+
+void ArrowedLine(Mat img, Point pt1, Point pt2, Scalar color, int thickness);
+void Circle(Mat img, Point center, int radius, Scalar color, int thickness);
+void Ellipse(Mat img, Point center, Point axes, double angle, double
+ startAngle, double endAngle, Scalar color, int thickness);
+void Line(Mat img, Point pt1, Point pt2, Scalar color, int thickness);
+void Rectangle(Mat img, Rect rect, Scalar color, int thickness);
+void FillPoly(Mat img, Contours points, Scalar color);
+struct Size GetTextSize(const char* text, int fontFace, double fontScale, int thickness);
+void PutText(Mat img, const char* text, Point org, int fontFace, double fontScale,
+ Scalar color, int thickness);
+void PutTextWithParams(Mat img, const char* text, Point org, int fontFace, double fontScale,
+ Scalar color, int thickness, int lineType, bool bottomLeftOrigin);
+void Resize(Mat src, Mat dst, Size sz, double fx, double fy, int interp);
+void GetRectSubPix(Mat src, Size patchSize, Point center, Mat dst);
+Mat GetRotationMatrix2D(Point center, double angle, double scale);
+void WarpAffine(Mat src, Mat dst, Mat rot_mat, Size dsize);
+void WarpAffineWithParams(Mat src, Mat dst, Mat rot_mat, Size dsize, int flags, int borderMode,
+ Scalar borderValue);
+void WarpPerspective(Mat src, Mat dst, Mat m, Size dsize);
+void Watershed(Mat image, Mat markers);
+void ApplyColorMap(Mat src, Mat dst, int colormap);
+void ApplyCustomColorMap(Mat src, Mat dst, Mat colormap);
+Mat GetPerspectiveTransform(Contour src, Contour dst);
+void DrawContours(Mat src, Contours contours, int contourIdx, Scalar color, int thickness);
+void Sobel(Mat src, Mat dst, int ddepth, int dx, int dy, int ksize, double scale, double delta, int borderType);
+void SpatialGradient(Mat src, Mat dx, Mat dy, int ksize, int borderType);
+void Remap(Mat src, Mat dst, Mat map1, Mat map2, int interpolation, int borderMode, Scalar borderValue);
+void Filter2D(Mat src, Mat dst, int ddepth, Mat kernel, Point anchor, double delta, int borderType);
+void SepFilter2D(Mat src, Mat dst, int ddepth, Mat kernelX, Mat kernelY, Point anchor, double delta, int borderType);
+void LogPolar(Mat src, Mat dst, Point center, double m, int flags);
+void FitLine(Contour points, Mat line, int distType, double param, double reps, double aeps);
+void LinearPolar(Mat src, Mat dst, Point center, double maxRadius, int flags);
+bool ClipLine(Size imgSize, Point pt1, Point pt2);
+CLAHE CLAHE_Create();
+CLAHE CLAHE_CreateWithParams(double clipLimit, Size tileGridSize);
+void CLAHE_Close(CLAHE c);
+void CLAHE_Apply(CLAHE c, Mat src, Mat dst);
+void InvertAffineTransform(Mat src, Mat dst);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //_OPENCV3_IMGPROC_H_
diff --git a/vendor/gocv.io/x/gocv/imgproc_colorcodes.go b/vendor/gocv.io/x/gocv/imgproc_colorcodes.go
new file mode 100644
index 0000000..00f4315
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/imgproc_colorcodes.go
@@ -0,0 +1,351 @@
+package gocv
+
+// ColorConversionCode is a color conversion code used on Mat.
+//
+// For further details, please see:
+// http://docs.opencv.org/master/d7/d1b/group__imgproc__misc.html#ga4e0972be5de079fed4e3a10e24ef5ef0
+//
+type ColorConversionCode int
+
+const (
+ // ColorBGRToBGRA adds alpha channel to BGR image.
+ ColorBGRToBGRA ColorConversionCode = 0
+
+ // ColorBGRAToBGR removes alpha channel from BGR image.
+ ColorBGRAToBGR = 1
+
+ // ColorBGRToRGBA converts from BGR to RGB with alpha channel.
+ ColorBGRToRGBA = 2
+
+ // ColorRGBAToBGR converts from RGB with alpha to BGR color space.
+ ColorRGBAToBGR = 3
+
+ // ColorBGRToRGB converts from BGR to RGB without alpha channel.
+ ColorBGRToRGB = 4
+
+ // ColorBGRAToRGBA converts from BGR with alpha channel
+ // to RGB with alpha channel.
+ ColorBGRAToRGBA = 5
+
+ // ColorBGRToGray converts from BGR to grayscale.
+ ColorBGRToGray = 6
+
+ // ColorRGBToGray converts from RGB to grayscale.
+ ColorRGBToGray = 7
+
+ // ColorGrayToBGR converts from grayscale to BGR.
+ ColorGrayToBGR = 8
+
+ // ColorGrayToBGRA converts from grayscale to BGR with alpha channel.
+ ColorGrayToBGRA = 9
+
+ // ColorBGRAToGray converts from BGR with alpha channel to grayscale.
+ ColorBGRAToGray = 10
+
+ // ColorRGBAToGray converts from RGB with alpha channel to grayscale.
+ ColorRGBAToGray = 11
+
+ // ColorBGRToBGR565 converts from BGR to BGR565 (16-bit images).
+ ColorBGRToBGR565 = 12
+
+ // ColorRGBToBGR565 converts from RGB to BGR565 (16-bit images).
+ ColorRGBToBGR565 = 13
+
+ // ColorBGR565ToBGR converts from BGR565 (16-bit images) to BGR.
+ ColorBGR565ToBGR = 14
+
+ // ColorBGR565ToRGB converts from BGR565 (16-bit images) to RGB.
+ ColorBGR565ToRGB = 15
+
+ // ColorBGRAToBGR565 converts from BGRA (with alpha channel)
+ // to BGR565 (16-bit images).
+ ColorBGRAToBGR565 = 16
+
+ // ColorRGBAToBGR565 converts from RGBA (with alpha channel)
+ // to BGR565 (16-bit images).
+ ColorRGBAToBGR565 = 17
+
+ // ColorBGR565ToBGRA converts from BGR565 (16-bit images)
+ // to BGRA (with alpha channel).
+ ColorBGR565ToBGRA = 18
+
+ // ColorBGR565ToRGBA converts from BGR565 (16-bit images)
+ // to RGBA (with alpha channel).
+ ColorBGR565ToRGBA = 19
+
+ // ColorGrayToBGR565 converts from grayscale
+ // to BGR565 (16-bit images).
+ ColorGrayToBGR565 = 20
+
+ // ColorBGR565ToGray converts from BGR565 (16-bit images)
+ // to grayscale.
+ ColorBGR565ToGray = 21
+
+ // ColorBGRToBGR555 converts from BGR to BGR555 (16-bit images).
+ ColorBGRToBGR555 = 22
+
+ // ColorRGBToBGR555 converts from RGB to BGR555 (16-bit images).
+ ColorRGBToBGR555 = 23
+
+ // ColorBGR555ToBGR converts from BGR555 (16-bit images) to BGR.
+ ColorBGR555ToBGR = 24
+
+ // ColorBGR555ToRGB converts from BGR555 (16-bit images) to RGB.
+ ColorBGR555ToRGB = 25
+
+ // ColorBGRAToBGR555 converts from BGRA (with alpha channel)
+ // to BGR555 (16-bit images).
+ ColorBGRAToBGR555 = 26
+
+ // ColorRGBAToBGR555 converts from RGBA (with alpha channel)
+ // to BGR555 (16-bit images).
+ ColorRGBAToBGR555 = 27
+
+ // ColorBGR555ToBGRA converts from BGR555 (16-bit images)
+ // to BGRA (with alpha channel).
+ ColorBGR555ToBGRA = 28
+
+ // ColorBGR555ToRGBA converts from BGR555 (16-bit images)
+ // to RGBA (with alpha channel).
+ ColorBGR555ToRGBA = 29
+
+ // ColorGrayToBGR555 converts from grayscale to BGR555 (16-bit images).
+ ColorGrayToBGR555 = 30
+
+ // ColorBGR555ToGRAY converts from BGR555 (16-bit images) to grayscale.
+ ColorBGR555ToGRAY = 31
+
+ // ColorBGRToXYZ converts from BGR to CIE XYZ.
+ ColorBGRToXYZ = 32
+
+ // ColorRGBToXYZ converts from RGB to CIE XYZ.
+ ColorRGBToXYZ = 33
+
+ // ColorXYZToBGR converts from CIE XYZ to BGR.
+ ColorXYZToBGR = 34
+
+ // ColorXYZToRGB converts from CIE XYZ to RGB.
+ ColorXYZToRGB = 35
+
+ // ColorBGRToYCrCb converts from BGR to luma-chroma (aka YCC).
+ ColorBGRToYCrCb = 36
+
+ // ColorRGBToYCrCb converts from RGB to luma-chroma (aka YCC).
+ ColorRGBToYCrCb = 37
+
+ // ColorYCrCbToBGR converts from luma-chroma (aka YCC) to BGR.
+ ColorYCrCbToBGR = 38
+
+ // ColorYCrCbToRGB converts from luma-chroma (aka YCC) to RGB.
+ ColorYCrCbToRGB = 39
+
+ // ColorBGRToHSV converts from BGR to HSV (hue saturation value).
+ ColorBGRToHSV = 40
+
+ // ColorRGBToHSV converts from RGB to HSV (hue saturation value).
+ ColorRGBToHSV = 41
+
+ // ColorBGRToLab converts from BGR to CIE Lab.
+ ColorBGRToLab = 44
+
+ // ColorRGBToLab converts from RGB to CIE Lab.
+ ColorRGBToLab = 45
+
+ // ColorBGRToLuv converts from BGR to CIE Luv.
+ ColorBGRToLuv = 50
+
+ // ColorRGBToLuv converts from RGB to CIE Luv.
+ ColorRGBToLuv = 51
+
+ // ColorBGRToHLS converts from BGR to HLS (hue lightness saturation).
+ ColorBGRToHLS = 52
+
+ // ColorRGBToHLS converts from RGB to HLS (hue lightness saturation).
+ ColorRGBToHLS = 53
+
+ // ColorHSVToBGR converts from HSV (hue saturation value) to BGR.
+ ColorHSVToBGR = 54
+
+ // ColorHSVToRGB converts from HSV (hue saturation value) to RGB.
+ ColorHSVToRGB = 55
+
+ // ColorLabToBGR converts from CIE Lab to BGR.
+ ColorLabToBGR = 56
+
+ // ColorLabToRGB converts from CIE Lab to RGB.
+ ColorLabToRGB = 57
+
+ // ColorLuvToBGR converts from CIE Luv to BGR.
+ ColorLuvToBGR = 58
+
+ // ColorLuvToRGB converts from CIE Luv to RGB.
+ ColorLuvToRGB = 59
+
+ // ColorHLSToBGR converts from HLS (hue lightness saturation) to BGR.
+ ColorHLSToBGR = 60
+
+ // ColorHLSToRGB converts from HLS (hue lightness saturation) to RGB.
+ ColorHLSToRGB = 61
+
+ // ColorBGRToHSVFull converts from BGR to HSV (hue saturation value) full.
+ ColorBGRToHSVFull = 66
+
+ // ColorRGBToHSVFull converts from RGB to HSV (hue saturation value) full.
+ ColorRGBToHSVFull = 67
+
+ // ColorBGRToHLSFull converts from BGR to HLS (hue lightness saturation) full.
+ ColorBGRToHLSFull = 68
+
+ // ColorRGBToHLSFull converts from RGB to HLS (hue lightness saturation) full.
+ ColorRGBToHLSFull = 69
+
+ // ColorHSVToBGRFull converts from HSV (hue saturation value) to BGR full.
+ ColorHSVToBGRFull = 70
+
+ // ColorHSVToRGBFull converts from HSV (hue saturation value) to RGB full.
+ ColorHSVToRGBFull = 71
+
+ // ColorHLSToBGRFull converts from HLS (hue lightness saturation) to BGR full.
+ ColorHLSToBGRFull = 72
+
+ // ColorHLSToRGBFull converts from HLS (hue lightness saturation) to RGB full.
+ ColorHLSToRGBFull = 73
+
+ // ColorLBGRToLab converts from LBGR to CIE Lab.
+ ColorLBGRToLab = 74
+
+ // ColorLRGBToLab converts from LRGB to CIE Lab.
+ ColorLRGBToLab = 75
+
+ // ColorLBGRToLuv converts from LBGR to CIE Luv.
+ ColorLBGRToLuv = 76
+
+ // ColorLRGBToLuv converts from LRGB to CIE Luv.
+ ColorLRGBToLuv = 77
+
+ // ColorLabToLBGR converts from CIE Lab to LBGR.
+ ColorLabToLBGR = 78
+
+ // ColorLabToLRGB converts from CIE Lab to LRGB.
+ ColorLabToLRGB = 79
+
+ // ColorLuvToLBGR converts from CIE Luv to LBGR.
+ ColorLuvToLBGR = 80
+
+ // ColorLuvToLRGB converts from CIE Luv to LRGB.
+ ColorLuvToLRGB = 81
+
+ // ColorBGRToYUV converts from BGR to YUV.
+ ColorBGRToYUV = 82
+
+ // ColorRGBToYUV converts from RGB to YUV.
+ ColorRGBToYUV = 83
+
+ // ColorYUVToBGR converts from YUV to BGR.
+ ColorYUVToBGR = 84
+
+ // ColorYUVToRGB converts from YUV to RGB.
+ ColorYUVToRGB = 85
+
+ // ColorYUVToRGBNV12 converts from YUV 4:2:0 to RGB NV12.
+ ColorYUVToRGBNV12 = 90
+
+ // ColorYUVToBGRNV12 converts from YUV 4:2:0 to BGR NV12.
+ ColorYUVToBGRNV12 = 91
+
+ // ColorYUVToRGBNV21 converts from YUV 4:2:0 to RGB NV21.
+ ColorYUVToRGBNV21 = 92
+
+ // ColorYUVToBGRNV21 converts from YUV 4:2:0 to BGR NV21.
+ ColorYUVToBGRNV21 = 93
+
+ // ColorYUVToRGBANV12 converts from YUV 4:2:0 to RGBA NV12.
+ ColorYUVToRGBANV12 = 94
+
+ // ColorYUVToBGRANV12 converts from YUV 4:2:0 to BGRA NV12.
+ ColorYUVToBGRANV12 = 95
+
+ // ColorYUVToRGBANV21 converts from YUV 4:2:0 to RGBA NV21.
+ ColorYUVToRGBANV21 = 96
+
+ // ColorYUVToBGRANV21 converts from YUV 4:2:0 to BGRA NV21.
+ ColorYUVToBGRANV21 = 97
+
+ ColorYUVToRGBYV12 = 98
+ ColorYUVToBGRYV12 = 99
+ ColorYUVToRGBIYUV = 100
+ ColorYUVToBGRIYUV = 101
+
+ ColorYUVToRGBAYV12 = 102
+ ColorYUVToBGRAYV12 = 103
+ ColorYUVToRGBAIYUV = 104
+ ColorYUVToBGRAIYUV = 105
+
+ ColorYUVToGRAY420 = 106
+
+ // YUV 4:2:2 family to RGB
+ ColorYUVToRGBUYVY = 107
+ ColorYUVToBGRUYVY = 108
+
+ ColorYUVToRGBAUYVY = 111
+ ColorYUVToBGRAUYVY = 112
+
+ ColorYUVToRGBYUY2 = 115
+ ColorYUVToBGRYUY2 = 116
+ ColorYUVToRGBYVYU = 117
+ ColorYUVToBGRYVYU = 118
+
+ ColorYUVToRGBAYUY2 = 119
+ ColorYUVToBGRAYUY2 = 120
+ ColorYUVToRGBAYVYU = 121
+ ColorYUVToBGRAYVYU = 122
+
+ ColorYUVToGRAYUYVY = 123
+ ColorYUVToGRAYYUY2 = 124
+
+ // alpha premultiplication
+ ColorRGBATomRGBA = 125
+ ColormRGBAToRGBA = 126
+
+ // RGB to YUV 4:2:0 family
+ ColorRGBToYUVI420 = 127
+ ColorBGRToYUVI420 = 128
+
+ ColorRGBAToYUVI420 = 129
+ ColorBGRAToYUVI420 = 130
+ ColorRGBToYUVYV12 = 131
+ ColorBGRToYUVYV12 = 132
+ ColorRGBAToYUVYV12 = 133
+ ColorBGRAToYUVYV12 = 134
+
+ // Demosaicing
+ ColorBayerBGToBGR = 46
+ ColorBayerGBToBGR = 47
+ ColorBayerRGToBGR = 48
+ ColorBayerGRToBGR = 49
+
+ ColorBayerBGToGRAY = 86
+ ColorBayerGBToGRAY = 87
+ ColorBayerRGToGRAY = 88
+ ColorBayerGRToGRAY = 89
+
+ // Demosaicing using Variable Number of Gradients
+ ColorBayerBGToBGRVNG = 62
+ ColorBayerGBToBGRVNG = 63
+ ColorBayerRGToBGRVNG = 64
+ ColorBayerGRToBGRVNG = 65
+
+ // Edge-Aware Demosaicing
+ ColorBayerBGToBGREA = 135
+ ColorBayerGBToBGREA = 136
+ ColorBayerRGToBGREA = 137
+ ColorBayerGRToBGREA = 138
+
+ // Demosaicing with alpha channel
+ ColorBayerBGToBGRA = 139
+ ColorBayerGBToBGRA = 140
+ ColorBayerRGToBGRA = 141
+ ColorBayerGRToBGRA = 142
+
+ ColorCOLORCVTMAX = 143
+)
diff --git a/vendor/gocv.io/x/gocv/imgproc_colorcodes_string.go b/vendor/gocv.io/x/gocv/imgproc_colorcodes_string.go
new file mode 100644
index 0000000..ac226ef
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/imgproc_colorcodes_string.go
@@ -0,0 +1,303 @@
+package gocv
+
+func (c ColorConversionCode) String() string {
+ switch c {
+ case ColorBGRToBGRA:
+ return "color-bgr-to-bgra"
+ case ColorBGRAToBGR:
+ return "color-bgra-to-bgr"
+ case ColorBGRToRGBA:
+ return "color-bgr-to-rgba"
+ case ColorRGBAToBGR:
+ return "color-rgba-to-bgr"
+ case ColorBGRToRGB:
+ return "color-bgr-to-rgb"
+ case ColorBGRAToRGBA:
+ return "color-bgra-to-rgba"
+ case ColorBGRToGray:
+ return "color-bgr-to-gray"
+ case ColorRGBToGray:
+ return "color-rgb-to-gray"
+ case ColorGrayToBGR:
+ return "color-gray-to-bgr"
+ case ColorGrayToBGRA:
+ return "color-gray-to-bgra"
+ case ColorBGRAToGray:
+ return "color-bgra-to-gray"
+ case ColorRGBAToGray:
+ return "color-rgba-to-gray"
+ case ColorBGRToBGR565:
+ return "color-bgr-to-bgr565"
+ case ColorRGBToBGR565:
+ return "color-rgb-to-bgr565"
+ case ColorBGR565ToBGR:
+ return "color-bgr565-to-bgr"
+ case ColorBGR565ToRGB:
+ return "color-bgr565-to-rgb"
+ case ColorBGRAToBGR565:
+ return "color-bgra-to-bgr565"
+ case ColorRGBAToBGR565:
+ return "color-rgba-to-bgr565"
+ case ColorBGR565ToBGRA:
+ return "color-bgr565-to-bgra"
+ case ColorBGR565ToRGBA:
+ return "color-bgr565-to-rgba"
+ case ColorGrayToBGR565:
+ return "color-gray-to-bgr565"
+ case ColorBGR565ToGray:
+ return "color-bgr565-to-gray"
+ case ColorBGRToBGR555:
+ return "color-bgr-to-bgr555"
+ case ColorRGBToBGR555:
+ return "color-rgb-to-bgr555"
+ case ColorBGR555ToBGR:
+ return "color-bgr555-to-bgr"
+ case ColorBGRAToBGR555:
+ return "color-bgra-to-bgr555"
+ case ColorRGBAToBGR555:
+ return "color-rgba-to-bgr555"
+ case ColorBGR555ToBGRA:
+ return "color-bgr555-to-bgra"
+ case ColorBGR555ToRGBA:
+ return "color-bgr555-to-rgba"
+ case ColorGrayToBGR555:
+ return "color-gray-to-bgr555"
+ case ColorBGR555ToGRAY:
+ return "color-bgr555-to-gray"
+ case ColorBGRToXYZ:
+ return "color-bgr-to-xyz"
+ case ColorRGBToXYZ:
+ return "color-rgb-to-xyz"
+ case ColorXYZToBGR:
+ return "color-xyz-to-bgr"
+ case ColorXYZToRGB:
+ return "color-xyz-to-rgb"
+ case ColorBGRToYCrCb:
+ return "color-bgr-to-ycrcb"
+ case ColorRGBToYCrCb:
+ return "color-rgb-to-ycrcb"
+ case ColorYCrCbToBGR:
+ return "color-ycrcb-to-bgr"
+ case ColorYCrCbToRGB:
+ return "color-ycrcb-to-rgb"
+ case ColorBGRToHSV:
+ return "color-bgr-to-hsv"
+ case ColorRGBToHSV:
+ return "color-rgb-to-hsv"
+ case ColorBGRToLab:
+ return "color-bgr-to-lab"
+ case ColorRGBToLab:
+ return "color-rgb-to-lab"
+ case ColorBGRToLuv:
+ return "color-bgr-to-luv"
+ case ColorRGBToLuv:
+ return "color-rgb-to-luv"
+ case ColorBGRToHLS:
+ return "color-bgr-to-hls"
+ case ColorRGBToHLS:
+ return "color-rgb-to-hls"
+ case ColorHSVToBGR:
+ return "color-hsv-to-bgr"
+ case ColorHSVToRGB:
+ return "color-hsv-to-rgb"
+ case ColorLabToBGR:
+ return "color-lab-to-bgr"
+ case ColorLabToRGB:
+ return "color-lab-to-rgb"
+ case ColorLuvToBGR:
+ return "color-luv-to-bgr"
+ case ColorLuvToRGB:
+ return "color-luv-to-rgb"
+ case ColorHLSToBGR:
+ return "color-hls-to-bgr"
+ case ColorHLSToRGB:
+ return "color-hls-to-rgb"
+ case ColorBGRToHSVFull:
+ return "color-bgr-to-hsv-full"
+ case ColorRGBToHSVFull:
+ return "color-rgb-to-hsv-full"
+ case ColorBGRToHLSFull:
+ return "color-bgr-to-hls-full"
+ case ColorRGBToHLSFull:
+ return "color-rgb-to-hls-full"
+ case ColorHSVToBGRFull:
+ return "color-hsv-to-bgr-full"
+ case ColorHSVToRGBFull:
+ return "color-hsv-to-rgb-full"
+ case ColorHLSToBGRFull:
+ return "color-hls-to-bgr-full"
+ case ColorHLSToRGBFull:
+ return "color-hls-to-rgb-full"
+ case ColorLBGRToLab:
+ return "color-lbgr-to-lab"
+ case ColorLRGBToLab:
+ return "color-lrgb-to-lab"
+ case ColorLBGRToLuv:
+ return "color-lbgr-to-luv"
+ case ColorLRGBToLuv:
+ return "color-lrgb-to-luv"
+ case ColorLabToLBGR:
+ return "color-lab-to-lbgr"
+ case ColorLabToLRGB:
+ return "color-lab-to-lrgb"
+ case ColorLuvToLBGR:
+ return "color-luv-to-lbgr"
+ case ColorLuvToLRGB:
+ return "color-luv-to-lrgb"
+ case ColorBGRToYUV:
+ return "color-bgr-to-yuv"
+ case ColorRGBToYUV:
+ return "color-rgb-to-yuv"
+ case ColorYUVToBGR:
+ return "color-yuv-to-bgr"
+ case ColorYUVToRGB:
+ return "color-yuv-to-rgb"
+
+ case ColorYUVToRGBNV12:
+ return "color-yuv-to-rgbnv12"
+ case ColorYUVToBGRNV12:
+ return "color-yuv-to-bgrnv12"
+ case ColorYUVToRGBNV21:
+ return "color-yuv-to-rgbnv21"
+ case ColorYUVToBGRNV21:
+ return "color-yuv-to-bgrnv21"
+
+ case ColorYUVToRGBANV12:
+ return "color-yuv-to-rgbanv12"
+ case ColorYUVToBGRANV12:
+ return "color-yuv-to-bgranv12"
+ case ColorYUVToRGBANV21:
+ return "color-yuv-to-rgbanv21"
+ case ColorYUVToBGRANV21:
+ return "color-yuv-to-bgranv21"
+
+ case ColorYUVToRGBYV12:
+ return "color-yuv-to-rgbyv12"
+ case ColorYUVToBGRYV12:
+ return "color-yuv-to-bgryv12"
+
+ case ColorYUVToRGBIYUV:
+ return "color-yuv-to-rgbiyuv"
+ case ColorYUVToBGRIYUV:
+ return "color-yuv-to-bgriyuv"
+
+ case ColorYUVToRGBAYV12:
+ return "color-yuv-to-rgbayv12"
+ case ColorYUVToBGRAYV12:
+ return "color-yuv-to-bgrayv12"
+ case ColorYUVToRGBAIYUV:
+ return "color-yuv-to-rgbaiyuv"
+ case ColorYUVToBGRAIYUV:
+ return "color-yuv-to-bgraiyuv"
+
+ case ColorYUVToGRAY420:
+ return "color-yuv-to-gray420"
+
+ case ColorYUVToRGBUYVY:
+ return "color-yuv-to-rgbuyvy"
+ case ColorYUVToBGRUYVY:
+ return "color-yuv-to-bgruyvy"
+
+ case ColorYUVToRGBAUYVY:
+ return "color-yuv-to-rgbauyvy"
+ case ColorYUVToBGRAUYVY:
+ return "color-yuv-to-bgrauyvy"
+
+ case ColorYUVToRGBYUY2:
+ return "color-yuv-to-rgbyuy2"
+ case ColorYUVToBGRYUY2:
+ return "color-yuv-to-bgryuy2"
+
+ case ColorYUVToRGBYVYU:
+ return "color-yuv-to-rgbyvyu"
+ case ColorYUVToBGRYVYU:
+ return "color-yuv-to-bgryvyu"
+
+ case ColorYUVToRGBAYUY2:
+ return "color-yuv-to-rgbayuy2"
+ case ColorYUVToBGRAYUY2:
+ return "color-yuv-to-bgrayuy2"
+
+ case ColorYUVToRGBAYVYU:
+ return "color-yuv-to-rgbayvyu"
+ case ColorYUVToBGRAYVYU:
+ return "color-yuv-to-bgrayvyu"
+
+ case ColorYUVToGRAYUYVY:
+ return "color-yuv-to-grayuyvy"
+ case ColorYUVToGRAYYUY2:
+ return "color-yuv-to-grayyuy2"
+
+ case ColorRGBATomRGBA:
+ return "color-rgba-to-mrgba"
+ case ColormRGBAToRGBA:
+ return "color-mrgba-to-rgba"
+
+ case ColorRGBToYUVI420:
+ return "color-rgb-to-yuvi420"
+ case ColorBGRToYUVI420:
+ return "color-bgr-to-yuvi420"
+
+ case ColorRGBAToYUVI420:
+ return "color-rgba-to-yuvi420"
+
+ case ColorBGRAToYUVI420:
+ return "color-bgra-to-yuvi420"
+ case ColorRGBToYUVYV12:
+ return "color-rgb-to-yuvyv12"
+ case ColorBGRToYUVYV12:
+ return "color-bgr-to-yuvyv12"
+ case ColorRGBAToYUVYV12:
+ return "color-rgba-to-yuvyv12"
+ case ColorBGRAToYUVYV12:
+ return "color-bgra-to-yuvyv12"
+
+ case ColorBayerBGToBGR:
+ return "color-bayer-bgt-to-bgr"
+ case ColorBayerGBToBGR:
+ return "color-bayer-gbt-to-bgr"
+ case ColorBayerRGToBGR:
+ return "color-bayer-rgt-to-bgr"
+ case ColorBayerGRToBGR:
+ return "color-bayer-grt-to-bgr"
+
+ case ColorBayerBGToGRAY:
+ return "color-bayer-bgt-to-gray"
+ case ColorBayerGBToGRAY:
+ return "color-bayer-gbt-to-gray"
+ case ColorBayerRGToGRAY:
+ return "color-bayer-rgt-to-gray"
+ case ColorBayerGRToGRAY:
+ return "color-bayer-grt-to-gray"
+
+ case ColorBayerBGToBGRVNG:
+ return "color-bayer-bgt-to-bgrvng"
+ case ColorBayerGBToBGRVNG:
+ return "color-bayer-gbt-to-bgrvng"
+ case ColorBayerRGToBGRVNG:
+ return "color-bayer-rgt-to-bgrvng"
+ case ColorBayerGRToBGRVNG:
+ return "color-bayer-grt-to-bgrvng"
+
+ case ColorBayerBGToBGREA:
+ return "color-bayer-bgt-to-bgrea"
+ case ColorBayerGBToBGREA:
+ return "color-bayer-gbt-to-bgrea"
+ case ColorBayerRGToBGREA:
+ return "color-bayer-rgt-to-bgrea"
+ case ColorBayerGRToBGREA:
+ return "color-bayer-grt-to-bgrea"
+
+ case ColorBayerBGToBGRA:
+ return "color-bayer-bgt-to-bgra"
+ case ColorBayerGBToBGRA:
+ return "color-bayer-gbt-to-bgra"
+ case ColorBayerRGToBGRA:
+ return "color-bayer-rgt-to-bgra"
+ case ColorBayerGRToBGRA:
+ return "color-bayer-grt-to-bgra"
+ case ColorCOLORCVTMAX:
+ return "color-color-cvt-max"
+ }
+ return ""
+}
diff --git a/vendor/gocv.io/x/gocv/imgproc_string.go b/vendor/gocv.io/x/gocv/imgproc_string.go
new file mode 100644
index 0000000..589c7cb
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/imgproc_string.go
@@ -0,0 +1,333 @@
+package gocv
+
+func (c HistCompMethod) String() string {
+ switch c {
+ case HistCmpCorrel:
+ return "hist-cmp-correl"
+ case HistCmpChiSqr:
+ return "hist-cmp-chi-sqr"
+ case HistCmpIntersect:
+ return "hist-cmp-intersect"
+ case HistCmpBhattacharya:
+ return "hist-cmp-bhattacharya"
+ case HistCmpChiSqrAlt:
+ return "hist-cmp-chi-sqr-alt"
+ case HistCmpKlDiv:
+ return "hist-cmp-kl-div"
+ }
+ return ""
+}
+
+func (c DistanceTransformLabelTypes) String() string {
+ switch c {
+ case DistanceLabelCComp:
+ return "distance-label-ccomp"
+ }
+ return ""
+}
+
+func (c DistanceTransformMasks) String() string {
+ switch c {
+ case DistanceMask3:
+ return "distance-mask3"
+ }
+ return ""
+}
+
+func (c RetrievalMode) String() string {
+ switch c {
+ case RetrievalExternal:
+ return "retrieval-external"
+ case RetrievalList:
+ return "retrieval-list"
+ case RetrievalCComp:
+ return "retrieval-ccomp"
+ case RetrievalTree:
+ return "retrieval-tree"
+ case RetrievalFloodfill:
+ return "retrieval-floodfill"
+ }
+ return ""
+}
+
+func (c ContourApproximationMode) String() string {
+ switch c {
+ case ChainApproxNone:
+ return "chain-approx-none"
+ case ChainApproxSimple:
+ return "chain-approx-simple"
+ case ChainApproxTC89L1:
+ return "chain-approx-tc89l1"
+ case ChainApproxTC89KCOS:
+ return "chain-approx-tc89kcos"
+ }
+ return ""
+}
+
+func (c ConnectedComponentsAlgorithmType) String() string {
+ switch c {
+ case CCL_WU:
+ return "ccl-wu"
+ case CCL_DEFAULT:
+ return "ccl-default"
+ case CCL_GRANA:
+ return "ccl-grana"
+ }
+ return ""
+}
+
+func (c ConnectedComponentsTypes) String() string {
+ switch c {
+ case CC_STAT_LEFT:
+ return "cc-stat-left"
+ case CC_STAT_TOP:
+ return "cc-stat-top"
+ case CC_STAT_WIDTH:
+ return "cc-stat-width"
+ case CC_STAT_AREA:
+ return "cc-stat-area"
+ case CC_STAT_MAX:
+ return "cc-stat-max"
+ case CC_STAT_HEIGHT:
+ return "cc-stat-height"
+ }
+ return ""
+}
+
+func (c TemplateMatchMode) String() string {
+ switch c {
+ case TmSqdiff:
+ return "tm-sq-diff"
+ case TmSqdiffNormed:
+ return "tm-sq-diff-normed"
+ case TmCcorr:
+ return "tm-ccorr"
+ case TmCcorrNormed:
+ return "tm-ccorr-normed"
+ case TmCcoeff:
+ return "tm-ccoeff"
+ case TmCcoeffNormed:
+ return "tm-ccoeff-normed"
+ }
+ return ""
+}
+
+func (c MorphShape) String() string {
+ switch c {
+ case MorphRect:
+ return "morph-rect"
+ case MorphCross:
+ return "morph-cross"
+ case MorphEllipse:
+ return "morph-ellispe"
+ }
+ return ""
+}
+
+func (c MorphType) String() string {
+ switch c {
+ case MorphErode:
+ return "morph-erode"
+ case MorphDilate:
+ return "morph-dilate"
+ case MorphOpen:
+ return "morph-open"
+ case MorphClose:
+ return "morph-close"
+ case MorphGradient:
+ return "morph-gradient"
+ case MorphTophat:
+ return "morph-tophat"
+ case MorphBlackhat:
+ return "morph-blackhat"
+ case MorphHitmiss:
+ return "morph-hitmiss"
+ }
+ return ""
+}
+
+func (c BorderType) String() string {
+ switch c {
+ case BorderConstant:
+ return "border-constant"
+ case BorderReplicate:
+ return "border-replicate"
+ case BorderReflect:
+ return "border-reflect"
+ case BorderWrap:
+ return "border-wrap"
+ case BorderTransparent:
+ return "border-transparent"
+ case BorderDefault:
+ return "border-default"
+ case BorderIsolated:
+ return "border-isolated"
+ }
+ return ""
+}
+
+func (c GrabCutMode) String() string {
+ switch c {
+ case GCInitWithRect:
+ return "gc-init-with-rect"
+ case GCInitWithMask:
+ return "gc-init-with-mask"
+ case GCEval:
+ return "gc-eval"
+ case GCEvalFreezeModel:
+ return "gc-eval-freeze-model"
+ }
+ return ""
+}
+
+func (c HoughMode) String() string {
+ switch c {
+ case HoughStandard:
+ return "hough-standard"
+ case HoughProbabilistic:
+ return "hough-probabilistic"
+ case HoughMultiScale:
+ return "hough-multi-scale"
+ case HoughGradient:
+ return "hough-gradient"
+ }
+ return ""
+}
+
+func (c ThresholdType) String() string {
+ switch c {
+ case ThresholdBinary:
+ return "threshold-binary"
+ case ThresholdBinaryInv:
+ return "threshold-binary-inv"
+ case ThresholdTrunc:
+ return "threshold-trunc"
+ case ThresholdToZero:
+ return "threshold-to-zero"
+ case ThresholdToZeroInv:
+ return "threshold-to-zero-inv"
+ case ThresholdMask:
+ return "threshold-mask"
+ case ThresholdOtsu:
+ return "threshold-otsu"
+ case ThresholdTriangle:
+ return "threshold-triangle"
+ }
+ return ""
+}
+
+func (c AdaptiveThresholdType) String() string {
+ switch c {
+ case AdaptiveThresholdMean:
+ return "adaptative-threshold-mean"
+ case AdaptiveThresholdGaussian:
+ return "adaptative-threshold-gaussian"
+ }
+ return ""
+}
+
+func (c HersheyFont) String() string {
+ switch c {
+ case FontHersheySimplex:
+ return "font-hershey-simplex"
+ case FontHersheyPlain:
+ return "font-hershey-plain"
+ case FontHersheyDuplex:
+ return "font-hershey-duplex"
+ case FontHersheyComplex:
+ return "font-hershey-complex"
+ case FontHersheyTriplex:
+ return "font-hershey-triplex"
+ case FontHersheyComplexSmall:
+ return "font-hershey-complex-small"
+ case FontHersheyScriptSimplex:
+ return "font-hershey-script-simplex"
+ case FontHersheyScriptComplex:
+ return "font-hershey-scipt-complex"
+ case FontItalic:
+ return "font-italic"
+ }
+ return ""
+}
+
+func (c LineType) String() string {
+ switch c {
+ case Filled:
+ return "filled"
+ case Line4:
+ return "line4"
+ case Line8:
+ return "line8"
+ case LineAA:
+ return "line-aa"
+ }
+ return ""
+}
+
+func (c InterpolationFlags) String() string {
+ switch c {
+ case InterpolationNearestNeighbor:
+ return "interpolation-nearest-neighbor"
+ case InterpolationLinear:
+ return "interpolation-linear"
+ case InterpolationCubic:
+ return "interpolation-cubic"
+ case InterpolationArea:
+ return "interpolation-area"
+ case InterpolationLanczos4:
+ return "interpolation-lanczos4"
+ case InterpolationMax:
+ return "interpolation-max"
+ }
+ return ""
+}
+
+func (c ColormapTypes) String() string {
+ switch c {
+ case ColormapAutumn:
+ return "colormap-autumn"
+ case ColormapBone:
+ return "colormap-bone"
+ case ColormapJet:
+ return "colormap-jet"
+ case ColormapWinter:
+ return "colormap-winter"
+ case ColormapRainbow:
+ return "colormap-rainbow"
+ case ColormapOcean:
+ return "colormap-ocean"
+ case ColormapSummer:
+ return "colormap-summer"
+ case ColormapSpring:
+ return "colormap-spring"
+ case ColormapCool:
+ return "colormap-cool"
+ case ColormapHsv:
+ return "colormap-hsv"
+ case ColormapPink:
+ return "colormap-pink"
+ case ColormapParula:
+ return "colormap-parula"
+ }
+ return ""
+}
+
+func (c DistanceTypes) String() string {
+ switch c {
+ case DistUser:
+ return "dist-user"
+ case DistL1:
+ return "dist-l1"
+ case DistL2:
+ return "dist-l2"
+ case DistL12:
+ return "dist-l12"
+ case DistFair:
+ return "dist-fair"
+ case DistWelsch:
+ return "dist-welsch"
+ case DistHuber:
+ return "dist-huber"
+ }
+ return ""
+}
diff --git a/vendor/gocv.io/x/gocv/mat_noprofile.go b/vendor/gocv.io/x/gocv/mat_noprofile.go
new file mode 100644
index 0000000..0ae8a16
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/mat_noprofile.go
@@ -0,0 +1,21 @@
+// +build !matprofile
+
+package gocv
+
+/*
+#include <stdlib.h>
+#include "core.h"
+*/
+import "C"
+
+// newMat returns a new Mat from a C Mat
+func newMat(p C.Mat) Mat {
+ return Mat{p: p}
+}
+
+// Close the Mat object.
+func (m *Mat) Close() error {
+ C.Mat_Close(m.p)
+ m.p = nil
+ return nil
+}
diff --git a/vendor/gocv.io/x/gocv/mat_profile.go b/vendor/gocv.io/x/gocv/mat_profile.go
new file mode 100644
index 0000000..b921cab
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/mat_profile.go
@@ -0,0 +1,74 @@
+// +build matprofile
+
+package gocv
+
+/*
+#include <stdlib.h>
+#include "core.h"
+*/
+import (
+ "C"
+)
+
+import (
+ "runtime/pprof"
+)
+
+// MatProfile is a pprof.Profile that contains stack traces that led to the
+// creation of (currently) unclosed Mats. Every time a Mat is created, the stack
+// trace is added to this profile, and every time the Mat is closed the trace is removed.
+// In a program that is not leaking, this profile's count should not
+// continuously increase and ideally when a program is terminated the count
+// should be zero. You can get the count at any time with:
+//
+// gocv.MatProfile.Count()
+//
+// and you can display the current entries with:
+//
+// var b bytes.Buffer
+// gocv.MatProfile.WriteTo(&b, 1)
+// fmt.Print(b.String())
+//
+// This will display stack traces of where the unclosed Mats were instantiated.
+// For example, the results could look something like this:
+//
+// 1 @ 0x4146a0c 0x4146a57 0x4119666 0x40bb18f 0x405a841
+// # 0x4146a0b gocv.io/x/gocv.newMat+0x4b /go/src/gocv.io/x/gocv/core.go:120
+// # 0x4146a56 gocv.io/x/gocv.NewMat+0x26 /go/src/gocv.io/x/gocv/core.go:126
+// # 0x4119665 gocv.io/x/gocv.TestMat+0x25 /go/src/gocv.io/x/gocv/core_test.go:29
+// # 0x40bb18e testing.tRunner+0xbe /usr/local/Cellar/go/1.11/libexec/src/testing/testing.go:827
+//
+// Furthermore, if the program is a long running process or if gocv is being used on a
+// web server, it may be helpful to install the HTTP interface using:
+//
+// import _ "net/http/pprof"
+//
+// In order to include the MatProfile custom profiler, you MUST build or run your application
+// or tests using the following build tag:
+// -tags matprofile
+//
+// For more information, see the runtime/pprof package documentation.
+var MatProfile *pprof.Profile
+
+func init() {
+ profName := "gocv.io/x/gocv.Mat"
+ MatProfile = pprof.Lookup(profName)
+ if MatProfile == nil {
+ MatProfile = pprof.NewProfile(profName)
+ }
+}
+
+// newMat returns a new Mat from a C Mat and records it to the MatProfile.
+func newMat(p C.Mat) Mat {
+ m := Mat{p: p}
+ MatProfile.Add(p, 1)
+ return m
+}
+
+// Close the Mat object.
+func (m *Mat) Close() error {
+ C.Mat_Close(m.p)
+ MatProfile.Remove(m.p)
+ m.p = nil
+ return nil
+}
diff --git a/vendor/gocv.io/x/gocv/objdetect.cpp b/vendor/gocv.io/x/gocv/objdetect.cpp
new file mode 100644
index 0000000..d5df10f
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/objdetect.cpp
@@ -0,0 +1,151 @@
+#include "objdetect.h"
+
+// CascadeClassifier
+
+CascadeClassifier CascadeClassifier_New() {
+ return new cv::CascadeClassifier();
+}
+
+void CascadeClassifier_Close(CascadeClassifier cs) {
+ delete cs;
+}
+
+int CascadeClassifier_Load(CascadeClassifier cs, const char* name) {
+ return cs->load(name);
+}
+
+struct Rects CascadeClassifier_DetectMultiScale(CascadeClassifier cs, Mat img) {
+ std::vector<cv::Rect> detected;
+ cs->detectMultiScale(*img, detected); // uses all default parameters
+ Rect* rects = new Rect[detected.size()];
+
+ for (size_t i = 0; i < detected.size(); ++i) {
+ Rect r = {detected[i].x, detected[i].y, detected[i].width, detected[i].height};
+ rects[i] = r;
+ }
+
+ Rects ret = {rects, (int)detected.size()};
+ return ret;
+}
+
+struct Rects CascadeClassifier_DetectMultiScaleWithParams(CascadeClassifier cs, Mat img,
+ double scale, int minNeighbors, int flags, Size minSize, Size maxSize) {
+
+ cv::Size minSz(minSize.width, minSize.height);
+ cv::Size maxSz(maxSize.width, maxSize.height);
+
+ std::vector<cv::Rect> detected;
+ cs->detectMultiScale(*img, detected, scale, minNeighbors, flags, minSz, maxSz);
+ Rect* rects = new Rect[detected.size()];
+
+ for (size_t i = 0; i < detected.size(); ++i) {
+ Rect r = {detected[i].x, detected[i].y, detected[i].width, detected[i].height};
+ rects[i] = r;
+ }
+
+ Rects ret = {rects, (int)detected.size()};
+ return ret;
+}
+
+// HOGDescriptor
+
+HOGDescriptor HOGDescriptor_New() {
+ return new cv::HOGDescriptor();
+}
+
+void HOGDescriptor_Close(HOGDescriptor hog) {
+ delete hog;
+}
+
+int HOGDescriptor_Load(HOGDescriptor hog, const char* name) {
+ return hog->load(name);
+}
+
+struct Rects HOGDescriptor_DetectMultiScale(HOGDescriptor hog, Mat img) {
+ std::vector<cv::Rect> detected;
+ hog->detectMultiScale(*img, detected);
+ Rect* rects = new Rect[detected.size()];
+
+ for (size_t i = 0; i < detected.size(); ++i) {
+ Rect r = {detected[i].x, detected[i].y, detected[i].width, detected[i].height};
+ rects[i] = r;
+ }
+
+ Rects ret = {rects, (int)detected.size()};
+ return ret;
+}
+
+struct Rects HOGDescriptor_DetectMultiScaleWithParams(HOGDescriptor hog, Mat img,
+ double hitThresh, Size winStride, Size padding, double scale, double finalThresh,
+ bool useMeanshiftGrouping) {
+
+ cv::Size wSz(winStride.width, winStride.height);
+ cv::Size pSz(padding.width, padding.height);
+
+ std::vector<cv::Rect> detected;
+ hog->detectMultiScale(*img, detected, hitThresh, wSz, pSz, scale, finalThresh,
+ useMeanshiftGrouping);
+ Rect* rects = new Rect[detected.size()];
+
+ for (size_t i = 0; i < detected.size(); ++i) {
+ Rect r = {detected[i].x, detected[i].y, detected[i].width, detected[i].height};
+ rects[i] = r;
+ }
+
+ Rects ret = {rects, (int)detected.size()};
+ return ret;
+}
+
+Mat HOG_GetDefaultPeopleDetector() {
+ return new cv::Mat(cv::HOGDescriptor::getDefaultPeopleDetector());
+}
+
+void HOGDescriptor_SetSVMDetector(HOGDescriptor hog, Mat det) {
+ hog->setSVMDetector(*det);
+}
+
+struct Rects GroupRectangles(struct Rects rects, int groupThreshold, double eps) {
+ std::vector<cv::Rect> vRect;
+
+ for (int i = 0; i < rects.length; ++i) {
+ cv::Rect r = cv::Rect(rects.rects[i].x, rects.rects[i].y, rects.rects[i].width,
+ rects.rects[i].height);
+ vRect.push_back(r);
+ }
+
+ cv::groupRectangles(vRect, groupThreshold, eps);
+
+ Rect* results = new Rect[vRect.size()];
+
+ for (size_t i = 0; i < vRect.size(); ++i) {
+ Rect r = {vRect[i].x, vRect[i].y, vRect[i].width, vRect[i].height};
+ results[i] = r;
+ }
+
+ Rects ret = {results, (int)vRect.size()};
+ return ret;
+}
+
+// QRCodeDetector
+
+QRCodeDetector QRCodeDetector_New() {
+ return new cv::QRCodeDetector();
+}
+
+void QRCodeDetector_Close(QRCodeDetector qr) {
+ delete qr;
+}
+
+const char* QRCodeDetector_DetectAndDecode(QRCodeDetector qr, Mat input,Mat points,Mat straight_qrcode) {
+ cv::String *str = new cv::String(qr->detectAndDecode(*input,*points,*straight_qrcode));
+ return str->c_str();
+}
+
+bool QRCodeDetector_Detect(QRCodeDetector qr, Mat input,Mat points) {
+ return qr->detect(*input,*points);
+}
+
+const char* QRCodeDetector_Decode(QRCodeDetector qr, Mat input,Mat inputPoints,Mat straight_qrcode) {
+ cv::String *str = new cv::String(qr->detectAndDecode(*input,*inputPoints,*straight_qrcode));
+ return str->c_str();
+}
diff --git a/vendor/gocv.io/x/gocv/objdetect.go b/vendor/gocv.io/x/gocv/objdetect.go
new file mode 100644
index 0000000..9a31d1a
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/objdetect.go
@@ -0,0 +1,240 @@
+package gocv
+
+/*
+#include <stdlib.h>
+#include "objdetect.h"
+*/
+import "C"
+import (
+ "image"
+ "unsafe"
+)
+
+// CascadeClassifier is a cascade classifier class for object detection.
+//
+// For further details, please see:
+// http://docs.opencv.org/master/d1/de5/classcv_1_1CascadeClassifier.html
+//
+type CascadeClassifier struct {
+ p C.CascadeClassifier
+}
+
+// NewCascadeClassifier returns a new CascadeClassifier.
+func NewCascadeClassifier() CascadeClassifier {
+ return CascadeClassifier{p: C.CascadeClassifier_New()}
+}
+
+// Close deletes the CascadeClassifier's pointer.
+func (c *CascadeClassifier) Close() error {
+ C.CascadeClassifier_Close(c.p)
+ c.p = nil
+ return nil
+}
+
+// Load cascade classifier from a file.
+//
+// For further details, please see:
+// http://docs.opencv.org/master/d1/de5/classcv_1_1CascadeClassifier.html#a1a5884c8cc749422f9eb77c2471958bc
+//
+func (c *CascadeClassifier) Load(name string) bool {
+ cName := C.CString(name)
+ defer C.free(unsafe.Pointer(cName))
+ return C.CascadeClassifier_Load(c.p, cName) != 0
+}
+
+// DetectMultiScale detects objects of different sizes in the input Mat image.
+// The detected objects are returned as a slice of image.Rectangle structs.
+//
+// For further details, please see:
+// http://docs.opencv.org/master/d1/de5/classcv_1_1CascadeClassifier.html#aaf8181cb63968136476ec4204ffca498
+//
+func (c *CascadeClassifier) DetectMultiScale(img Mat) []image.Rectangle {
+ ret := C.CascadeClassifier_DetectMultiScale(c.p, img.p)
+ defer C.Rects_Close(ret)
+
+ return toRectangles(ret)
+}
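+
+// A minimal face-detection sketch, assuming a Haar cascade XML file on disk and
+// the IMRead helper defined elsewhere in gocv:
+//
+//    classifier := gocv.NewCascadeClassifier()
+//    defer classifier.Close()
+//    if !classifier.Load("haarcascade_frontalface_default.xml") {
+//        // handle the load failure
+//    }
+//    img := gocv.IMRead("people.jpg", gocv.IMReadColor)
+//    defer img.Close()
+//    faces := classifier.DetectMultiScale(img) // []image.Rectangle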
+
+// DetectMultiScaleWithParams calls DetectMultiScale but allows setting parameters
+// to values other than just the defaults.
+//
+// For further details, please see:
+// http://docs.opencv.org/master/d1/de5/classcv_1_1CascadeClassifier.html#aaf8181cb63968136476ec4204ffca498
+//
+func (c *CascadeClassifier) DetectMultiScaleWithParams(img Mat, scale float64,
+ minNeighbors, flags int, minSize, maxSize image.Point) []image.Rectangle {
+
+ minSz := C.struct_Size{
+ width: C.int(minSize.X),
+ height: C.int(minSize.Y),
+ }
+
+ maxSz := C.struct_Size{
+ width: C.int(maxSize.X),
+ height: C.int(maxSize.Y),
+ }
+
+ ret := C.CascadeClassifier_DetectMultiScaleWithParams(c.p, img.p, C.double(scale),
+ C.int(minNeighbors), C.int(flags), minSz, maxSz)
+ defer C.Rects_Close(ret)
+
+ return toRectangles(ret)
+}
+
+// HOGDescriptor is a Histogram of Oriented Gradients (HOG) descriptor used for object detection.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d5/d33/structcv_1_1HOGDescriptor.html#a723b95b709cfd3f95cf9e616de988fc8
+//
+type HOGDescriptor struct {
+ p C.HOGDescriptor
+}
+
+// NewHOGDescriptor returns a new HOGDescriptor.
+func NewHOGDescriptor() HOGDescriptor {
+ return HOGDescriptor{p: C.HOGDescriptor_New()}
+}
+
+// Close deletes the HOGDescriptor's pointer.
+func (h *HOGDescriptor) Close() error {
+ C.HOGDescriptor_Close(h.p)
+ h.p = nil
+ return nil
+}
+
+// DetectMultiScale detects objects in the input Mat image.
+// The detected objects are returned as a slice of image.Rectangle structs.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d5/d33/structcv_1_1HOGDescriptor.html#a660e5cd036fd5ddf0f5767b352acd948
+//
+func (h *HOGDescriptor) DetectMultiScale(img Mat) []image.Rectangle {
+ ret := C.HOGDescriptor_DetectMultiScale(h.p, img.p)
+ defer C.Rects_Close(ret)
+
+ return toRectangles(ret)
+}
+
+// DetectMultiScaleWithParams calls DetectMultiScale but allows setting parameters
+// to values other than just the defaults.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d5/d33/structcv_1_1HOGDescriptor.html#a660e5cd036fd5ddf0f5767b352acd948
+//
+func (h *HOGDescriptor) DetectMultiScaleWithParams(img Mat, hitThresh float64,
+ winStride, padding image.Point, scale, finalThreshold float64, useMeanshiftGrouping bool) []image.Rectangle {
+ wSz := C.struct_Size{
+ width: C.int(winStride.X),
+ height: C.int(winStride.Y),
+ }
+
+ pSz := C.struct_Size{
+ width: C.int(padding.X),
+ height: C.int(padding.Y),
+ }
+
+ ret := C.HOGDescriptor_DetectMultiScaleWithParams(h.p, img.p, C.double(hitThresh),
+ wSz, pSz, C.double(scale), C.double(finalThreshold), C.bool(useMeanshiftGrouping))
+ defer C.Rects_Close(ret)
+
+ return toRectangles(ret)
+}
+
+// HOGDefaultPeopleDetector returns a new Mat with the HOG DefaultPeopleDetector.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d5/d33/structcv_1_1HOGDescriptor.html#a660e5cd036fd5ddf0f5767b352acd948
+//
+func HOGDefaultPeopleDetector() Mat {
+ return newMat(C.HOG_GetDefaultPeopleDetector())
+}
+
+// SetSVMDetector sets the data for the HOGDescriptor.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d5/d33/structcv_1_1HOGDescriptor.html#a09e354ad701f56f9c550dc0385dc36f1
+//
+func (h *HOGDescriptor) SetSVMDetector(det Mat) error {
+ C.HOGDescriptor_SetSVMDetector(h.p, det.p)
+ return nil
+}
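+
+// A minimal pedestrian-detection sketch combining the HOG helpers above,
+// assuming an input Mat named img obtained elsewhere (for example via IMRead):
+//
+//    hog := gocv.NewHOGDescriptor()
+//    defer hog.Close()
+//    detector := gocv.HOGDefaultPeopleDetector()
+//    defer detector.Close()
+//    hog.SetSVMDetector(detector)
+//    people := hog.DetectMultiScale(img) // []image.Rectangle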
+
+// GroupRectangles groups the object candidate rectangles.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d5/d54/group__objdetect.html#ga3dba897ade8aa8227edda66508e16ab9
+//
+func GroupRectangles(rects []image.Rectangle, groupThreshold int, eps float64) []image.Rectangle {
+ cRectArray := make([]C.struct_Rect, len(rects))
+ for i, r := range rects {
+ cRect := C.struct_Rect{
+ x: C.int(r.Min.X),
+ y: C.int(r.Min.Y),
+ width: C.int(r.Size().X),
+ height: C.int(r.Size().Y),
+ }
+ cRectArray[i] = cRect
+ }
+ cRects := C.struct_Rects{
+ rects: (*C.Rect)(&cRectArray[0]),
+ length: C.int(len(rects)),
+ }
+
+ ret := C.GroupRectangles(cRects, C.int(groupThreshold), C.double(eps))
+
+ return toRectangles(ret)
+}
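+
+// A small sketch: overlapping candidates from repeated detections can be merged
+// before drawing, assuming rects was produced by one of the DetectMultiScale
+// calls above:
+//
+//    grouped := gocv.GroupRectangles(rects, 1, 0.2)
+//    // grouped holds the merged []image.Rectangle candidates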
+
+// QRCodeDetector provides methods to detect and decode QR codes in an image.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/de/dc3/classcv_1_1QRCodeDetector.html
+//
+type QRCodeDetector struct {
+ p C.QRCodeDetector
+}
+
+// newQRCodeDetector returns a new QRCodeDetector from a C QRCodeDetector
+func newQRCodeDetector(p C.QRCodeDetector) QRCodeDetector {
+ return QRCodeDetector{p: p}
+}
+
+func NewQRCodeDetector() QRCodeDetector {
+ return newQRCodeDetector(C.QRCodeDetector_New())
+}
+
+func (a *QRCodeDetector) Close() error {
+ C.QRCodeDetector_Close(a.p)
+ a.p = nil
+ return nil
+}
+
+// DetectAndDecode both detects and decodes a QR code in the input image.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/de/dc3/classcv_1_1QRCodeDetector.html#a7290bd6a5d59b14a37979c3a14fbf394
+//
+func (a *QRCodeDetector) DetectAndDecode(input Mat, points *Mat, straight_qrcode *Mat) string {
+ goResult := C.GoString(C.QRCodeDetector_DetectAndDecode(a.p, input.p, points.p, straight_qrcode.p))
+ return string(goResult)
+}
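+
+// A minimal QR-code reading sketch, assuming an input Mat named img and the
+// NewMat helper defined elsewhere in gocv:
+//
+//    qr := gocv.NewQRCodeDetector()
+//    defer qr.Close()
+//    points := gocv.NewMat()
+//    defer points.Close()
+//    straight := gocv.NewMat()
+//    defer straight.Close()
+//    text := qr.DetectAndDecode(img, &points, &straight)
+//    // text is "" when no QR code could be detected and decoded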
+
+// Detect detects QR code in image and returns the quadrangle containing the code.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/de/dc3/classcv_1_1QRCodeDetector.html#a64373f7d877d27473f64fe04bb57d22b
+//
+func (a *QRCodeDetector) Detect(input Mat, points *Mat) bool {
+ result := C.QRCodeDetector_Detect(a.p, input.p, points.p)
+ return bool(result)
+}
+
+// Decode decodes QR code in image once it's found by the detect() method. Returns UTF8-encoded output string or empty string if the code cannot be decoded.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/de/dc3/classcv_1_1QRCodeDetector.html#a4172c2eb4825c844fb1b0ae67202d329
+//
+func (a *QRCodeDetector) Decode(input Mat, points Mat, straight_qrcode *Mat) string {
+ goResult := C.GoString(C.QRCodeDetector_DetectAndDecode(a.p, input.p, points.p, straight_qrcode.p))
+ return string(goResult)
+}
diff --git a/vendor/gocv.io/x/gocv/objdetect.h b/vendor/gocv.io/x/gocv/objdetect.h
new file mode 100644
index 0000000..468d9c7
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/objdetect.h
@@ -0,0 +1,53 @@
+#ifndef _OPENCV3_OBJDETECT_H_
+#define _OPENCV3_OBJDETECT_H_
+
+#include <stdbool.h>
+
+#ifdef __cplusplus
+#include <opencv2/opencv.hpp>
+extern "C" {
+#endif
+
+#include "core.h"
+
+#ifdef __cplusplus
+typedef cv::CascadeClassifier* CascadeClassifier;
+typedef cv::HOGDescriptor* HOGDescriptor;
+typedef cv::QRCodeDetector* QRCodeDetector;
+#else
+typedef void* CascadeClassifier;
+typedef void* HOGDescriptor;
+typedef void* QRCodeDetector;
+#endif
+
+// CascadeClassifier
+CascadeClassifier CascadeClassifier_New();
+void CascadeClassifier_Close(CascadeClassifier cs);
+int CascadeClassifier_Load(CascadeClassifier cs, const char* name);
+struct Rects CascadeClassifier_DetectMultiScale(CascadeClassifier cs, Mat img);
+struct Rects CascadeClassifier_DetectMultiScaleWithParams(CascadeClassifier cs, Mat img,
+ double scale, int minNeighbors, int flags, Size minSize, Size maxSize);
+
+HOGDescriptor HOGDescriptor_New();
+void HOGDescriptor_Close(HOGDescriptor hog);
+int HOGDescriptor_Load(HOGDescriptor hog, const char* name);
+struct Rects HOGDescriptor_DetectMultiScale(HOGDescriptor hog, Mat img);
+struct Rects HOGDescriptor_DetectMultiScaleWithParams(HOGDescriptor hog, Mat img,
+ double hitThresh, Size winStride, Size padding, double scale, double finalThreshold,
+ bool useMeanshiftGrouping);
+Mat HOG_GetDefaultPeopleDetector();
+void HOGDescriptor_SetSVMDetector(HOGDescriptor hog, Mat det);
+
+struct Rects GroupRectangles(struct Rects rects, int groupThreshold, double eps);
+
+QRCodeDetector QRCodeDetector_New();
+const char* QRCodeDetector_DetectAndDecode(QRCodeDetector qr, Mat input,Mat points,Mat straight_qrcode);
+bool QRCodeDetector_Detect(QRCodeDetector qr, Mat input,Mat points);
+const char* QRCodeDetector_Decode(QRCodeDetector qr, Mat input,Mat inputPoints,Mat straight_qrcode);
+void QRCodeDetector_Close(QRCodeDetector qr);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //_OPENCV3_OBJDETECT_H_
diff --git a/vendor/gocv.io/x/gocv/svd.cpp b/vendor/gocv.io/x/gocv/svd.cpp
new file mode 100644
index 0000000..0e0e82d
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/svd.cpp
@@ -0,0 +1,5 @@
+#include "svd.h"
+
+void SVD_Compute(Mat src, Mat w, Mat u, Mat vt) {
+ cv::SVD::compute(*src, *w, *u, *vt, 0);
+}
\ No newline at end of file
diff --git a/vendor/gocv.io/x/gocv/svd.go b/vendor/gocv.io/x/gocv/svd.go
new file mode 100644
index 0000000..16f26ef
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/svd.go
@@ -0,0 +1,14 @@
+package gocv
+
+/*
+#include <stdlib.h>
+#include "svd.h"
+*/
+import "C"
+
+// SVDCompute decomposes a matrix and stores the results in the user-provided matrices w, u, and vt.
+//
+// https://docs.opencv.org/4.1.2/df/df7/classcv_1_1SVD.html#a76f0b2044df458160292045a3d3714c6
+func SVDCompute(src Mat, w, u, vt *Mat) {
+ C.SVD_Compute(src.Ptr(), w.Ptr(), u.Ptr(), vt.Ptr())
+}
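+
+// A minimal sketch, assuming the NewMat/NewMatWithSize helpers and the
+// MatTypeCV32F constant defined elsewhere in gocv: decompose a 3x3
+// single-channel matrix into w, u and vt.
+//
+//    src := gocv.NewMatWithSize(3, 3, gocv.MatTypeCV32F)
+//    defer src.Close()
+//    w, u, vt := gocv.NewMat(), gocv.NewMat(), gocv.NewMat()
+//    defer w.Close()
+//    defer u.Close()
+//    defer vt.Close()
+//    gocv.SVDCompute(src, &w, &u, &vt)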
diff --git a/vendor/gocv.io/x/gocv/svd.h b/vendor/gocv.io/x/gocv/svd.h
new file mode 100644
index 0000000..e3aab12
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/svd.h
@@ -0,0 +1,18 @@
+#ifndef _OPENCV3_SVD_H_
+#define _OPENCV3_SVD_H_
+
+#ifdef __cplusplus
+#include <opencv2/opencv.hpp>
+
+extern "C" {
+#endif
+
+#include "core.h"
+
+void SVD_Compute(Mat src, Mat w, Mat u, Mat vt);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //_OPENCV3_SVD_H
\ No newline at end of file
diff --git a/vendor/gocv.io/x/gocv/travis_build_opencv.sh b/vendor/gocv.io/x/gocv/travis_build_opencv.sh
new file mode 100644
index 0000000..a2fb550
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/travis_build_opencv.sh
@@ -0,0 +1,79 @@
+#!/bin/bash
+set -eux -o pipefail
+
+OPENCV_VERSION=${OPENCV_VERSION:-4.2.0}
+
+#GRAPHICAL=ON
+GRAPHICAL=${GRAPHICAL:-OFF}
+
+# OpenCV looks for libjpeg in /usr/lib/libjpeg.so, for some reason. However,
+# it does not seem to be there in 14.04. Create a link
+
+mkdir -p $HOME/usr/lib
+
+if [[ ! -f "$HOME/usr/lib/libjpeg.so" ]]; then
+ ln -s /usr/lib/x86_64-linux-gnu/libjpeg.so $HOME/usr/lib/libjpeg.so
+fi
+
+# Same for libpng.so
+
+if [[ ! -f "$HOME/usr/lib/libpng.so" ]]; then
+ ln -s /usr/lib/x86_64-linux-gnu/libpng.so $HOME/usr/lib/libpng.so
+fi
+
+# Build OpenCV
+if [[ ! -e "$HOME/usr/installed-${OPENCV_VERSION}" ]]; then
+TMP=$(mktemp -d)
+if [[ ! -d "opencv-${OPENCV_VERSION}/build" ]]; then
+ curl -sL https://github.com/opencv/opencv/archive/${OPENCV_VERSION}.zip > ${TMP}/opencv.zip
+ unzip -q ${TMP}/opencv.zip
+ mkdir opencv-${OPENCV_VERSION}/build
+ rm ${TMP}/opencv.zip
+fi
+
+if [[ ! -d "opencv_contrib-${OPENCV_VERSION}/modules" ]]; then
+ curl -sL https://github.com/opencv/opencv_contrib/archive/${OPENCV_VERSION}.zip > ${TMP}/opencv-contrib.zip
+ unzip -q ${TMP}/opencv-contrib.zip
+ rm ${TMP}/opencv-contrib.zip
+fi
+rmdir ${TMP}
+
+cd opencv-${OPENCV_VERSION}/build
+cmake -D WITH_IPP=${GRAPHICAL} \
+ -D WITH_OPENGL=${GRAPHICAL} \
+ -D WITH_QT=${GRAPHICAL} \
+ -D BUILD_EXAMPLES=OFF \
+ -D BUILD_TESTS=OFF \
+ -D BUILD_PERF_TESTS=OFF \
+ -D BUILD_opencv_java=OFF \
+ -D BUILD_opencv_python=OFF \
+ -D BUILD_opencv_python2=OFF \
+ -D BUILD_opencv_python3=OFF \
+ -D OPENCV_GENERATE_PKGCONFIG=ON \
+ -D CMAKE_INSTALL_PREFIX=$HOME/usr \
+ -D OPENCV_ENABLE_NONFREE=ON \
+ -D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib-${OPENCV_VERSION}/modules ..
+make -j8
+make install && touch $HOME/usr/installed-${OPENCV_VERSION}
+
+# caffe test data
+if [[ ! -d "${HOME}/testdata" ]]; then
+ mkdir ${HOME}/testdata
+fi
+
+#if [[ ! -f "${HOME}/testdata/bvlc_googlenet.prototxt" ]]; then
+ curl -sL https://raw.githubusercontent.com/opencv/opencv_extra/master/testdata/dnn/bvlc_googlenet.prototxt > ${HOME}/testdata/bvlc_googlenet.prototxt
+#fi
+
+#if [[ ! -f "${HOME}/testdata/bvlc_googlenet.caffemodel" ]]; then
+ curl -sL http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel > ${HOME}/testdata/bvlc_googlenet.caffemodel
+#fi
+
+#if [[ ! -f "${HOME}/testdata/tensorflow_inception_graph.pb" ]]; then
+ curl -sL https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip > ${HOME}/testdata/inception5h.zip
+ unzip -o ${HOME}/testdata/inception5h.zip tensorflow_inception_graph.pb -d ${HOME}/testdata
+#fi
+
+cd ../..
+touch $HOME/fresh-cache
+fi
diff --git a/vendor/gocv.io/x/gocv/version.cpp b/vendor/gocv.io/x/gocv/version.cpp
new file mode 100644
index 0000000..d4aa165
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/version.cpp
@@ -0,0 +1,5 @@
+#include "version.h"
+
+const char* openCVVersion() {
+ return CV_VERSION;
+}
diff --git a/vendor/gocv.io/x/gocv/version.go b/vendor/gocv.io/x/gocv/version.go
new file mode 100644
index 0000000..00fbdcf
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/version.go
@@ -0,0 +1,20 @@
+package gocv
+
+/*
+#include <stdlib.h>
+#include "version.h"
+*/
+import "C"
+
+// GoCVVersion of this package, for display purposes.
+const GoCVVersion = "0.22.0"
+
+// Version returns the current version of the gocv Go package.
+func Version() string {
+ return GoCVVersion
+}
+
+// OpenCVVersion returns the current OpenCV lib version
+func OpenCVVersion() string {
+ return C.GoString(C.openCVVersion())
+}
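+
+// A trivial usage sketch:
+//
+//    fmt.Printf("gocv %s built against OpenCV %s\n", gocv.Version(), gocv.OpenCVVersion())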
diff --git a/vendor/gocv.io/x/gocv/version.h b/vendor/gocv.io/x/gocv/version.h
new file mode 100644
index 0000000..3372e57
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/version.h
@@ -0,0 +1,17 @@
+#ifndef _OPENCV3_VERSION_H_
+#define _OPENCV3_VERSION_H_
+
+#ifdef __cplusplus
+#include <opencv2/opencv.hpp>
+extern "C" {
+#endif
+
+#include "core.h"
+
+const char* openCVVersion();
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //_OPENCV3_VERSION_H_
diff --git a/vendor/gocv.io/x/gocv/video.cpp b/vendor/gocv.io/x/gocv/video.cpp
new file mode 100644
index 0000000..29775e8
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/video.cpp
@@ -0,0 +1,49 @@
+#include "video.h"
+
+BackgroundSubtractorMOG2 BackgroundSubtractorMOG2_Create() {
+ return new cv::Ptr<cv::BackgroundSubtractorMOG2>(cv::createBackgroundSubtractorMOG2());
+}
+
+BackgroundSubtractorMOG2 BackgroundSubtractorMOG2_CreateWithParams(int history, double varThreshold, bool detectShadows) {
+ return new cv::Ptr<cv::BackgroundSubtractorMOG2>(cv::createBackgroundSubtractorMOG2(history,varThreshold,detectShadows));
+}
+
+BackgroundSubtractorKNN BackgroundSubtractorKNN_Create() {
+ return new cv::Ptr<cv::BackgroundSubtractorKNN>(cv::createBackgroundSubtractorKNN());
+}
+
+BackgroundSubtractorKNN BackgroundSubtractorKNN_CreateWithParams(int history, double dist2Threshold, bool detectShadows) {
+ return new cv::Ptr<cv::BackgroundSubtractorKNN>(cv::createBackgroundSubtractorKNN(history,dist2Threshold,detectShadows));
+}
+
+void BackgroundSubtractorMOG2_Close(BackgroundSubtractorMOG2 b) {
+ delete b;
+}
+
+void BackgroundSubtractorMOG2_Apply(BackgroundSubtractorMOG2 b, Mat src, Mat dst) {
+ (*b)->apply(*src, *dst);
+}
+
+void BackgroundSubtractorKNN_Close(BackgroundSubtractorKNN k) {
+ delete k;
+}
+
+void BackgroundSubtractorKNN_Apply(BackgroundSubtractorKNN k, Mat src, Mat dst) {
+ (*k)->apply(*src, *dst);
+}
+
+void CalcOpticalFlowFarneback(Mat prevImg, Mat nextImg, Mat flow, double scale, int levels,
+ int winsize, int iterations, int polyN, double polySigma, int flags) {
+ cv::calcOpticalFlowFarneback(*prevImg, *nextImg, *flow, scale, levels, winsize, iterations, polyN,
+ polySigma, flags);
+}
+
+void CalcOpticalFlowPyrLK(Mat prevImg, Mat nextImg, Mat prevPts, Mat nextPts, Mat status, Mat err) {
+ cv::calcOpticalFlowPyrLK(*prevImg, *nextImg, *prevPts, *nextPts, *status, *err);
+}
+
+void CalcOpticalFlowPyrLKWithParams(Mat prevImg, Mat nextImg, Mat prevPts, Mat nextPts, Mat status, Mat err, Size winSize, int maxLevel, TermCriteria criteria, int flags, double minEigThreshold){
+ cv::Size sz(winSize.width, winSize.height);
+ cv::calcOpticalFlowPyrLK(*prevImg, *nextImg, *prevPts, *nextPts, *status, *err, sz, maxLevel, *criteria, flags, minEigThreshold);
+}
+
diff --git a/vendor/gocv.io/x/gocv/video.go b/vendor/gocv.io/x/gocv/video.go
new file mode 100644
index 0000000..a202554
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/video.go
@@ -0,0 +1,157 @@
+package gocv
+
+/*
+#include <stdlib.h>
+#include "video.h"
+*/
+import "C"
+import (
+ "image"
+ "unsafe"
+)
+
+/**
+ cv::OPTFLOW_USE_INITIAL_FLOW = 4,
+ cv::OPTFLOW_LK_GET_MIN_EIGENVALS = 8,
+ cv::OPTFLOW_FARNEBACK_GAUSSIAN = 256
+ For further details, please see: https://docs.opencv.org/master/dc/d6b/group__video__track.html#gga2c6cc144c9eee043575d5b311ac8af08a9d4430ac75199af0cf6fcdefba30eafe
+*/
+const (
+ OptflowUseInitialFlow = 4
+ OptflowLkGetMinEigenvals = 8
+ OptflowFarnebackGaussian = 256
+)
+
+// BackgroundSubtractorMOG2 is a wrapper around the cv::BackgroundSubtractorMOG2.
+type BackgroundSubtractorMOG2 struct {
+ // C.BackgroundSubtractorMOG2
+ p unsafe.Pointer
+}
+
+// NewBackgroundSubtractorMOG2 returns a new BackgroundSubtractor algorithm
+// of type MOG2. MOG2 is a Gaussian Mixture-based Background/Foreground
+// Segmentation Algorithm.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/de/de1/group__video__motion.html#ga2beb2dee7a073809ccec60f145b6b29c
+// https://docs.opencv.org/master/d7/d7b/classcv_1_1BackgroundSubtractorMOG2.html
+//
+func NewBackgroundSubtractorMOG2() BackgroundSubtractorMOG2 {
+ return BackgroundSubtractorMOG2{p: unsafe.Pointer(C.BackgroundSubtractorMOG2_Create())}
+}
+
+// NewBackgroundSubtractorMOG2WithParams returns a new BackgroundSubtractor algorithm
+// of type MOG2 with customized parameters. MOG2 is a Gaussian Mixture-based Background/Foreground
+// Segmentation Algorithm.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/de/de1/group__video__motion.html#ga2beb2dee7a073809ccec60f145b6b29c
+// https://docs.opencv.org/master/d7/d7b/classcv_1_1BackgroundSubtractorMOG2.html
+//
+func NewBackgroundSubtractorMOG2WithParams(history int, varThreshold float64, detectShadows bool) BackgroundSubtractorMOG2 {
+ return BackgroundSubtractorMOG2{p: unsafe.Pointer(C.BackgroundSubtractorMOG2_CreateWithParams(C.int(history), C.double(varThreshold), C.bool(detectShadows)))}
+}
+
+// Close BackgroundSubtractorMOG2.
+func (b *BackgroundSubtractorMOG2) Close() error {
+ C.BackgroundSubtractorMOG2_Close((C.BackgroundSubtractorMOG2)(b.p))
+ b.p = nil
+ return nil
+}
+
+// Apply computes a foreground mask using the current BackgroundSubtractorMOG2.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d7/df6/classcv_1_1BackgroundSubtractor.html#aa735e76f7069b3fa9c3f32395f9ccd21
+//
+func (b *BackgroundSubtractorMOG2) Apply(src Mat, dst *Mat) {
+ C.BackgroundSubtractorMOG2_Apply((C.BackgroundSubtractorMOG2)(b.p), src.p, dst.p)
+ return
+}
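+
+// A minimal background-subtraction sketch, assuming a VideoCapture named webcam
+// and a Mat named frame obtained through this package's videoio helpers:
+//
+//    mog2 := gocv.NewBackgroundSubtractorMOG2()
+//    defer mog2.Close()
+//    mask := gocv.NewMat()
+//    defer mask.Close()
+//    for webcam.Read(&frame) {
+//        mog2.Apply(frame, &mask) // mask now holds the foreground estimate
+//    }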
+
+// BackgroundSubtractorKNN is a wrapper around the cv::BackgroundSubtractorKNN.
+type BackgroundSubtractorKNN struct {
+ // C.BackgroundSubtractorKNN
+ p unsafe.Pointer
+}
+
+// NewBackgroundSubtractorKNN returns a new BackgroundSubtractor algorithm
+// of type KNN. K-Nearest Neighbors (KNN) is a Background/Foreground
+// Segmentation Algorithm.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/de/de1/group__video__motion.html#gac9be925771f805b6fdb614ec2292006d
+// https://docs.opencv.org/master/db/d88/classcv_1_1BackgroundSubtractorKNN.html
+//
+func NewBackgroundSubtractorKNN() BackgroundSubtractorKNN {
+ return BackgroundSubtractorKNN{p: unsafe.Pointer(C.BackgroundSubtractorKNN_Create())}
+}
+
+// NewBackgroundSubtractorKNNWithParams returns a new BackgroundSubtractor algorithm
+// of type KNN with customized parameters. K-Nearest Neighbors (KNN) is a Background/Foreground
+// Segmentation Algorithm.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/de/de1/group__video__motion.html#gac9be925771f805b6fdb614ec2292006d
+// https://docs.opencv.org/master/db/d88/classcv_1_1BackgroundSubtractorKNN.html
+//
+func NewBackgroundSubtractorKNNWithParams(history int, dist2Threshold float64, detectShadows bool) BackgroundSubtractorKNN {
+ return BackgroundSubtractorKNN{p: unsafe.Pointer(C.BackgroundSubtractorKNN_CreateWithParams(C.int(history), C.double(dist2Threshold), C.bool(detectShadows)))}
+}
+
+// Close BackgroundSubtractorKNN.
+func (k *BackgroundSubtractorKNN) Close() error {
+ C.BackgroundSubtractorKNN_Close((C.BackgroundSubtractorKNN)(k.p))
+ k.p = nil
+ return nil
+}
+
+// Apply computes a foreground mask using the current BackgroundSubtractorKNN.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/d7/df6/classcv_1_1BackgroundSubtractor.html#aa735e76f7069b3fa9c3f32395f9ccd21
+//
+func (k *BackgroundSubtractorKNN) Apply(src Mat, dst *Mat) {
+ C.BackgroundSubtractorKNN_Apply((C.BackgroundSubtractorKNN)(k.p), src.p, dst.p)
+ return
+}
+
+// CalcOpticalFlowFarneback computes a dense optical flow using
+// Gunnar Farneback's algorithm.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/dc/d6b/group__video__track.html#ga5d10ebbd59fe09c5f650289ec0ece5af
+//
+func CalcOpticalFlowFarneback(prevImg Mat, nextImg Mat, flow *Mat, pyrScale float64, levels int, winsize int,
+ iterations int, polyN int, polySigma float64, flags int) {
+ C.CalcOpticalFlowFarneback(prevImg.p, nextImg.p, flow.p, C.double(pyrScale), C.int(levels), C.int(winsize),
+ C.int(iterations), C.int(polyN), C.double(polySigma), C.int(flags))
+ return
+}
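+
+// A minimal dense-flow sketch, assuming two consecutive grayscale frames
+// prevGray and nextGray already held in Mats:
+//
+//    flow := gocv.NewMat()
+//    defer flow.Close()
+//    gocv.CalcOpticalFlowFarneback(prevGray, nextGray, &flow,
+//        0.5, 3, 15, 3, 5, 1.2, gocv.OptflowFarnebackGaussian)
+//    // flow is a 2-channel Mat holding the per-pixel (dx, dy) displacement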
+
+// CalcOpticalFlowPyrLK calculates an optical flow for a sparse feature set using
+// the iterative Lucas-Kanade method with pyramids.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/dc/d6b/group__video__track.html#ga473e4b886d0bcc6b65831eb88ed93323
+//
+func CalcOpticalFlowPyrLK(prevImg Mat, nextImg Mat, prevPts Mat, nextPts Mat, status *Mat, err *Mat) {
+ C.CalcOpticalFlowPyrLK(prevImg.p, nextImg.p, prevPts.p, nextPts.p, status.p, err.p)
+ return
+}
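+
+// A minimal sparse-flow sketch, assuming prevPts was filled with corner points
+// (for example via GoodFeaturesToTrack, defined elsewhere in gocv) and that
+// prevGray and nextGray hold two consecutive grayscale frames:
+//
+//    nextPts := gocv.NewMat()
+//    defer nextPts.Close()
+//    status := gocv.NewMat()
+//    defer status.Close()
+//    errMat := gocv.NewMat()
+//    defer errMat.Close()
+//    gocv.CalcOpticalFlowPyrLK(prevGray, nextGray, prevPts, nextPts, &status, &errMat)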
+
+// CalcOpticalFlowPyrLKWithParams calculates an optical flow for a sparse feature set using
+// the iterative Lucas-Kanade method with pyramids.
+//
+// For further details, please see:
+// https://docs.opencv.org/master/dc/d6b/group__video__track.html#ga473e4b886d0bcc6b65831eb88ed93323
+//
+func CalcOpticalFlowPyrLKWithParams(prevImg Mat, nextImg Mat, prevPts Mat, nextPts Mat, status *Mat, err *Mat,
+ winSize image.Point, maxLevel int, criteria TermCriteria, flags int, minEigThreshold float64) {
+ winSz := C.struct_Size{
+ width: C.int(winSize.X),
+ height: C.int(winSize.Y),
+ }
+ C.CalcOpticalFlowPyrLKWithParams(prevImg.p, nextImg.p, prevPts.p, nextPts.p, status.p, err.p, winSz, C.int(maxLevel), criteria.p, C.int(flags), C.double(minEigThreshold))
+ return
+}
diff --git a/vendor/gocv.io/x/gocv/video.h b/vendor/gocv.io/x/gocv/video.h
new file mode 100644
index 0000000..10b0a1b
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/video.h
@@ -0,0 +1,38 @@
+#ifndef _OPENCV3_VIDEO_H_
+#define _OPENCV3_VIDEO_H_
+
+#ifdef __cplusplus
+#include <opencv2/opencv.hpp>
+extern "C" {
+#endif
+
+#include "core.h"
+
+#ifdef __cplusplus
+typedef cv::Ptr<cv::BackgroundSubtractorMOG2>* BackgroundSubtractorMOG2;
+typedef cv::Ptr<cv::BackgroundSubtractorKNN>* BackgroundSubtractorKNN;
+#else
+typedef void* BackgroundSubtractorMOG2;
+typedef void* BackgroundSubtractorKNN;
+#endif
+
+BackgroundSubtractorMOG2 BackgroundSubtractorMOG2_Create();
+BackgroundSubtractorMOG2 BackgroundSubtractorMOG2_CreateWithParams(int history, double varThreshold, bool detectShadows);
+void BackgroundSubtractorMOG2_Close(BackgroundSubtractorMOG2 b);
+void BackgroundSubtractorMOG2_Apply(BackgroundSubtractorMOG2 b, Mat src, Mat dst);
+
+BackgroundSubtractorKNN BackgroundSubtractorKNN_Create();
+BackgroundSubtractorKNN BackgroundSubtractorKNN_CreateWithParams(int history, double dist2Threshold, bool detectShadows);
+
+void BackgroundSubtractorKNN_Close(BackgroundSubtractorKNN b);
+void BackgroundSubtractorKNN_Apply(BackgroundSubtractorKNN b, Mat src, Mat dst);
+
+void CalcOpticalFlowPyrLK(Mat prevImg, Mat nextImg, Mat prevPts, Mat nextPts, Mat status, Mat err);
+void CalcOpticalFlowPyrLKWithParams(Mat prevImg, Mat nextImg, Mat prevPts, Mat nextPts, Mat status, Mat err, Size winSize, int maxLevel, TermCriteria criteria, int flags, double minEigThreshold);
+void CalcOpticalFlowFarneback(Mat prevImg, Mat nextImg, Mat flow, double pyrScale, int levels,
+ int winsize, int iterations, int polyN, double polySigma, int flags);
+#ifdef __cplusplus
+}
+#endif
+
+#endif //_OPENCV3_VIDEO_H_
diff --git a/vendor/gocv.io/x/gocv/videoio.cpp b/vendor/gocv.io/x/gocv/videoio.cpp
new file mode 100644
index 0000000..2090fd5
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/videoio.cpp
@@ -0,0 +1,63 @@
+#include "videoio.h"
+
+// VideoCapture
+VideoCapture VideoCapture_New() {
+ return new cv::VideoCapture();
+}
+
+void VideoCapture_Close(VideoCapture v) {
+ delete v;
+}
+
+bool VideoCapture_Open(VideoCapture v, const char* uri) {
+ return v->open(uri);
+}
+
+bool VideoCapture_OpenDevice(VideoCapture v, int device) {
+ return v->open(device);
+}
+
+void VideoCapture_Set(VideoCapture v, int prop, double param) {
+ v->set(prop, param);
+}
+
+double VideoCapture_Get(VideoCapture v, int prop) {
+ return v->get(prop);
+}
+
+int VideoCapture_IsOpened(VideoCapture v) {
+ return v->isOpened();
+}
+
+int VideoCapture_Read(VideoCapture v, Mat buf) {
+ return v->read(*buf);
+}
+
+void VideoCapture_Grab(VideoCapture v, int skip) {
+ for (int i = 0; i < skip; i++) {
+ v->grab();
+ }
+}
+
+// VideoWriter
+VideoWriter VideoWriter_New() {
+ return new cv::VideoWriter();
+}
+
+void VideoWriter_Close(VideoWriter vw) {
+ delete vw;
+}
+
+void VideoWriter_Open(VideoWriter vw, const char* name, const char* codec, double fps, int width,
+ int height, bool isColor) {
+ int codecCode = cv::VideoWriter::fourcc(codec[0], codec[1], codec[2], codec[3]);
+ vw->open(name, codecCode, fps, cv::Size(width, height), isColor);
+}
+
+int VideoWriter_IsOpened(VideoWriter vw) {
+ return vw->isOpened();
+}
+
+void VideoWriter_Write(VideoWriter vw, Mat img) {
+ *vw << *img;
+}
diff --git a/vendor/gocv.io/x/gocv/videoio.go b/vendor/gocv.io/x/gocv/videoio.go
new file mode 100644
index 0000000..13f7620
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/videoio.go
@@ -0,0 +1,332 @@
+package gocv
+
+/*
+#include <stdlib.h>
+#include "videoio.h"
+*/
+import "C"
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "sync"
+ "unsafe"
+)
+
+// VideoCaptureProperties are the properties used for VideoCapture operations.
+type VideoCaptureProperties int
+
+const (
+ // VideoCapturePosMsec contains current position of the
+ // video file in milliseconds.
+ VideoCapturePosMsec VideoCaptureProperties = 0
+
+ // VideoCapturePosFrames 0-based index of the frame to be
+ // decoded/captured next.
+ VideoCapturePosFrames = 1
+
+ // VideoCapturePosAVIRatio relative position of the video file:
+ // 0=start of the film, 1=end of the film.
+ VideoCapturePosAVIRatio = 2
+
+ // VideoCaptureFrameWidth is width of the frames in the video stream.
+ VideoCaptureFrameWidth = 3
+
+ // VideoCaptureFrameHeight controls height of frames in the video stream.
+ VideoCaptureFrameHeight = 4
+
+ // VideoCaptureFPS controls capture frame rate.
+ VideoCaptureFPS = 5
+
+ // VideoCaptureFOURCC contains the 4-character code of codec.
+ // see VideoWriter::fourcc for details.
+ VideoCaptureFOURCC = 6
+
+ // VideoCaptureFrameCount contains number of frames in the video file.
+ VideoCaptureFrameCount = 7
+
+ // VideoCaptureFormat is the format of the Mat objects returned by
+ // VideoCapture::retrieve().
+ VideoCaptureFormat = 8
+
+ // VideoCaptureMode contains a backend-specific value indicating
+ // the current capture mode.
+ VideoCaptureMode = 9
+
+ // VideoCaptureBrightness is the brightness of the image
+ // (only for cameras that support it).
+ VideoCaptureBrightness = 10
+
+ // VideoCaptureContrast is the contrast of the image
+ // (only for cameras that support it).
+ VideoCaptureContrast = 11
+
+ // VideoCaptureSaturation is the saturation of the image
+ // (only for cameras that support it).
+ VideoCaptureSaturation = 12
+
+ // VideoCaptureHue is the hue of the image (only for cameras that support it).
+ VideoCaptureHue = 13
+
+ // VideoCaptureGain is the gain of the captured image
+ // (only for cameras that support it).
+ VideoCaptureGain = 14
+
+ // VideoCaptureExposure is the exposure of the captured image
+ // (only for cameras that support it).
+ VideoCaptureExposure = 15
+
+ // VideoCaptureConvertRGB is a boolean flag indicating whether
+ // images should be converted to RGB.
+ VideoCaptureConvertRGB = 16
+
+ // VideoCaptureWhiteBalanceBlueU is currently unsupported.
+ VideoCaptureWhiteBalanceBlueU = 17
+
+ // VideoCaptureRectification is the rectification flag for stereo cameras.
+ // Note: only supported by DC1394 v 2.x backend currently.
+ VideoCaptureRectification = 18
+
+ // VideoCaptureMonochrome indicates whether images should be
+ // converted to monochrome.
+ VideoCaptureMonochrome = 19
+
+ // VideoCaptureSharpness controls image capture sharpness.
+ VideoCaptureSharpness = 20
+
+ // VideoCaptureAutoExposure controls the DC1394 exposure control
+ // done by the camera; the user can adjust the reference level using this feature.
+ VideoCaptureAutoExposure = 21
+
+ // VideoCaptureGamma controls video capture gamma.
+ VideoCaptureGamma = 22
+
+ // VideoCaptureTemperature controls video capture temperature.
+ VideoCaptureTemperature = 23
+
+ // VideoCaptureTrigger controls video capture trigger.
+ VideoCaptureTrigger = 24
+
+ // VideoCaptureTriggerDelay controls video capture trigger delay.
+ VideoCaptureTriggerDelay = 25
+
+ // VideoCaptureWhiteBalanceRedV controls video capture setting for
+ // white balance.
+ VideoCaptureWhiteBalanceRedV = 26
+
+ // VideoCaptureZoom controls video capture zoom.
+ VideoCaptureZoom = 27
+
+ // VideoCaptureFocus controls video capture focus.
+ VideoCaptureFocus = 28
+
+ // VideoCaptureGUID controls video capture GUID.
+ VideoCaptureGUID = 29
+
+ // VideoCaptureISOSpeed controls video capture ISO speed.
+ VideoCaptureISOSpeed = 30
+
+ // VideoCaptureBacklight controls video capture backlight.
+ VideoCaptureBacklight = 32
+
+ // VideoCapturePan controls video capture pan.
+ VideoCapturePan = 33
+
+ // VideoCaptureTilt controls video capture tilt.
+ VideoCaptureTilt = 34
+
+ // VideoCaptureRoll controls video capture roll.
+ VideoCaptureRoll = 35
+
+ // VideoCaptureIris controls video capture iris.
+ VideoCaptureIris = 36
+
+ // VideoCaptureSettings pops up the video/camera filter dialog. Note:
+ // only supported by the DSHOW backend currently. The property value is ignored.
+ VideoCaptureSettings = 37
+
+ // VideoCaptureBufferSize controls video capture buffer size.
+ VideoCaptureBufferSize = 38
+
+ // VideoCaptureAutoFocus controls video capture auto focus.
+ VideoCaptureAutoFocus = 39
+)
+
+// VideoCapture is a wrapper around the OpenCV VideoCapture class.
+//
+// For further details, please see:
+// http://docs.opencv.org/master/d8/dfe/classcv_1_1VideoCapture.html
+//
+type VideoCapture struct {
+ p C.VideoCapture
+}
+
+// VideoCaptureFile opens a VideoCapture from a file and prepares
+// to start capturing. It returns an error if it fails to open the file at the uri path.
+func VideoCaptureFile(uri string) (vc *VideoCapture, err error) {
+ vc = &VideoCapture{p: C.VideoCapture_New()}
+
+ cURI := C.CString(uri)
+ defer C.free(unsafe.Pointer(cURI))
+
+ if !C.VideoCapture_Open(vc.p, cURI) {
+ err = fmt.Errorf("Error opening file: %s", uri)
+ }
+
+ return
+}
+
+// VideoCaptureDevice opens a VideoCapture from a device and prepares
+// to start capturing. It returns an error if it fails to open the video device.
+func VideoCaptureDevice(device int) (vc *VideoCapture, err error) {
+ vc = &VideoCapture{p: C.VideoCapture_New()}
+
+ if !C.VideoCapture_OpenDevice(vc.p, C.int(device)) {
+ err = fmt.Errorf("Error opening device: %d", device)
+ }
+
+ return
+}
+
+// Close VideoCapture object.
+func (v *VideoCapture) Close() error {
+ C.VideoCapture_Close(v.p)
+ v.p = nil
+ return nil
+}
+
+// Set sets the specified VideoCapture property (=key) to the given parameter value.
+func (v *VideoCapture) Set(prop VideoCaptureProperties, param float64) {
+ C.VideoCapture_Set(v.p, C.int(prop), C.double(param))
+}
+
+// Get returns the value of the specified VideoCapture property (=key).
+func (v VideoCapture) Get(prop VideoCaptureProperties) float64 {
+ return float64(C.VideoCapture_Get(v.p, C.int(prop)))
+}
+
+// IsOpened returns whether the VideoCapture has been opened to read from
+// a file or capture device.
+func (v *VideoCapture) IsOpened() bool {
+ isOpened := C.VideoCapture_IsOpened(v.p)
+ return isOpened != 0
+}
+
+// Read reads the next frame from the VideoCapture to the Mat passed in
+// as the param. It returns false if the VideoCapture cannot read a frame.
+func (v *VideoCapture) Read(m *Mat) bool {
+ return C.VideoCapture_Read(v.p, m.p) != 0
+}
+
+// Grab skips a specific number of frames.
+func (v *VideoCapture) Grab(skip int) {
+ C.VideoCapture_Grab(v.p, C.int(skip))
+}
+
+// CodecString returns a string representation of FourCC bytes, i.e. the name of a codec
+func (v *VideoCapture) CodecString() string {
+ res := ""
+ hexes := []int64{0xff, 0xff00, 0xff0000, 0xff000000}
+ for i, h := range hexes {
+ res += string(int64(v.Get(VideoCaptureFOURCC)) & h >> (uint(i * 8)))
+ }
+ return res
+}
+
+// ToCodec returns a float64 representation of FourCC bytes
+func (v *VideoCapture) ToCodec(codec string) float64 {
+ if len(codec) != 4 {
+ return -1.0
+ }
+ c1 := []rune(string(codec[0]))[0]
+ c2 := []rune(string(codec[1]))[0]
+ c3 := []rune(string(codec[2]))[0]
+ c4 := []rune(string(codec[3]))[0]
+ return float64((c1 & 255) + ((c2 & 255) << 8) + ((c3 & 255) << 16) + ((c4 & 255) << 24))
+}
+
+// VideoWriter is a wrapper around the OpenCV VideoWriter class.
+//
+// For further details, please see:
+// http://docs.opencv.org/master/dd/d9e/classcv_1_1VideoWriter.html
+//
+type VideoWriter struct {
+ mu *sync.RWMutex
+ p C.VideoWriter
+}
+
+// VideoWriterFile opens a VideoWriter with a specific output file.
+// The "codec" param should be the four-letter code for the desired output
+// codec, for example "MJPG".
+//
+// For further details, please see:
+// http://docs.opencv.org/master/dd/d9e/classcv_1_1VideoWriter.html#a0901c353cd5ea05bba455317dab81130
+//
+func VideoWriterFile(name string, codec string, fps float64, width int, height int, isColor bool) (vw *VideoWriter, err error) {
+
+ if fps == 0 || width == 0 || height == 0 {
+ return nil, fmt.Errorf("one of the numerical parameters "+
+ "is equal to zero: FPS: %f, width: %d, height: %d", fps, width, height)
+ }
+
+ vw = &VideoWriter{
+ p: C.VideoWriter_New(),
+ mu: &sync.RWMutex{},
+ }
+
+ cName := C.CString(name)
+ defer C.free(unsafe.Pointer(cName))
+
+ cCodec := C.CString(codec)
+ defer C.free(unsafe.Pointer(cCodec))
+
+ C.VideoWriter_Open(vw.p, cName, cCodec, C.double(fps), C.int(width), C.int(height), C.bool(isColor))
+ return
+}
+
+// Close VideoWriter object.
+func (vw *VideoWriter) Close() error {
+ C.VideoWriter_Close(vw.p)
+ vw.p = nil
+ return nil
+}
+
+// IsOpened checks if the VideoWriter is open and ready to be written to.
+//
+// For further details, please see:
+// http://docs.opencv.org/master/dd/d9e/classcv_1_1VideoWriter.html#a9a40803e5f671968ac9efa877c984d75
+//
+func (vw *VideoWriter) IsOpened() bool {
+ isOpened := C.VideoWriter_IsOpened(vw.p)
+ return isOpened != 0
+}
+
+// Write the next video frame from the Mat image to the open VideoWriter.
+//
+// For further details, please see:
+// http://docs.opencv.org/master/dd/d9e/classcv_1_1VideoWriter.html#a3115b679d612a6a0b5864a0c88ed4b39
+//
+func (vw *VideoWriter) Write(img Mat) error {
+ vw.mu.Lock()
+ defer vw.mu.Unlock()
+ C.VideoWriter_Write(vw.p, img.p)
+ return nil
+}
+
+// OpenVideoCapture returns a VideoCapture specified by device ID if v is a
+// number, or a VideoCapture created from a video file, URL, or GStreamer
+// pipeline if v is a string.
+func OpenVideoCapture(v interface{}) (*VideoCapture, error) {
+ switch vv := v.(type) {
+ case int:
+ return VideoCaptureDevice(vv)
+ case string:
+ id, err := strconv.Atoi(vv)
+ if err == nil {
+ return VideoCaptureDevice(id)
+ }
+ return VideoCaptureFile(vv)
+ default:
+ return nil, errors.New("argument must be int or string")
+ }
+}
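
As a usage illustration for the videoio bindings above, here is a minimal sketch that opens a capture source with OpenVideoCapture, queries its frame size, and re-encodes a fixed number of frames with VideoWriterFile. The device ID, output path, codec, frame rate, and frame count are placeholder assumptions.

package main

import (
	"fmt"

	"gocv.io/x/gocv"
)

func main() {
	// 0 selects the first camera; a string argument would instead be treated
	// as a file path, URL, or GStreamer pipeline (see OpenVideoCapture above).
	vc, err := gocv.OpenVideoCapture(0)
	if err != nil {
		fmt.Println("unable to open capture:", err)
		return
	}
	defer vc.Close()

	width := int(vc.Get(gocv.VideoCaptureFrameWidth))
	height := int(vc.Get(gocv.VideoCaptureFrameHeight))

	writer, err := gocv.VideoWriterFile("out.avi", "MJPG", 25, width, height, true)
	if err != nil {
		fmt.Println("unable to open writer:", err)
		return
	}
	defer writer.Close()

	img := gocv.NewMat()
	defer img.Close()

	for i := 0; i < 100; i++ { // record roughly 100 frames
		if ok := vc.Read(&img); !ok || img.Empty() {
			break
		}
		if err := writer.Write(img); err != nil {
			fmt.Println("write failed:", err)
			return
		}
	}
}
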
diff --git a/vendor/gocv.io/x/gocv/videoio.h b/vendor/gocv.io/x/gocv/videoio.h
new file mode 100644
index 0000000..b779fd9
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/videoio.h
@@ -0,0 +1,42 @@
+#ifndef _OPENCV3_VIDEOIO_H_
+#define _OPENCV3_VIDEOIO_H_
+
+#ifdef __cplusplus
+#include <opencv2/opencv.hpp>
+extern "C" {
+#endif
+
+#include "core.h"
+
+#ifdef __cplusplus
+typedef cv::VideoCapture* VideoCapture;
+typedef cv::VideoWriter* VideoWriter;
+#else
+typedef void* VideoCapture;
+typedef void* VideoWriter;
+#endif
+
+// VideoCapture
+VideoCapture VideoCapture_New();
+void VideoCapture_Close(VideoCapture v);
+bool VideoCapture_Open(VideoCapture v, const char* uri);
+bool VideoCapture_OpenDevice(VideoCapture v, int device);
+void VideoCapture_Set(VideoCapture v, int prop, double param);
+double VideoCapture_Get(VideoCapture v, int prop);
+int VideoCapture_IsOpened(VideoCapture v);
+int VideoCapture_Read(VideoCapture v, Mat buf);
+void VideoCapture_Grab(VideoCapture v, int skip);
+
+// VideoWriter
+VideoWriter VideoWriter_New();
+void VideoWriter_Close(VideoWriter vw);
+void VideoWriter_Open(VideoWriter vw, const char* name, const char* codec, double fps, int width,
+ int height, bool isColor);
+int VideoWriter_IsOpened(VideoWriter vw);
+void VideoWriter_Write(VideoWriter vw, Mat img);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //_OPENCV3_VIDEOIO_H_
diff --git a/vendor/gocv.io/x/gocv/videoio_string.go b/vendor/gocv.io/x/gocv/videoio_string.go
new file mode 100644
index 0000000..b17d257
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/videoio_string.go
@@ -0,0 +1,85 @@
+package gocv
+
+func (c VideoCaptureProperties) String() string {
+ switch c {
+ case VideoCapturePosMsec:
+ return "video-capture-pos-msec"
+ case VideoCapturePosFrames:
+ return "video-capture-pos-frames"
+ case VideoCapturePosAVIRatio:
+ return "video-capture-pos-avi-ratio"
+ case VideoCaptureFrameWidth:
+ return "video-capture-frame-width"
+ case VideoCaptureFrameHeight:
+ return "video-capture-frame-height"
+ case VideoCaptureFPS:
+ return "video-capture-fps"
+ case VideoCaptureFOURCC:
+ return "video-capture-fourcc"
+ case VideoCaptureFrameCount:
+ return "video-capture-frame-count"
+ case VideoCaptureFormat:
+ return "video-capture-format"
+ case VideoCaptureMode:
+ return "video-capture-mode"
+ case VideoCaptureBrightness:
+ return "video-capture-brightness"
+ case VideoCaptureContrast:
+ return "video-capture-contrast"
+ case VideoCaptureSaturation:
+ return "video-capture-saturation"
+ case VideoCaptureHue:
+ return "video-capture-hue"
+ case VideoCaptureGain:
+ return "video-capture-gain"
+ case VideoCaptureExposure:
+ return "video-capture-exposure"
+ case VideoCaptureConvertRGB:
+ return "video-capture-convert-rgb"
+ case VideoCaptureWhiteBalanceBlueU:
+ return "video-capture-white-balanced-blue-u"
+ case VideoCaptureWhiteBalanceRedV:
+ return "video-capture-white-balanced-red-v"
+ case VideoCaptureRectification:
+ return "video-capture-rectification"
+ case VideoCaptureMonochrome:
+ return "video-capture-monochrome"
+ case VideoCaptureSharpness:
+ return "video-capture-sharpness"
+ case VideoCaptureAutoExposure:
+ return "video-capture-auto-exposure"
+ case VideoCaptureGamma:
+ return "video-capture-gamma"
+ case VideoCaptureTemperature:
+ return "video-capture-temperature"
+ case VideoCaptureTrigger:
+ return "video-capture-trigger"
+ case VideoCaptureTriggerDelay:
+ return "video-capture-trigger-delay"
+ case VideoCaptureZoom:
+ return "video-capture-zoom"
+ case VideoCaptureFocus:
+ return "video-capture-focus"
+ case VideoCaptureGUID:
+ return "video-capture-guid"
+ case VideoCaptureISOSpeed:
+ return "video-capture-iso-speed"
+ case VideoCaptureBacklight:
+ return "video-capture-backlight"
+ case VideoCapturePan:
+ return "video-capture-pan"
+ case VideoCaptureTilt:
+ return "video-capture-tilt"
+ case VideoCaptureRoll:
+ return "video-capture-roll"
+ case VideoCaptureIris:
+ return "video-capture-iris"
+ case VideoCaptureSettings:
+ return "video-capture-settings"
+ case VideoCaptureBufferSize:
+ return "video-capture-buffer-size"
+ case VideoCaptureAutoFocus:
+ return "video-capture-auto-focus"
+ }
+ return ""
+}
diff --git a/vendor/gocv.io/x/gocv/win_build_opencv.cmd b/vendor/gocv.io/x/gocv/win_build_opencv.cmd
new file mode 100644
index 0000000..26e18cb
--- /dev/null
+++ b/vendor/gocv.io/x/gocv/win_build_opencv.cmd
@@ -0,0 +1,40 @@
+echo off
+
+if not exist "C:\opencv" mkdir "C:\opencv"
+if not exist "C:\opencv\build" mkdir "C:\opencv\build"
+
+echo Downloading OpenCV sources
+echo.
+echo For monitoring the download progress please check the C:\opencv directory.
+echo.
+
+REM This is why there is no progress bar:
+REM https://github.com/PowerShell/PowerShell/issues/2138
+
+echo Downloading: opencv-4.2.0.zip [91MB]
+powershell -command "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $ProgressPreference = 'SilentlyContinue'; Invoke-WebRequest -Uri https://github.com/opencv/opencv/archive/4.2.0.zip -OutFile c:\opencv\opencv-4.2.0.zip"
+echo Extracting...
+powershell -command "$ProgressPreference = 'SilentlyContinue'; Expand-Archive -Path c:\opencv\opencv-4.2.0.zip -DestinationPath c:\opencv"
+del c:\opencv\opencv-4.2.0.zip /q
+echo.
+
+echo Downloading: opencv_contrib-4.2.0.zip [58MB]
+powershell -command "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $ProgressPreference = 'SilentlyContinue'; Invoke-WebRequest -Uri https://github.com/opencv/opencv_contrib/archive/4.2.0.zip -OutFile c:\opencv\opencv_contrib-4.2.0.zip"
+echo Extracting...
+powershell -command "$ProgressPreference = 'SilentlyContinue'; Expand-Archive -Path c:\opencv\opencv_contrib-4.2.0.zip -DestinationPath c:\opencv"
+del c:\opencv\opencv_contrib-4.2.0.zip /q
+echo.
+
+echo Done with downloading and extracting sources.
+echo.
+
+echo on
+
+cd /D C:\opencv\build
+set PATH=%PATH%;C:\Program Files (x86)\CMake\bin;C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin
+cmake C:\opencv\opencv-4.2.0 -G "MinGW Makefiles" -BC:\opencv\build -DENABLE_CXX11=ON -DOPENCV_EXTRA_MODULES_PATH=C:\opencv\opencv_contrib-4.2.0\modules -DBUILD_SHARED_LIBS=ON -DWITH_IPP=OFF -DWITH_MSMF=OFF -DBUILD_EXAMPLES=OFF -DBUILD_TESTS=OFF -DBUILD_PERF_TESTS=OFF -DBUILD_opencv_java=OFF -DBUILD_opencv_python=OFF -DBUILD_opencv_python2=OFF -DBUILD_opencv_python3=OFF -DBUILD_DOCS=OFF -DENABLE_PRECOMPILED_HEADERS=OFF -DBUILD_opencv_saliency=OFF -DCPU_DISPATCH= -DOPENCV_GENERATE_PKGCONFIG=ON -DWITH_OPENCL_D3D11_NV=OFF -Wno-dev
+mingw32-make -j%NUMBER_OF_PROCESSORS%
+mingw32-make install
+rmdir c:\opencv\opencv-4.2.0 /s /q
+rmdir c:\opencv\opencv_contrib-4.2.0 /s /q
+chdir /D %GOPATH%\src\gocv.io\x\gocv
diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS
new file mode 100644
index 0000000..15167cd
--- /dev/null
+++ b/vendor/golang.org/x/net/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS
new file mode 100644
index 0000000..1c4577e
--- /dev/null
+++ b/vendor/golang.org/x/net/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/vendor/golang.org/x/net/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS
new file mode 100644
index 0000000..7330990
--- /dev/null
+++ b/vendor/golang.org/x/net/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/net/internal/socks/client.go b/vendor/golang.org/x/net/internal/socks/client.go
new file mode 100644
index 0000000..3d6f516
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/socks/client.go
@@ -0,0 +1,168 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package socks
+
+import (
+ "context"
+ "errors"
+ "io"
+ "net"
+ "strconv"
+ "time"
+)
+
+var (
+ noDeadline = time.Time{}
+ aLongTimeAgo = time.Unix(1, 0)
+)
+
+func (d *Dialer) connect(ctx context.Context, c net.Conn, address string) (_ net.Addr, ctxErr error) {
+ host, port, err := splitHostPort(address)
+ if err != nil {
+ return nil, err
+ }
+ if deadline, ok := ctx.Deadline(); ok && !deadline.IsZero() {
+ c.SetDeadline(deadline)
+ defer c.SetDeadline(noDeadline)
+ }
+ if ctx != context.Background() {
+ errCh := make(chan error, 1)
+ done := make(chan struct{})
+ defer func() {
+ close(done)
+ if ctxErr == nil {
+ ctxErr = <-errCh
+ }
+ }()
+ go func() {
+ select {
+ case <-ctx.Done():
+ c.SetDeadline(aLongTimeAgo)
+ errCh <- ctx.Err()
+ case <-done:
+ errCh <- nil
+ }
+ }()
+ }
+
+ b := make([]byte, 0, 6+len(host)) // the size here is just an estimate
+ b = append(b, Version5)
+ if len(d.AuthMethods) == 0 || d.Authenticate == nil {
+ b = append(b, 1, byte(AuthMethodNotRequired))
+ } else {
+ ams := d.AuthMethods
+ if len(ams) > 255 {
+ return nil, errors.New("too many authentication methods")
+ }
+ b = append(b, byte(len(ams)))
+ for _, am := range ams {
+ b = append(b, byte(am))
+ }
+ }
+ if _, ctxErr = c.Write(b); ctxErr != nil {
+ return
+ }
+
+ if _, ctxErr = io.ReadFull(c, b[:2]); ctxErr != nil {
+ return
+ }
+ if b[0] != Version5 {
+ return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0])))
+ }
+ am := AuthMethod(b[1])
+ if am == AuthMethodNoAcceptableMethods {
+ return nil, errors.New("no acceptable authentication methods")
+ }
+ if d.Authenticate != nil {
+ if ctxErr = d.Authenticate(ctx, c, am); ctxErr != nil {
+ return
+ }
+ }
+
+ b = b[:0]
+ b = append(b, Version5, byte(d.cmd), 0)
+ if ip := net.ParseIP(host); ip != nil {
+ if ip4 := ip.To4(); ip4 != nil {
+ b = append(b, AddrTypeIPv4)
+ b = append(b, ip4...)
+ } else if ip6 := ip.To16(); ip6 != nil {
+ b = append(b, AddrTypeIPv6)
+ b = append(b, ip6...)
+ } else {
+ return nil, errors.New("unknown address type")
+ }
+ } else {
+ if len(host) > 255 {
+ return nil, errors.New("FQDN too long")
+ }
+ b = append(b, AddrTypeFQDN)
+ b = append(b, byte(len(host)))
+ b = append(b, host...)
+ }
+ b = append(b, byte(port>>8), byte(port))
+ if _, ctxErr = c.Write(b); ctxErr != nil {
+ return
+ }
+
+ if _, ctxErr = io.ReadFull(c, b[:4]); ctxErr != nil {
+ return
+ }
+ if b[0] != Version5 {
+ return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0])))
+ }
+ if cmdErr := Reply(b[1]); cmdErr != StatusSucceeded {
+ return nil, errors.New("unknown error " + cmdErr.String())
+ }
+ if b[2] != 0 {
+ return nil, errors.New("non-zero reserved field")
+ }
+ l := 2
+ var a Addr
+ switch b[3] {
+ case AddrTypeIPv4:
+ l += net.IPv4len
+ a.IP = make(net.IP, net.IPv4len)
+ case AddrTypeIPv6:
+ l += net.IPv6len
+ a.IP = make(net.IP, net.IPv6len)
+ case AddrTypeFQDN:
+ if _, err := io.ReadFull(c, b[:1]); err != nil {
+ return nil, err
+ }
+ l += int(b[0])
+ default:
+ return nil, errors.New("unknown address type " + strconv.Itoa(int(b[3])))
+ }
+ if cap(b) < l {
+ b = make([]byte, l)
+ } else {
+ b = b[:l]
+ }
+ if _, ctxErr = io.ReadFull(c, b); ctxErr != nil {
+ return
+ }
+ if a.IP != nil {
+ copy(a.IP, b)
+ } else {
+ a.Name = string(b[:len(b)-2])
+ }
+ a.Port = int(b[len(b)-2])<<8 | int(b[len(b)-1])
+ return &a, nil
+}
+
+func splitHostPort(address string) (string, int, error) {
+ host, port, err := net.SplitHostPort(address)
+ if err != nil {
+ return "", 0, err
+ }
+ portnum, err := strconv.Atoi(port)
+ if err != nil {
+ return "", 0, err
+ }
+ if 1 > portnum || portnum > 0xffff {
+ return "", 0, errors.New("port number out of range " + port)
+ }
+ return host, portnum, nil
+}
diff --git a/vendor/golang.org/x/net/internal/socks/socks.go b/vendor/golang.org/x/net/internal/socks/socks.go
new file mode 100644
index 0000000..97db234
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/socks/socks.go
@@ -0,0 +1,317 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package socks provides a SOCKS version 5 client implementation.
+//
+// SOCKS protocol version 5 is defined in RFC 1928.
+// Username/Password authentication for SOCKS version 5 is defined in
+// RFC 1929.
+package socks
+
+import (
+ "context"
+ "errors"
+ "io"
+ "net"
+ "strconv"
+)
+
+// A Command represents a SOCKS command.
+type Command int
+
+func (cmd Command) String() string {
+ switch cmd {
+ case CmdConnect:
+ return "socks connect"
+ case cmdBind:
+ return "socks bind"
+ default:
+ return "socks " + strconv.Itoa(int(cmd))
+ }
+}
+
+// An AuthMethod represents a SOCKS authentication method.
+type AuthMethod int
+
+// A Reply represents a SOCKS command reply code.
+type Reply int
+
+func (code Reply) String() string {
+ switch code {
+ case StatusSucceeded:
+ return "succeeded"
+ case 0x01:
+ return "general SOCKS server failure"
+ case 0x02:
+ return "connection not allowed by ruleset"
+ case 0x03:
+ return "network unreachable"
+ case 0x04:
+ return "host unreachable"
+ case 0x05:
+ return "connection refused"
+ case 0x06:
+ return "TTL expired"
+ case 0x07:
+ return "command not supported"
+ case 0x08:
+ return "address type not supported"
+ default:
+ return "unknown code: " + strconv.Itoa(int(code))
+ }
+}
+
+// Wire protocol constants.
+const (
+ Version5 = 0x05
+
+ AddrTypeIPv4 = 0x01
+ AddrTypeFQDN = 0x03
+ AddrTypeIPv6 = 0x04
+
+ CmdConnect Command = 0x01 // establishes an active-open forward proxy connection
+ cmdBind Command = 0x02 // establishes a passive-open forward proxy connection
+
+ AuthMethodNotRequired AuthMethod = 0x00 // no authentication required
+ AuthMethodUsernamePassword AuthMethod = 0x02 // use username/password
+ AuthMethodNoAcceptableMethods AuthMethod = 0xff // no acceptable authentication methods
+
+ StatusSucceeded Reply = 0x00
+)
+
+// An Addr represents a SOCKS-specific address.
+// Either Name or IP is used exclusively.
+type Addr struct {
+ Name string // fully-qualified domain name
+ IP net.IP
+ Port int
+}
+
+func (a *Addr) Network() string { return "socks" }
+
+func (a *Addr) String() string {
+ if a == nil {
+ return ""
+ }
+ port := strconv.Itoa(a.Port)
+ if a.IP == nil {
+ return net.JoinHostPort(a.Name, port)
+ }
+ return net.JoinHostPort(a.IP.String(), port)
+}
+
+// A Conn represents a forward proxy connection.
+type Conn struct {
+ net.Conn
+
+ boundAddr net.Addr
+}
+
+// BoundAddr returns the address assigned by the proxy server for
+// connecting to the command target address from the proxy server.
+func (c *Conn) BoundAddr() net.Addr {
+ if c == nil {
+ return nil
+ }
+ return c.boundAddr
+}
+
+// A Dialer holds SOCKS-specific options.
+type Dialer struct {
+ cmd Command // either CmdConnect or cmdBind
+ proxyNetwork string // network between a proxy server and a client
+ proxyAddress string // proxy server address
+
+ // ProxyDial specifies the optional dial function for
+ // establishing the transport connection.
+ ProxyDial func(context.Context, string, string) (net.Conn, error)
+
+ // AuthMethods specifies the list of request authentication
+ // methods.
+ // If empty, SOCKS client requests only AuthMethodNotRequired.
+ AuthMethods []AuthMethod
+
+ // Authenticate specifies the optional authentication
+ // function. It must be non-nil when AuthMethods is not empty.
+ // It must return an error when the authentication fails.
+ Authenticate func(context.Context, io.ReadWriter, AuthMethod) error
+}
+
+// DialContext connects to the provided address on the provided
+// network.
+//
+// The returned error value may be a net.OpError. When the Op field of
+// net.OpError contains "socks", the Source field contains a proxy
+// server address and the Addr field contains a command target
+// address.
+//
+// See func Dial of the net package of standard library for a
+// description of the network and address parameters.
+func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
+ if err := d.validateTarget(network, address); err != nil {
+ proxy, dst, _ := d.pathAddrs(address)
+ return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
+ }
+ if ctx == nil {
+ proxy, dst, _ := d.pathAddrs(address)
+ return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")}
+ }
+ var err error
+ var c net.Conn
+ if d.ProxyDial != nil {
+ c, err = d.ProxyDial(ctx, d.proxyNetwork, d.proxyAddress)
+ } else {
+ var dd net.Dialer
+ c, err = dd.DialContext(ctx, d.proxyNetwork, d.proxyAddress)
+ }
+ if err != nil {
+ proxy, dst, _ := d.pathAddrs(address)
+ return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
+ }
+ a, err := d.connect(ctx, c, address)
+ if err != nil {
+ c.Close()
+ proxy, dst, _ := d.pathAddrs(address)
+ return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
+ }
+ return &Conn{Conn: c, boundAddr: a}, nil
+}
+
+// DialWithConn initiates a connection from SOCKS server to the target
+// network and address using the connection c that is already
+// connected to the SOCKS server.
+//
+// It returns the connection's local address assigned by the SOCKS
+// server.
+func (d *Dialer) DialWithConn(ctx context.Context, c net.Conn, network, address string) (net.Addr, error) {
+ if err := d.validateTarget(network, address); err != nil {
+ proxy, dst, _ := d.pathAddrs(address)
+ return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
+ }
+ if ctx == nil {
+ proxy, dst, _ := d.pathAddrs(address)
+ return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")}
+ }
+ a, err := d.connect(ctx, c, address)
+ if err != nil {
+ proxy, dst, _ := d.pathAddrs(address)
+ return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
+ }
+ return a, nil
+}
+
+// Dial connects to the provided address on the provided network.
+//
+// Unlike DialContext, it returns a raw transport connection instead
+// of a forward proxy connection.
+//
+// Deprecated: Use DialContext or DialWithConn instead.
+func (d *Dialer) Dial(network, address string) (net.Conn, error) {
+ if err := d.validateTarget(network, address); err != nil {
+ proxy, dst, _ := d.pathAddrs(address)
+ return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
+ }
+ var err error
+ var c net.Conn
+ if d.ProxyDial != nil {
+ c, err = d.ProxyDial(context.Background(), d.proxyNetwork, d.proxyAddress)
+ } else {
+ c, err = net.Dial(d.proxyNetwork, d.proxyAddress)
+ }
+ if err != nil {
+ proxy, dst, _ := d.pathAddrs(address)
+ return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
+ }
+ if _, err := d.DialWithConn(context.Background(), c, network, address); err != nil {
+ c.Close()
+ return nil, err
+ }
+ return c, nil
+}
+
+func (d *Dialer) validateTarget(network, address string) error {
+ switch network {
+ case "tcp", "tcp6", "tcp4":
+ default:
+ return errors.New("network not implemented")
+ }
+ switch d.cmd {
+ case CmdConnect, cmdBind:
+ default:
+ return errors.New("command not implemented")
+ }
+ return nil
+}
+
+func (d *Dialer) pathAddrs(address string) (proxy, dst net.Addr, err error) {
+ for i, s := range []string{d.proxyAddress, address} {
+ host, port, err := splitHostPort(s)
+ if err != nil {
+ return nil, nil, err
+ }
+ a := &Addr{Port: port}
+ a.IP = net.ParseIP(host)
+ if a.IP == nil {
+ a.Name = host
+ }
+ if i == 0 {
+ proxy = a
+ } else {
+ dst = a
+ }
+ }
+ return
+}
+
+// NewDialer returns a new Dialer that dials through the provided
+// proxy server's network and address.
+func NewDialer(network, address string) *Dialer {
+ return &Dialer{proxyNetwork: network, proxyAddress: address, cmd: CmdConnect}
+}
+
+const (
+ authUsernamePasswordVersion = 0x01
+ authStatusSucceeded = 0x00
+)
+
+// UsernamePassword are the credentials for the username/password
+// authentication method.
+type UsernamePassword struct {
+ Username string
+ Password string
+}
+
+// Authenticate authenticates a pair of username and password with the
+// proxy server.
+func (up *UsernamePassword) Authenticate(ctx context.Context, rw io.ReadWriter, auth AuthMethod) error {
+ switch auth {
+ case AuthMethodNotRequired:
+ return nil
+ case AuthMethodUsernamePassword:
+ if len(up.Username) == 0 || len(up.Username) > 255 || len(up.Password) == 0 || len(up.Password) > 255 {
+ return errors.New("invalid username/password")
+ }
+ b := []byte{authUsernamePasswordVersion}
+ b = append(b, byte(len(up.Username)))
+ b = append(b, up.Username...)
+ b = append(b, byte(len(up.Password)))
+ b = append(b, up.Password...)
+ // TODO(mikio): handle IO deadlines and cancelation if
+ // necessary
+ if _, err := rw.Write(b); err != nil {
+ return err
+ }
+ if _, err := io.ReadFull(rw, b[:2]); err != nil {
+ return err
+ }
+ if b[0] != authUsernamePasswordVersion {
+ return errors.New("invalid username/password version")
+ }
+ if b[1] != authStatusSucceeded {
+ return errors.New("username/password authentication failed")
+ }
+ return nil
+ }
+ return errors.New("unsupported authentication method " + strconv.Itoa(int(auth)))
+}
diff --git a/vendor/golang.org/x/net/proxy/dial.go b/vendor/golang.org/x/net/proxy/dial.go
new file mode 100644
index 0000000..811c2e4
--- /dev/null
+++ b/vendor/golang.org/x/net/proxy/dial.go
@@ -0,0 +1,54 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proxy
+
+import (
+ "context"
+ "net"
+)
+
+// A ContextDialer dials using a context.
+type ContextDialer interface {
+ DialContext(ctx context.Context, network, address string) (net.Conn, error)
+}
+
+// Dial works like DialContext on net.Dialer but using a dialer returned by FromEnvironment.
+//
+// The passed ctx is only used for returning the Conn, not the lifetime of the Conn.
+//
+// Custom dialers (registered via RegisterDialerType) that do not implement ContextDialer
+// can leak a goroutine for as long as it takes the underlying Dialer implementation to timeout.
+//
+// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed.
+func Dial(ctx context.Context, network, address string) (net.Conn, error) {
+ d := FromEnvironment()
+ if xd, ok := d.(ContextDialer); ok {
+ return xd.DialContext(ctx, network, address)
+ }
+ return dialContext(ctx, d, network, address)
+}
+
+// WARNING: this can leak a goroutine for as long as the underlying Dialer implementation takes to timeout
+// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed.
+func dialContext(ctx context.Context, d Dialer, network, address string) (net.Conn, error) {
+ var (
+ conn net.Conn
+ done = make(chan struct{}, 1)
+ err error
+ )
+ go func() {
+ conn, err = d.Dial(network, address)
+ close(done)
+ if conn != nil && ctx.Err() != nil {
+ conn.Close()
+ }
+ }()
+ select {
+ case <-ctx.Done():
+ err = ctx.Err()
+ case <-done:
+ }
+ return conn, err
+}
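
A short sketch of the context-aware entry point above, assuming ALL_PROXY (and optionally NO_PROXY) is set in the environment and that the target address is reachable through the configured proxy.

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/net/proxy"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Dial builds the dialer chain via FromEnvironment; if that chain does not
	// implement ContextDialer it falls back to the goroutine-based dialContext.
	conn, err := proxy.Dial(ctx, "tcp", "example.com:80")
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()
	fmt.Println("connected via", conn.RemoteAddr())
}
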
diff --git a/vendor/golang.org/x/net/proxy/direct.go b/vendor/golang.org/x/net/proxy/direct.go
new file mode 100644
index 0000000..3d66bde
--- /dev/null
+++ b/vendor/golang.org/x/net/proxy/direct.go
@@ -0,0 +1,31 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proxy
+
+import (
+ "context"
+ "net"
+)
+
+type direct struct{}
+
+// Direct implements Dialer by making network connections directly using net.Dial or net.DialContext.
+var Direct = direct{}
+
+var (
+ _ Dialer = Direct
+ _ ContextDialer = Direct
+)
+
+// Dial directly invokes net.Dial with the supplied parameters.
+func (direct) Dial(network, addr string) (net.Conn, error) {
+ return net.Dial(network, addr)
+}
+
+// DialContext instantiates a net.Dialer and invokes its DialContext receiver with the supplied parameters.
+func (direct) DialContext(ctx context.Context, network, addr string) (net.Conn, error) {
+ var d net.Dialer
+ return d.DialContext(ctx, network, addr)
+}
diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go
new file mode 100644
index 0000000..573fe79
--- /dev/null
+++ b/vendor/golang.org/x/net/proxy/per_host.go
@@ -0,0 +1,155 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proxy
+
+import (
+ "context"
+ "net"
+ "strings"
+)
+
+// A PerHost directs connections to a default Dialer unless the host name
+// requested matches one of a number of exceptions.
+type PerHost struct {
+ def, bypass Dialer
+
+ bypassNetworks []*net.IPNet
+ bypassIPs []net.IP
+ bypassZones []string
+ bypassHosts []string
+}
+
+// NewPerHost returns a PerHost Dialer that directs connections to either
+// defaultDialer or bypass, depending on whether the connection matches one of
+// the configured rules.
+func NewPerHost(defaultDialer, bypass Dialer) *PerHost {
+ return &PerHost{
+ def: defaultDialer,
+ bypass: bypass,
+ }
+}
+
+// Dial connects to the address addr on the given network through either
+// defaultDialer or bypass.
+func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) {
+ host, _, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, err
+ }
+
+ return p.dialerForRequest(host).Dial(network, addr)
+}
+
+// DialContext connects to the address addr on the given network through either
+// defaultDialer or bypass.
+func (p *PerHost) DialContext(ctx context.Context, network, addr string) (c net.Conn, err error) {
+ host, _, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, err
+ }
+ d := p.dialerForRequest(host)
+ if x, ok := d.(ContextDialer); ok {
+ return x.DialContext(ctx, network, addr)
+ }
+ return dialContext(ctx, d, network, addr)
+}
+
+func (p *PerHost) dialerForRequest(host string) Dialer {
+ if ip := net.ParseIP(host); ip != nil {
+ for _, net := range p.bypassNetworks {
+ if net.Contains(ip) {
+ return p.bypass
+ }
+ }
+ for _, bypassIP := range p.bypassIPs {
+ if bypassIP.Equal(ip) {
+ return p.bypass
+ }
+ }
+ return p.def
+ }
+
+ for _, zone := range p.bypassZones {
+ if strings.HasSuffix(host, zone) {
+ return p.bypass
+ }
+ if host == zone[1:] {
+ // For a zone ".example.com", we match "example.com"
+ // too.
+ return p.bypass
+ }
+ }
+ for _, bypassHost := range p.bypassHosts {
+ if bypassHost == host {
+ return p.bypass
+ }
+ }
+ return p.def
+}
+
+// AddFromString parses a string that contains comma-separated values
+// specifying hosts that should use the bypass proxy. Each value is either an
+// IP address, a CIDR range, a zone (*.example.com) or a host name
+// (localhost). A best effort is made to parse the string and errors are
+// ignored.
+func (p *PerHost) AddFromString(s string) {
+ hosts := strings.Split(s, ",")
+ for _, host := range hosts {
+ host = strings.TrimSpace(host)
+ if len(host) == 0 {
+ continue
+ }
+ if strings.Contains(host, "/") {
+ // We assume that it's a CIDR address like 127.0.0.0/8
+ if _, net, err := net.ParseCIDR(host); err == nil {
+ p.AddNetwork(net)
+ }
+ continue
+ }
+ if ip := net.ParseIP(host); ip != nil {
+ p.AddIP(ip)
+ continue
+ }
+ if strings.HasPrefix(host, "*.") {
+ p.AddZone(host[1:])
+ continue
+ }
+ p.AddHost(host)
+ }
+}
+
+// AddIP specifies an IP address that will use the bypass proxy. Note that
+// this will only take effect if a literal IP address is dialed. A connection
+// to a named host will never match an IP.
+func (p *PerHost) AddIP(ip net.IP) {
+ p.bypassIPs = append(p.bypassIPs, ip)
+}
+
+// AddNetwork specifies an IP range that will use the bypass proxy. Note that
+// this will only take effect if a literal IP address is dialed. A connection
+// to a named host will never match.
+func (p *PerHost) AddNetwork(net *net.IPNet) {
+ p.bypassNetworks = append(p.bypassNetworks, net)
+}
+
+// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
+// "example.com" matches "example.com" and all of its subdomains.
+func (p *PerHost) AddZone(zone string) {
+ if strings.HasSuffix(zone, ".") {
+ zone = zone[:len(zone)-1]
+ }
+ if !strings.HasPrefix(zone, ".") {
+ zone = "." + zone
+ }
+ p.bypassZones = append(p.bypassZones, zone)
+}
+
+// AddHost specifies a host name that will use the bypass proxy.
+func (p *PerHost) AddHost(host string) {
+ if strings.HasSuffix(host, ".") {
+ host = host[:len(host)-1]
+ }
+ p.bypassHosts = append(p.bypassHosts, host)
+}
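
To illustrate PerHost, the sketch below routes traffic through a SOCKS5 proxy by default and dials internal hosts directly; the proxy address and bypass list are placeholder assumptions.

package main

import (
	"fmt"

	"golang.org/x/net/proxy"
)

func main() {
	socksDialer, err := proxy.SOCKS5("tcp", "127.0.0.1:1080", nil, proxy.Direct)
	if err != nil {
		fmt.Println("proxy setup failed:", err)
		return
	}

	// Default to the SOCKS5 dialer, bypass it for the listed hosts.
	perHost := proxy.NewPerHost(socksDialer, proxy.Direct)
	// IPs, CIDR ranges, zones (*.internal.example) and host names are accepted.
	perHost.AddFromString("localhost,127.0.0.1,10.0.0.0/8,*.internal.example")

	conn, err := perHost.Dial("tcp", "example.com:443")
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()
}
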
diff --git a/vendor/golang.org/x/net/proxy/proxy.go b/vendor/golang.org/x/net/proxy/proxy.go
new file mode 100644
index 0000000..9ff4b9a
--- /dev/null
+++ b/vendor/golang.org/x/net/proxy/proxy.go
@@ -0,0 +1,149 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package proxy provides support for a variety of protocols to proxy network
+// data.
+package proxy // import "golang.org/x/net/proxy"
+
+import (
+ "errors"
+ "net"
+ "net/url"
+ "os"
+ "sync"
+)
+
+// A Dialer is a means to establish a connection.
+// Custom dialers should also implement ContextDialer.
+type Dialer interface {
+ // Dial connects to the given address via the proxy.
+ Dial(network, addr string) (c net.Conn, err error)
+}
+
+// Auth contains authentication parameters that specific Dialers may require.
+type Auth struct {
+ User, Password string
+}
+
+// FromEnvironment returns the dialer specified by the proxy-related
+// variables in the environment and makes underlying connections
+// directly.
+func FromEnvironment() Dialer {
+ return FromEnvironmentUsing(Direct)
+}
+
+// FromEnvironmentUsing returns the dialer specified by the proxy-related
+// variables in the environment and makes underlying connections
+// using the provided forwarding Dialer (for instance, a *net.Dialer
+// with desired configuration).
+func FromEnvironmentUsing(forward Dialer) Dialer {
+ allProxy := allProxyEnv.Get()
+ if len(allProxy) == 0 {
+ return forward
+ }
+
+ proxyURL, err := url.Parse(allProxy)
+ if err != nil {
+ return forward
+ }
+ proxy, err := FromURL(proxyURL, forward)
+ if err != nil {
+ return forward
+ }
+
+ noProxy := noProxyEnv.Get()
+ if len(noProxy) == 0 {
+ return proxy
+ }
+
+ perHost := NewPerHost(proxy, forward)
+ perHost.AddFromString(noProxy)
+ return perHost
+}
+
+// proxySchemes is a map from URL schemes to a function that creates a Dialer
+// from a URL with such a scheme.
+var proxySchemes map[string]func(*url.URL, Dialer) (Dialer, error)
+
+// RegisterDialerType takes a URL scheme and a function to generate Dialers from
+// a URL with that scheme and a forwarding Dialer. Registered schemes are used
+// by FromURL.
+func RegisterDialerType(scheme string, f func(*url.URL, Dialer) (Dialer, error)) {
+ if proxySchemes == nil {
+ proxySchemes = make(map[string]func(*url.URL, Dialer) (Dialer, error))
+ }
+ proxySchemes[scheme] = f
+}
+
+// FromURL returns a Dialer given a URL specification and an underlying
+// Dialer for it to make network requests.
+func FromURL(u *url.URL, forward Dialer) (Dialer, error) {
+ var auth *Auth
+ if u.User != nil {
+ auth = new(Auth)
+ auth.User = u.User.Username()
+ if p, ok := u.User.Password(); ok {
+ auth.Password = p
+ }
+ }
+
+ switch u.Scheme {
+ case "socks5", "socks5h":
+ addr := u.Hostname()
+ port := u.Port()
+ if port == "" {
+ port = "1080"
+ }
+ return SOCKS5("tcp", net.JoinHostPort(addr, port), auth, forward)
+ }
+
+ // If the scheme doesn't match any of the built-in schemes, see if it
+ // was registered by another package.
+ if proxySchemes != nil {
+ if f, ok := proxySchemes[u.Scheme]; ok {
+ return f(u, forward)
+ }
+ }
+
+ return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
+}
+
+var (
+ allProxyEnv = &envOnce{
+ names: []string{"ALL_PROXY", "all_proxy"},
+ }
+ noProxyEnv = &envOnce{
+ names: []string{"NO_PROXY", "no_proxy"},
+ }
+)
+
+// envOnce looks up an environment variable (optionally by multiple
+// names) once. It mitigates expensive lookups on some platforms
+// (e.g. Windows).
+// (Borrowed from net/http/transport.go)
+type envOnce struct {
+ names []string
+ once sync.Once
+ val string
+}
+
+func (e *envOnce) Get() string {
+ e.once.Do(e.init)
+ return e.val
+}
+
+func (e *envOnce) init() {
+ for _, n := range e.names {
+ e.val = os.Getenv(n)
+ if e.val != "" {
+ return
+ }
+ }
+}
+
+// reset is used by tests
+func (e *envOnce) reset() {
+ e.once = sync.Once{}
+ e.val = ""
+}
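
A minimal sketch of FromEnvironmentUsing, assuming ALL_PROXY/NO_PROXY are set in the environment; the forwarding *net.Dialer and its timeout values are illustrative choices.

package main

import (
	"fmt"
	"net"
	"time"

	"golang.org/x/net/proxy"
)

func main() {
	// *net.Dialer satisfies the Dialer interface, so it can serve as the
	// forwarding dialer used for the underlying proxy connections.
	forward := &net.Dialer{Timeout: 10 * time.Second, KeepAlive: 30 * time.Second}
	dialer := proxy.FromEnvironmentUsing(forward)

	conn, err := dialer.Dial("tcp", "example.com:80")
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()
	fmt.Println("connected to", conn.RemoteAddr())
}
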
diff --git a/vendor/golang.org/x/net/proxy/socks5.go b/vendor/golang.org/x/net/proxy/socks5.go
new file mode 100644
index 0000000..c91651f
--- /dev/null
+++ b/vendor/golang.org/x/net/proxy/socks5.go
@@ -0,0 +1,42 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proxy
+
+import (
+ "context"
+ "net"
+
+ "golang.org/x/net/internal/socks"
+)
+
+// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given
+// address with an optional username and password.
+// See RFC 1928 and RFC 1929.
+func SOCKS5(network, address string, auth *Auth, forward Dialer) (Dialer, error) {
+ d := socks.NewDialer(network, address)
+ if forward != nil {
+ if f, ok := forward.(ContextDialer); ok {
+ d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) {
+ return f.DialContext(ctx, network, address)
+ }
+ } else {
+ d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) {
+ return dialContext(ctx, forward, network, address)
+ }
+ }
+ }
+ if auth != nil {
+ up := socks.UsernamePassword{
+ Username: auth.User,
+ Password: auth.Password,
+ }
+ d.AuthMethods = []socks.AuthMethod{
+ socks.AuthMethodNotRequired,
+ socks.AuthMethodUsernamePassword,
+ }
+ d.Authenticate = up.Authenticate
+ }
+ return d, nil
+}
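
The SOCKS5 constructor above wraps the internal socks package; a sketch with username/password authentication follows, where the proxy address and credentials are placeholders.

package main

import (
	"fmt"

	"golang.org/x/net/proxy"
)

func main() {
	auth := &proxy.Auth{User: "user", Password: "secret"} // placeholder credentials

	dialer, err := proxy.SOCKS5("tcp", "socks.example.com:1080", auth, proxy.Direct)
	if err != nil {
		fmt.Println("bad proxy configuration:", err)
		return
	}

	conn, err := dialer.Dial("tcp", "example.com:443")
	if err != nil {
		fmt.Println("dial through proxy failed:", err)
		return
	}
	defer conn.Close()
}
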
diff --git a/vendor/golang.org/x/net/websocket/client.go b/vendor/golang.org/x/net/websocket/client.go
new file mode 100644
index 0000000..69a4ac7
--- /dev/null
+++ b/vendor/golang.org/x/net/websocket/client.go
@@ -0,0 +1,106 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+)
+
+// DialError is an error that occurs while dialling a websocket server.
+type DialError struct {
+ *Config
+ Err error
+}
+
+func (e *DialError) Error() string {
+ return "websocket.Dial " + e.Config.Location.String() + ": " + e.Err.Error()
+}
+
+// NewConfig creates a new WebSocket config for client connection.
+func NewConfig(server, origin string) (config *Config, err error) {
+ config = new(Config)
+ config.Version = ProtocolVersionHybi13
+ config.Location, err = url.ParseRequestURI(server)
+ if err != nil {
+ return
+ }
+ config.Origin, err = url.ParseRequestURI(origin)
+ if err != nil {
+ return
+ }
+ config.Header = http.Header(make(map[string][]string))
+ return
+}
+
+// NewClient creates a new WebSocket client connection over rwc.
+func NewClient(config *Config, rwc io.ReadWriteCloser) (ws *Conn, err error) {
+ br := bufio.NewReader(rwc)
+ bw := bufio.NewWriter(rwc)
+ err = hybiClientHandshake(config, br, bw)
+ if err != nil {
+ return
+ }
+ buf := bufio.NewReadWriter(br, bw)
+ ws = newHybiClientConn(config, buf, rwc)
+ return
+}
+
+// Dial opens a new client connection to a WebSocket.
+func Dial(url_, protocol, origin string) (ws *Conn, err error) {
+ config, err := NewConfig(url_, origin)
+ if err != nil {
+ return nil, err
+ }
+ if protocol != "" {
+ config.Protocol = []string{protocol}
+ }
+ return DialConfig(config)
+}
+
+var portMap = map[string]string{
+ "ws": "80",
+ "wss": "443",
+}
+
+func parseAuthority(location *url.URL) string {
+ if _, ok := portMap[location.Scheme]; ok {
+ if _, _, err := net.SplitHostPort(location.Host); err != nil {
+ return net.JoinHostPort(location.Host, portMap[location.Scheme])
+ }
+ }
+ return location.Host
+}
+
+// DialConfig opens a new client connection to a WebSocket with a config.
+func DialConfig(config *Config) (ws *Conn, err error) {
+ var client net.Conn
+ if config.Location == nil {
+ return nil, &DialError{config, ErrBadWebSocketLocation}
+ }
+ if config.Origin == nil {
+ return nil, &DialError{config, ErrBadWebSocketOrigin}
+ }
+ dialer := config.Dialer
+ if dialer == nil {
+ dialer = &net.Dialer{}
+ }
+ client, err = dialWithDialer(dialer, config)
+ if err != nil {
+ goto Error
+ }
+ ws, err = NewClient(config, client)
+ if err != nil {
+ client.Close()
+ goto Error
+ }
+ return
+
+Error:
+ return nil, &DialError{config, err}
+}
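
A small sketch of the client API above, assuming an echo endpoint at ws://localhost:8080/echo; the URL and origin are placeholders.

package main

import (
	"fmt"

	"golang.org/x/net/websocket"
)

func main() {
	ws, err := websocket.Dial("ws://localhost:8080/echo", "", "http://localhost/")
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer ws.Close()

	if _, err := ws.Write([]byte("hello")); err != nil {
		fmt.Println("write failed:", err)
		return
	}

	msg := make([]byte, 512)
	n, err := ws.Read(msg)
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Printf("received: %s\n", msg[:n])
}
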
diff --git a/vendor/golang.org/x/net/websocket/dial.go b/vendor/golang.org/x/net/websocket/dial.go
new file mode 100644
index 0000000..2dab943
--- /dev/null
+++ b/vendor/golang.org/x/net/websocket/dial.go
@@ -0,0 +1,24 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "crypto/tls"
+ "net"
+)
+
+func dialWithDialer(dialer *net.Dialer, config *Config) (conn net.Conn, err error) {
+ switch config.Location.Scheme {
+ case "ws":
+ conn, err = dialer.Dial("tcp", parseAuthority(config.Location))
+
+ case "wss":
+ conn, err = tls.DialWithDialer(dialer, "tcp", parseAuthority(config.Location), config.TlsConfig)
+
+ default:
+ err = ErrBadScheme
+ }
+ return
+}
diff --git a/vendor/golang.org/x/net/websocket/hybi.go b/vendor/golang.org/x/net/websocket/hybi.go
new file mode 100644
index 0000000..8cffdd1
--- /dev/null
+++ b/vendor/golang.org/x/net/websocket/hybi.go
@@ -0,0 +1,583 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+// This file implements the protocol of the hybi draft.
+// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/rand"
+ "crypto/sha1"
+ "encoding/base64"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+const (
+ websocketGUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
+
+ closeStatusNormal = 1000
+ closeStatusGoingAway = 1001
+ closeStatusProtocolError = 1002
+ closeStatusUnsupportedData = 1003
+ closeStatusFrameTooLarge = 1004
+ closeStatusNoStatusRcvd = 1005
+ closeStatusAbnormalClosure = 1006
+ closeStatusBadMessageData = 1007
+ closeStatusPolicyViolation = 1008
+ closeStatusTooBigData = 1009
+ closeStatusExtensionMismatch = 1010
+
+ maxControlFramePayloadLength = 125
+)
+
+var (
+ ErrBadMaskingKey = &ProtocolError{"bad masking key"}
+ ErrBadPongMessage = &ProtocolError{"bad pong message"}
+ ErrBadClosingStatus = &ProtocolError{"bad closing status"}
+ ErrUnsupportedExtensions = &ProtocolError{"unsupported extensions"}
+ ErrNotImplemented = &ProtocolError{"not implemented"}
+
+ handshakeHeader = map[string]bool{
+ "Host": true,
+ "Upgrade": true,
+ "Connection": true,
+ "Sec-Websocket-Key": true,
+ "Sec-Websocket-Origin": true,
+ "Sec-Websocket-Version": true,
+ "Sec-Websocket-Protocol": true,
+ "Sec-Websocket-Accept": true,
+ }
+)
+
+// A hybiFrameHeader is a frame header as defined in hybi draft.
+type hybiFrameHeader struct {
+ Fin bool
+ Rsv [3]bool
+ OpCode byte
+ Length int64
+ MaskingKey []byte
+
+ data *bytes.Buffer
+}
+
+// A hybiFrameReader is a reader for a hybi frame.
+type hybiFrameReader struct {
+ reader io.Reader
+
+ header hybiFrameHeader
+ pos int64
+ length int
+}
+
+func (frame *hybiFrameReader) Read(msg []byte) (n int, err error) {
+ n, err = frame.reader.Read(msg)
+ if frame.header.MaskingKey != nil {
+ for i := 0; i < n; i++ {
+ msg[i] = msg[i] ^ frame.header.MaskingKey[frame.pos%4]
+ frame.pos++
+ }
+ }
+ return n, err
+}
+
+func (frame *hybiFrameReader) PayloadType() byte { return frame.header.OpCode }
+
+func (frame *hybiFrameReader) HeaderReader() io.Reader {
+ if frame.header.data == nil {
+ return nil
+ }
+ if frame.header.data.Len() == 0 {
+ return nil
+ }
+ return frame.header.data
+}
+
+func (frame *hybiFrameReader) TrailerReader() io.Reader { return nil }
+
+func (frame *hybiFrameReader) Len() (n int) { return frame.length }
+
+// A hybiFrameReaderFactory creates a new frame reader based on its frame type.
+type hybiFrameReaderFactory struct {
+ *bufio.Reader
+}
+
+// NewFrameReader reads a frame header from the connection, and creates a new reader for the frame.
+// See Section 5.2 Base Framing protocol for detail.
+// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17#section-5.2
+func (buf hybiFrameReaderFactory) NewFrameReader() (frame frameReader, err error) {
+ hybiFrame := new(hybiFrameReader)
+ frame = hybiFrame
+ var header []byte
+ var b byte
+ // First byte. FIN/RSV1/RSV2/RSV3/OpCode(4bits)
+ b, err = buf.ReadByte()
+ if err != nil {
+ return
+ }
+ header = append(header, b)
+ hybiFrame.header.Fin = ((header[0] >> 7) & 1) != 0
+ for i := 0; i < 3; i++ {
+ j := uint(6 - i)
+ hybiFrame.header.Rsv[i] = ((header[0] >> j) & 1) != 0
+ }
+ hybiFrame.header.OpCode = header[0] & 0x0f
+
+ // Second byte. Mask/Payload len(7bits)
+ b, err = buf.ReadByte()
+ if err != nil {
+ return
+ }
+ header = append(header, b)
+ mask := (b & 0x80) != 0
+ b &= 0x7f
+ lengthFields := 0
+ switch {
+ case b <= 125: // Payload length 7bits.
+ hybiFrame.header.Length = int64(b)
+ case b == 126: // Payload length 7+16bits
+ lengthFields = 2
+ case b == 127: // Payload length 7+64bits
+ lengthFields = 8
+ }
+ for i := 0; i < lengthFields; i++ {
+ b, err = buf.ReadByte()
+ if err != nil {
+ return
+ }
+ if lengthFields == 8 && i == 0 { // MSB must be zero when 7+64 bits
+ b &= 0x7f
+ }
+ header = append(header, b)
+ hybiFrame.header.Length = hybiFrame.header.Length*256 + int64(b)
+ }
+ if mask {
+ // Masking key. 4 bytes.
+ for i := 0; i < 4; i++ {
+ b, err = buf.ReadByte()
+ if err != nil {
+ return
+ }
+ header = append(header, b)
+ hybiFrame.header.MaskingKey = append(hybiFrame.header.MaskingKey, b)
+ }
+ }
+ hybiFrame.reader = io.LimitReader(buf.Reader, hybiFrame.header.Length)
+ hybiFrame.header.data = bytes.NewBuffer(header)
+ hybiFrame.length = len(header) + int(hybiFrame.header.Length)
+ return
+}
+
+// A hybiFrameWriter is a writer for a hybi frame.
+type hybiFrameWriter struct {
+ writer *bufio.Writer
+
+ header *hybiFrameHeader
+}
+
+func (frame *hybiFrameWriter) Write(msg []byte) (n int, err error) {
+ var header []byte
+ var b byte
+ if frame.header.Fin {
+ b |= 0x80
+ }
+ for i := 0; i < 3; i++ {
+ if frame.header.Rsv[i] {
+ j := uint(6 - i)
+ b |= 1 << j
+ }
+ }
+ b |= frame.header.OpCode
+ header = append(header, b)
+ if frame.header.MaskingKey != nil {
+ b = 0x80
+ } else {
+ b = 0
+ }
+ lengthFields := 0
+ length := len(msg)
+ switch {
+ case length <= 125:
+ b |= byte(length)
+ case length < 65536:
+ b |= 126
+ lengthFields = 2
+ default:
+ b |= 127
+ lengthFields = 8
+ }
+ header = append(header, b)
+ for i := 0; i < lengthFields; i++ {
+ j := uint((lengthFields - i - 1) * 8)
+ b = byte((length >> j) & 0xff)
+ header = append(header, b)
+ }
+ if frame.header.MaskingKey != nil {
+ if len(frame.header.MaskingKey) != 4 {
+ return 0, ErrBadMaskingKey
+ }
+ header = append(header, frame.header.MaskingKey...)
+ frame.writer.Write(header)
+ data := make([]byte, length)
+ for i := range data {
+ data[i] = msg[i] ^ frame.header.MaskingKey[i%4]
+ }
+ frame.writer.Write(data)
+ err = frame.writer.Flush()
+ return length, err
+ }
+ frame.writer.Write(header)
+ frame.writer.Write(msg)
+ err = frame.writer.Flush()
+ return length, err
+}
+
+func (frame *hybiFrameWriter) Close() error { return nil }
+
+type hybiFrameWriterFactory struct {
+ *bufio.Writer
+ needMaskingKey bool
+}
+
+func (buf hybiFrameWriterFactory) NewFrameWriter(payloadType byte) (frame frameWriter, err error) {
+ frameHeader := &hybiFrameHeader{Fin: true, OpCode: payloadType}
+ if buf.needMaskingKey {
+ frameHeader.MaskingKey, err = generateMaskingKey()
+ if err != nil {
+ return nil, err
+ }
+ }
+ return &hybiFrameWriter{writer: buf.Writer, header: frameHeader}, nil
+}
+
+type hybiFrameHandler struct {
+ conn *Conn
+ payloadType byte
+}
+
+func (handler *hybiFrameHandler) HandleFrame(frame frameReader) (frameReader, error) {
+ if handler.conn.IsServerConn() {
+ // The client MUST mask all frames sent to the server.
+ if frame.(*hybiFrameReader).header.MaskingKey == nil {
+ handler.WriteClose(closeStatusProtocolError)
+ return nil, io.EOF
+ }
+ } else {
+		// The server MUST NOT mask any frames sent to the client.
+ if frame.(*hybiFrameReader).header.MaskingKey != nil {
+ handler.WriteClose(closeStatusProtocolError)
+ return nil, io.EOF
+ }
+ }
+ if header := frame.HeaderReader(); header != nil {
+ io.Copy(ioutil.Discard, header)
+ }
+ switch frame.PayloadType() {
+ case ContinuationFrame:
+ frame.(*hybiFrameReader).header.OpCode = handler.payloadType
+ case TextFrame, BinaryFrame:
+ handler.payloadType = frame.PayloadType()
+ case CloseFrame:
+ return nil, io.EOF
+ case PingFrame, PongFrame:
+ b := make([]byte, maxControlFramePayloadLength)
+ n, err := io.ReadFull(frame, b)
+ if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+ return nil, err
+ }
+ io.Copy(ioutil.Discard, frame)
+ if frame.PayloadType() == PingFrame {
+ if _, err := handler.WritePong(b[:n]); err != nil {
+ return nil, err
+ }
+ }
+ return nil, nil
+ }
+ return frame, nil
+}
+
+func (handler *hybiFrameHandler) WriteClose(status int) (err error) {
+ handler.conn.wio.Lock()
+ defer handler.conn.wio.Unlock()
+ w, err := handler.conn.frameWriterFactory.NewFrameWriter(CloseFrame)
+ if err != nil {
+ return err
+ }
+ msg := make([]byte, 2)
+ binary.BigEndian.PutUint16(msg, uint16(status))
+ _, err = w.Write(msg)
+ w.Close()
+ return err
+}
+
+func (handler *hybiFrameHandler) WritePong(msg []byte) (n int, err error) {
+ handler.conn.wio.Lock()
+ defer handler.conn.wio.Unlock()
+ w, err := handler.conn.frameWriterFactory.NewFrameWriter(PongFrame)
+ if err != nil {
+ return 0, err
+ }
+ n, err = w.Write(msg)
+ w.Close()
+ return n, err
+}
+
+// newHybiConn creates a new WebSocket connection speaking hybi draft protocol.
+func newHybiConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {
+ if buf == nil {
+ br := bufio.NewReader(rwc)
+ bw := bufio.NewWriter(rwc)
+ buf = bufio.NewReadWriter(br, bw)
+ }
+ ws := &Conn{config: config, request: request, buf: buf, rwc: rwc,
+ frameReaderFactory: hybiFrameReaderFactory{buf.Reader},
+ frameWriterFactory: hybiFrameWriterFactory{
+ buf.Writer, request == nil},
+ PayloadType: TextFrame,
+ defaultCloseStatus: closeStatusNormal}
+ ws.frameHandler = &hybiFrameHandler{conn: ws}
+ return ws
+}
+
+// generateMaskingKey generates a masking key for a frame.
+func generateMaskingKey() (maskingKey []byte, err error) {
+ maskingKey = make([]byte, 4)
+ if _, err = io.ReadFull(rand.Reader, maskingKey); err != nil {
+ return
+ }
+ return
+}
+
+// generateNonce generates a nonce consisting of a randomly selected 16-byte
+// value that has been base64-encoded.
+func generateNonce() (nonce []byte) {
+ key := make([]byte, 16)
+ if _, err := io.ReadFull(rand.Reader, key); err != nil {
+ panic(err)
+ }
+ nonce = make([]byte, 24)
+ base64.StdEncoding.Encode(nonce, key)
+ return
+}
+
+// removeZone removes IPv6 zone identifier from host.
+// E.g., "[fe80::1%en0]:8080" to "[fe80::1]:8080"
+func removeZone(host string) string {
+ if !strings.HasPrefix(host, "[") {
+ return host
+ }
+ i := strings.LastIndex(host, "]")
+ if i < 0 {
+ return host
+ }
+ j := strings.LastIndex(host[:i], "%")
+ if j < 0 {
+ return host
+ }
+ return host[:j] + host[i:]
+}
+
+// getNonceAccept computes the base64-encoded SHA-1 of the concatenation of
+// the nonce ("Sec-WebSocket-Key" value) with the websocket GUID string.
+func getNonceAccept(nonce []byte) (expected []byte, err error) {
+ h := sha1.New()
+ if _, err = h.Write(nonce); err != nil {
+ return
+ }
+ if _, err = h.Write([]byte(websocketGUID)); err != nil {
+ return
+ }
+ expected = make([]byte, 28)
+ base64.StdEncoding.Encode(expected, h.Sum(nil))
+ return
+}
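+
+// As a sanity check against the worked example in RFC 6455 (the literal values
+// below come from the RFC, not from this package's tests):
+//
+//	accept, _ := getNonceAccept([]byte("dGhlIHNhbXBsZSBub25jZQ=="))
+//	// string(accept) == "s3pPLMBiTxaQ9kYGzzhZRbK+xOo="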
+
+// Client handshake described in draft-ietf-hybi-thewebsocketprotocol-17
+func hybiClientHandshake(config *Config, br *bufio.Reader, bw *bufio.Writer) (err error) {
+ bw.WriteString("GET " + config.Location.RequestURI() + " HTTP/1.1\r\n")
+
+ // According to RFC 6874, an HTTP client, proxy, or other
+ // intermediary must remove any IPv6 zone identifier attached
+ // to an outgoing URI.
+ bw.WriteString("Host: " + removeZone(config.Location.Host) + "\r\n")
+ bw.WriteString("Upgrade: websocket\r\n")
+ bw.WriteString("Connection: Upgrade\r\n")
+ nonce := generateNonce()
+ if config.handshakeData != nil {
+ nonce = []byte(config.handshakeData["key"])
+ }
+ bw.WriteString("Sec-WebSocket-Key: " + string(nonce) + "\r\n")
+ bw.WriteString("Origin: " + strings.ToLower(config.Origin.String()) + "\r\n")
+
+ if config.Version != ProtocolVersionHybi13 {
+ return ErrBadProtocolVersion
+ }
+
+ bw.WriteString("Sec-WebSocket-Version: " + fmt.Sprintf("%d", config.Version) + "\r\n")
+ if len(config.Protocol) > 0 {
+ bw.WriteString("Sec-WebSocket-Protocol: " + strings.Join(config.Protocol, ", ") + "\r\n")
+ }
+ // TODO(ukai): send Sec-WebSocket-Extensions.
+ err = config.Header.WriteSubset(bw, handshakeHeader)
+ if err != nil {
+ return err
+ }
+
+ bw.WriteString("\r\n")
+ if err = bw.Flush(); err != nil {
+ return err
+ }
+
+ resp, err := http.ReadResponse(br, &http.Request{Method: "GET"})
+ if err != nil {
+ return err
+ }
+ if resp.StatusCode != 101 {
+ return ErrBadStatus
+ }
+ if strings.ToLower(resp.Header.Get("Upgrade")) != "websocket" ||
+ strings.ToLower(resp.Header.Get("Connection")) != "upgrade" {
+ return ErrBadUpgrade
+ }
+ expectedAccept, err := getNonceAccept(nonce)
+ if err != nil {
+ return err
+ }
+ if resp.Header.Get("Sec-WebSocket-Accept") != string(expectedAccept) {
+ return ErrChallengeResponse
+ }
+ if resp.Header.Get("Sec-WebSocket-Extensions") != "" {
+ return ErrUnsupportedExtensions
+ }
+ offeredProtocol := resp.Header.Get("Sec-WebSocket-Protocol")
+ if offeredProtocol != "" {
+ protocolMatched := false
+ for i := 0; i < len(config.Protocol); i++ {
+ if config.Protocol[i] == offeredProtocol {
+ protocolMatched = true
+ break
+ }
+ }
+ if !protocolMatched {
+ return ErrBadWebSocketProtocol
+ }
+ config.Protocol = []string{offeredProtocol}
+ }
+
+ return nil
+}
+
+// newHybiClientConn creates a client WebSocket connection after handshake.
+func newHybiClientConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser) *Conn {
+ return newHybiConn(config, buf, rwc, nil)
+}
+
+// A HybiServerHandshaker performs a server handshake using hybi draft protocol.
+type hybiServerHandshaker struct {
+ *Config
+ accept []byte
+}
+
+func (c *hybiServerHandshaker) ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error) {
+ c.Version = ProtocolVersionHybi13
+ if req.Method != "GET" {
+ return http.StatusMethodNotAllowed, ErrBadRequestMethod
+ }
+ // HTTP version can be safely ignored.
+
+ if strings.ToLower(req.Header.Get("Upgrade")) != "websocket" ||
+ !strings.Contains(strings.ToLower(req.Header.Get("Connection")), "upgrade") {
+ return http.StatusBadRequest, ErrNotWebSocket
+ }
+
+ key := req.Header.Get("Sec-Websocket-Key")
+ if key == "" {
+ return http.StatusBadRequest, ErrChallengeResponse
+ }
+ version := req.Header.Get("Sec-Websocket-Version")
+ switch version {
+ case "13":
+ c.Version = ProtocolVersionHybi13
+ default:
+ return http.StatusBadRequest, ErrBadWebSocketVersion
+ }
+ var scheme string
+ if req.TLS != nil {
+ scheme = "wss"
+ } else {
+ scheme = "ws"
+ }
+ c.Location, err = url.ParseRequestURI(scheme + "://" + req.Host + req.URL.RequestURI())
+ if err != nil {
+ return http.StatusBadRequest, err
+ }
+ protocol := strings.TrimSpace(req.Header.Get("Sec-Websocket-Protocol"))
+ if protocol != "" {
+ protocols := strings.Split(protocol, ",")
+ for i := 0; i < len(protocols); i++ {
+ c.Protocol = append(c.Protocol, strings.TrimSpace(protocols[i]))
+ }
+ }
+ c.accept, err = getNonceAccept([]byte(key))
+ if err != nil {
+ return http.StatusInternalServerError, err
+ }
+ return http.StatusSwitchingProtocols, nil
+}
+
+// Origin parses the Origin header in req.
+// If the Origin header is not set, it returns nil and nil.
+func Origin(config *Config, req *http.Request) (*url.URL, error) {
+ var origin string
+ switch config.Version {
+ case ProtocolVersionHybi13:
+ origin = req.Header.Get("Origin")
+ }
+ if origin == "" {
+ return nil, nil
+ }
+ return url.ParseRequestURI(origin)
+}
+
+func (c *hybiServerHandshaker) AcceptHandshake(buf *bufio.Writer) (err error) {
+ if len(c.Protocol) > 0 {
+ if len(c.Protocol) != 1 {
+			// You need to choose a Protocol in the Handshake func in Server.
+ return ErrBadWebSocketProtocol
+ }
+ }
+ buf.WriteString("HTTP/1.1 101 Switching Protocols\r\n")
+ buf.WriteString("Upgrade: websocket\r\n")
+ buf.WriteString("Connection: Upgrade\r\n")
+ buf.WriteString("Sec-WebSocket-Accept: " + string(c.accept) + "\r\n")
+ if len(c.Protocol) > 0 {
+ buf.WriteString("Sec-WebSocket-Protocol: " + c.Protocol[0] + "\r\n")
+ }
+ // TODO(ukai): send Sec-WebSocket-Extensions.
+ if c.Header != nil {
+ err := c.Header.WriteSubset(buf, handshakeHeader)
+ if err != nil {
+ return err
+ }
+ }
+ buf.WriteString("\r\n")
+ return buf.Flush()
+}
+
+func (c *hybiServerHandshaker) NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {
+ return newHybiServerConn(c.Config, buf, rwc, request)
+}
+
+// newHybiServerConn returns a new WebSocket connection speaking hybi draft protocol.
+func newHybiServerConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {
+ return newHybiConn(config, buf, rwc, request)
+}
diff --git a/vendor/golang.org/x/net/websocket/server.go b/vendor/golang.org/x/net/websocket/server.go
new file mode 100644
index 0000000..0895dea
--- /dev/null
+++ b/vendor/golang.org/x/net/websocket/server.go
@@ -0,0 +1,113 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "net/http"
+)
+
+func newServerConn(rwc io.ReadWriteCloser, buf *bufio.ReadWriter, req *http.Request, config *Config, handshake func(*Config, *http.Request) error) (conn *Conn, err error) {
+ var hs serverHandshaker = &hybiServerHandshaker{Config: config}
+ code, err := hs.ReadHandshake(buf.Reader, req)
+ if err == ErrBadWebSocketVersion {
+ fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
+ fmt.Fprintf(buf, "Sec-WebSocket-Version: %s\r\n", SupportedProtocolVersion)
+ buf.WriteString("\r\n")
+ buf.WriteString(err.Error())
+ buf.Flush()
+ return
+ }
+ if err != nil {
+ fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
+ buf.WriteString("\r\n")
+ buf.WriteString(err.Error())
+ buf.Flush()
+ return
+ }
+ if handshake != nil {
+ err = handshake(config, req)
+ if err != nil {
+ code = http.StatusForbidden
+ fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
+ buf.WriteString("\r\n")
+ buf.Flush()
+ return
+ }
+ }
+ err = hs.AcceptHandshake(buf.Writer)
+ if err != nil {
+ code = http.StatusBadRequest
+ fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
+ buf.WriteString("\r\n")
+ buf.Flush()
+ return
+ }
+ conn = hs.NewServerConn(buf, rwc, req)
+ return
+}
+
+// Server represents a server of a WebSocket.
+type Server struct {
+ // Config is a WebSocket configuration for new WebSocket connection.
+ Config
+
+	// Handshake is an optional function called during the WebSocket handshake.
+	// For example, you can check (or skip checking) the Origin header,
+	// or select config.Protocol.
+ Handshake func(*Config, *http.Request) error
+
+ // Handler handles a WebSocket connection.
+ Handler
+}
+
+// ServeHTTP implements the http.Handler interface for a WebSocket
+func (s Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ s.serveWebSocket(w, req)
+}
+
+func (s Server) serveWebSocket(w http.ResponseWriter, req *http.Request) {
+ rwc, buf, err := w.(http.Hijacker).Hijack()
+ if err != nil {
+ panic("Hijack failed: " + err.Error())
+ }
+ // The server should abort the WebSocket connection if it finds
+ // the client did not send a handshake that matches with protocol
+ // specification.
+ defer rwc.Close()
+ conn, err := newServerConn(rwc, buf, req, &s.Config, s.Handshake)
+ if err != nil {
+ return
+ }
+ if conn == nil {
+ panic("unexpected nil conn")
+ }
+ s.Handler(conn)
+}
+
+// Handler is a simple interface to a WebSocket browser client.
+// It checks whether the Origin header is a valid URL by default.
+// You might want to verify websocket.Conn.Config().Origin in the func.
+// If you use Server instead of Handler, you could call websocket.Origin and
+// check the origin in your Handshake func. So, if you want to accept
+// non-browser clients, which do not send an Origin header, set a
+// Server.Handshake that does not check the origin.
+type Handler func(*Conn)
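+
+// A minimal usage sketch (illustrative only; it assumes the standard net/http
+// and io packages and is not part of the upstream file): register an echo
+// handler and serve it over HTTP.
+//
+//	func echoServer(ws *websocket.Conn) {
+//		io.Copy(ws, ws)
+//	}
+//
+//	func main() {
+//		http.Handle("/echo", websocket.Handler(echoServer))
+//		if err := http.ListenAndServe(":12345", nil); err != nil {
+//			panic("ListenAndServe: " + err.Error())
+//		}
+//	}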
+
+func checkOrigin(config *Config, req *http.Request) (err error) {
+ config.Origin, err = Origin(config, req)
+ if err == nil && config.Origin == nil {
+ return fmt.Errorf("null origin")
+ }
+ return err
+}
+
+// ServeHTTP implements the http.Handler interface for a WebSocket
+func (h Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ s := Server{Handler: h, Handshake: checkOrigin}
+ s.serveWebSocket(w, req)
+}
diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go
new file mode 100644
index 0000000..6c45c73
--- /dev/null
+++ b/vendor/golang.org/x/net/websocket/websocket.go
@@ -0,0 +1,451 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package websocket implements a client and server for the WebSocket protocol
+// as specified in RFC 6455.
+//
+// This package currently lacks some features found in alternative
+// and more actively maintained WebSocket packages:
+//
+// https://godoc.org/github.com/gorilla/websocket
+// https://godoc.org/nhooyr.io/websocket
+package websocket // import "golang.org/x/net/websocket"
+
+import (
+ "bufio"
+ "crypto/tls"
+ "encoding/json"
+ "errors"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "sync"
+ "time"
+)
+
+const (
+ ProtocolVersionHybi13 = 13
+ ProtocolVersionHybi = ProtocolVersionHybi13
+ SupportedProtocolVersion = "13"
+
+ ContinuationFrame = 0
+ TextFrame = 1
+ BinaryFrame = 2
+ CloseFrame = 8
+ PingFrame = 9
+ PongFrame = 10
+ UnknownFrame = 255
+
+ DefaultMaxPayloadBytes = 32 << 20 // 32MB
+)
+
+// ProtocolError represents WebSocket protocol errors.
+type ProtocolError struct {
+ ErrorString string
+}
+
+func (err *ProtocolError) Error() string { return err.ErrorString }
+
+var (
+ ErrBadProtocolVersion = &ProtocolError{"bad protocol version"}
+ ErrBadScheme = &ProtocolError{"bad scheme"}
+ ErrBadStatus = &ProtocolError{"bad status"}
+ ErrBadUpgrade = &ProtocolError{"missing or bad upgrade"}
+ ErrBadWebSocketOrigin = &ProtocolError{"missing or bad WebSocket-Origin"}
+ ErrBadWebSocketLocation = &ProtocolError{"missing or bad WebSocket-Location"}
+ ErrBadWebSocketProtocol = &ProtocolError{"missing or bad WebSocket-Protocol"}
+ ErrBadWebSocketVersion = &ProtocolError{"missing or bad WebSocket Version"}
+ ErrChallengeResponse = &ProtocolError{"mismatch challenge/response"}
+ ErrBadFrame = &ProtocolError{"bad frame"}
+ ErrBadFrameBoundary = &ProtocolError{"not on frame boundary"}
+ ErrNotWebSocket = &ProtocolError{"not websocket protocol"}
+ ErrBadRequestMethod = &ProtocolError{"bad method"}
+ ErrNotSupported = &ProtocolError{"not supported"}
+)
+
+// ErrFrameTooLarge is returned by Codec's Receive method if payload size
+// exceeds limit set by Conn.MaxPayloadBytes
+var ErrFrameTooLarge = errors.New("websocket: frame payload size exceeds limit")
+
+// Addr is an implementation of net.Addr for WebSocket.
+type Addr struct {
+ *url.URL
+}
+
+// Network returns the network type for a WebSocket, "websocket".
+func (addr *Addr) Network() string { return "websocket" }
+
+// Config is a WebSocket configuration
+type Config struct {
+ // A WebSocket server address.
+ Location *url.URL
+
+ // A Websocket client origin.
+ Origin *url.URL
+
+ // WebSocket subprotocols.
+ Protocol []string
+
+ // WebSocket protocol version.
+ Version int
+
+ // TLS config for secure WebSocket (wss).
+ TlsConfig *tls.Config
+
+ // Additional header fields to be sent in WebSocket opening handshake.
+ Header http.Header
+
+ // Dialer used when opening websocket connections.
+ Dialer *net.Dialer
+
+ handshakeData map[string]string
+}
+
+// serverHandshaker is an interface to handle WebSocket server side handshake.
+type serverHandshaker interface {
+ // ReadHandshake reads handshake request message from client.
+ // Returns http response code and error if any.
+ ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error)
+
+ // AcceptHandshake accepts the client handshake request and sends
+ // handshake response back to client.
+ AcceptHandshake(buf *bufio.Writer) (err error)
+
+ // NewServerConn creates a new WebSocket connection.
+ NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) (conn *Conn)
+}
+
+// frameReader is an interface to read a WebSocket frame.
+type frameReader interface {
+ // Reader is to read payload of the frame.
+ io.Reader
+
+ // PayloadType returns payload type.
+ PayloadType() byte
+
+ // HeaderReader returns a reader to read header of the frame.
+ HeaderReader() io.Reader
+
+ // TrailerReader returns a reader to read trailer of the frame.
+ // If it returns nil, there is no trailer in the frame.
+ TrailerReader() io.Reader
+
+ // Len returns total length of the frame, including header and trailer.
+ Len() int
+}
+
+// frameReaderFactory is an interface to create a new frame reader.
+type frameReaderFactory interface {
+ NewFrameReader() (r frameReader, err error)
+}
+
+// frameWriter is an interface to write a WebSocket frame.
+type frameWriter interface {
+ // Writer is to write payload of the frame.
+ io.WriteCloser
+}
+
+// frameWriterFactory is an interface to create a new frame writer.
+type frameWriterFactory interface {
+ NewFrameWriter(payloadType byte) (w frameWriter, err error)
+}
+
+type frameHandler interface {
+ HandleFrame(frame frameReader) (r frameReader, err error)
+ WriteClose(status int) (err error)
+}
+
+// Conn represents a WebSocket connection.
+//
+// Multiple goroutines may invoke methods on a Conn simultaneously.
+type Conn struct {
+ config *Config
+ request *http.Request
+
+ buf *bufio.ReadWriter
+ rwc io.ReadWriteCloser
+
+ rio sync.Mutex
+ frameReaderFactory
+ frameReader
+
+ wio sync.Mutex
+ frameWriterFactory
+
+ frameHandler
+ PayloadType byte
+ defaultCloseStatus int
+
+ // MaxPayloadBytes limits the size of frame payload received over Conn
+ // by Codec's Receive method. If zero, DefaultMaxPayloadBytes is used.
+ MaxPayloadBytes int
+}
+
+// Read implements the io.Reader interface:
+// it reads data of a frame from the WebSocket connection.
+// If msg is not large enough for the frame data, it fills msg and the next Read
+// will read the rest of the frame data.
+// It reads Text frames and Binary frames.
+func (ws *Conn) Read(msg []byte) (n int, err error) {
+ ws.rio.Lock()
+ defer ws.rio.Unlock()
+again:
+ if ws.frameReader == nil {
+ frame, err := ws.frameReaderFactory.NewFrameReader()
+ if err != nil {
+ return 0, err
+ }
+ ws.frameReader, err = ws.frameHandler.HandleFrame(frame)
+ if err != nil {
+ return 0, err
+ }
+ if ws.frameReader == nil {
+ goto again
+ }
+ }
+ n, err = ws.frameReader.Read(msg)
+ if err == io.EOF {
+ if trailer := ws.frameReader.TrailerReader(); trailer != nil {
+ io.Copy(ioutil.Discard, trailer)
+ }
+ ws.frameReader = nil
+ goto again
+ }
+ return n, err
+}
+
+// Write implements the io.Writer interface:
+// it writes data as a frame to the WebSocket connection.
+func (ws *Conn) Write(msg []byte) (n int, err error) {
+ ws.wio.Lock()
+ defer ws.wio.Unlock()
+ w, err := ws.frameWriterFactory.NewFrameWriter(ws.PayloadType)
+ if err != nil {
+ return 0, err
+ }
+ n, err = w.Write(msg)
+ w.Close()
+ return n, err
+}
+
+// Close implements the io.Closer interface.
+func (ws *Conn) Close() error {
+ err := ws.frameHandler.WriteClose(ws.defaultCloseStatus)
+ err1 := ws.rwc.Close()
+ if err != nil {
+ return err
+ }
+ return err1
+}
+
+// IsClientConn reports whether ws is a client-side connection.
+func (ws *Conn) IsClientConn() bool { return ws.request == nil }
+
+// IsServerConn reports whether ws is a server-side connection.
+func (ws *Conn) IsServerConn() bool { return ws.request != nil }
+
+// LocalAddr returns the WebSocket Origin for the connection for client, or
+// the WebSocket location for server.
+func (ws *Conn) LocalAddr() net.Addr {
+ if ws.IsClientConn() {
+ return &Addr{ws.config.Origin}
+ }
+ return &Addr{ws.config.Location}
+}
+
+// RemoteAddr returns the WebSocket location for the connection for client, or
+// the Websocket Origin for server.
+func (ws *Conn) RemoteAddr() net.Addr {
+ if ws.IsClientConn() {
+ return &Addr{ws.config.Location}
+ }
+ return &Addr{ws.config.Origin}
+}
+
+var errSetDeadline = errors.New("websocket: cannot set deadline: not using a net.Conn")
+
+// SetDeadline sets the connection's network read & write deadlines.
+func (ws *Conn) SetDeadline(t time.Time) error {
+ if conn, ok := ws.rwc.(net.Conn); ok {
+ return conn.SetDeadline(t)
+ }
+ return errSetDeadline
+}
+
+// SetReadDeadline sets the connection's network read deadline.
+func (ws *Conn) SetReadDeadline(t time.Time) error {
+ if conn, ok := ws.rwc.(net.Conn); ok {
+ return conn.SetReadDeadline(t)
+ }
+ return errSetDeadline
+}
+
+// SetWriteDeadline sets the connection's network write deadline.
+func (ws *Conn) SetWriteDeadline(t time.Time) error {
+ if conn, ok := ws.rwc.(net.Conn); ok {
+ return conn.SetWriteDeadline(t)
+ }
+ return errSetDeadline
+}
+
+// Config returns the WebSocket config.
+func (ws *Conn) Config() *Config { return ws.config }
+
+// Request returns the http request upgraded to the WebSocket.
+// It is nil for client side.
+func (ws *Conn) Request() *http.Request { return ws.request }
+
+// Codec represents a symmetric pair of functions that implement a codec.
+type Codec struct {
+ Marshal func(v interface{}) (data []byte, payloadType byte, err error)
+ Unmarshal func(data []byte, payloadType byte, v interface{}) (err error)
+}
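+
+// A custom Codec can be built from any symmetric marshal/unmarshal pair. A
+// sketch (illustrative only, not part of the upstream file) that frames
+// gob-encoded values as binary messages, assuming the standard bytes and
+// encoding/gob packages:
+//
+//	var Gob = websocket.Codec{
+//		Marshal: func(v interface{}) ([]byte, byte, error) {
+//			var buf bytes.Buffer
+//			err := gob.NewEncoder(&buf).Encode(v)
+//			return buf.Bytes(), websocket.BinaryFrame, err
+//		},
+//		Unmarshal: func(data []byte, payloadType byte, v interface{}) error {
+//			return gob.NewDecoder(bytes.NewReader(data)).Decode(v)
+//		},
+//	}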
+
+// Send sends v marshaled by cd.Marshal as single frame to ws.
+func (cd Codec) Send(ws *Conn, v interface{}) (err error) {
+ data, payloadType, err := cd.Marshal(v)
+ if err != nil {
+ return err
+ }
+ ws.wio.Lock()
+ defer ws.wio.Unlock()
+ w, err := ws.frameWriterFactory.NewFrameWriter(payloadType)
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(data)
+ w.Close()
+ return err
+}
+
+// Receive receives a single frame from ws, unmarshals it with cd.Unmarshal and
+// stores the result in v. The whole frame payload is read into an in-memory
+// buffer; the maximum payload size is defined by ws.MaxPayloadBytes. If the
+// frame payload size exceeds the limit, ErrFrameTooLarge is returned; in this
+// case the frame is not read off the wire completely. The next call to Receive
+// reads and discards the leftover data of the previous oversized frame before
+// processing the next frame.
+func (cd Codec) Receive(ws *Conn, v interface{}) (err error) {
+ ws.rio.Lock()
+ defer ws.rio.Unlock()
+ if ws.frameReader != nil {
+ _, err = io.Copy(ioutil.Discard, ws.frameReader)
+ if err != nil {
+ return err
+ }
+ ws.frameReader = nil
+ }
+again:
+ frame, err := ws.frameReaderFactory.NewFrameReader()
+ if err != nil {
+ return err
+ }
+ frame, err = ws.frameHandler.HandleFrame(frame)
+ if err != nil {
+ return err
+ }
+ if frame == nil {
+ goto again
+ }
+ maxPayloadBytes := ws.MaxPayloadBytes
+ if maxPayloadBytes == 0 {
+ maxPayloadBytes = DefaultMaxPayloadBytes
+ }
+ if hf, ok := frame.(*hybiFrameReader); ok && hf.header.Length > int64(maxPayloadBytes) {
+ // payload size exceeds limit, no need to call Unmarshal
+ //
+ // set frameReader to current oversized frame so that
+ // the next call to this function can drain leftover
+ // data before processing the next frame
+ ws.frameReader = frame
+ return ErrFrameTooLarge
+ }
+ payloadType := frame.PayloadType()
+ data, err := ioutil.ReadAll(frame)
+ if err != nil {
+ return err
+ }
+ return cd.Unmarshal(data, payloadType, v)
+}
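+
+// Usage sketch for the size limit (illustrative only, not part of the upstream
+// file): cap the payload at 1 MiB and detect oversized frames.
+//
+//	ws.MaxPayloadBytes = 1 << 20
+//	var msg string
+//	if err := websocket.Message.Receive(ws, &msg); err == websocket.ErrFrameTooLarge {
+//		// The oversized frame's leftover bytes are discarded by the next Receive.
+//	}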
+
+func marshal(v interface{}) (msg []byte, payloadType byte, err error) {
+ switch data := v.(type) {
+ case string:
+ return []byte(data), TextFrame, nil
+ case []byte:
+ return data, BinaryFrame, nil
+ }
+ return nil, UnknownFrame, ErrNotSupported
+}
+
+func unmarshal(msg []byte, payloadType byte, v interface{}) (err error) {
+ switch data := v.(type) {
+ case *string:
+ *data = string(msg)
+ return nil
+ case *[]byte:
+ *data = msg
+ return nil
+ }
+ return ErrNotSupported
+}
+
+/*
+Message is a codec to send/receive text/binary data in a frame on WebSocket connection.
+To send/receive text frame, use string type.
+To send/receive binary frame, use []byte type.
+
+Trivial usage:
+
+ import "websocket"
+
+ // receive text frame
+ var message string
+ websocket.Message.Receive(ws, &message)
+
+ // send text frame
+ message = "hello"
+ websocket.Message.Send(ws, message)
+
+ // receive binary frame
+ var data []byte
+ websocket.Message.Receive(ws, &data)
+
+ // send binary frame
+ data = []byte{0, 1, 2}
+ websocket.Message.Send(ws, data)
+
+*/
+var Message = Codec{marshal, unmarshal}
+
+func jsonMarshal(v interface{}) (msg []byte, payloadType byte, err error) {
+ msg, err = json.Marshal(v)
+ return msg, TextFrame, err
+}
+
+func jsonUnmarshal(msg []byte, payloadType byte, v interface{}) (err error) {
+ return json.Unmarshal(msg, v)
+}
+
+/*
+JSON is a codec to send/receive JSON data in a frame from a WebSocket connection.
+
+Trivial usage:
+
+ import "websocket"
+
+ type T struct {
+ Msg string
+ Count int
+ }
+
+ // receive JSON type T
+ var data T
+ websocket.JSON.Receive(ws, &data)
+
+ // send JSON type T
+ websocket.JSON.Send(ws, data)
+*/
+var JSON = Codec{jsonMarshal, jsonUnmarshal}
diff --git a/vendor/golang.org/x/sys/AUTHORS b/vendor/golang.org/x/sys/AUTHORS
new file mode 100644
index 0000000..15167cd
--- /dev/null
+++ b/vendor/golang.org/x/sys/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/sys/CONTRIBUTORS b/vendor/golang.org/x/sys/CONTRIBUTORS
new file mode 100644
index 0000000..1c4577e
--- /dev/null
+++ b/vendor/golang.org/x/sys/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/sys/LICENSE b/vendor/golang.org/x/sys/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/vendor/golang.org/x/sys/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/sys/PATENTS b/vendor/golang.org/x/sys/PATENTS
new file mode 100644
index 0000000..7330990
--- /dev/null
+++ b/vendor/golang.org/x/sys/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/sys/unix/.gitignore b/vendor/golang.org/x/sys/unix/.gitignore
new file mode 100644
index 0000000..e3e0fc6
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/.gitignore
@@ -0,0 +1,2 @@
+_obj/
+unix.test
diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md
new file mode 100644
index 0000000..eb2f78a
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/README.md
@@ -0,0 +1,173 @@
+# Building `sys/unix`
+
+The sys/unix package provides access to the raw system call interface of the
+underlying operating system. See: https://godoc.org/golang.org/x/sys/unix
+
+Porting Go to a new architecture/OS combination or adding syscalls, types, or
+constants to an existing architecture/OS pair requires some manual effort;
+however, there are tools that automate much of the process.
+
+## Build Systems
+
+There are currently two ways we generate the necessary files. We are currently
+migrating the build system to use containers so the builds are reproducible.
+This is being done on an OS-by-OS basis. Please update this documentation as
+components of the build system change.
+
+### Old Build System (currently for `GOOS != "linux"`)
+
+The old build system generates the Go files based on the C header files
+present on your system. This means that files
+for a given GOOS/GOARCH pair must be generated on a system with that OS and
+architecture. This also means that the generated code can differ from system
+to system, based on differences in the header files.
+
+To avoid this, if you are using the old build system, only generate the Go
+files on an installation with unmodified header files. It is also important to
+keep track of which version of the OS the files were generated from (ex.
+Darwin 14 vs Darwin 15). This makes it easier to track the progress of changes
+and have each OS upgrade correspond to a single change.
+
+To build the files for your current OS and architecture, make sure GOOS and
+GOARCH are set correctly and run `mkall.sh`. This will generate the files for
+your specific system. Running `mkall.sh -n` shows the commands that will be run.
+
+Requirements: bash, go
+
+### New Build System (currently for `GOOS == "linux"`)
+
+The new build system uses a Docker container to generate the go files directly
+from source checkouts of the kernel and various system libraries. This means
+that on any platform that supports Docker, all the files using the new build
+system can be generated at once, and generated files will not change based on
+what the person running the scripts has installed on their computer.
+
+The OS specific files for the new build system are located in the `${GOOS}`
+directory, and the build is coordinated by the `${GOOS}/mkall.go` program. When
+the kernel or system library updates, modify the Dockerfile at
+`${GOOS}/Dockerfile` to checkout the new release of the source.
+
+To build all the files under the new build system, you must be on an amd64/Linux
+system and have your GOOS and GOARCH set accordingly. Running `mkall.sh` will
+then generate all of the files for all of the GOOS/GOARCH pairs in the new build
+system. Running `mkall.sh -n` shows the commands that will be run.
+
+Requirements: bash, go, docker
+
+## Component files
+
+This section describes the various files used in the code generation process.
+It also contains instructions on how to modify these files to add a new
+architecture/OS or to add additional syscalls, types, or constants. Note that
+if you are using the new build system, the scripts/programs cannot be called normally.
+They must be called from within the docker container.
+
+### asm files
+
+The hand-written assembly file at `asm_${GOOS}_${GOARCH}.s` implements system
+call dispatch. There are three entry points:
+```
+ func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
+ func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr)
+ func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
+```
+The first and second are the standard ones; they differ only in how many
+arguments can be passed to the kernel. The third is for low-level use by the
+ForkExec wrapper. Unlike the first two, it does not call into the scheduler to
+let it know that a system call is running.
+
+When porting Go to a new architecture/OS, this file must be implemented for
+each GOOS/GOARCH pair.
+
+### mksysnum
+
+Mksysnum is a Go program located at `${GOOS}/mksysnum.go` (or `mksysnum_${GOOS}.go`
+for the old system). This program takes in a list of header files containing the
+syscall number declarations and parses them to produce the corresponding list of
+Go numeric constants. See `zsysnum_${GOOS}_${GOARCH}.go` for the generated
+constants.
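+
+For instance, an illustrative excerpt of the generated constants for
+linux/amd64 (values differ per GOOS/GOARCH pair):
+
+```
+// zsysnum_linux_amd64.go (excerpt)
+SYS_READ  = 0
+SYS_WRITE = 1
+SYS_OPEN  = 2
+```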
+
+Adding new syscall numbers is mostly done by running the build on a sufficiently
+new installation of the target OS (or updating the source checkouts for the
+new build system). However, depending on the OS, you may need to update the
+parsing in mksysnum.
+
+### mksyscall.go
+
+The `syscall.go`, `syscall_${GOOS}.go`, `syscall_${GOOS}_${GOARCH}.go` are
+hand-written Go files which implement system calls (for unix, the specific OS,
+or the specific OS/Architecture pair respectively) that need special handling
+and list `//sys` comments giving prototypes for ones that can be generated.
+
+The mksyscall.go program takes the `//sys` and `//sysnb` comments and converts
+them into syscalls. This requires the name of the prototype in the comment to
+match a syscall number in the `zsysnum_${GOOS}_${GOARCH}.go` file. The function
+prototype can be exported (capitalized) or not.
+
+Adding a new syscall often just requires adding a new `//sys` function prototype
+with the desired arguments and a capitalized name so it is exported. However, if
+you want the interface to the syscall to be different, often one will make an
+unexported `//sys` prototype, and then write a custom wrapper in
+`syscall_${GOOS}.go`.
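+
+For example, a simple prototype comment (illustrative; the exact set of
+prototypes varies by OS) looks like:
+
+```
+//sys	Unlink(path string) (err error)
+```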
+
+### types files
+
+For each OS, there is a hand-written Go file at `${GOOS}/types.go` (or
+`types_${GOOS}.go` on the old system). This file includes standard C headers and
+creates Go type aliases to the corresponding C types. The file is then fed
+through godef to get the Go compatible definitions. Finally, the generated code
+is fed through mkpost.go to format the code correctly and remove any hidden or
+private identifiers. This cleaned-up code is written to
+`ztypes_${GOOS}_${GOARCH}.go`.
+
+The hardest part about preparing this file is figuring out which headers to
+include and which symbols need to be `#define`d to get the actual data
+structures that pass through to the kernel system calls. Some C libraries
+present alternate versions for binary compatibility and translate them on the
+way in and out of system calls, but there is almost always a `#define` that can
+get the real ones.
+See `types_darwin.go` and `linux/types.go` for examples.
+
+To add a new type, add in the necessary include statement at the top of the
+file (if it is not already there) and add in a type alias line. Note that if
+your type is significantly different on different architectures, you may need
+some `#if/#elif` macros in your include statements.
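+
+A typical alias line in the types file (illustrative; the concrete types vary
+by OS) looks like:
+
+```
+type Timespec C.struct_timespec
+```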
+
+### mkerrors.sh
+
+This script is used to generate the system's various constants. This doesn't
+just include the error numbers and error strings, but also the signal numbers
+and a wide variety of miscellaneous constants. The constants come from the list
+of include files in the `includes_${uname}` variable. A regex then picks out
+the desired `#define` statements, and generates the corresponding Go constants.
+The error numbers and strings are generated from `#include <errno.h>`, and the
+signal numbers and strings are generated from `#include <signal.h>`. All of
+these constants are written to `zerrors_${GOOS}_${GOARCH}.go` via a C program,
+`_errors.c`, which prints out all the constants.
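+
+An illustrative excerpt of the generated output (exact values differ per
+GOOS/GOARCH pair):
+
+```
+// zerrors_linux_amd64.go (excerpt)
+AF_INET  = 0x2
+AF_INET6 = 0xa
+```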
+
+To add a constant, add the header that includes it to the appropriate variable.
+Then, edit the regex (if necessary) to match the desired constant. Avoid making
+the regex too broad to avoid matching unintended constants.
+
+
+## Generated files
+
+### `zerrors_${GOOS}_${GOARCH}.go`
+
+A file containing all of the system's generated error numbers, error strings,
+signal numbers, and constants. Generated by `mkerrors.sh` (see above).
+
+### `zsyscall_${GOOS}_${GOARCH}.go`
+
+A file containing all the generated syscalls for a specific GOOS and GOARCH.
+Generated by `mksyscall.go` (see above).
+
+### `zsysnum_${GOOS}_${GOARCH}.go`
+
+A list of numeric constants for all the syscall numbers of the specific GOOS
+and GOARCH. Generated by mksysnum (see above).
+
+### `ztypes_${GOOS}_${GOARCH}.go`
+
+A file containing Go types for passing into (or returning from) syscalls.
+Generated by godefs and the types file (see above).
diff --git a/vendor/golang.org/x/sys/unix/affinity_linux.go b/vendor/golang.org/x/sys/unix/affinity_linux.go
new file mode 100644
index 0000000..6e5c81a
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/affinity_linux.go
@@ -0,0 +1,86 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// CPU affinity functions
+
+package unix
+
+import (
+ "math/bits"
+ "unsafe"
+)
+
+const cpuSetSize = _CPU_SETSIZE / _NCPUBITS
+
+// CPUSet represents a CPU affinity mask.
+type CPUSet [cpuSetSize]cpuMask
+
+func schedAffinity(trap uintptr, pid int, set *CPUSet) error {
+ _, _, e := RawSyscall(trap, uintptr(pid), uintptr(unsafe.Sizeof(*set)), uintptr(unsafe.Pointer(set)))
+ if e != 0 {
+ return errnoErr(e)
+ }
+ return nil
+}
+
+// SchedGetaffinity gets the CPU affinity mask of the thread specified by pid.
+// If pid is 0 the calling thread is used.
+func SchedGetaffinity(pid int, set *CPUSet) error {
+ return schedAffinity(SYS_SCHED_GETAFFINITY, pid, set)
+}
+
+// SchedSetaffinity sets the CPU affinity mask of the thread specified by pid.
+// If pid is 0 the calling thread is used.
+func SchedSetaffinity(pid int, set *CPUSet) error {
+ return schedAffinity(SYS_SCHED_SETAFFINITY, pid, set)
+}
+
+// Zero clears the set s, so that it contains no CPUs.
+func (s *CPUSet) Zero() {
+ for i := range s {
+ s[i] = 0
+ }
+}
+
+func cpuBitsIndex(cpu int) int {
+ return cpu / _NCPUBITS
+}
+
+func cpuBitsMask(cpu int) cpuMask {
+ return cpuMask(1 << (uint(cpu) % _NCPUBITS))
+}
+
+// Set adds cpu to the set s.
+func (s *CPUSet) Set(cpu int) {
+ i := cpuBitsIndex(cpu)
+ if i < len(s) {
+ s[i] |= cpuBitsMask(cpu)
+ }
+}
+
+// Clear removes cpu from the set s.
+func (s *CPUSet) Clear(cpu int) {
+ i := cpuBitsIndex(cpu)
+ if i < len(s) {
+ s[i] &^= cpuBitsMask(cpu)
+ }
+}
+
+// IsSet reports whether cpu is in the set s.
+func (s *CPUSet) IsSet(cpu int) bool {
+ i := cpuBitsIndex(cpu)
+ if i < len(s) {
+ return s[i]&cpuBitsMask(cpu) != 0
+ }
+ return false
+}
+
+// Count returns the number of CPUs in the set s.
+func (s *CPUSet) Count() int {
+ c := 0
+ for _, b := range s {
+ c += bits.OnesCount64(uint64(b))
+ }
+ return c
+}
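+
+// Usage sketch (illustrative only, not part of the upstream file): query the
+// affinity mask of the calling thread and count the CPUs it may run on.
+//
+//	var set unix.CPUSet
+//	if err := unix.SchedGetaffinity(0, &set); err == nil {
+//		cpus := set.Count() // number of CPUs the calling thread may run on
+//		_ = cpus
+//	}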
diff --git a/vendor/golang.org/x/sys/unix/aliases.go b/vendor/golang.org/x/sys/unix/aliases.go
new file mode 100644
index 0000000..951fce4
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/aliases.go
@@ -0,0 +1,14 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
+// +build go1.9
+
+package unix
+
+import "syscall"
+
+type Signal = syscall.Signal
+type Errno = syscall.Errno
+type SysProcAttr = syscall.SysProcAttr
diff --git a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s
new file mode 100644
index 0000000..06f84b8
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s
@@ -0,0 +1,17 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go
+//
+
+TEXT ·syscall6(SB),NOSPLIT,$0-88
+ JMP syscall·syscall6(SB)
+
+TEXT ·rawSyscall6(SB),NOSPLIT,$0-88
+ JMP syscall·rawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_386.s b/vendor/golang.org/x/sys/unix/asm_darwin_386.s
new file mode 100644
index 0000000..8a72783
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_darwin_386.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for 386, Darwin
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s b/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s
new file mode 100644
index 0000000..6321421
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for AMD64, Darwin
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_arm.s b/vendor/golang.org/x/sys/unix/asm_darwin_arm.s
new file mode 100644
index 0000000..333242d
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_darwin_arm.s
@@ -0,0 +1,30 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+// +build arm,darwin
+
+#include "textflag.h"
+
+//
+// System call support for ARM, Darwin
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ B syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ B syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ B syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ B syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ B syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s b/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s
new file mode 100644
index 0000000..97e0174
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s
@@ -0,0 +1,30 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+// +build arm64,darwin
+
+#include "textflag.h"
+
+//
+// System call support for ARM64, Darwin
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ B syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ B syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ B syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ B syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ B syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s b/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s
new file mode 100644
index 0000000..603dd57
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for AMD64, DragonFly
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_386.s b/vendor/golang.org/x/sys/unix/asm_freebsd_386.s
new file mode 100644
index 0000000..c9a0a26
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_freebsd_386.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for 386, FreeBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s
new file mode 100644
index 0000000..3517247
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for AMD64, FreeBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s b/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s
new file mode 100644
index 0000000..9227c87
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s
@@ -0,0 +1,29 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for ARM, FreeBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ B syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ B syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ B syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ B syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ B syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s
new file mode 100644
index 0000000..d9318cb
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s
@@ -0,0 +1,29 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for ARM64, FreeBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_386.s b/vendor/golang.org/x/sys/unix/asm_linux_386.s
new file mode 100644
index 0000000..448bebb
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_386.s
@@ -0,0 +1,65 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System calls for 386, Linux
+//
+
+// See ../runtime/sys_linux_386.s for the reason why we always use int 0x80
+// instead of the glibc-specific "CALL 0x10(GS)".
+#define INVOKE_SYSCALL INT $0x80
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ JMP syscall·Syscall6(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
+ CALL runtime·entersyscall(SB)
+ MOVL trap+0(FP), AX // syscall entry
+ MOVL a1+4(FP), BX
+ MOVL a2+8(FP), CX
+ MOVL a3+12(FP), DX
+ MOVL $0, SI
+ MOVL $0, DI
+ INVOKE_SYSCALL
+ MOVL AX, r1+16(FP)
+ MOVL DX, r2+20(FP)
+ CALL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
+ MOVL trap+0(FP), AX // syscall entry
+ MOVL a1+4(FP), BX
+ MOVL a2+8(FP), CX
+ MOVL a3+12(FP), DX
+ MOVL $0, SI
+ MOVL $0, DI
+ INVOKE_SYSCALL
+ MOVL AX, r1+16(FP)
+ MOVL DX, r2+20(FP)
+ RET
+
+TEXT ·socketcall(SB),NOSPLIT,$0-36
+ JMP syscall·socketcall(SB)
+
+TEXT ·rawsocketcall(SB),NOSPLIT,$0-36
+ JMP syscall·rawsocketcall(SB)
+
+TEXT ·seek(SB),NOSPLIT,$0-28
+ JMP syscall·seek(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s
new file mode 100644
index 0000000..c6468a9
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s
@@ -0,0 +1,57 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System calls for AMD64, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ CALL runtime·entersyscall(SB)
+ MOVQ a1+8(FP), DI
+ MOVQ a2+16(FP), SI
+ MOVQ a3+24(FP), DX
+ MOVQ $0, R10
+ MOVQ $0, R8
+ MOVQ $0, R9
+ MOVQ trap+0(FP), AX // syscall entry
+ SYSCALL
+ MOVQ AX, r1+32(FP)
+ MOVQ DX, r2+40(FP)
+ CALL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOVQ a1+8(FP), DI
+ MOVQ a2+16(FP), SI
+ MOVQ a3+24(FP), DX
+ MOVQ $0, R10
+ MOVQ $0, R8
+ MOVQ $0, R9
+ MOVQ trap+0(FP), AX // syscall entry
+ SYSCALL
+ MOVQ AX, r1+32(FP)
+ MOVQ DX, r2+40(FP)
+ RET
+
+TEXT ·gettimeofday(SB),NOSPLIT,$0-16
+ JMP syscall·gettimeofday(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm.s b/vendor/golang.org/x/sys/unix/asm_linux_arm.s
new file mode 100644
index 0000000..cf0f357
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_arm.s
@@ -0,0 +1,56 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System calls for arm, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ B syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ B syscall·Syscall6(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
+ BL runtime·entersyscall(SB)
+ MOVW trap+0(FP), R7
+ MOVW a1+4(FP), R0
+ MOVW a2+8(FP), R1
+ MOVW a3+12(FP), R2
+ MOVW $0, R3
+ MOVW $0, R4
+ MOVW $0, R5
+ SWI $0
+ MOVW R0, r1+16(FP)
+ MOVW $0, R0
+ MOVW R0, r2+20(FP)
+ BL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ B syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ B syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
+ MOVW trap+0(FP), R7 // syscall entry
+ MOVW a1+4(FP), R0
+ MOVW a2+8(FP), R1
+ MOVW a3+12(FP), R2
+ SWI $0
+ MOVW R0, r1+16(FP)
+ MOVW $0, R0
+ MOVW R0, r2+20(FP)
+ RET
+
+TEXT ·seek(SB),NOSPLIT,$0-28
+ B syscall·seek(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s
new file mode 100644
index 0000000..afe6fdf
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s
@@ -0,0 +1,52 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+// +build arm64
+// +build !gccgo
+
+#include "textflag.h"
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ B syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ B syscall·Syscall6(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ BL runtime·entersyscall(SB)
+ MOVD a1+8(FP), R0
+ MOVD a2+16(FP), R1
+ MOVD a3+24(FP), R2
+ MOVD $0, R3
+ MOVD $0, R4
+ MOVD $0, R5
+ MOVD trap+0(FP), R8 // syscall entry
+ SVC
+ MOVD R0, r1+32(FP) // r1
+ MOVD R1, r2+40(FP) // r2
+ BL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ B syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ B syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOVD a1+8(FP), R0
+ MOVD a2+16(FP), R1
+ MOVD a3+24(FP), R2
+ MOVD $0, R3
+ MOVD $0, R4
+ MOVD $0, R5
+ MOVD trap+0(FP), R8 // syscall entry
+ SVC
+ MOVD R0, r1+32(FP)
+ MOVD R1, r2+40(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s
new file mode 100644
index 0000000..ab9d638
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s
@@ -0,0 +1,56 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+// +build mips64 mips64le
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System calls for mips64, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ JAL runtime·entersyscall(SB)
+ MOVV a1+8(FP), R4
+ MOVV a2+16(FP), R5
+ MOVV a3+24(FP), R6
+ MOVV R0, R7
+ MOVV R0, R8
+ MOVV R0, R9
+ MOVV trap+0(FP), R2 // syscall entry
+ SYSCALL
+ MOVV R2, r1+32(FP)
+ MOVV R3, r2+40(FP)
+ JAL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOVV a1+8(FP), R4
+ MOVV a2+16(FP), R5
+ MOVV a3+24(FP), R6
+ MOVV R0, R7
+ MOVV R0, R8
+ MOVV R0, R9
+ MOVV trap+0(FP), R2 // syscall entry
+ SYSCALL
+ MOVV R2, r1+32(FP)
+ MOVV R3, r2+40(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s
new file mode 100644
index 0000000..99e5399
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s
@@ -0,0 +1,54 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+// +build mips mipsle
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System calls for mips, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ JMP syscall·Syscall9(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
+ JAL runtime·entersyscall(SB)
+ MOVW a1+4(FP), R4
+ MOVW a2+8(FP), R5
+ MOVW a3+12(FP), R6
+ MOVW R0, R7
+ MOVW trap+0(FP), R2 // syscall entry
+ SYSCALL
+ MOVW R2, r1+16(FP) // r1
+ MOVW R3, r2+20(FP) // r2
+ JAL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
+ MOVW a1+4(FP), R4
+ MOVW a2+8(FP), R5
+ MOVW a3+12(FP), R6
+ MOVW trap+0(FP), R2 // syscall entry
+ SYSCALL
+ MOVW R2, r1+16(FP)
+ MOVW R3, r2+20(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s
new file mode 100644
index 0000000..88f7125
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s
@@ -0,0 +1,44 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+// +build ppc64 ppc64le
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System calls for ppc64, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ BL runtime·entersyscall(SB)
+ MOVD a1+8(FP), R3
+ MOVD a2+16(FP), R4
+ MOVD a3+24(FP), R5
+ MOVD R0, R6
+ MOVD R0, R7
+ MOVD R0, R8
+ MOVD trap+0(FP), R9 // syscall entry
+ SYSCALL R9
+ MOVD R3, r1+32(FP)
+ MOVD R4, r2+40(FP)
+ BL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOVD a1+8(FP), R3
+ MOVD a2+16(FP), R4
+ MOVD a3+24(FP), R5
+ MOVD R0, R6
+ MOVD R0, R7
+ MOVD R0, R8
+ MOVD trap+0(FP), R9 // syscall entry
+ SYSCALL R9
+ MOVD R3, r1+32(FP)
+ MOVD R4, r2+40(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s
new file mode 100644
index 0000000..6db717d
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s
@@ -0,0 +1,54 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build riscv64,!gccgo
+
+#include "textflag.h"
+
+//
+// System calls for linux/riscv64.
+//
+// Where available, just jump to package syscall's implementation of
+// these functions.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ CALL runtime·entersyscall(SB)
+ MOV a1+8(FP), A0
+ MOV a2+16(FP), A1
+ MOV a3+24(FP), A2
+ MOV $0, A3
+ MOV $0, A4
+ MOV $0, A5
+ MOV $0, A6
+ MOV trap+0(FP), A7 // syscall entry
+ ECALL
+ MOV A0, r1+32(FP) // r1
+ MOV A1, r2+40(FP) // r2
+ CALL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOV a1+8(FP), A0
+ MOV a2+16(FP), A1
+ MOV a3+24(FP), A2
+ MOV ZERO, A3
+ MOV ZERO, A4
+ MOV ZERO, A5
+ MOV trap+0(FP), A7 // syscall entry
+ ECALL
+ MOV A0, r1+32(FP)
+ MOV A1, r2+40(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s
new file mode 100644
index 0000000..a5a863c
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s
@@ -0,0 +1,56 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build s390x
+// +build linux
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System calls for s390x, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ BR syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ BR syscall·Syscall6(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ BL runtime·entersyscall(SB)
+ MOVD a1+8(FP), R2
+ MOVD a2+16(FP), R3
+ MOVD a3+24(FP), R4
+ MOVD $0, R5
+ MOVD $0, R6
+ MOVD $0, R7
+ MOVD trap+0(FP), R1 // syscall entry
+ SYSCALL
+ MOVD R2, r1+32(FP)
+ MOVD R3, r2+40(FP)
+ BL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ BR syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ BR syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOVD a1+8(FP), R2
+ MOVD a2+16(FP), R3
+ MOVD a3+24(FP), R4
+ MOVD $0, R5
+ MOVD $0, R6
+ MOVD $0, R7
+ MOVD trap+0(FP), R1 // syscall entry
+ SYSCALL
+ MOVD R2, r1+32(FP)
+ MOVD R3, r2+40(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_386.s b/vendor/golang.org/x/sys/unix/asm_netbsd_386.s
new file mode 100644
index 0000000..48bdcd7
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_netbsd_386.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for 386, NetBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s
new file mode 100644
index 0000000..2ede05c
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for AMD64, NetBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s b/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s
new file mode 100644
index 0000000..e892857
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s
@@ -0,0 +1,29 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for ARM, NetBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ B syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ B syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ B syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ B syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ B syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s
new file mode 100644
index 0000000..6f98ba5
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s
@@ -0,0 +1,29 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for ARM64, NetBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ B syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ B syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ B syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ B syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ B syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_386.s b/vendor/golang.org/x/sys/unix/asm_openbsd_386.s
new file mode 100644
index 0000000..00576f3
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_openbsd_386.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for 386, OpenBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s
new file mode 100644
index 0000000..790ef77
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for AMD64, OpenBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s b/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s
new file mode 100644
index 0000000..469bfa1
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s
@@ -0,0 +1,29 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for ARM, OpenBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ B syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ B syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ B syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ B syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ B syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s
new file mode 100644
index 0000000..0cedea3
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s
@@ -0,0 +1,29 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for arm64, OpenBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s
new file mode 100644
index 0000000..ded8260
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s
@@ -0,0 +1,17 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System calls for amd64, Solaris are implemented in runtime/syscall_solaris.go
+//
+
+TEXT ·sysvicall6(SB),NOSPLIT,$0-88
+ JMP syscall·sysvicall6(SB)
+
+TEXT ·rawSysvicall6(SB),NOSPLIT,$0-88
+ JMP syscall·rawSysvicall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/bluetooth_linux.go b/vendor/golang.org/x/sys/unix/bluetooth_linux.go
new file mode 100644
index 0000000..a178a61
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/bluetooth_linux.go
@@ -0,0 +1,36 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Bluetooth sockets and messages
+
+package unix
+
+// Bluetooth Protocols
+const (
+ BTPROTO_L2CAP = 0
+ BTPROTO_HCI = 1
+ BTPROTO_SCO = 2
+ BTPROTO_RFCOMM = 3
+ BTPROTO_BNEP = 4
+ BTPROTO_CMTP = 5
+ BTPROTO_HIDP = 6
+ BTPROTO_AVDTP = 7
+)
+
+const (
+ HCI_CHANNEL_RAW = 0
+ HCI_CHANNEL_USER = 1
+ HCI_CHANNEL_MONITOR = 2
+ HCI_CHANNEL_CONTROL = 3
+ HCI_CHANNEL_LOGGING = 4
+)
+
+// Socketoption Level
+const (
+ SOL_BLUETOOTH = 0x112
+ SOL_HCI = 0x0
+ SOL_L2CAP = 0x6
+ SOL_RFCOMM = 0x12
+ SOL_SCO = 0x11
+)
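For reference only (not part of the vendored patch): a minimal sketch of how these constants combine with unix.Socket and unix.SockaddrHCI to bind a raw HCI socket, assuming a Linux host with an adapter at device index 0 and sufficient privileges (usually root or CAP_NET_ADMIN):

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Raw Bluetooth HCI socket on the kernel's RAW channel.
	fd, err := unix.Socket(unix.AF_BLUETOOTH, unix.SOCK_RAW|unix.SOCK_CLOEXEC, unix.BTPROTO_HCI)
	if err != nil {
		log.Fatalf("socket: %v", err)
	}
	defer unix.Close(fd)

	// Bind to adapter hci0; HCI_CHANNEL_USER would instead take exclusive
	// control of the device.
	sa := &unix.SockaddrHCI{Dev: 0, Channel: unix.HCI_CHANNEL_RAW}
	if err := unix.Bind(fd, sa); err != nil {
		log.Fatalf("bind: %v", err)
	}
	fmt.Println("bound raw HCI socket to hci0")
}
```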
diff --git a/vendor/golang.org/x/sys/unix/cap_freebsd.go b/vendor/golang.org/x/sys/unix/cap_freebsd.go
new file mode 100644
index 0000000..df52048
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/cap_freebsd.go
@@ -0,0 +1,195 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build freebsd
+
+package unix
+
+import (
+ "errors"
+ "fmt"
+)
+
+// Go implementation of C mostly found in /usr/src/sys/kern/subr_capability.c
+
+const (
+ // This is the version of CapRights this package understands. See C implementation for parallels.
+ capRightsGoVersion = CAP_RIGHTS_VERSION_00
+ capArSizeMin = CAP_RIGHTS_VERSION_00 + 2
+ capArSizeMax = capRightsGoVersion + 2
+)
+
+var (
+ bit2idx = []int{
+ -1, 0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1,
+ 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ }
+)
+
+func capidxbit(right uint64) int {
+ return int((right >> 57) & 0x1f)
+}
+
+func rightToIndex(right uint64) (int, error) {
+ idx := capidxbit(right)
+ if idx < 0 || idx >= len(bit2idx) {
+ return -2, fmt.Errorf("index for right 0x%x out of range", right)
+ }
+ return bit2idx[idx], nil
+}
+
+func caprver(right uint64) int {
+ return int(right >> 62)
+}
+
+func capver(rights *CapRights) int {
+ return caprver(rights.Rights[0])
+}
+
+func caparsize(rights *CapRights) int {
+ return capver(rights) + 2
+}
+
+// CapRightsSet sets the permissions in setrights in rights.
+func CapRightsSet(rights *CapRights, setrights []uint64) error {
+ // This is essentially a copy of cap_rights_vset()
+ if capver(rights) != CAP_RIGHTS_VERSION_00 {
+ return fmt.Errorf("bad rights version %d", capver(rights))
+ }
+
+ n := caparsize(rights)
+ if n < capArSizeMin || n > capArSizeMax {
+ return errors.New("bad rights size")
+ }
+
+ for _, right := range setrights {
+ if caprver(right) != CAP_RIGHTS_VERSION_00 {
+ return errors.New("bad right version")
+ }
+ i, err := rightToIndex(right)
+ if err != nil {
+ return err
+ }
+ if i >= n {
+ return errors.New("index overflow")
+ }
+ if capidxbit(rights.Rights[i]) != capidxbit(right) {
+ return errors.New("index mismatch")
+ }
+ rights.Rights[i] |= right
+ if capidxbit(rights.Rights[i]) != capidxbit(right) {
+ return errors.New("index mismatch (after assign)")
+ }
+ }
+
+ return nil
+}
+
+// CapRightsClear clears the permissions in clearrights from rights.
+func CapRightsClear(rights *CapRights, clearrights []uint64) error {
+ // This is essentially a copy of cap_rights_vclear()
+ if capver(rights) != CAP_RIGHTS_VERSION_00 {
+ return fmt.Errorf("bad rights version %d", capver(rights))
+ }
+
+ n := caparsize(rights)
+ if n < capArSizeMin || n > capArSizeMax {
+ return errors.New("bad rights size")
+ }
+
+ for _, right := range clearrights {
+ if caprver(right) != CAP_RIGHTS_VERSION_00 {
+ return errors.New("bad right version")
+ }
+ i, err := rightToIndex(right)
+ if err != nil {
+ return err
+ }
+ if i >= n {
+ return errors.New("index overflow")
+ }
+ if capidxbit(rights.Rights[i]) != capidxbit(right) {
+ return errors.New("index mismatch")
+ }
+ rights.Rights[i] &= ^(right & 0x01FFFFFFFFFFFFFF)
+ if capidxbit(rights.Rights[i]) != capidxbit(right) {
+ return errors.New("index mismatch (after assign)")
+ }
+ }
+
+ return nil
+}
+
+// CapRightsIsSet checks whether all the permissions in setrights are present in rights.
+func CapRightsIsSet(rights *CapRights, setrights []uint64) (bool, error) {
+ // This is essentially a copy of cap_rights_is_vset()
+ if capver(rights) != CAP_RIGHTS_VERSION_00 {
+ return false, fmt.Errorf("bad rights version %d", capver(rights))
+ }
+
+ n := caparsize(rights)
+ if n < capArSizeMin || n > capArSizeMax {
+ return false, errors.New("bad rights size")
+ }
+
+ for _, right := range setrights {
+ if caprver(right) != CAP_RIGHTS_VERSION_00 {
+ return false, errors.New("bad right version")
+ }
+ i, err := rightToIndex(right)
+ if err != nil {
+ return false, err
+ }
+ if i >= n {
+ return false, errors.New("index overflow")
+ }
+ if capidxbit(rights.Rights[i]) != capidxbit(right) {
+ return false, errors.New("index mismatch")
+ }
+ if (rights.Rights[i] & right) != right {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
+
+func capright(idx uint64, bit uint64) uint64 {
+ return ((1 << (57 + idx)) | bit)
+}
+
+// CapRightsInit returns a pointer to an initialised CapRights structure filled with rights.
+// See man cap_rights_init(3) and rights(4).
+func CapRightsInit(rights []uint64) (*CapRights, error) {
+ var r CapRights
+ r.Rights[0] = (capRightsGoVersion << 62) | capright(0, 0)
+ r.Rights[1] = capright(1, 0)
+
+ err := CapRightsSet(&r, rights)
+ if err != nil {
+ return nil, err
+ }
+ return &r, nil
+}
+
+// CapRightsLimit reduces the operations permitted on fd to at most those contained in rights.
+// The capability rights on fd can never be increased by CapRightsLimit.
+// See man cap_rights_limit(2) and rights(4).
+func CapRightsLimit(fd uintptr, rights *CapRights) error {
+ return capRightsLimit(int(fd), rights)
+}
+
+// CapRightsGet returns a CapRights structure containing the operations permitted on fd.
+// See man cap_rights_get(3) and rights(4).
+func CapRightsGet(fd uintptr) (*CapRights, error) {
+ r, err := CapRightsInit(nil)
+ if err != nil {
+ return nil, err
+ }
+ err = capRightsGet(capRightsGoVersion, int(fd), r)
+ if err != nil {
+ return nil, err
+ }
+ return r, nil
+}
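For reference only (not part of the vendored patch): a minimal sketch of limiting a file descriptor with the Capsicum helpers above, assuming a FreeBSD host; /etc/passwd is just a placeholder path:

```go
// +build freebsd

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Open("/etc/passwd", unix.O_RDONLY, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	// Restrict the descriptor to read and seek; later writes fail with ENOTCAPABLE.
	rights, err := unix.CapRightsInit([]uint64{unix.CAP_READ, unix.CAP_SEEK})
	if err != nil {
		log.Fatal(err)
	}
	if err := unix.CapRightsLimit(uintptr(fd), rights); err != nil {
		log.Fatal(err)
	}

	// Read the rights back and confirm CAP_WRITE is gone.
	got, err := unix.CapRightsGet(uintptr(fd))
	if err != nil {
		log.Fatal(err)
	}
	hasWrite, err := unix.CapRightsIsSet(got, []uint64{unix.CAP_WRITE})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("CAP_WRITE present after limit: %v", hasWrite) // expected: false
}
```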
diff --git a/vendor/golang.org/x/sys/unix/constants.go b/vendor/golang.org/x/sys/unix/constants.go
new file mode 100644
index 0000000..3a6ac64
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/constants.go
@@ -0,0 +1,13 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package unix
+
+const (
+ R_OK = 0x4
+ W_OK = 0x2
+ X_OK = 0x1
+)
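For reference only (not part of the vendored patch): these are the mode bits accepted by unix.Access; a minimal sketch, with /etc/shadow as an arbitrary example path:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Access returns nil when the calling process has the requested permissions.
	if err := unix.Access("/etc/shadow", unix.R_OK|unix.W_OK); err != nil {
		fmt.Println("no read/write access:", err)
	} else {
		fmt.Println("readable and writable")
	}
}
```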
diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go
new file mode 100644
index 0000000..5e5fb45
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go
@@ -0,0 +1,27 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build aix
+// +build ppc
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used by AIX.
+
+package unix
+
+// Major returns the major component of an AIX device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev >> 16) & 0xffff)
+}
+
+// Minor returns the minor component of an AIX device number.
+func Minor(dev uint64) uint32 {
+ return uint32(dev & 0xffff)
+}
+
+// Mkdev returns an AIX device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ return uint64(((major) << 16) | (minor))
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go
new file mode 100644
index 0000000..8b40124
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go
@@ -0,0 +1,29 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build aix
+// +build ppc64
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used by AIX.
+
+package unix
+
+// Major returns the major component of an AIX device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev & 0x3fffffff00000000) >> 32)
+}
+
+// Minor returns the minor component of an AIX device number.
+func Minor(dev uint64) uint32 {
+ return uint32((dev & 0x00000000ffffffff) >> 0)
+}
+
+// Mkdev returns an AIX device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ var DEVNO64 uint64
+ DEVNO64 = 0x8000000000000000
+ return ((uint64(major) << 32) | (uint64(minor) & 0x00000000FFFFFFFF) | DEVNO64)
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_darwin.go b/vendor/golang.org/x/sys/unix/dev_darwin.go
new file mode 100644
index 0000000..8d1dc0f
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_darwin.go
@@ -0,0 +1,24 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used in Darwin's sys/types.h header.
+
+package unix
+
+// Major returns the major component of a Darwin device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev >> 24) & 0xff)
+}
+
+// Minor returns the minor component of a Darwin device number.
+func Minor(dev uint64) uint32 {
+ return uint32(dev & 0xffffff)
+}
+
+// Mkdev returns a Darwin device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ return (uint64(major) << 24) | uint64(minor)
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_dragonfly.go b/vendor/golang.org/x/sys/unix/dev_dragonfly.go
new file mode 100644
index 0000000..8502f20
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_dragonfly.go
@@ -0,0 +1,30 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used in Dragonfly's sys/types.h header.
+//
+// The information below is extracted and adapted from sys/types.h:
+//
+// Minor gives a cookie instead of an index, in order to avoid changing the
+// meanings of bits 0-15 or wasting time and space shifting bits 16-31 for
+// devices that don't use them.
+
+package unix
+
+// Major returns the major component of a DragonFlyBSD device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev >> 8) & 0xff)
+}
+
+// Minor returns the minor component of a DragonFlyBSD device number.
+func Minor(dev uint64) uint32 {
+ return uint32(dev & 0xffff00ff)
+}
+
+// Mkdev returns a DragonFlyBSD device number generated from the given major and
+// minor components.
+func Mkdev(major, minor uint32) uint64 {
+ return (uint64(major) << 8) | uint64(minor)
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_freebsd.go b/vendor/golang.org/x/sys/unix/dev_freebsd.go
new file mode 100644
index 0000000..eba3b4b
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_freebsd.go
@@ -0,0 +1,30 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used in FreeBSD's sys/types.h header.
+//
+// The information below is extracted and adapted from sys/types.h:
+//
+// Minor gives a cookie instead of an index, in order to avoid changing the
+// meanings of bits 0-15 or wasting time and space shifting bits 16-31 for
+// devices that don't use them.
+
+package unix
+
+// Major returns the major component of a FreeBSD device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev >> 8) & 0xff)
+}
+
+// Minor returns the minor component of a FreeBSD device number.
+func Minor(dev uint64) uint32 {
+ return uint32(dev & 0xffff00ff)
+}
+
+// Mkdev returns a FreeBSD device number generated from the given major and
+// minor components.
+func Mkdev(major, minor uint32) uint64 {
+ return (uint64(major) << 8) | uint64(minor)
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_linux.go b/vendor/golang.org/x/sys/unix/dev_linux.go
new file mode 100644
index 0000000..d165d6f
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_linux.go
@@ -0,0 +1,42 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used by the Linux kernel and glibc.
+//
+// The information below is extracted and adapted from bits/sysmacros.h in the
+// glibc sources:
+//
+// dev_t in glibc is 64-bit, with 32-bit major and minor numbers. glibc's
+// default encoding is MMMM Mmmm mmmM MMmm, where M is a hex digit of the major
+// number and m is a hex digit of the minor number. This is backward compatible
+// with legacy systems where dev_t is 16 bits wide, encoded as MMmm. It is also
+// backward compatible with the Linux kernel, which for some architectures uses
+// 32-bit dev_t, encoded as mmmM MMmm.
+
+package unix
+
+// Major returns the major component of a Linux device number.
+func Major(dev uint64) uint32 {
+ major := uint32((dev & 0x00000000000fff00) >> 8)
+ major |= uint32((dev & 0xfffff00000000000) >> 32)
+ return major
+}
+
+// Minor returns the minor component of a Linux device number.
+func Minor(dev uint64) uint32 {
+ minor := uint32((dev & 0x00000000000000ff) >> 0)
+ minor |= uint32((dev & 0x00000ffffff00000) >> 12)
+ return minor
+}
+
+// Mkdev returns a Linux device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ dev := (uint64(major) & 0x00000fff) << 8
+ dev |= (uint64(major) & 0xfffff000) << 32
+ dev |= (uint64(minor) & 0x000000ff) << 0
+ dev |= (uint64(minor) & 0xffffff00) << 12
+ return dev
+}
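For reference only (not part of the vendored patch): a minimal round-trip of the Linux encoding above, assuming a Linux host where /dev/null exists:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	var st unix.Stat_t
	if err := unix.Stat("/dev/null", &st); err != nil {
		log.Fatal(err)
	}
	dev := uint64(st.Rdev)
	major, minor := unix.Major(dev), unix.Minor(dev)
	fmt.Printf("/dev/null is %d:%d\n", major, minor) // typically 1:3

	// Re-encoding the split pair must give back the original dev_t.
	if unix.Mkdev(major, minor) != dev {
		log.Fatal("round-trip mismatch")
	}
}
```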
diff --git a/vendor/golang.org/x/sys/unix/dev_netbsd.go b/vendor/golang.org/x/sys/unix/dev_netbsd.go
new file mode 100644
index 0000000..b4a203d
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_netbsd.go
@@ -0,0 +1,29 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used in NetBSD's sys/types.h header.
+
+package unix
+
+// Major returns the major component of a NetBSD device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev & 0x000fff00) >> 8)
+}
+
+// Minor returns the minor component of a NetBSD device number.
+func Minor(dev uint64) uint32 {
+ minor := uint32((dev & 0x000000ff) >> 0)
+ minor |= uint32((dev & 0xfff00000) >> 12)
+ return minor
+}
+
+// Mkdev returns a NetBSD device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ dev := (uint64(major) << 8) & 0x000fff00
+ dev |= (uint64(minor) << 12) & 0xfff00000
+ dev |= (uint64(minor) << 0) & 0x000000ff
+ return dev
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_openbsd.go b/vendor/golang.org/x/sys/unix/dev_openbsd.go
new file mode 100644
index 0000000..f3430c4
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_openbsd.go
@@ -0,0 +1,29 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used in OpenBSD's sys/types.h header.
+
+package unix
+
+// Major returns the major component of an OpenBSD device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev & 0x0000ff00) >> 8)
+}
+
+// Minor returns the minor component of an OpenBSD device number.
+func Minor(dev uint64) uint32 {
+ minor := uint32((dev & 0x000000ff) >> 0)
+ minor |= uint32((dev & 0xffff0000) >> 8)
+ return minor
+}
+
+// Mkdev returns an OpenBSD device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ dev := (uint64(major) << 8) & 0x0000ff00
+ dev |= (uint64(minor) << 8) & 0xffff0000
+ dev |= (uint64(minor) << 0) & 0x000000ff
+ return dev
+}
diff --git a/vendor/golang.org/x/sys/unix/dirent.go b/vendor/golang.org/x/sys/unix/dirent.go
new file mode 100644
index 0000000..304016b
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dirent.go
@@ -0,0 +1,102 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package unix
+
+import "unsafe"
+
+// readInt returns the size-bytes unsigned integer in native byte order at offset off.
+func readInt(b []byte, off, size uintptr) (u uint64, ok bool) {
+ if len(b) < int(off+size) {
+ return 0, false
+ }
+ if isBigEndian {
+ return readIntBE(b[off:], size), true
+ }
+ return readIntLE(b[off:], size), true
+}
+
+func readIntBE(b []byte, size uintptr) uint64 {
+ switch size {
+ case 1:
+ return uint64(b[0])
+ case 2:
+ _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[1]) | uint64(b[0])<<8
+ case 4:
+ _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24
+ case 8:
+ _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
+ uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
+ default:
+ panic("syscall: readInt with unsupported size")
+ }
+}
+
+func readIntLE(b []byte, size uintptr) uint64 {
+ switch size {
+ case 1:
+ return uint64(b[0])
+ case 2:
+ _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[0]) | uint64(b[1])<<8
+ case 4:
+ _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24
+ case 8:
+ _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ default:
+ panic("syscall: readInt with unsupported size")
+ }
+}
+
+// ParseDirent parses up to max directory entries in buf,
+// appending the names to names. It returns the number of
+// bytes consumed from buf, the number of entries added
+// to names, and the new names slice.
+func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) {
+ origlen := len(buf)
+ count = 0
+ for max != 0 && len(buf) > 0 {
+ reclen, ok := direntReclen(buf)
+ if !ok || reclen > uint64(len(buf)) {
+ return origlen, count, names
+ }
+ rec := buf[:reclen]
+ buf = buf[reclen:]
+ ino, ok := direntIno(rec)
+ if !ok {
+ break
+ }
+ if ino == 0 { // File absent in directory.
+ continue
+ }
+ const namoff = uint64(unsafe.Offsetof(Dirent{}.Name))
+ namlen, ok := direntNamlen(rec)
+ if !ok || namoff+namlen > uint64(len(rec)) {
+ break
+ }
+ name := rec[namoff : namoff+namlen]
+ for i, c := range name {
+ if c == 0 {
+ name = name[:i]
+ break
+ }
+ }
+ // Check for useless names before allocating a string.
+ if string(name) == "." || string(name) == ".." {
+ continue
+ }
+ max--
+ count++
+ names = append(names, string(name))
+ }
+ return origlen - len(buf), count, names
+}
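For reference only (not part of the vendored patch): a minimal sketch of feeding raw getdents output into ParseDirent, assuming Linux and using /tmp as a placeholder directory:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Open("/tmp", unix.O_RDONLY|unix.O_DIRECTORY, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	buf := make([]byte, 4096)
	var names []string
	for {
		n, err := unix.ReadDirent(fd, buf)
		if err != nil {
			log.Fatal(err)
		}
		if n == 0 {
			break
		}
		// max = -1 means no entry limit; "." and ".." are skipped for us.
		_, _, names = unix.ParseDirent(buf[:n], -1, names)
	}
	fmt.Println(names)
}
```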
diff --git a/vendor/golang.org/x/sys/unix/endian_big.go b/vendor/golang.org/x/sys/unix/endian_big.go
new file mode 100644
index 0000000..5e92690
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/endian_big.go
@@ -0,0 +1,9 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// +build ppc64 s390x mips mips64
+
+package unix
+
+const isBigEndian = true
diff --git a/vendor/golang.org/x/sys/unix/endian_little.go b/vendor/golang.org/x/sys/unix/endian_little.go
new file mode 100644
index 0000000..bcdb5d3
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/endian_little.go
@@ -0,0 +1,9 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// +build 386 amd64 amd64p32 arm arm64 ppc64le mipsle mips64le riscv64
+
+package unix
+
+const isBigEndian = false
diff --git a/vendor/golang.org/x/sys/unix/env_unix.go b/vendor/golang.org/x/sys/unix/env_unix.go
new file mode 100644
index 0000000..84178b0
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/env_unix.go
@@ -0,0 +1,31 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
+
+// Unix environment variables.
+
+package unix
+
+import "syscall"
+
+func Getenv(key string) (value string, found bool) {
+ return syscall.Getenv(key)
+}
+
+func Setenv(key, value string) error {
+ return syscall.Setenv(key, value)
+}
+
+func Clearenv() {
+ syscall.Clearenv()
+}
+
+func Environ() []string {
+ return syscall.Environ()
+}
+
+func Unsetenv(key string) error {
+ return syscall.Unsetenv(key)
+}
diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_386.go b/vendor/golang.org/x/sys/unix/errors_freebsd_386.go
new file mode 100644
index 0000000..c56bc8b
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/errors_freebsd_386.go
@@ -0,0 +1,227 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep
+// them here for backwards compatibility.
+
+package unix
+
+const (
+ IFF_SMART = 0x20
+ IFT_1822 = 0x2
+ IFT_A12MPPSWITCH = 0x82
+ IFT_AAL2 = 0xbb
+ IFT_AAL5 = 0x31
+ IFT_ADSL = 0x5e
+ IFT_AFLANE8023 = 0x3b
+ IFT_AFLANE8025 = 0x3c
+ IFT_ARAP = 0x58
+ IFT_ARCNET = 0x23
+ IFT_ARCNETPLUS = 0x24
+ IFT_ASYNC = 0x54
+ IFT_ATM = 0x25
+ IFT_ATMDXI = 0x69
+ IFT_ATMFUNI = 0x6a
+ IFT_ATMIMA = 0x6b
+ IFT_ATMLOGICAL = 0x50
+ IFT_ATMRADIO = 0xbd
+ IFT_ATMSUBINTERFACE = 0x86
+ IFT_ATMVCIENDPT = 0xc2
+ IFT_ATMVIRTUAL = 0x95
+ IFT_BGPPOLICYACCOUNTING = 0xa2
+ IFT_BSC = 0x53
+ IFT_CCTEMUL = 0x3d
+ IFT_CEPT = 0x13
+ IFT_CES = 0x85
+ IFT_CHANNEL = 0x46
+ IFT_CNR = 0x55
+ IFT_COFFEE = 0x84
+ IFT_COMPOSITELINK = 0x9b
+ IFT_DCN = 0x8d
+ IFT_DIGITALPOWERLINE = 0x8a
+ IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba
+ IFT_DLSW = 0x4a
+ IFT_DOCSCABLEDOWNSTREAM = 0x80
+ IFT_DOCSCABLEMACLAYER = 0x7f
+ IFT_DOCSCABLEUPSTREAM = 0x81
+ IFT_DS0 = 0x51
+ IFT_DS0BUNDLE = 0x52
+ IFT_DS1FDL = 0xaa
+ IFT_DS3 = 0x1e
+ IFT_DTM = 0x8c
+ IFT_DVBASILN = 0xac
+ IFT_DVBASIOUT = 0xad
+ IFT_DVBRCCDOWNSTREAM = 0x93
+ IFT_DVBRCCMACLAYER = 0x92
+ IFT_DVBRCCUPSTREAM = 0x94
+ IFT_ENC = 0xf4
+ IFT_EON = 0x19
+ IFT_EPLRS = 0x57
+ IFT_ESCON = 0x49
+ IFT_ETHER = 0x6
+ IFT_FAITH = 0xf2
+ IFT_FAST = 0x7d
+ IFT_FASTETHER = 0x3e
+ IFT_FASTETHERFX = 0x45
+ IFT_FDDI = 0xf
+ IFT_FIBRECHANNEL = 0x38
+ IFT_FRAMERELAYINTERCONNECT = 0x3a
+ IFT_FRAMERELAYMPI = 0x5c
+ IFT_FRDLCIENDPT = 0xc1
+ IFT_FRELAY = 0x20
+ IFT_FRELAYDCE = 0x2c
+ IFT_FRF16MFRBUNDLE = 0xa3
+ IFT_FRFORWARD = 0x9e
+ IFT_G703AT2MB = 0x43
+ IFT_G703AT64K = 0x42
+ IFT_GIF = 0xf0
+ IFT_GIGABITETHERNET = 0x75
+ IFT_GR303IDT = 0xb2
+ IFT_GR303RDT = 0xb1
+ IFT_H323GATEKEEPER = 0xa4
+ IFT_H323PROXY = 0xa5
+ IFT_HDH1822 = 0x3
+ IFT_HDLC = 0x76
+ IFT_HDSL2 = 0xa8
+ IFT_HIPERLAN2 = 0xb7
+ IFT_HIPPI = 0x2f
+ IFT_HIPPIINTERFACE = 0x39
+ IFT_HOSTPAD = 0x5a
+ IFT_HSSI = 0x2e
+ IFT_HY = 0xe
+ IFT_IBM370PARCHAN = 0x48
+ IFT_IDSL = 0x9a
+ IFT_IEEE80211 = 0x47
+ IFT_IEEE80212 = 0x37
+ IFT_IEEE8023ADLAG = 0xa1
+ IFT_IFGSN = 0x91
+ IFT_IMT = 0xbe
+ IFT_INTERLEAVE = 0x7c
+ IFT_IP = 0x7e
+ IFT_IPFORWARD = 0x8e
+ IFT_IPOVERATM = 0x72
+ IFT_IPOVERCDLC = 0x6d
+ IFT_IPOVERCLAW = 0x6e
+ IFT_IPSWITCH = 0x4e
+ IFT_IPXIP = 0xf9
+ IFT_ISDN = 0x3f
+ IFT_ISDNBASIC = 0x14
+ IFT_ISDNPRIMARY = 0x15
+ IFT_ISDNS = 0x4b
+ IFT_ISDNU = 0x4c
+ IFT_ISO88022LLC = 0x29
+ IFT_ISO88023 = 0x7
+ IFT_ISO88024 = 0x8
+ IFT_ISO88025 = 0x9
+ IFT_ISO88025CRFPINT = 0x62
+ IFT_ISO88025DTR = 0x56
+ IFT_ISO88025FIBER = 0x73
+ IFT_ISO88026 = 0xa
+ IFT_ISUP = 0xb3
+ IFT_L3IPXVLAN = 0x89
+ IFT_LAPB = 0x10
+ IFT_LAPD = 0x4d
+ IFT_LAPF = 0x77
+ IFT_LOCALTALK = 0x2a
+ IFT_LOOP = 0x18
+ IFT_MEDIAMAILOVERIP = 0x8b
+ IFT_MFSIGLINK = 0xa7
+ IFT_MIOX25 = 0x26
+ IFT_MODEM = 0x30
+ IFT_MPC = 0x71
+ IFT_MPLS = 0xa6
+ IFT_MPLSTUNNEL = 0x96
+ IFT_MSDSL = 0x8f
+ IFT_MVL = 0xbf
+ IFT_MYRINET = 0x63
+ IFT_NFAS = 0xaf
+ IFT_NSIP = 0x1b
+ IFT_OPTICALCHANNEL = 0xc3
+ IFT_OPTICALTRANSPORT = 0xc4
+ IFT_OTHER = 0x1
+ IFT_P10 = 0xc
+ IFT_P80 = 0xd
+ IFT_PARA = 0x22
+ IFT_PFLOG = 0xf6
+ IFT_PFSYNC = 0xf7
+ IFT_PLC = 0xae
+ IFT_POS = 0xab
+ IFT_PPPMULTILINKBUNDLE = 0x6c
+ IFT_PROPBWAP2MP = 0xb8
+ IFT_PROPCNLS = 0x59
+ IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5
+ IFT_PROPDOCSWIRELESSMACLAYER = 0xb4
+ IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6
+ IFT_PROPMUX = 0x36
+ IFT_PROPWIRELESSP2P = 0x9d
+ IFT_PTPSERIAL = 0x16
+ IFT_PVC = 0xf1
+ IFT_QLLC = 0x44
+ IFT_RADIOMAC = 0xbc
+ IFT_RADSL = 0x5f
+ IFT_REACHDSL = 0xc0
+ IFT_RFC1483 = 0x9f
+ IFT_RS232 = 0x21
+ IFT_RSRB = 0x4f
+ IFT_SDLC = 0x11
+ IFT_SDSL = 0x60
+ IFT_SHDSL = 0xa9
+ IFT_SIP = 0x1f
+ IFT_SLIP = 0x1c
+ IFT_SMDSDXI = 0x2b
+ IFT_SMDSICIP = 0x34
+ IFT_SONET = 0x27
+ IFT_SONETOVERHEADCHANNEL = 0xb9
+ IFT_SONETPATH = 0x32
+ IFT_SONETVT = 0x33
+ IFT_SRP = 0x97
+ IFT_SS7SIGLINK = 0x9c
+ IFT_STACKTOSTACK = 0x6f
+ IFT_STARLAN = 0xb
+ IFT_STF = 0xd7
+ IFT_T1 = 0x12
+ IFT_TDLC = 0x74
+ IFT_TERMPAD = 0x5b
+ IFT_TR008 = 0xb0
+ IFT_TRANSPHDLC = 0x7b
+ IFT_TUNNEL = 0x83
+ IFT_ULTRA = 0x1d
+ IFT_USB = 0xa0
+ IFT_V11 = 0x40
+ IFT_V35 = 0x2d
+ IFT_V36 = 0x41
+ IFT_V37 = 0x78
+ IFT_VDSL = 0x61
+ IFT_VIRTUALIPADDRESS = 0x70
+ IFT_VOICEEM = 0x64
+ IFT_VOICEENCAP = 0x67
+ IFT_VOICEFXO = 0x65
+ IFT_VOICEFXS = 0x66
+ IFT_VOICEOVERATM = 0x98
+ IFT_VOICEOVERFRAMERELAY = 0x99
+ IFT_VOICEOVERIP = 0x68
+ IFT_X213 = 0x5d
+ IFT_X25 = 0x5
+ IFT_X25DDN = 0x4
+ IFT_X25HUNTGROUP = 0x7a
+ IFT_X25MLP = 0x79
+ IFT_X25PLE = 0x28
+ IFT_XETHER = 0x1a
+ IPPROTO_MAXID = 0x34
+ IPV6_FAITH = 0x1d
+ IP_FAITH = 0x16
+ MAP_NORESERVE = 0x40
+ MAP_RENAME = 0x20
+ NET_RT_MAXID = 0x6
+ RTF_PRCLONING = 0x10000
+ RTM_OLDADD = 0x9
+ RTM_OLDDEL = 0xa
+ SIOCADDRT = 0x8030720a
+ SIOCALIFADDR = 0x8118691b
+ SIOCDELRT = 0x8030720b
+ SIOCDLIFADDR = 0x8118691d
+ SIOCGLIFADDR = 0xc118691c
+ SIOCGLIFPHYADDR = 0xc118694b
+ SIOCSLIFPHYADDR = 0x8118694a
+)
diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go
new file mode 100644
index 0000000..3e97711
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go
@@ -0,0 +1,227 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep
+// them here for backwards compatibility.
+
+package unix
+
+const (
+ IFF_SMART = 0x20
+ IFT_1822 = 0x2
+ IFT_A12MPPSWITCH = 0x82
+ IFT_AAL2 = 0xbb
+ IFT_AAL5 = 0x31
+ IFT_ADSL = 0x5e
+ IFT_AFLANE8023 = 0x3b
+ IFT_AFLANE8025 = 0x3c
+ IFT_ARAP = 0x58
+ IFT_ARCNET = 0x23
+ IFT_ARCNETPLUS = 0x24
+ IFT_ASYNC = 0x54
+ IFT_ATM = 0x25
+ IFT_ATMDXI = 0x69
+ IFT_ATMFUNI = 0x6a
+ IFT_ATMIMA = 0x6b
+ IFT_ATMLOGICAL = 0x50
+ IFT_ATMRADIO = 0xbd
+ IFT_ATMSUBINTERFACE = 0x86
+ IFT_ATMVCIENDPT = 0xc2
+ IFT_ATMVIRTUAL = 0x95
+ IFT_BGPPOLICYACCOUNTING = 0xa2
+ IFT_BSC = 0x53
+ IFT_CCTEMUL = 0x3d
+ IFT_CEPT = 0x13
+ IFT_CES = 0x85
+ IFT_CHANNEL = 0x46
+ IFT_CNR = 0x55
+ IFT_COFFEE = 0x84
+ IFT_COMPOSITELINK = 0x9b
+ IFT_DCN = 0x8d
+ IFT_DIGITALPOWERLINE = 0x8a
+ IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba
+ IFT_DLSW = 0x4a
+ IFT_DOCSCABLEDOWNSTREAM = 0x80
+ IFT_DOCSCABLEMACLAYER = 0x7f
+ IFT_DOCSCABLEUPSTREAM = 0x81
+ IFT_DS0 = 0x51
+ IFT_DS0BUNDLE = 0x52
+ IFT_DS1FDL = 0xaa
+ IFT_DS3 = 0x1e
+ IFT_DTM = 0x8c
+ IFT_DVBASILN = 0xac
+ IFT_DVBASIOUT = 0xad
+ IFT_DVBRCCDOWNSTREAM = 0x93
+ IFT_DVBRCCMACLAYER = 0x92
+ IFT_DVBRCCUPSTREAM = 0x94
+ IFT_ENC = 0xf4
+ IFT_EON = 0x19
+ IFT_EPLRS = 0x57
+ IFT_ESCON = 0x49
+ IFT_ETHER = 0x6
+ IFT_FAITH = 0xf2
+ IFT_FAST = 0x7d
+ IFT_FASTETHER = 0x3e
+ IFT_FASTETHERFX = 0x45
+ IFT_FDDI = 0xf
+ IFT_FIBRECHANNEL = 0x38
+ IFT_FRAMERELAYINTERCONNECT = 0x3a
+ IFT_FRAMERELAYMPI = 0x5c
+ IFT_FRDLCIENDPT = 0xc1
+ IFT_FRELAY = 0x20
+ IFT_FRELAYDCE = 0x2c
+ IFT_FRF16MFRBUNDLE = 0xa3
+ IFT_FRFORWARD = 0x9e
+ IFT_G703AT2MB = 0x43
+ IFT_G703AT64K = 0x42
+ IFT_GIF = 0xf0
+ IFT_GIGABITETHERNET = 0x75
+ IFT_GR303IDT = 0xb2
+ IFT_GR303RDT = 0xb1
+ IFT_H323GATEKEEPER = 0xa4
+ IFT_H323PROXY = 0xa5
+ IFT_HDH1822 = 0x3
+ IFT_HDLC = 0x76
+ IFT_HDSL2 = 0xa8
+ IFT_HIPERLAN2 = 0xb7
+ IFT_HIPPI = 0x2f
+ IFT_HIPPIINTERFACE = 0x39
+ IFT_HOSTPAD = 0x5a
+ IFT_HSSI = 0x2e
+ IFT_HY = 0xe
+ IFT_IBM370PARCHAN = 0x48
+ IFT_IDSL = 0x9a
+ IFT_IEEE80211 = 0x47
+ IFT_IEEE80212 = 0x37
+ IFT_IEEE8023ADLAG = 0xa1
+ IFT_IFGSN = 0x91
+ IFT_IMT = 0xbe
+ IFT_INTERLEAVE = 0x7c
+ IFT_IP = 0x7e
+ IFT_IPFORWARD = 0x8e
+ IFT_IPOVERATM = 0x72
+ IFT_IPOVERCDLC = 0x6d
+ IFT_IPOVERCLAW = 0x6e
+ IFT_IPSWITCH = 0x4e
+ IFT_IPXIP = 0xf9
+ IFT_ISDN = 0x3f
+ IFT_ISDNBASIC = 0x14
+ IFT_ISDNPRIMARY = 0x15
+ IFT_ISDNS = 0x4b
+ IFT_ISDNU = 0x4c
+ IFT_ISO88022LLC = 0x29
+ IFT_ISO88023 = 0x7
+ IFT_ISO88024 = 0x8
+ IFT_ISO88025 = 0x9
+ IFT_ISO88025CRFPINT = 0x62
+ IFT_ISO88025DTR = 0x56
+ IFT_ISO88025FIBER = 0x73
+ IFT_ISO88026 = 0xa
+ IFT_ISUP = 0xb3
+ IFT_L3IPXVLAN = 0x89
+ IFT_LAPB = 0x10
+ IFT_LAPD = 0x4d
+ IFT_LAPF = 0x77
+ IFT_LOCALTALK = 0x2a
+ IFT_LOOP = 0x18
+ IFT_MEDIAMAILOVERIP = 0x8b
+ IFT_MFSIGLINK = 0xa7
+ IFT_MIOX25 = 0x26
+ IFT_MODEM = 0x30
+ IFT_MPC = 0x71
+ IFT_MPLS = 0xa6
+ IFT_MPLSTUNNEL = 0x96
+ IFT_MSDSL = 0x8f
+ IFT_MVL = 0xbf
+ IFT_MYRINET = 0x63
+ IFT_NFAS = 0xaf
+ IFT_NSIP = 0x1b
+ IFT_OPTICALCHANNEL = 0xc3
+ IFT_OPTICALTRANSPORT = 0xc4
+ IFT_OTHER = 0x1
+ IFT_P10 = 0xc
+ IFT_P80 = 0xd
+ IFT_PARA = 0x22
+ IFT_PFLOG = 0xf6
+ IFT_PFSYNC = 0xf7
+ IFT_PLC = 0xae
+ IFT_POS = 0xab
+ IFT_PPPMULTILINKBUNDLE = 0x6c
+ IFT_PROPBWAP2MP = 0xb8
+ IFT_PROPCNLS = 0x59
+ IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5
+ IFT_PROPDOCSWIRELESSMACLAYER = 0xb4
+ IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6
+ IFT_PROPMUX = 0x36
+ IFT_PROPWIRELESSP2P = 0x9d
+ IFT_PTPSERIAL = 0x16
+ IFT_PVC = 0xf1
+ IFT_QLLC = 0x44
+ IFT_RADIOMAC = 0xbc
+ IFT_RADSL = 0x5f
+ IFT_REACHDSL = 0xc0
+ IFT_RFC1483 = 0x9f
+ IFT_RS232 = 0x21
+ IFT_RSRB = 0x4f
+ IFT_SDLC = 0x11
+ IFT_SDSL = 0x60
+ IFT_SHDSL = 0xa9
+ IFT_SIP = 0x1f
+ IFT_SLIP = 0x1c
+ IFT_SMDSDXI = 0x2b
+ IFT_SMDSICIP = 0x34
+ IFT_SONET = 0x27
+ IFT_SONETOVERHEADCHANNEL = 0xb9
+ IFT_SONETPATH = 0x32
+ IFT_SONETVT = 0x33
+ IFT_SRP = 0x97
+ IFT_SS7SIGLINK = 0x9c
+ IFT_STACKTOSTACK = 0x6f
+ IFT_STARLAN = 0xb
+ IFT_STF = 0xd7
+ IFT_T1 = 0x12
+ IFT_TDLC = 0x74
+ IFT_TERMPAD = 0x5b
+ IFT_TR008 = 0xb0
+ IFT_TRANSPHDLC = 0x7b
+ IFT_TUNNEL = 0x83
+ IFT_ULTRA = 0x1d
+ IFT_USB = 0xa0
+ IFT_V11 = 0x40
+ IFT_V35 = 0x2d
+ IFT_V36 = 0x41
+ IFT_V37 = 0x78
+ IFT_VDSL = 0x61
+ IFT_VIRTUALIPADDRESS = 0x70
+ IFT_VOICEEM = 0x64
+ IFT_VOICEENCAP = 0x67
+ IFT_VOICEFXO = 0x65
+ IFT_VOICEFXS = 0x66
+ IFT_VOICEOVERATM = 0x98
+ IFT_VOICEOVERFRAMERELAY = 0x99
+ IFT_VOICEOVERIP = 0x68
+ IFT_X213 = 0x5d
+ IFT_X25 = 0x5
+ IFT_X25DDN = 0x4
+ IFT_X25HUNTGROUP = 0x7a
+ IFT_X25MLP = 0x79
+ IFT_X25PLE = 0x28
+ IFT_XETHER = 0x1a
+ IPPROTO_MAXID = 0x34
+ IPV6_FAITH = 0x1d
+ IP_FAITH = 0x16
+ MAP_NORESERVE = 0x40
+ MAP_RENAME = 0x20
+ NET_RT_MAXID = 0x6
+ RTF_PRCLONING = 0x10000
+ RTM_OLDADD = 0x9
+ RTM_OLDDEL = 0xa
+ SIOCADDRT = 0x8040720a
+ SIOCALIFADDR = 0x8118691b
+ SIOCDELRT = 0x8040720b
+ SIOCDLIFADDR = 0x8118691d
+ SIOCGLIFADDR = 0xc118691c
+ SIOCGLIFPHYADDR = 0xc118694b
+ SIOCSLIFPHYADDR = 0x8118694a
+)
diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go
new file mode 100644
index 0000000..856dca3
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go
@@ -0,0 +1,226 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+const (
+ IFT_1822 = 0x2
+ IFT_A12MPPSWITCH = 0x82
+ IFT_AAL2 = 0xbb
+ IFT_AAL5 = 0x31
+ IFT_ADSL = 0x5e
+ IFT_AFLANE8023 = 0x3b
+ IFT_AFLANE8025 = 0x3c
+ IFT_ARAP = 0x58
+ IFT_ARCNET = 0x23
+ IFT_ARCNETPLUS = 0x24
+ IFT_ASYNC = 0x54
+ IFT_ATM = 0x25
+ IFT_ATMDXI = 0x69
+ IFT_ATMFUNI = 0x6a
+ IFT_ATMIMA = 0x6b
+ IFT_ATMLOGICAL = 0x50
+ IFT_ATMRADIO = 0xbd
+ IFT_ATMSUBINTERFACE = 0x86
+ IFT_ATMVCIENDPT = 0xc2
+ IFT_ATMVIRTUAL = 0x95
+ IFT_BGPPOLICYACCOUNTING = 0xa2
+ IFT_BSC = 0x53
+ IFT_CCTEMUL = 0x3d
+ IFT_CEPT = 0x13
+ IFT_CES = 0x85
+ IFT_CHANNEL = 0x46
+ IFT_CNR = 0x55
+ IFT_COFFEE = 0x84
+ IFT_COMPOSITELINK = 0x9b
+ IFT_DCN = 0x8d
+ IFT_DIGITALPOWERLINE = 0x8a
+ IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba
+ IFT_DLSW = 0x4a
+ IFT_DOCSCABLEDOWNSTREAM = 0x80
+ IFT_DOCSCABLEMACLAYER = 0x7f
+ IFT_DOCSCABLEUPSTREAM = 0x81
+ IFT_DS0 = 0x51
+ IFT_DS0BUNDLE = 0x52
+ IFT_DS1FDL = 0xaa
+ IFT_DS3 = 0x1e
+ IFT_DTM = 0x8c
+ IFT_DVBASILN = 0xac
+ IFT_DVBASIOUT = 0xad
+ IFT_DVBRCCDOWNSTREAM = 0x93
+ IFT_DVBRCCMACLAYER = 0x92
+ IFT_DVBRCCUPSTREAM = 0x94
+ IFT_ENC = 0xf4
+ IFT_EON = 0x19
+ IFT_EPLRS = 0x57
+ IFT_ESCON = 0x49
+ IFT_ETHER = 0x6
+ IFT_FAST = 0x7d
+ IFT_FASTETHER = 0x3e
+ IFT_FASTETHERFX = 0x45
+ IFT_FDDI = 0xf
+ IFT_FIBRECHANNEL = 0x38
+ IFT_FRAMERELAYINTERCONNECT = 0x3a
+ IFT_FRAMERELAYMPI = 0x5c
+ IFT_FRDLCIENDPT = 0xc1
+ IFT_FRELAY = 0x20
+ IFT_FRELAYDCE = 0x2c
+ IFT_FRF16MFRBUNDLE = 0xa3
+ IFT_FRFORWARD = 0x9e
+ IFT_G703AT2MB = 0x43
+ IFT_G703AT64K = 0x42
+ IFT_GIF = 0xf0
+ IFT_GIGABITETHERNET = 0x75
+ IFT_GR303IDT = 0xb2
+ IFT_GR303RDT = 0xb1
+ IFT_H323GATEKEEPER = 0xa4
+ IFT_H323PROXY = 0xa5
+ IFT_HDH1822 = 0x3
+ IFT_HDLC = 0x76
+ IFT_HDSL2 = 0xa8
+ IFT_HIPERLAN2 = 0xb7
+ IFT_HIPPI = 0x2f
+ IFT_HIPPIINTERFACE = 0x39
+ IFT_HOSTPAD = 0x5a
+ IFT_HSSI = 0x2e
+ IFT_HY = 0xe
+ IFT_IBM370PARCHAN = 0x48
+ IFT_IDSL = 0x9a
+ IFT_IEEE80211 = 0x47
+ IFT_IEEE80212 = 0x37
+ IFT_IEEE8023ADLAG = 0xa1
+ IFT_IFGSN = 0x91
+ IFT_IMT = 0xbe
+ IFT_INTERLEAVE = 0x7c
+ IFT_IP = 0x7e
+ IFT_IPFORWARD = 0x8e
+ IFT_IPOVERATM = 0x72
+ IFT_IPOVERCDLC = 0x6d
+ IFT_IPOVERCLAW = 0x6e
+ IFT_IPSWITCH = 0x4e
+ IFT_ISDN = 0x3f
+ IFT_ISDNBASIC = 0x14
+ IFT_ISDNPRIMARY = 0x15
+ IFT_ISDNS = 0x4b
+ IFT_ISDNU = 0x4c
+ IFT_ISO88022LLC = 0x29
+ IFT_ISO88023 = 0x7
+ IFT_ISO88024 = 0x8
+ IFT_ISO88025 = 0x9
+ IFT_ISO88025CRFPINT = 0x62
+ IFT_ISO88025DTR = 0x56
+ IFT_ISO88025FIBER = 0x73
+ IFT_ISO88026 = 0xa
+ IFT_ISUP = 0xb3
+ IFT_L3IPXVLAN = 0x89
+ IFT_LAPB = 0x10
+ IFT_LAPD = 0x4d
+ IFT_LAPF = 0x77
+ IFT_LOCALTALK = 0x2a
+ IFT_LOOP = 0x18
+ IFT_MEDIAMAILOVERIP = 0x8b
+ IFT_MFSIGLINK = 0xa7
+ IFT_MIOX25 = 0x26
+ IFT_MODEM = 0x30
+ IFT_MPC = 0x71
+ IFT_MPLS = 0xa6
+ IFT_MPLSTUNNEL = 0x96
+ IFT_MSDSL = 0x8f
+ IFT_MVL = 0xbf
+ IFT_MYRINET = 0x63
+ IFT_NFAS = 0xaf
+ IFT_NSIP = 0x1b
+ IFT_OPTICALCHANNEL = 0xc3
+ IFT_OPTICALTRANSPORT = 0xc4
+ IFT_OTHER = 0x1
+ IFT_P10 = 0xc
+ IFT_P80 = 0xd
+ IFT_PARA = 0x22
+ IFT_PFLOG = 0xf6
+ IFT_PFSYNC = 0xf7
+ IFT_PLC = 0xae
+ IFT_POS = 0xab
+ IFT_PPPMULTILINKBUNDLE = 0x6c
+ IFT_PROPBWAP2MP = 0xb8
+ IFT_PROPCNLS = 0x59
+ IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5
+ IFT_PROPDOCSWIRELESSMACLAYER = 0xb4
+ IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6
+ IFT_PROPMUX = 0x36
+ IFT_PROPWIRELESSP2P = 0x9d
+ IFT_PTPSERIAL = 0x16
+ IFT_PVC = 0xf1
+ IFT_QLLC = 0x44
+ IFT_RADIOMAC = 0xbc
+ IFT_RADSL = 0x5f
+ IFT_REACHDSL = 0xc0
+ IFT_RFC1483 = 0x9f
+ IFT_RS232 = 0x21
+ IFT_RSRB = 0x4f
+ IFT_SDLC = 0x11
+ IFT_SDSL = 0x60
+ IFT_SHDSL = 0xa9
+ IFT_SIP = 0x1f
+ IFT_SLIP = 0x1c
+ IFT_SMDSDXI = 0x2b
+ IFT_SMDSICIP = 0x34
+ IFT_SONET = 0x27
+ IFT_SONETOVERHEADCHANNEL = 0xb9
+ IFT_SONETPATH = 0x32
+ IFT_SONETVT = 0x33
+ IFT_SRP = 0x97
+ IFT_SS7SIGLINK = 0x9c
+ IFT_STACKTOSTACK = 0x6f
+ IFT_STARLAN = 0xb
+ IFT_STF = 0xd7
+ IFT_T1 = 0x12
+ IFT_TDLC = 0x74
+ IFT_TERMPAD = 0x5b
+ IFT_TR008 = 0xb0
+ IFT_TRANSPHDLC = 0x7b
+ IFT_TUNNEL = 0x83
+ IFT_ULTRA = 0x1d
+ IFT_USB = 0xa0
+ IFT_V11 = 0x40
+ IFT_V35 = 0x2d
+ IFT_V36 = 0x41
+ IFT_V37 = 0x78
+ IFT_VDSL = 0x61
+ IFT_VIRTUALIPADDRESS = 0x70
+ IFT_VOICEEM = 0x64
+ IFT_VOICEENCAP = 0x67
+ IFT_VOICEFXO = 0x65
+ IFT_VOICEFXS = 0x66
+ IFT_VOICEOVERATM = 0x98
+ IFT_VOICEOVERFRAMERELAY = 0x99
+ IFT_VOICEOVERIP = 0x68
+ IFT_X213 = 0x5d
+ IFT_X25 = 0x5
+ IFT_X25DDN = 0x4
+ IFT_X25HUNTGROUP = 0x7a
+ IFT_X25MLP = 0x79
+ IFT_X25PLE = 0x28
+ IFT_XETHER = 0x1a
+
+ // missing constants on FreeBSD-11.1-RELEASE, copied from old values in ztypes_freebsd_arm.go
+ IFF_SMART = 0x20
+ IFT_FAITH = 0xf2
+ IFT_IPXIP = 0xf9
+ IPPROTO_MAXID = 0x34
+ IPV6_FAITH = 0x1d
+ IP_FAITH = 0x16
+ MAP_NORESERVE = 0x40
+ MAP_RENAME = 0x20
+ NET_RT_MAXID = 0x6
+ RTF_PRCLONING = 0x10000
+ RTM_OLDADD = 0x9
+ RTM_OLDDEL = 0xa
+ SIOCADDRT = 0x8030720a
+ SIOCALIFADDR = 0x8118691b
+ SIOCDELRT = 0x8030720b
+ SIOCDLIFADDR = 0x8118691d
+ SIOCGLIFADDR = 0xc118691c
+ SIOCGLIFPHYADDR = 0xc118694b
+ SIOCSLIFPHYADDR = 0x8118694a
+)
diff --git a/vendor/golang.org/x/sys/unix/fcntl.go b/vendor/golang.org/x/sys/unix/fcntl.go
new file mode 100644
index 0000000..4dc5348
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/fcntl.go
@@ -0,0 +1,36 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build dragonfly freebsd linux netbsd openbsd
+
+package unix
+
+import "unsafe"
+
+// fcntl64Syscall is usually SYS_FCNTL, but is overridden on 32-bit Linux
+// systems by fcntl_linux_32bit.go to be SYS_FCNTL64.
+var fcntl64Syscall uintptr = SYS_FCNTL
+
+func fcntl(fd int, cmd, arg int) (int, error) {
+ valptr, _, errno := Syscall(fcntl64Syscall, uintptr(fd), uintptr(cmd), uintptr(arg))
+ var err error
+ if errno != 0 {
+ err = errno
+ }
+ return int(valptr), err
+}
+
+// FcntlInt performs a fcntl syscall on fd with the provided command and argument.
+func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
+ return fcntl(int(fd), cmd, arg)
+}
+
+// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command.
+func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error {
+ _, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(unsafe.Pointer(lk)))
+ if errno == 0 {
+ return nil
+ }
+ return errno
+}
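
The vendored fcntl.go above exposes FcntlInt and FcntlFlock as thin wrappers over the raw fcntl syscall. A minimal usage sketch follows (not part of this diff; assuming Linux, where F_SETFD, FD_CLOEXEC, F_WRLCK and F_SETLKW are provided by the unix package): it sets close-on-exec on a file and then blocks until it holds an exclusive advisory lock.

    package main

    import (
    	"log"
    	"os"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	f, err := os.OpenFile("/tmp/example.lock", os.O_RDWR|os.O_CREATE, 0644)
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer f.Close()

    	// Mark the descriptor close-on-exec via F_SETFD.
    	if _, err := unix.FcntlInt(f.Fd(), unix.F_SETFD, unix.FD_CLOEXEC); err != nil {
    		log.Fatal(err)
    	}

    	// Take an exclusive advisory lock on the whole file;
    	// F_SETLKW blocks until the lock is granted.
    	lk := unix.Flock_t{
    		Type:   unix.F_WRLCK,
    		Whence: 0, // offsets relative to the start of the file
    		Start:  0,
    		Len:    0, // 0 means "to end of file"
    	}
    	if err := unix.FcntlFlock(f.Fd(), unix.F_SETLKW, &lk); err != nil {
    		log.Fatal(err)
    	}
    }
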
diff --git a/vendor/golang.org/x/sys/unix/fcntl_darwin.go b/vendor/golang.org/x/sys/unix/fcntl_darwin.go
new file mode 100644
index 0000000..5868a4a
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/fcntl_darwin.go
@@ -0,0 +1,18 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+import "unsafe"
+
+// FcntlInt performs a fcntl syscall on fd with the provided command and argument.
+func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
+ return fcntl(int(fd), cmd, arg)
+}
+
+// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command.
+func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error {
+ _, err := fcntl(int(fd), cmd, int(uintptr(unsafe.Pointer(lk))))
+ return err
+}
diff --git a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go
new file mode 100644
index 0000000..fc0e50e
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go
@@ -0,0 +1,13 @@
+// +build linux,386 linux,arm linux,mips linux,mipsle
+
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+func init() {
+ // On 32-bit Linux systems, the fcntl syscall that matches Go's
+ // Flock_t type is SYS_FCNTL64, not SYS_FCNTL.
+ fcntl64Syscall = SYS_FCNTL64
+}
diff --git a/vendor/golang.org/x/sys/unix/fdset.go b/vendor/golang.org/x/sys/unix/fdset.go
new file mode 100644
index 0000000..b27be0a
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/fdset.go
@@ -0,0 +1,29 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package unix
+
+// Set adds fd to the set fds.
+func (fds *FdSet) Set(fd int) {
+ fds.Bits[fd/NFDBITS] |= (1 << (uintptr(fd) % NFDBITS))
+}
+
+// Clear removes fd from the set fds.
+func (fds *FdSet) Clear(fd int) {
+ fds.Bits[fd/NFDBITS] &^= (1 << (uintptr(fd) % NFDBITS))
+}
+
+// IsSet returns whether fd is in the set fds.
+func (fds *FdSet) IsSet(fd int) bool {
+ return fds.Bits[fd/NFDBITS]&(1<<(uintptr(fd)%NFDBITS)) != 0
+}
+
+// Zero clears the set fds.
+func (fds *FdSet) Zero() {
+ for i := range fds.Bits {
+ fds.Bits[i] = 0
+ }
+}
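
The FdSet helpers above only manipulate the Bits bitmask so the structure can be handed to select(2). A hedged sketch of their intended use (not part of the diff; assuming Linux, where unix.Select returns the ready count): wait up to two seconds for standard input to become readable.

    package main

    import (
    	"fmt"
    	"log"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	fd := int(unix.Stdin)

    	var set unix.FdSet
    	set.Zero()  // explicit, although the zero value is already empty
    	set.Set(fd) // watch stdin for readability

    	tv := unix.Timeval{Sec: 2, Usec: 0}
    	n, err := unix.Select(fd+1, &set, nil, nil, &tv)
    	if err != nil {
    		log.Fatal(err)
    	}
    	if n > 0 && set.IsSet(fd) {
    		fmt.Println("stdin is readable")
    	} else {
    		fmt.Println("timed out")
    	}
    }
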
diff --git a/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/golang.org/x/sys/unix/gccgo.go
new file mode 100644
index 0000000..cd6f5a6
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/gccgo.go
@@ -0,0 +1,62 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build gccgo
+// +build !aix
+
+package unix
+
+import "syscall"
+
+// We can't use the gc-syntax .s files for gccgo. On the plus side
+// much of the functionality can be written directly in Go.
+
+//extern gccgoRealSyscallNoError
+func realSyscallNoError(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r uintptr)
+
+//extern gccgoRealSyscall
+func realSyscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r, errno uintptr)
+
+func SyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) {
+ syscall.Entersyscall()
+ r := realSyscallNoError(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
+ syscall.Exitsyscall()
+ return r, 0
+}
+
+func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ syscall.Entersyscall()
+ r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
+ syscall.Exitsyscall()
+ return r, 0, syscall.Errno(errno)
+}
+
+func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ syscall.Entersyscall()
+ r, errno := realSyscall(trap, a1, a2, a3, a4, a5, a6, 0, 0, 0)
+ syscall.Exitsyscall()
+ return r, 0, syscall.Errno(errno)
+}
+
+func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ syscall.Entersyscall()
+ r, errno := realSyscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9)
+ syscall.Exitsyscall()
+ return r, 0, syscall.Errno(errno)
+}
+
+func RawSyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) {
+ r := realSyscallNoError(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
+ return r, 0
+}
+
+func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
+ return r, 0, syscall.Errno(errno)
+}
+
+func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ r, errno := realSyscall(trap, a1, a2, a3, a4, a5, a6, 0, 0, 0)
+ return r, 0, syscall.Errno(errno)
+}
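
Because the gc-syntax assembly trampolines are unavailable under gccgo, the wrappers above route every call through the C helper while keeping the exported entry points unchanged, so callers look identical under gc and gccgo. A small illustrative sketch (not part of the diff; assuming Linux, where SYS_GETPID is defined by the unix package):

    package main

    import (
    	"fmt"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	// The same call compiles against either the gc or the gccgo backend.
    	pid, _, errno := unix.Syscall(unix.SYS_GETPID, 0, 0, 0)
    	if errno != 0 {
    		fmt.Println("syscall failed:", errno)
    		return
    	}
    	fmt.Println("pid:", pid)
    }
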
diff --git a/vendor/golang.org/x/sys/unix/gccgo_c.c b/vendor/golang.org/x/sys/unix/gccgo_c.c
new file mode 100644
index 0000000..c44730c
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/gccgo_c.c
@@ -0,0 +1,39 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build gccgo
+// +build !aix
+
+#include <errno.h>
+#include <stdint.h>
+#include <unistd.h>
+
+#define _STRINGIFY2_(x) #x
+#define _STRINGIFY_(x) _STRINGIFY2_(x)
+#define GOSYM_PREFIX _STRINGIFY_(__USER_LABEL_PREFIX__)
+
+// Call syscall from C code because the gccgo support for calling from
+// Go to C does not support varargs functions.
+
+struct ret {
+ uintptr_t r;
+ uintptr_t err;
+};
+
+struct ret
+gccgoRealSyscall(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
+{
+ struct ret r;
+
+ errno = 0;
+ r.r = syscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9);
+ r.err = errno;
+ return r;
+}
+
+uintptr_t
+gccgoRealSyscallNoError(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
+{
+ return syscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9);
+}
diff --git a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
new file mode 100644
index 0000000..251a977
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
@@ -0,0 +1,20 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build gccgo,linux,amd64
+
+package unix
+
+import "syscall"
+
+//extern gettimeofday
+func realGettimeofday(*Timeval, *byte) int32
+
+func gettimeofday(tv *Timeval) (err syscall.Errno) {
+ r := realGettimeofday(tv, nil)
+ if r < 0 {
+ return syscall.GetErrno()
+ }
+ return 0
+}
diff --git a/vendor/golang.org/x/sys/unix/ioctl.go b/vendor/golang.org/x/sys/unix/ioctl.go
new file mode 100644
index 0000000..3559e5d
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/ioctl.go
@@ -0,0 +1,65 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package unix
+
+import (
+ "runtime"
+ "unsafe"
+)
+
+// ioctl itself should not be exposed directly, but additional get/set
+// functions for specific types are permissible.
+
+// IoctlSetInt performs an ioctl operation which sets an integer value
+// on fd, using the specified request number.
+func IoctlSetInt(fd int, req uint, value int) error {
+ return ioctl(fd, req, uintptr(value))
+}
+
+// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument.
+//
+// To change fd's window size, the req argument should be TIOCSWINSZ.
+func IoctlSetWinsize(fd int, req uint, value *Winsize) error {
+ // TODO: if we get the chance, remove the req parameter and
+ // hardcode TIOCSWINSZ.
+ err := ioctl(fd, req, uintptr(unsafe.Pointer(value)))
+ runtime.KeepAlive(value)
+ return err
+}
+
+// IoctlSetTermios performs an ioctl on fd with a *Termios.
+//
+// The req value will usually be TCSETA or TIOCSETA.
+func IoctlSetTermios(fd int, req uint, value *Termios) error {
+ // TODO: if we get the chance, remove the req parameter.
+ err := ioctl(fd, req, uintptr(unsafe.Pointer(value)))
+ runtime.KeepAlive(value)
+ return err
+}
+
+// IoctlGetInt performs an ioctl operation which gets an integer value
+// from fd, using the specified request number.
+//
+// A few ioctl requests use the return value as an output parameter;
+// for those, IoctlRetInt should be used instead of this function.
+func IoctlGetInt(fd int, req uint) (int, error) {
+ var value int
+ err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+ return value, err
+}
+
+func IoctlGetWinsize(fd int, req uint) (*Winsize, error) {
+ var value Winsize
+ err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+ return &value, err
+}
+
+func IoctlGetTermios(fd int, req uint) (*Termios, error) {
+ var value Termios
+ err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+ return &value, err
+}
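
As the comment in ioctl.go notes, the raw ioctl stays unexported and these typed getters and setters are the public surface. A usage sketch (not part of this diff; assuming Linux, where TIOCGWINSZ and TCGETS are defined — the BSDs use TIOCGETA in place of TCGETS): query the terminal window size and current termios for standard output.

    package main

    import (
    	"fmt"
    	"log"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	fd := int(unix.Stdout)

    	// Window size via TIOCGWINSZ.
    	ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ)
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Printf("terminal is %d columns x %d rows\n", ws.Col, ws.Row)

    	// Current terminal settings via TCGETS (Linux).
    	tios, err := unix.IoctlGetTermios(fd, unix.TCGETS)
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Printf("echo enabled: %v\n", tios.Lflag&unix.ECHO != 0)
    }
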
diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh
new file mode 100644
index 0000000..fa0c69b
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/mkall.sh
@@ -0,0 +1,229 @@
+#!/usr/bin/env bash
+# Copyright 2009 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# This script runs or (given -n) prints suggested commands to generate files for
+# the Architecture/OS specified by the GOARCH and GOOS environment variables.
+# See README.md for more information about how the build system works.
+
+GOOSARCH="${GOOS}_${GOARCH}"
+
+# defaults
+mksyscall="go run mksyscall.go"
+mkerrors="./mkerrors.sh"
+zerrors="zerrors_$GOOSARCH.go"
+mksysctl=""
+zsysctl="zsysctl_$GOOSARCH.go"
+mksysnum=
+mktypes=
+mkasm=
+run="sh"
+cmd=""
+
+case "$1" in
+-syscalls)
+ for i in zsyscall*go
+ do
+ # Run the command line that appears in the first line
+ # of the generated file to regenerate it.
+ sed 1q $i | sed 's;^// ;;' | sh > _$i && gofmt < _$i > $i
+ rm _$i
+ done
+ exit 0
+ ;;
+-n)
+ run="cat"
+ cmd="echo"
+ shift
+esac
+
+case "$#" in
+0)
+ ;;
+*)
+ echo 'usage: mkall.sh [-n]' 1>&2
+ exit 2
+esac
+
+if [[ "$GOOS" = "linux" ]]; then
+ # Use the Docker-based build system
+ # Files generated through docker (use $cmd so you can Ctl-C the build or run)
+ $cmd docker build --tag generate:$GOOS $GOOS
+ $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")" && /bin/pwd):/build generate:$GOOS
+ exit
+fi
+
+GOOSARCH_in=syscall_$GOOSARCH.go
+case "$GOOSARCH" in
+_* | *_ | _)
+ echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2
+ exit 1
+ ;;
+aix_ppc)
+ mkerrors="$mkerrors -maix32"
+ mksyscall="go run mksyscall_aix_ppc.go -aix"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+aix_ppc64)
+ mkerrors="$mkerrors -maix64"
+ mksyscall="go run mksyscall_aix_ppc64.go -aix"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+darwin_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="go run mksyscall.go -l32"
+ mksysnum="go run mksysnum.go $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ mkasm="go run mkasm_darwin.go"
+ ;;
+darwin_amd64)
+ mkerrors="$mkerrors -m64"
+ mksysnum="go run mksysnum.go $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ mkasm="go run mkasm_darwin.go"
+ ;;
+darwin_arm)
+ mkerrors="$mkerrors"
+ mksyscall="go run mksyscall.go -l32"
+ mksysnum="go run mksysnum.go $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ mkasm="go run mkasm_darwin.go"
+ ;;
+darwin_arm64)
+ mkerrors="$mkerrors -m64"
+ mksysnum="go run mksysnum.go $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ mkasm="go run mkasm_darwin.go"
+ ;;
+dragonfly_amd64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -dragonfly"
+ mksysnum="go run mksysnum.go 'https://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+freebsd_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="go run mksyscall.go -l32"
+ mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+freebsd_amd64)
+ mkerrors="$mkerrors -m64"
+ mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+freebsd_arm)
+ mkerrors="$mkerrors"
+ mksyscall="go run mksyscall.go -l32 -arm"
+ mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'"
+ # Let the type of C char be signed for making the bare syscall
+ # API consistent across platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+freebsd_arm64)
+ mkerrors="$mkerrors -m64"
+ mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+netbsd_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="go run mksyscall.go -l32 -netbsd"
+ mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+netbsd_amd64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -netbsd"
+ mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+netbsd_arm)
+ mkerrors="$mkerrors"
+ mksyscall="go run mksyscall.go -l32 -netbsd -arm"
+ mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'"
+ # Let the type of C char be signed for making the bare syscall
+ # API consistent across platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+netbsd_arm64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -netbsd"
+ mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+openbsd_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="go run mksyscall.go -l32 -openbsd"
+ mksysctl="go run mksysctl_openbsd.go"
+ mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+openbsd_amd64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -openbsd"
+ mksysctl="go run mksysctl_openbsd.go"
+ mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+openbsd_arm)
+ mkerrors="$mkerrors"
+ mksyscall="go run mksyscall.go -l32 -openbsd -arm"
+ mksysctl="go run mksysctl_openbsd.go"
+ mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
+ # Let the type of C char be signed for making the bare syscall
+ # API consistent across platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+openbsd_arm64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -openbsd"
+ mksysctl="go run mksysctl_openbsd.go"
+ mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
+ # Let the type of C char be signed for making the bare syscall
+ # API consistent across platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+solaris_amd64)
+ mksyscall="go run mksyscall_solaris.go"
+ mkerrors="$mkerrors -m64"
+ mksysnum=
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+*)
+ echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2
+ exit 1
+ ;;
+esac
+
+(
+ if [ -n "$mkerrors" ]; then echo "$mkerrors |gofmt >$zerrors"; fi
+ case "$GOOS" in
+ *)
+ syscall_goos="syscall_$GOOS.go"
+ case "$GOOS" in
+ darwin | dragonfly | freebsd | netbsd | openbsd)
+ syscall_goos="syscall_bsd.go $syscall_goos"
+ ;;
+ esac
+ if [ -n "$mksyscall" ]; then
+ if [ "$GOOSARCH" == "aix_ppc64" ]; then
+ # aix/ppc64 script generates files instead of writing to stdin.
+ echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in && gofmt -w zsyscall_$GOOSARCH.go && gofmt -w zsyscall_"$GOOSARCH"_gccgo.go && gofmt -w zsyscall_"$GOOSARCH"_gc.go " ;
+ elif [ "$GOOS" == "darwin" ]; then
+ # pre-1.12, direct syscalls
+ echo "$mksyscall -tags $GOOS,$GOARCH,!go1.12 $syscall_goos syscall_darwin_${GOARCH}.1_11.go $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.1_11.go";
+ # 1.12 and later, syscalls via libSystem
+ echo "$mksyscall -tags $GOOS,$GOARCH,go1.12 $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go";
+ # 1.13 and later, syscalls via libSystem (including syscallPtr)
+ echo "$mksyscall -tags $GOOS,$GOARCH,go1.13 syscall_darwin.1_13.go |gofmt >zsyscall_$GOOSARCH.1_13.go";
+ else
+ echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go";
+ fi
+ fi
+ esac
+ if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi
+ if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi
+ if [ -n "$mktypes" ]; then echo "$mktypes types_$GOOS.go | go run mkpost.go > ztypes_$GOOSARCH.go"; fi
+ if [ -n "$mkasm" ]; then echo "$mkasm $GOARCH"; fi
+) | $run
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
new file mode 100644
index 0000000..6ffac92
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -0,0 +1,692 @@
+#!/usr/bin/env bash
+# Copyright 2009 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# Generate Go code listing errors and other #defined constant
+# values (ENAMETOOLONG etc.), by asking the preprocessor
+# about the definitions.
+
+unset LANG
+export LC_ALL=C
+export LC_CTYPE=C
+
+if test -z "$GOARCH" -o -z "$GOOS"; then
+ echo 1>&2 "GOARCH or GOOS not defined in environment"
+ exit 1
+fi
+
+# Check that we are using the new build system if we should
+if [[ "$GOOS" = "linux" ]] && [[ "$GOLANG_SYS_BUILD" != "docker" ]]; then
+ echo 1>&2 "In the Docker based build system, mkerrors should not be called directly."
+ echo 1>&2 "See README.md"
+ exit 1
+fi
+
+if [[ "$GOOS" = "aix" ]]; then
+ CC=${CC:-gcc}
+else
+ CC=${CC:-cc}
+fi
+
+if [[ "$GOOS" = "solaris" ]]; then
+ # Assumes GNU versions of utilities in PATH.
+ export PATH=/usr/gnu/bin:$PATH
+fi
+
+uname=$(uname)
+
+includes_AIX='
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define AF_LOCAL AF_UNIX
+'
+
+includes_Darwin='
+#define _DARWIN_C_SOURCE
+#define KERNEL
+#define _DARWIN_USE_64_BIT_INODE
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+'
+
+includes_DragonFly='
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+'
+
+includes_FreeBSD='
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include