Upgrade dependencies

commit 44faf5fa5d
parent 7f955dd0ad
@@ -1,4 +1,4 @@
ARG OPENCV_VERSION=v4.4.0
ARG OPENCV_VERSION=v4.5.1

FROM golang:alpine as gobuilder
go.mod (12 changed lines)

@@ -3,10 +3,10 @@ module github.com/cyrilix/robocar-camera

go 1.15

require (
github.com/cyrilix/robocar-base v0.1.1
github.com/cyrilix/robocar-protobuf/go v1.0.1
github.com/eclipse/paho.mqtt.golang v1.2.0
github.com/golang/protobuf v1.4.2
github.com/sirupsen/logrus v1.6.0
gocv.io/x/gocv v0.24.0
github.com/cyrilix/robocar-base v0.1.2
github.com/cyrilix/robocar-protobuf/go v1.0.2
github.com/eclipse/paho.mqtt.golang v1.3.1
github.com/golang/protobuf v1.4.3
github.com/sirupsen/logrus v1.7.0
gocv.io/x/gocv v0.26.0
)
go.sum (40 changed lines)

@@ -8,25 +8,25 @@ github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXn
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc h1:TP+534wVlf61smEIq1nwLLAjQVEK2EADoW3CX9AuT+8=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/cyrilix/robocar-base v0.1.1 h1:zpD9oeqv4g6bUgOP/+KTuEcCinuEJ25yPzLkQJBuZPU=
github.com/cyrilix/robocar-base v0.1.1/go.mod h1:8z3uJpUJoQPDLUl66UD7aadYkNOsy2GipPN+OXPl4nk=
github.com/cyrilix/robocar-protobuf/go v1.0.1 h1:VsnffnfAIzw5IMh1/+VFdKMYUkBwOuCKg4VUhf/ehxs=
github.com/cyrilix/robocar-protobuf/go v1.0.1/go.mod h1:PwhcLPl3gOdb0IytATxVHQkadt4/KotLKCVxvqg9rpQ=
github.com/cyrilix/robocar-base v0.1.2 h1:bhT5ohhviodCOd5usy013ctwu7ko+IfyDXnYHg7lJ8I=
github.com/cyrilix/robocar-base v0.1.2/go.mod h1:G2SiYNUDAIv+65XWpiHnCXj7Jz2V6pOBLaIrcasGkww=
github.com/cyrilix/robocar-protobuf/go v1.0.2 h1:eWwu7T07uvABh74bWOJa77alUs6VaWNEPd7Zezua2Cs=
github.com/cyrilix/robocar-protobuf/go v1.0.2/go.mod h1:xj7H/a7qpvXgmW1983Fjd143Mz9Yt0C6RCxvB8M6pEM=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible h1:dvc1KSkIYTVjZgHf/CTC2diTYC8PzhaA5sFISRfNVrE=
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v0.7.3-0.20190506211059-b20a14b54661 h1:ZuxGvIvF01nfc/G9RJ5Q7Va1zQE2WJyG18Zv3DqCEf4=
github.com/docker/docker v0.7.3-0.20190506211059-b20a14b54661/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v17.12.0-ce-rc1.0.20200916142827-bd33bbf0497b+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk=
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/eclipse/paho.mqtt.golang v1.2.0 h1:1F8mhG9+aO5/xpdtFkW4SxOJB67ukuDC3t2y2qayIX0=
github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
github.com/eclipse/paho.mqtt.golang v1.3.1 h1:6F5FYb1hxVSZS+p0ji5xBQamc5ltOolTYRy5R15uVmI=
github.com/eclipse/paho.mqtt.golang v1.3.1/go.mod h1:eTzb4gxwwyWpqBUHGQZ4ABAV7+Jgm1PklsYT/eo8Hcc=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
@@ -35,7 +35,7 @@ github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvSc
github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI=
github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -49,8 +49,8 @@ github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:x
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -58,19 +58,19 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -99,8 +99,7 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -110,11 +109,11 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/testcontainers/testcontainers-go v0.7.0/go.mod h1:4dloDPrC94+8ebXA+Iei3Jy+gxF6uHQssJkB3mlP9Rg=
github.com/testcontainers/testcontainers-go v0.9.0/go.mod h1:b22BFXhRbg4PJmeMVWh6ftqjyZHgiIl3w274e9r3C2E=
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
gocv.io/x/gocv v0.24.0 h1:xtm5AnFNUtFvSmU+R/CgX7FguL7EDGEubhDdviX2rPY=
gocv.io/x/gocv v0.24.0/go.mod h1:Rar2PS6DV+T4FL+PM535EImD/h13hGVaHhnCu1xarBs=
gocv.io/x/gocv v0.26.0 h1:1azNvYEM245YN1bdw/WdX5YJzLg3Sr4STX0MqdWBIXM=
gocv.io/x/gocv v0.26.0/go.mod h1:7Ju5KbPo+R85evmlhhKPVMwXtgDRNX/PtfVfbToSrLU=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -123,6 +122,7 @@ golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200904194848-62affa334b73 h1:MXfv8rhZWmFeqX3GNZRsd6vOLoaCHjYEX3qkRo3YBUA=
golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -133,7 +133,7 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
vendor/github.com/cyrilix/robocar-base/service/part.go (1 changed line, generated, vendored)

@@ -30,4 +30,3 @@ type Part interface {
Start() error
Stop()
}
vendor/github.com/cyrilix/robocar-protobuf/go/events/events.pb.go (7 changed lines, generated, vendored)

@@ -1,13 +1,12 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0-devel
// protoc v3.12.3
// protoc v3.12.4
// source: events/events.proto

package events

import (
proto "github.com/golang/protobuf/proto"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
@@ -22,10 +21,6 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4

type DriveMode int32

const (
vendor/github.com/eclipse/paho.mqtt.golang/LICENSE (91 changed lines, generated, vendored)

@@ -1,87 +1,20 @@
Eclipse Public License - v 1.0
This project is dual licensed under the Eclipse Public License 1.0 and the
Eclipse Distribution License 1.0 as described in the epl-v10 and edl-v10 files.

THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
The EDL is copied below in order to pass the pkg.go.dev license check (https://pkg.go.dev/license-policy).

1. DEFINITIONS
****
Eclipse Distribution License - v 1.0

"Contribution" means:
Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors.

a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and
All rights reserved.

b) in the case of each subsequent Contributor:
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

i) changes to the Program, and
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.

ii) additions to the Program;
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program.

"Contributor" means any person or entity that distributes the Program.

"Licensed Patents" mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program.

"Program" means the Contributions distributed in accordance with this Agreement.

"Recipient" means anyone who receives the Program under this Agreement, including all Contributors.

2. GRANT OF RIGHTS

a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form.

b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder.

c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program.

d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement.

3. REQUIREMENTS

A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that:

a) it complies with the terms and conditions of this Agreement; and

b) its license agreement:

i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose;

ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits;

iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and

iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange.

When the Program is made available in source code form:

a) it must be made available under this Agreement; and

b) a copy of this Agreement must be included with each copy of the Program.

Contributors may not remove or alter any copyright notices contained within the Program.

Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution.

4. COMMERCIAL DISTRIBUTION

Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense.

For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages.

5. NO WARRANTY

EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement , including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations.

6. DISCLAIMER OF LIABILITY

EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

7. GENERAL

If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable.

If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed.

All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive.

Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved.

This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation.
vendor/github.com/eclipse/paho.mqtt.golang/README.md (138 changed lines, generated, vendored)

@@ -1,32 +1,47 @@
[](https://godoc.org/github.com/eclipse/paho.mqtt.golang)
[](https://pkg.go.dev/github.com/eclipse/paho.mqtt.golang)
[](https://goreportcard.com/report/github.com/eclipse/paho.mqtt.golang)

Eclipse Paho MQTT Go client
===========================

This repository contains the source code for the [Eclipse Paho](http://eclipse.org/paho) MQTT Go client library.
This repository contains the source code for the [Eclipse Paho](https://eclipse.org/paho) MQTT 3.1/3.11 Go client library.

This code builds a library which enable applications to connect to an [MQTT](http://mqtt.org) broker to publish messages, and to subscribe to topics and receive published messages.
This code builds a library which enable applications to connect to an [MQTT](https://mqtt.org) broker to publish
messages, and to subscribe to topics and receive published messages.

This library supports a fully asynchronous mode of operation.

A client supporting MQTT V5 is [also available](https://github.com/eclipse/paho.golang).

Installation and Build
----------------------

This client is designed to work with the standard Go tools, so installation is as easy as:
The process depends upon whether you are using [modules](https://golang.org/ref/mod) (recommended) or `GOPATH`.

#### Modules

If you are using [modules](https://blog.golang.org/using-go-modules) then `import "github.com/eclipse/paho.mqtt.golang"`
and start using it. The necessary packages will be download automatically when you run `go build`.

Note that the latest release will be downloaded and changes may have been made since the release. If you have
encountered an issue, or wish to try the latest code for another reason, then run
`go get github.com/eclipse/paho.mqtt.golang@master` to get the latest commit.

#### GOPATH

Installation is as easy as:

```
go get github.com/eclipse/paho.mqtt.golang
```

The client depends on Google's [websockets](https://godoc.org/golang.org/x/net/websocket) and [proxy](https://godoc.org/golang.org/x/net/proxy) package,
also easily installed with the commands:
The client depends on Google's [proxy](https://godoc.org/golang.org/x/net/proxy) package and the
[websockets](https://godoc.org/github.com/gorilla/websocket) package, also easily installed with the commands:

```
go get golang.org/x/net/websocket
go get github.com/gorilla/websocket
go get golang.org/x/net/proxy
```

@@ -35,27 +50,115 @@ Usage and API
-------------

Detailed API documentation is available by using to godoc tool, or can be browsed online
using the [godoc.org](http://godoc.org/github.com/eclipse/paho.mqtt.golang) service.

Make use of the library by importing it in your Go client source code. For example,
```
import "github.com/eclipse/paho.mqtt.golang"
```
using the [pkg.go.dev](https://pkg.go.dev/github.com/eclipse/paho.mqtt.golang) service.

Samples are available in the `cmd` directory for reference.
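To make the basic usage outlined above concrete, here is a minimal sketch (not taken from the upstream README or the `cmd` samples; the broker address, client ID and topic are placeholder assumptions):

```go
package main

import (
	"fmt"
	"time"

	mqtt "github.com/eclipse/paho.mqtt.golang"
)

func main() {
	// Placeholder broker address and client ID, for illustration only.
	opts := mqtt.NewClientOptions().
		AddBroker("tcp://localhost:1883").
		SetClientID("example-client")

	client := mqtt.NewClient(opts)
	if token := client.Connect(); token.Wait() && token.Error() != nil {
		panic(token.Error())
	}

	// Subscribe with a handler; handlers must not block (see "Common Problems" below).
	client.Subscribe("example/topic", 0, func(c mqtt.Client, m mqtt.Message) {
		fmt.Printf("received %s on %s\n", m.Payload(), m.Topic())
	})

	// Publish a message and wait for the delivery token to complete.
	t := client.Publish("example/topic", 0, false, "hello")
	t.Wait()

	time.Sleep(time.Second)
	client.Disconnect(250)
}
```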
Note:

Runtime tracing
The library also supports using MQTT over websockets by using the `ws://` (unsecure) or `wss://` (secure) prefix in the
URI. If the client is running behind a corporate http/https proxy then the following environment variables `HTTP_PROXY`,
`HTTPS_PROXY` and `NO_PROXY` are taken into account when establishing the connection.
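Under those assumptions, switching the example above to websockets is only a matter of the broker URI passed to the client options; the host, port and `/mqtt` path below are placeholders, and whether a path is required depends on the broker:

```go
// Assumed host, port and path; many brokers expose websockets on a dedicated port and path.
opts := mqtt.NewClientOptions().AddBroker("ws://broker.example.com:8080/mqtt")
// For TLS-secured websockets use the wss:// prefix instead:
// opts := mqtt.NewClientOptions().AddBroker("wss://broker.example.com:8443/mqtt")
```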
Troubleshooting
---------------

Tracing is enabled by assigning logs (from the Go log package) to the logging endpoints, ERROR, CRITICAL, WARN and DEBUG
If you are new to MQTT and your application is not working as expected reviewing the
[MQTT specification](https://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html), which this library implements,
is a good first step. [MQTT.org](https://mqtt.org) has some [good resources](https://mqtt.org/getting-started/) that answer many
common questions.

### Error Handling

The asynchronous nature of this library makes it easy to forget to check for errors. Consider using a go routine to
log these:

```go
t := client.Publish("topic", qos, retained, msg)
go func() {
_ = t.Wait() // Can also use '<-t.Done()' in releases > 1.2.0
if t.Error() != nil {
log.Error(t.Error()) // Use your preferred logging technique (or just fmt.Printf)
}
}()
```

### Logging

If you are encountering issues then enabling logging, both within this library and on your broker, is a good way to
begin troubleshooting. This library can produce various levels of log by assigning the logging endpoints, ERROR,
CRITICAL, WARN and DEBUG. For example:

```go
func main() {
mqtt.ERROR = log.New(os.Stdout, "[ERROR] ", 0)
mqtt.CRITICAL = log.New(os.Stdout, "[CRIT] ", 0)
mqtt.WARN = log.New(os.Stdout, "[WARN] ", 0)
mqtt.DEBUG = log.New(os.Stdout, "[DEBUG] ", 0)

// Connect, Subscribe, Publish etc..
}
```

### Common Problems

* Seemingly random disconnections may be caused by another client connecting to the broker with the same client
identifier; this is as per the [spec](https://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc384800405).
* A `MessageHandler` (called when a new message is received) must not block. If you wish to perform a long-running task,
or publish a message, then please use a go routine (blocking in the handler is a common cause of unexpected `pingresp
not received, disconnecting` errors).
* When QOS1+ subscriptions have been created previously and you connect with `CleanSession` set to false it is possible that the broker will deliver retained
messages before `Subscribe` can be called. To process these messages either configure a handler with `AddRoute` or
set a `DefaultPublishHandler`.
* Loss of network connectivity may not be detected immediately. If this is an issue then consider setting
`ClientOptions.KeepAlive` (sends regular messages to check the link is active).
* Brokers offer many configuration options; some settings may lead to unexpected results. If using Mosquitto check
`max_inflight_messages`, `max_queued_messages`, `persistence` (the defaults may not be what you expect).
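A sketch combining several of the suggestions in the list above (the keep-alive interval is an arbitrary choice, and `process` is a hypothetical application function, not part of the library):

```go
opts := mqtt.NewClientOptions().
	AddBroker("tcp://localhost:1883"). // placeholder broker address
	SetClientID("example-client").
	SetCleanSession(false).
	// Detect lost connectivity sooner by sending regular keep-alive pings.
	SetKeepAlive(30 * time.Second).
	// Handle messages that may arrive before Subscribe is called (e.g. queued QOS1+ messages).
	SetDefaultPublishHandler(func(c mqtt.Client, m mqtt.Message) {
		// Hand the work off to a goroutine so the handler itself never blocks.
		go process(m) // process is a hypothetical application function
	})
```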
Reporting bugs
--------------

Please report bugs by raising issues for this project in github https://github.com/eclipse/paho.mqtt.golang/issues
Please report bugs by raising issues for this project in github https://github.com/eclipse/paho.mqtt.golang/issues

*A limited number of contributors monitor the issues section so if you have a general question please consider the
resources in the [more information](#more-information) section (your question will be seen by more people, and you are
likely to receive an answer more quickly).*

We welcome bug reports, but it is important they are actionable. A significant percentage of issues reported are not
resolved due to a lack of information. If we cannot replicate the problem then it is unlikely we will be able to fix it.
The information required will vary from issue to issue but consider including:

* Which version of the package you are using (tag or commit - this should be in your go.mod file)
* A [Minimal, Reproducible Example](https://stackoverflow.com/help/minimal-reproducible-example). Providing an example
is the best way to demonstrate the issue you are facing; it is important this includes all relevant information
(including broker configuration). Docker (see `cmd/docker`) makes it relatively simple to provide a working end-to-end
example.
* A full, clear, description of the problem (detail what you are expecting vs what actually happens).
* Details of your attempts to resolve the issue (what have you tried, what worked, what did not).
* [Application Logs](#logging) covering the period the issue occurred. Unless you have isolated the root cause of the issue please include a link to a full log (including data from well before the problem arose).
* Broker Logs covering the period the issue occurred.

It is important to remember that this library does not stand alone; it communicates with a broker and any issues you are
seeing may be due to:

* Bugs in your code.
* Bugs in this library.
* The broker configuration.
* Bugs in the broker.
* Issues with whatever you are communicating with.

When submitting an issue, please ensure that you provide sufficient details to enable us to eliminate causes outside of
this library.

Contributing
------------

We welcome pull requests but before your contribution can be accepted by the project, you need to create and
electronically sign the Eclipse Contributor Agreement (ECA) and sign off on the Eclipse Foundation Certificate of Origin.

More information is available in the
[Eclipse Development Resources](http://wiki.eclipse.org/Development_Resources/Contributing_via_Git); please take special
note of the requirement that the commit record contain a "Signed-off-by" entry.

More information
----------------

@@ -65,3 +168,6 @@ Discussion of the Paho clients takes place on the [Eclipse paho-dev mailing list
General questions about the MQTT protocol are discussed in the [MQTT Google Group](https://groups.google.com/forum/?hl=en-US&fromgroups#!forum/mqtt).

There is much more information available via the [MQTT community site](http://mqtt.org).

[Stack Overflow](https://stackoverflow.com/questions/tagged/mqtt+go) has a range questions covering a range of common
issues (both relating to use of this library and MQTT in general).

vendor/github.com/eclipse/paho.mqtt.golang/client.go (938 changed lines, generated, vendored)
File diff suppressed because it is too large.
vendor/github.com/eclipse/paho.mqtt.golang/components.go (1 changed line, generated, vendored)

@@ -28,4 +28,5 @@ const (
TST component = "[test] "
STA component = "[state] "
ERR component = "[error] "
ROU component = "[router] "
)
vendor/github.com/eclipse/paho.mqtt.golang/filestore.go (14 changed lines, generated, vendored)

@@ -101,7 +101,7 @@ func (store *FileStore) Get(key string) packets.ControlPacket {
store.RLock()
defer store.RUnlock()
if !store.opened {
ERROR.Println(STR, "Trying to use file store, but not open")
ERROR.Println(STR, "trying to use file store, but not open")
return nil
}
filepath := fullpath(store.directory, key)
@@ -117,14 +117,16 @@ func (store *FileStore) Get(key string) packets.ControlPacket {
if rerr != nil {
newpath := corruptpath(store.directory, key)
WARN.Println(STR, "corrupted file detected:", rerr.Error(), "archived at:", newpath)
os.Rename(filepath, newpath)
if err := os.Rename(filepath, newpath); err != nil {
ERROR.Println(STR, err)
}
return nil
}
return msg
}

// All will provide a list of all of the keys associated with messages
// currenly residing in the FileStore.
// currently residing in the FileStore.
func (store *FileStore) All() []string {
store.RLock()
defer store.RUnlock()
@@ -156,7 +158,7 @@ func (store *FileStore) all() []string {
var files fileInfos

if !store.opened {
ERROR.Println(STR, "Trying to use file store, but not open")
ERROR.Println(STR, "trying to use file store, but not open")
return nil
}

@@ -166,7 +168,7 @@ func (store *FileStore) all() []string {
for _, f := range files {
DEBUG.Println(STR, "file in All():", f.Name())
name := f.Name()
if name[len(name)-4:len(name)] != msgExt {
if name[len(name)-4:] != msgExt {
DEBUG.Println(STR, "skipping file, doesn't have right extension: ", name)
continue
}
@@ -179,7 +181,7 @@ func (store *FileStore) all() []string {
// lockless
func (store *FileStore) del(key string) {
if !store.opened {
ERROR.Println(STR, "Trying to use file store, but not open")
ERROR.Println(STR, "trying to use file store, but not open")
return
}
DEBUG.Println(STR, "store del filepath:", store.directory)
vendor/github.com/eclipse/paho.mqtt.golang/go.mod (8 changed lines, generated, vendored, new file)

@@ -0,0 +1,8 @@
module github.com/eclipse/paho.mqtt.golang

go 1.14

require (
github.com/gorilla/websocket v1.4.2
golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0
)
vendor/github.com/eclipse/paho.mqtt.golang/go.sum (8 changed lines, generated, vendored, new file)

@@ -0,0 +1,8 @@
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0 h1:Jcxah/M+oLZ/R4/z5RzfPzGbPXnVDPkEDtf2JnuxN+U=
golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
vendor/github.com/eclipse/paho.mqtt.golang/memstore.go (2 changed lines, generated, vendored)

@@ -88,7 +88,7 @@ func (store *MemoryStore) All() []string {
ERROR.Println(STR, "Trying to use memory store, but not open")
return nil
}
keys := []string{}
var keys []string
for k := range store.messages {
keys = append(keys, k)
}
vendor/github.com/eclipse/paho.mqtt.golang/message.go (4 changed lines, generated, vendored)

@@ -16,9 +16,9 @@ package mqtt

import (
"net/url"
"sync"

"github.com/eclipse/paho.mqtt.golang/packets"
"sync"
)

// Message defines the externals that a message implementation must support
@@ -114,7 +114,7 @@ func newConnectMsgFromOptions(options *ClientOptions, broker *url.URL) *packets.
if username != "" {
m.UsernameFlag = true
m.Username = username
//mustn't have password without user as well
// mustn't have password without user as well
if password != "" {
m.PasswordFlag = true
m.Password = []byte(password)
vendor/github.com/eclipse/paho.mqtt.golang/messageids.go (69 changed lines, generated, vendored)

@@ -28,6 +28,8 @@ type MId uint16
type messageIds struct {
sync.RWMutex
index map[uint16]tokenCompletor

lastIssuedID uint16 // The most recently issued ID. Used so we cycle through ids rather than immediately reusing them (can make debugging easier)
}

const (
@@ -40,11 +42,11 @@ func (mids *messageIds) cleanUp() {
for _, token := range mids.index {
switch token.(type) {
case *PublishToken:
token.setError(fmt.Errorf("Connection lost before Publish completed"))
token.setError(fmt.Errorf("connection lost before Publish completed"))
case *SubscribeToken:
token.setError(fmt.Errorf("Connection lost before Subscribe completed"))
token.setError(fmt.Errorf("connection lost before Subscribe completed"))
case *UnsubscribeToken:
token.setError(fmt.Errorf("Connection lost before Unsubscribe completed"))
token.setError(fmt.Errorf("connection lost before Unsubscribe completed"))
case nil:
continue
}
@@ -71,18 +73,33 @@ func (mids *messageIds) claimID(token tokenCompletor, id uint16) {
old.flowComplete()
mids.index[id] = token
}
if id > mids.lastIssuedID {
mids.lastIssuedID = id
}
}

// getID will return an available id or 0 if none available
// The id will generally be the previous id + 1 (because this makes tracing messages a bit simpler)
func (mids *messageIds) getID(t tokenCompletor) uint16 {
mids.Lock()
defer mids.Unlock()
for i := midMin; i < midMax; i++ {
i := mids.lastIssuedID // note: the only situation where lastIssuedID is 0 the map will be empty
looped := false // uint16 will loop from 65535->0
for {
i++
if i == 0 { // skip 0 because its not a valid id (Control Packets MUST contain a non-zero 16-bit Packet Identifier [MQTT-2.3.1-1])
i++
looped = true
}
if _, ok := mids.index[i]; !ok {
mids.index[i] = t
mids.lastIssuedID = i
return i
}
if (looped && i == mids.lastIssuedID) || (mids.lastIssuedID == 0 && i == midMax) { // lastIssuedID will be 0 at startup
return 0 // no free ids
}
}
return 0
}

func (mids *messageIds) getToken(id uint16) tokenCompletor {
@@ -98,14 +115,23 @@ type DummyToken struct {
id uint16
}

// Wait implements the Token Wait method.
func (d *DummyToken) Wait() bool {
return true
}

// WaitTimeout implements the Token WaitTimeout method.
func (d *DummyToken) WaitTimeout(t time.Duration) bool {
return true
}

// Done implements the Token Done method.
func (d *DummyToken) Done() <-chan struct{} {
ch := make(chan struct{})
close(ch)
return ch
}

func (d *DummyToken) flowComplete() {
ERROR.Printf("A lookup for token %d returned nil\n", d.id)
}
@@ -115,3 +141,36 @@ func (d *DummyToken) Error() error {
}

func (d *DummyToken) setError(e error) {}

// PlaceHolderToken does nothing and was implemented to allow a messageid to be reserved
// it differs from DummyToken in that calling flowComplete does not generate an error (it
// is expected that flowComplete will be called when the token is overwritten with a real token)
type PlaceHolderToken struct {
id uint16
}

// Wait implements the Token Wait method.
func (p *PlaceHolderToken) Wait() bool {
return true
}

// WaitTimeout implements the Token WaitTimeout method.
func (p *PlaceHolderToken) WaitTimeout(t time.Duration) bool {
return true
}

// Done implements the Token Done method.
func (p *PlaceHolderToken) Done() <-chan struct{} {
ch := make(chan struct{})
close(ch)
return ch
}

func (p *PlaceHolderToken) flowComplete() {
}

func (p *PlaceHolderToken) Error() error {
return nil
}

func (p *PlaceHolderToken) setError(e error) {}
633
vendor/github.com/eclipse/paho.mqtt.golang/net.go
generated
vendored
633
vendor/github.com/eclipse/paho.mqtt.golang/net.go
generated
vendored
@ -15,341 +15,450 @@
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"reflect"
|
||||
"sync/atomic"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/eclipse/paho.mqtt.golang/packets"
|
||||
"golang.org/x/net/proxy"
|
||||
"golang.org/x/net/websocket"
|
||||
)
|
||||
|
||||
func signalError(c chan<- error, err error) {
|
||||
select {
|
||||
case c <- err:
|
||||
const closedNetConnErrorText = "use of closed network connection" // error string for closed conn (https://golang.org/src/net/error_test.go)
|
||||
|
||||
// ConnectMQTT takes a connected net.Conn and performs the initial MQTT handshake. Parameters are:
|
||||
// conn - Connected net.Conn
|
||||
// cm - Connect Packet with everything other than the protocol name/version populated (historical reasons)
|
||||
// protocolVersion - The protocol version to attempt to connect with
|
||||
//
|
||||
// Note that, for backward compatibility, ConnectMQTT() suppresses the actual connection error (compare to connectMQTT()).
|
||||
func ConnectMQTT(conn net.Conn, cm *packets.ConnectPacket, protocolVersion uint) (byte, bool) {
|
||||
rc, sessionPresent, _ := connectMQTT(conn, cm, protocolVersion)
|
||||
return rc, sessionPresent
|
||||
}
|
||||
|
||||
func connectMQTT(conn io.ReadWriter, cm *packets.ConnectPacket, protocolVersion uint) (byte, bool, error) {
|
||||
switch protocolVersion {
|
||||
case 3:
|
||||
DEBUG.Println(CLI, "Using MQTT 3.1 protocol")
|
||||
cm.ProtocolName = "MQIsdp"
|
||||
cm.ProtocolVersion = 3
|
||||
case 0x83:
|
||||
DEBUG.Println(CLI, "Using MQTT 3.1b protocol")
|
||||
cm.ProtocolName = "MQIsdp"
|
||||
cm.ProtocolVersion = 0x83
|
||||
case 0x84:
|
||||
DEBUG.Println(CLI, "Using MQTT 3.1.1b protocol")
|
||||
cm.ProtocolName = "MQTT"
|
||||
cm.ProtocolVersion = 0x84
|
||||
default:
|
||||
DEBUG.Println(CLI, "Using MQTT 3.1.1 protocol")
|
||||
cm.ProtocolName = "MQTT"
|
||||
cm.ProtocolVersion = 4
|
||||
}
|
||||
|
||||
if err := cm.Write(conn); err != nil {
|
||||
ERROR.Println(CLI, err)
|
||||
return packets.ErrNetworkError, false, err
|
||||
}
|
||||
|
||||
rc, sessionPresent, err := verifyCONNACK(conn)
|
||||
return rc, sessionPresent, err
|
||||
}
|
||||
|
||||
func openConnection(uri *url.URL, tlsc *tls.Config, timeout time.Duration, headers http.Header) (net.Conn, error) {
|
||||
switch uri.Scheme {
|
||||
case "ws":
|
||||
config, _ := websocket.NewConfig(uri.String(), fmt.Sprintf("http://%s", uri.Host))
|
||||
config.Protocol = []string{"mqtt"}
|
||||
config.Header = headers
|
||||
config.Dialer = &net.Dialer{Timeout: timeout}
|
||||
conn, err := websocket.DialConfig(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
conn.PayloadType = websocket.BinaryFrame
|
||||
return conn, err
|
||||
case "wss":
|
||||
config, _ := websocket.NewConfig(uri.String(), fmt.Sprintf("https://%s", uri.Host))
|
||||
config.Protocol = []string{"mqtt"}
|
||||
config.TlsConfig = tlsc
|
||||
config.Header = headers
|
||||
config.Dialer = &net.Dialer{Timeout: timeout}
|
||||
conn, err := websocket.DialConfig(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
conn.PayloadType = websocket.BinaryFrame
|
||||
return conn, err
|
||||
case "tcp":
|
||||
allProxy := os.Getenv("all_proxy")
|
||||
if len(allProxy) == 0 {
|
||||
conn, err := net.DialTimeout("tcp", uri.Host, timeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
proxyDialer := proxy.FromEnvironment()
|
||||
// This function is only used for receiving a connack
|
||||
// when the connection is first started.
|
||||
// This prevents receiving incoming data while resume
|
||||
// is in progress if clean session is false.
|
||||
func verifyCONNACK(conn io.Reader) (byte, bool, error) {
|
||||
DEBUG.Println(NET, "connect started")
|
||||
|
||||
conn, err := proxyDialer.Dial("tcp", uri.Host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
case "unix":
|
||||
conn, err := net.DialTimeout("unix", uri.Host, timeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
case "ssl":
|
||||
fallthrough
|
||||
case "tls":
|
||||
fallthrough
|
||||
case "tcps":
|
||||
allProxy := os.Getenv("all_proxy")
|
||||
if len(allProxy) == 0 {
|
||||
conn, err := tls.DialWithDialer(&net.Dialer{Timeout: timeout}, "tcp", uri.Host, tlsc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
proxyDialer := proxy.FromEnvironment()
|
||||
|
||||
conn, err := proxyDialer.Dial("tcp", uri.Host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tlsConn := tls.Client(conn, tlsc)
|
||||
|
||||
err = tlsConn.Handshake()
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return tlsConn, nil
|
||||
ca, err := packets.ReadPacket(conn)
|
||||
if err != nil {
|
||||
ERROR.Println(NET, "connect got error", err)
|
||||
return packets.ErrNetworkError, false, err
|
||||
}
|
||||
return nil, errors.New("Unknown protocol")
|
||||
|
||||
if ca == nil {
|
||||
ERROR.Println(NET, "received nil packet")
|
||||
return packets.ErrNetworkError, false, errors.New("nil CONNACK packet")
|
||||
}
|
||||
|
||||
msg, ok := ca.(*packets.ConnackPacket)
|
||||
if !ok {
|
||||
ERROR.Println(NET, "received msg that was not CONNACK")
|
||||
return packets.ErrNetworkError, false, errors.New("non-CONNACK first packet received")
|
||||
}
|
||||
|
||||
DEBUG.Println(NET, "received connack")
|
||||
return msg.ReturnCode, msg.SessionPresent, nil
|
||||
}
|
||||
|
||||
// actually read incoming messages off the wire
|
||||
// send Message object into ibound channel
|
||||
func incoming(c *client) {
|
||||
// inbound encapsulates the output from startIncoming.
|
||||
// err - If != nil then an error has occurred
|
||||
// cp - A control packet received over the network link
|
||||
type inbound struct {
|
||||
err error
|
||||
cp packets.ControlPacket
|
||||
}
|
||||
|
||||
// startIncoming initiates a goroutine that reads incoming messages off the wire and sends them to the channel (returned).
|
||||
// If there are any issues with the network connection then the returned channel will be closed and the goroutine will exit
|
||||
// (so closing the connection will terminate the goroutine)
|
||||
func startIncoming(conn io.Reader) <-chan inbound {
|
||||
var err error
|
||||
var cp packets.ControlPacket
|
||||
|
||||
defer c.workers.Done()
|
||||
ibound := make(chan inbound)
|
||||
|
||||
DEBUG.Println(NET, "incoming started")
|
||||
|
||||
for {
|
||||
if cp, err = packets.ReadPacket(c.conn); err != nil {
|
||||
break
|
||||
}
|
||||
DEBUG.Println(NET, "Received Message")
|
||||
select {
|
||||
case c.ibound <- cp:
|
||||
// Notify keepalive logic that we recently received a packet
|
||||
if c.options.KeepAlive != 0 {
|
||||
c.lastReceived.Store(time.Now())
|
||||
}
|
||||
case <-c.stop:
|
||||
// This avoids a deadlock should a message arrive while shutting down.
|
||||
// In that case the "reader" of c.ibound might already be gone
|
||||
WARN.Println(NET, "incoming dropped a received message during shutdown")
|
||||
break
|
||||
}
|
||||
}
|
||||
// We received an error on read.
|
||||
// If disconnect is in progress, swallow error and return
|
||||
select {
|
||||
case <-c.stop:
|
||||
DEBUG.Println(NET, "incoming stopped")
|
||||
return
|
||||
// Not trying to disconnect, send the error to the errors channel
|
||||
default:
|
||||
ERROR.Println(NET, "incoming stopped with error", err)
|
||||
signalError(c.errors, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// receive a Message object on obound, and then
|
||||
// actually send outgoing message to the wire
|
||||
func outgoing(c *client) {
|
||||
defer c.workers.Done()
|
||||
DEBUG.Println(NET, "outgoing started")
|
||||
|
||||
for {
|
||||
DEBUG.Println(NET, "outgoing waiting for an outbound message")
|
||||
select {
|
||||
case <-c.stop:
|
||||
DEBUG.Println(NET, "outgoing stopped")
|
||||
return
|
||||
case pub := <-c.obound:
|
||||
msg := pub.p.(*packets.PublishPacket)
|
||||
|
||||
if c.options.WriteTimeout > 0 {
|
||||
c.conn.SetWriteDeadline(time.Now().Add(c.options.WriteTimeout))
|
||||
}
|
||||
|
||||
if err := msg.Write(c.conn); err != nil {
|
||||
ERROR.Println(NET, "outgoing stopped with error", err)
|
||||
pub.t.setError(err)
|
||||
signalError(c.errors, err)
|
||||
return
|
||||
}
|
||||
|
||||
if c.options.WriteTimeout > 0 {
|
||||
// If we successfully wrote, we don't want the timeout to happen during an idle period
|
||||
// so we reset it to infinite.
|
||||
c.conn.SetWriteDeadline(time.Time{})
|
||||
}
|
||||
|
||||
if msg.Qos == 0 {
|
||||
pub.t.flowComplete()
|
||||
}
|
||||
DEBUG.Println(NET, "obound wrote msg, id:", msg.MessageID)
|
||||
case msg := <-c.oboundP:
|
||||
switch msg.p.(type) {
|
||||
case *packets.SubscribePacket:
|
||||
msg.p.(*packets.SubscribePacket).MessageID = c.getID(msg.t)
|
||||
case *packets.UnsubscribePacket:
|
||||
msg.p.(*packets.UnsubscribePacket).MessageID = c.getID(msg.t)
|
||||
}
|
||||
DEBUG.Println(NET, "obound priority msg to write, type", reflect.TypeOf(msg.p))
|
||||
if err := msg.p.Write(c.conn); err != nil {
|
||||
ERROR.Println(NET, "outgoing stopped with error", err)
|
||||
if msg.t != nil {
|
||||
msg.t.setError(err)
|
||||
go func() {
|
||||
for {
|
||||
if cp, err = packets.ReadPacket(conn); err != nil {
|
||||
// We do not want to log the error if it is due to the network connection having been closed
|
||||
// elsewhere (i.e. after sending DisconnectPacket). Detecting this situation is the subject of
|
||||
// https://github.com/golang/go/issues/4373
|
||||
if !strings.Contains(err.Error(), closedNetConnErrorText) {
|
||||
ibound <- inbound{err: err}
|
||||
}
|
||||
signalError(c.errors, err)
|
||||
return
|
||||
}
|
||||
switch msg.p.(type) {
|
||||
case *packets.DisconnectPacket:
|
||||
msg.t.(*DisconnectToken).flowComplete()
|
||||
DEBUG.Println(NET, "outbound wrote disconnect, stopping")
|
||||
close(ibound)
|
||||
DEBUG.Println(NET, "incoming complete")
|
||||
return
|
||||
}
|
||||
DEBUG.Println(NET, "startIncoming Received Message")
|
||||
ibound <- inbound{cp: cp}
|
||||
}
|
||||
// Reset ping timer after sending control packet.
|
||||
if c.options.KeepAlive != 0 {
|
||||
c.lastSent.Store(time.Now())
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return ibound
|
||||
}
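The startIncoming helper added above replaces the old incoming worker: a goroutine decodes packets off the wire and hands them over on a channel, and closing that channel is how the reader tells its consumer to stop. A minimal, self-contained sketch of that pattern (plain strings instead of MQTT control packets; the names are illustrative, not the library's internal API):

package main

import (
    "bufio"
    "fmt"
    "io"
    "strings"
)

// result mirrors the inbound struct above: either a value or an error.
type result struct {
    line string
    err  error
}

// startReader launches a goroutine that reads newline-delimited records and
// delivers them on the returned channel; the channel is closed when the
// reader stops (EOF or error), which is how the consumer learns to exit.
func startReader(r io.Reader) <-chan result {
    out := make(chan result)
    go func() {
        defer close(out)
        br := bufio.NewReader(r)
        for {
            line, err := br.ReadString('\n')
            if err != nil {
                if err != io.EOF {
                    out <- result{err: err}
                }
                return
            }
            out <- result{line: strings.TrimSpace(line)}
        }
    }()
    return out
}

func main() {
    for res := range startReader(strings.NewReader("one\ntwo\n")) {
        if res.err != nil {
            fmt.Println("read failed:", res.err)
            return
        }
        fmt.Println("got:", res.line)
    }
    fmt.Println("reader channel closed, consumer exits")
}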
|
||||
|
||||
// receive Message objects on ibound
|
||||
// store messages if necessary
|
||||
// send replies on obound
|
||||
// delete messages from store if necessary
|
||||
func alllogic(c *client) {
|
||||
defer c.workers.Done()
|
||||
DEBUG.Println(NET, "logic started")
|
||||
// incomingComms encapsulates the possible output of the incomingComms routine. If err != nil then an error has occurred and
|
||||
// the routine will have terminated; otherwise one of the other members should be non-nil
|
||||
type incomingComms struct {
|
||||
err error // If non-nil then there has been an error (ignore everything else)
|
||||
outbound *PacketAndToken // Packet (with token) that needs to be sent out (e.g. an acknowledgement)
|
||||
incomingPub *packets.PublishPacket // A new publish has been received; this will need to be passed on to our user
|
||||
}
|
||||
|
||||
for {
|
||||
DEBUG.Println(NET, "logic waiting for msg on ibound")
|
||||
// startIncomingComms initiates incoming communications; this includes starting a goroutine to process incoming
|
||||
// messages.
|
||||
// Accepts a channel of inbound messages from the store (persisted messages); note this must be closed as soon as
|
||||
// everything in the store has been sent.
|
||||
// Returns a channel that will be passed any received packets; this will be closed on a network error (and inboundFromStore closed)
|
||||
func startIncomingComms(conn io.Reader,
|
||||
c commsFns,
|
||||
inboundFromStore <-chan packets.ControlPacket,
|
||||
) <-chan incomingComms {
|
||||
ibound := startIncoming(conn) // Start goroutine that reads from network connection
|
||||
output := make(chan incomingComms)
|
||||
|
||||
DEBUG.Println(NET, "startIncomingComms started")
|
||||
go func() {
|
||||
for {
|
||||
if inboundFromStore == nil && ibound == nil {
|
||||
close(output)
|
||||
DEBUG.Println(NET, "startIncomingComms goroutine complete")
|
||||
return // As soon as ibound is closed we can exit (should have already processed an error)
|
||||
}
|
||||
DEBUG.Println(NET, "logic waiting for msg on ibound")
|
||||
|
||||
var msg packets.ControlPacket
|
||||
var ok bool
|
||||
select {
|
||||
case msg, ok = <-inboundFromStore:
|
||||
if !ok {
|
||||
DEBUG.Println(NET, "startIncomingComms: inboundFromStore complete")
|
||||
inboundFromStore = nil // should happen quickly as this is only for persisted messages
|
||||
continue
|
||||
}
|
||||
DEBUG.Println(NET, "startIncomingComms: got msg from store")
|
||||
case ibMsg, ok := <-ibound:
|
||||
if !ok {
|
||||
DEBUG.Println(NET, "startIncomingComms: ibound complete")
|
||||
ibound = nil
|
||||
continue
|
||||
}
|
||||
DEBUG.Println(NET, "startIncomingComms: got msg on ibound")
|
||||
// If the inbound comms routine encounters any issues it will send us an error.
|
||||
if ibMsg.err != nil {
|
||||
output <- incomingComms{err: ibMsg.err}
|
||||
continue // Usually the channel will be closed immediately after sending an error but safer that we do not assume this
|
||||
}
|
||||
msg = ibMsg.cp
|
||||
|
||||
c.persistInbound(msg)
|
||||
c.UpdateLastReceived() // Notify keepalive logic that we recently received a packet
|
||||
}
|
||||
|
||||
select {
|
||||
case msg := <-c.ibound:
|
||||
DEBUG.Println(NET, "logic got msg on ibound")
|
||||
persistInbound(c.persist, msg)
|
||||
switch m := msg.(type) {
|
||||
case *packets.PingrespPacket:
|
||||
DEBUG.Println(NET, "received pingresp")
|
||||
atomic.StoreInt32(&c.pingOutstanding, 0)
|
||||
DEBUG.Println(NET, "startIncomingComms: received pingresp")
|
||||
c.pingRespReceived()
|
||||
case *packets.SubackPacket:
|
||||
DEBUG.Println(NET, "received suback, id:", m.MessageID)
|
||||
DEBUG.Println(NET, "startIncomingComms: received suback, id:", m.MessageID)
|
||||
token := c.getToken(m.MessageID)
|
||||
switch t := token.(type) {
|
||||
case *SubscribeToken:
|
||||
DEBUG.Println(NET, "granted qoss", m.ReturnCodes)
|
||||
|
||||
if t, ok := token.(*SubscribeToken); ok {
|
||||
DEBUG.Println(NET, "startIncomingComms: granted qoss", m.ReturnCodes)
|
||||
for i, qos := range m.ReturnCodes {
|
||||
t.subResult[t.subs[i]] = qos
|
||||
}
|
||||
}
|
||||
|
||||
token.flowComplete()
|
||||
c.freeID(m.MessageID)
|
||||
case *packets.UnsubackPacket:
|
||||
DEBUG.Println(NET, "received unsuback, id:", m.MessageID)
|
||||
DEBUG.Println(NET, "startIncomingComms: received unsuback, id:", m.MessageID)
|
||||
c.getToken(m.MessageID).flowComplete()
|
||||
c.freeID(m.MessageID)
|
||||
case *packets.PublishPacket:
|
||||
DEBUG.Println(NET, "received publish, msgId:", m.MessageID)
|
||||
DEBUG.Println(NET, "putting msg on onPubChan")
|
||||
switch m.Qos {
|
||||
case 2:
|
||||
c.incomingPubChan <- m
|
||||
DEBUG.Println(NET, "done putting msg on incomingPubChan")
|
||||
case 1:
|
||||
c.incomingPubChan <- m
|
||||
DEBUG.Println(NET, "done putting msg on incomingPubChan")
|
||||
case 0:
|
||||
select {
|
||||
case c.incomingPubChan <- m:
|
||||
case <-c.stop:
|
||||
}
|
||||
DEBUG.Println(NET, "done putting msg on incomingPubChan")
|
||||
}
|
||||
DEBUG.Println(NET, "startIncomingComms: received publish, msgId:", m.MessageID)
|
||||
output <- incomingComms{incomingPub: m}
|
||||
case *packets.PubackPacket:
|
||||
DEBUG.Println(NET, "received puback, id:", m.MessageID)
|
||||
// c.receipts.get(msg.MsgId()) <- Receipt{}
|
||||
// c.receipts.end(msg.MsgId())
|
||||
DEBUG.Println(NET, "startIncomingComms: received puback, id:", m.MessageID)
|
||||
c.getToken(m.MessageID).flowComplete()
|
||||
c.freeID(m.MessageID)
|
||||
case *packets.PubrecPacket:
|
||||
DEBUG.Println(NET, "received pubrec, id:", m.MessageID)
|
||||
DEBUG.Println(NET, "startIncomingComms: received pubrec, id:", m.MessageID)
|
||||
prel := packets.NewControlPacket(packets.Pubrel).(*packets.PubrelPacket)
|
||||
prel.MessageID = m.MessageID
|
||||
select {
|
||||
case c.oboundP <- &PacketAndToken{p: prel, t: nil}:
|
||||
case <-c.stop:
|
||||
}
|
||||
output <- incomingComms{outbound: &PacketAndToken{p: prel, t: nil}}
|
||||
case *packets.PubrelPacket:
|
||||
DEBUG.Println(NET, "received pubrel, id:", m.MessageID)
|
||||
DEBUG.Println(NET, "startIncomingComms: received pubrel, id:", m.MessageID)
|
||||
pc := packets.NewControlPacket(packets.Pubcomp).(*packets.PubcompPacket)
|
||||
pc.MessageID = m.MessageID
|
||||
persistOutbound(c.persist, pc)
|
||||
select {
|
||||
case c.oboundP <- &PacketAndToken{p: pc, t: nil}:
|
||||
case <-c.stop:
|
||||
}
|
||||
c.persistOutbound(pc)
|
||||
output <- incomingComms{outbound: &PacketAndToken{p: pc, t: nil}}
|
||||
case *packets.PubcompPacket:
|
||||
DEBUG.Println(NET, "received pubcomp, id:", m.MessageID)
|
||||
DEBUG.Println(NET, "startIncomingComms: received pubcomp, id:", m.MessageID)
|
||||
c.getToken(m.MessageID).flowComplete()
|
||||
c.freeID(m.MessageID)
|
||||
}
|
||||
case <-c.stop:
|
||||
WARN.Println(NET, "logic stopped")
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
return output
|
||||
}
|
||||
|
||||
func (c *client) ackFunc(packet *packets.PublishPacket) func() {
|
||||
// startOutgoingComms initiates a go routine to transmit outgoing packets.
|
||||
// Pass in an open network connection and channels for outbound messages (including those triggered
|
||||
// directly from incoming comms).
|
||||
// Returns a channel that will receive details of any errors (closed when the goroutine exits)
|
||||
// This function will only terminate when all input channels are closed
|
||||
func startOutgoingComms(conn net.Conn,
|
||||
c commsFns,
|
||||
oboundp <-chan *PacketAndToken,
|
||||
obound <-chan *PacketAndToken,
|
||||
oboundFromIncoming <-chan *PacketAndToken,
|
||||
) <-chan error {
|
||||
errChan := make(chan error)
|
||||
DEBUG.Println(NET, "outgoing started")
|
||||
|
||||
go func() {
|
||||
for {
|
||||
DEBUG.Println(NET, "outgoing waiting for an outbound message")
|
||||
|
||||
// This goroutine will only exit when all of the input channels we receive on have been closed. This approach is taken to avoid any
|
||||
// deadlocks (if the connection goes down there are limited options as to what we can do with anything waiting on us and
|
||||
// throwing away the packets seems the best option)
|
||||
if oboundp == nil && obound == nil && oboundFromIncoming == nil {
|
||||
DEBUG.Println(NET, "outgoing comms stopping")
|
||||
close(errChan)
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case pub, ok := <-obound:
|
||||
if !ok {
|
||||
obound = nil
|
||||
continue
|
||||
}
|
||||
msg := pub.p.(*packets.PublishPacket)
|
||||
DEBUG.Println(NET, "obound msg to write", msg.MessageID)
|
||||
|
||||
writeTimeout := c.getWriteTimeOut()
|
||||
if writeTimeout > 0 {
|
||||
if err := conn.SetWriteDeadline(time.Now().Add(writeTimeout)); err != nil {
|
||||
ERROR.Println(NET, "SetWriteDeadline ", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := msg.Write(conn); err != nil {
|
||||
ERROR.Println(NET, "outgoing obound reporting error ", err)
|
||||
pub.t.setError(err)
|
||||
// report error if it's not due to the connection being closed elsewhere
|
||||
if !strings.Contains(err.Error(), closedNetConnErrorText) {
|
||||
errChan <- err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if writeTimeout > 0 {
|
||||
// If we successfully wrote, we don't want the timeout to happen during an idle period
|
||||
// so we reset it to infinite.
|
||||
if err := conn.SetWriteDeadline(time.Time{}); err != nil {
|
||||
ERROR.Println(NET, "SetWriteDeadline to 0 ", err)
|
||||
}
|
||||
}
|
||||
|
||||
if msg.Qos == 0 {
|
||||
pub.t.flowComplete()
|
||||
}
|
||||
DEBUG.Println(NET, "obound wrote msg, id:", msg.MessageID)
|
||||
case msg, ok := <-oboundp:
|
||||
if !ok {
|
||||
oboundp = nil
|
||||
continue
|
||||
}
|
||||
DEBUG.Println(NET, "obound priority msg to write, type", reflect.TypeOf(msg.p))
|
||||
if err := msg.p.Write(conn); err != nil {
|
||||
ERROR.Println(NET, "outgoing oboundp reporting error ", err)
|
||||
if msg.t != nil {
|
||||
msg.t.setError(err)
|
||||
}
|
||||
errChan <- err
|
||||
continue
|
||||
}
|
||||
|
||||
if _, ok := msg.p.(*packets.DisconnectPacket); ok {
|
||||
msg.t.(*DisconnectToken).flowComplete()
|
||||
DEBUG.Println(NET, "outbound wrote disconnect, closing connection")
|
||||
// As per the MQTT spec "After sending a DISCONNECT Packet the Client MUST close the Network Connection"
|
||||
// Closing the connection will cause the goroutines to end in sequence (starting with incoming comms)
|
||||
conn.Close()
|
||||
}
|
||||
case msg, ok := <-oboundFromIncoming: // message triggered by an inbound message (PubrecPacket or PubrelPacket)
|
||||
if !ok {
|
||||
oboundFromIncoming = nil
|
||||
continue
|
||||
}
|
||||
DEBUG.Println(NET, "obound from incoming msg to write, type", reflect.TypeOf(msg.p), " ID ", msg.p.Details().MessageID)
|
||||
if err := msg.p.Write(conn); err != nil {
|
||||
ERROR.Println(NET, "outgoing oboundFromIncoming reporting error", err)
|
||||
if msg.t != nil {
|
||||
msg.t.setError(err)
|
||||
}
|
||||
errChan <- err
|
||||
continue
|
||||
}
|
||||
}
|
||||
c.UpdateLastSent() // Record that a packet has been sent (for keepalive routine)
|
||||
}
|
||||
}()
|
||||
return errChan
|
||||
}
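startOutgoingComms stays alive until every input channel is closed, using the common Go idiom of setting a closed channel variable to nil so its select case can never fire again. A small self-contained sketch of that idiom (illustrative values, not the library's types):

package main

import "fmt"

// drain merges two channels and exits only when both are closed, using the
// same idiom as startOutgoingComms above: a closed channel is set to nil so
// its select case can never be chosen again.
func drain(a, b <-chan int) {
    for {
        if a == nil && b == nil {
            fmt.Println("all inputs closed, stopping")
            return
        }
        select {
        case v, ok := <-a:
            if !ok {
                a = nil
                continue
            }
            fmt.Println("from a:", v)
        case v, ok := <-b:
            if !ok {
                b = nil
                continue
            }
            fmt.Println("from b:", v)
        }
    }
}

func main() {
    a := make(chan int, 1)
    b := make(chan int, 1)
    a <- 1
    b <- 2
    close(a)
    close(b)
    drain(a, b)
}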
|
||||
|
||||
// commsFns provide access to the client state (messageids, requesting disconnection and updating timing)
|
||||
type commsFns interface {
|
||||
getToken(id uint16) tokenCompletor // Retrieve the token for the specified messageid (if none then a dummy token must be returned)
|
||||
freeID(id uint16) // Release the specified messageid (clearing out of any persistent store)
|
||||
UpdateLastReceived() // Must be called whenever a packet is received
|
||||
UpdateLastSent() // Must be called whenever a packet is successfully sent
|
||||
getWriteTimeOut() time.Duration // Return the writetimeout (or 0 if none)
|
||||
persistOutbound(m packets.ControlPacket) // add the packet to the outbound store
|
||||
persistInbound(m packets.ControlPacket) // add the packet to the inbound store
|
||||
pingRespReceived() // Called when a ping response is received
|
||||
}
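commsFns narrows the comms goroutines' view of the client to just the calls they need, which also makes them easy to exercise with a stub. A hedged sketch of the same idea with a hypothetical, cut-down interface and stub (not the real commsFns, which also needs tokens and a persistence store):

package main

import (
    "fmt"
    "time"
)

// timingFns is a cut-down, hypothetical analogue of commsFns: the comms code
// only sees the few client operations it needs, so a test can substitute a stub.
type timingFns interface {
    UpdateLastSent()
    getWriteTimeOut() time.Duration
}

// stubClient records calls instead of touching a real network client.
type stubClient struct{ sent int }

func (s *stubClient) UpdateLastSent()                { s.sent++ }
func (s *stubClient) getWriteTimeOut() time.Duration { return 5 * time.Second }

// send pretends to write a packet and then reports timing back through the
// narrow interface, the way startOutgoingComms does with commsFns.
func send(c timingFns, payload string) {
    _ = c.getWriteTimeOut() // a real implementation would arm a write deadline here
    fmt.Println("writing", payload)
    c.UpdateLastSent()
}

func main() {
    c := &stubClient{}
    send(c, "hello")
    fmt.Println("UpdateLastSent calls:", c.sent)
}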
|
||||
|
||||
// startComms initiates goroutines that handles communications over the network connection
|
||||
// Messages will be stored (via commsFns) and deleted from the store as necessary
|
||||
// It returns two channels:
|
||||
// packets.PublishPacket - Will receive publish packets received over the network.
|
||||
// Closed when incoming comms routines exit (on shutdown or if network link closed)
|
||||
// error - Any errors will be sent on this channel. The channel is closed when all comms routines have shut down
|
||||
//
|
||||
// Note: The comms routines monitoring oboundp and obound will not shut down until those channels are both closed. Any messages received between the
|
||||
// connection being closed and those channels being closed will generate errors (and nothing will be sent). That way the chance of a deadlock is
|
||||
// minimised.
|
||||
func startComms(conn net.Conn, // Network connection (must be active)
|
||||
c commsFns, // getters and setters to enable us to cleanly interact with client
|
||||
inboundFromStore <-chan packets.ControlPacket, // Inbound packets from the persistence store (should be closed relatively soon after startup)
|
||||
oboundp <-chan *PacketAndToken,
|
||||
obound <-chan *PacketAndToken) (
|
||||
<-chan *packets.PublishPacket, // Publish packets received over the network
|
||||
<-chan error, // Any errors (should generally trigger a disconnect)
|
||||
) {
|
||||
// Start inbound comms handler; this needs to be able to transmit messages so we start a go routine to add these to the priority outbound channel
|
||||
ibound := startIncomingComms(conn, c, inboundFromStore)
|
||||
outboundFromIncoming := make(chan *PacketAndToken) // Will accept outgoing messages triggered by startIncomingComms (e.g. acknowledgements)
|
||||
|
||||
// Start the outgoing handler. It is important to note that output from startIncomingComms is fed into startOutgoingComms (for ACK's)
|
||||
oboundErr := startOutgoingComms(conn, c, oboundp, obound, outboundFromIncoming)
|
||||
DEBUG.Println(NET, "startComms started")
|
||||
|
||||
// Run up go routines to handle the output from the above comms functions - these are handled in separate
|
||||
// go routines because they can interact (e.g. ibound triggers an ACK to obound which triggers an error)
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(2)
|
||||
|
||||
outPublish := make(chan *packets.PublishPacket)
|
||||
outError := make(chan error)
|
||||
|
||||
// Any messages received get passed to the appropriate channel
|
||||
go func() {
|
||||
for ic := range ibound {
|
||||
if ic.err != nil {
|
||||
outError <- ic.err
|
||||
continue
|
||||
}
|
||||
if ic.outbound != nil {
|
||||
outboundFromIncoming <- ic.outbound
|
||||
continue
|
||||
}
|
||||
if ic.incomingPub != nil {
|
||||
outPublish <- ic.incomingPub
|
||||
continue
|
||||
}
|
||||
ERROR.Println(STR, "startComms received empty incomingComms msg")
|
||||
}
|
||||
// Close channels that will not be written to again (allowing other routines to exit)
|
||||
close(outboundFromIncoming)
|
||||
close(outPublish)
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
// Any errors will be passed out to our caller
|
||||
go func() {
|
||||
for err := range oboundErr {
|
||||
outError <- err
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
// outError is used by both routines so can only be closed when they are both complete
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(outError)
|
||||
DEBUG.Println(NET, "startComms closing outError")
|
||||
}()
|
||||
|
||||
return outPublish, outError
|
||||
}
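startComms closes outError only after both forwarding goroutines have finished, coordinating the shared channel with a sync.WaitGroup. A self-contained sketch of that fan-in-then-close pattern with plain error channels (illustrative, not the library API):

package main

import (
    "fmt"
    "sync"
)

// fanIn forwards two sources into one error channel and closes it only when
// both forwarding goroutines are done, mirroring the WaitGroup pattern above.
func fanIn(a, b <-chan error) <-chan error {
    out := make(chan error)
    var wg sync.WaitGroup
    wg.Add(2)
    forward := func(in <-chan error) {
        defer wg.Done()
        for err := range in {
            out <- err
        }
    }
    go forward(a)
    go forward(b)
    go func() {
        wg.Wait() // out is shared, so close it only after both writers return
        close(out)
    }()
    return out
}

func main() {
    a := make(chan error, 1)
    b := make(chan error, 1)
    a <- fmt.Errorf("network down")
    close(a)
    close(b)
    for err := range fanIn(a, b) {
        fmt.Println("got error:", err)
    }
    fmt.Println("merged channel closed")
}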
|
||||
|
||||
// ackFunc acknowledges a packet
|
||||
// WARNING the function returned must not be called if the comms routine is shutting down or not running
|
||||
// (it needs outgoing comms in order to send the acknowledgement). Currently this is only called from
|
||||
// matchAndDispatch which will be shutdown before the comms are
|
||||
func ackFunc(oboundP chan *PacketAndToken, persist Store, packet *packets.PublishPacket) func() {
|
||||
return func() {
|
||||
switch packet.Qos {
|
||||
case 2:
|
||||
pr := packets.NewControlPacket(packets.Pubrec).(*packets.PubrecPacket)
|
||||
pr.MessageID = packet.MessageID
|
||||
DEBUG.Println(NET, "putting pubrec msg on obound")
|
||||
select {
|
||||
case c.oboundP <- &PacketAndToken{p: pr, t: nil}:
|
||||
case <-c.stop:
|
||||
}
|
||||
oboundP <- &PacketAndToken{p: pr, t: nil}
|
||||
DEBUG.Println(NET, "done putting pubrec msg on obound")
|
||||
case 1:
|
||||
pa := packets.NewControlPacket(packets.Puback).(*packets.PubackPacket)
|
||||
pa.MessageID = packet.MessageID
|
||||
DEBUG.Println(NET, "putting puback msg on obound")
|
||||
persistOutbound(c.persist, pa)
|
||||
select {
|
||||
case c.oboundP <- &PacketAndToken{p: pa, t: nil}:
|
||||
case <-c.stop:
|
||||
}
|
||||
persistOutbound(persist, pa)
|
||||
oboundP <- &PacketAndToken{p: pa, t: nil}
|
||||
DEBUG.Println(NET, "done putting puback msg on obound")
|
||||
case 0:
|
||||
// do nothing, since there is no need to send an ack packet back
|
||||
}
|
||||
}
|
||||
}
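ackFunc captures the publish details and returns a closure, so the dispatcher can run the message handler first and acknowledge afterwards with the packet type chosen by QoS. A toy sketch of that shape (strings stand in for the real PUBREC/PUBACK packets):

package main

import "fmt"

// ackFor mimics the shape of ackFunc above: it captures the message details
// and returns a closure that emits the right acknowledgement later, so the
// caller can process the publish first and acknowledge afterwards.
func ackFor(qos byte, id uint16, out chan<- string) func() {
    return func() {
        switch qos {
        case 2:
            out <- fmt.Sprintf("pubrec %d", id)
        case 1:
            out <- fmt.Sprintf("puback %d", id)
        case 0:
            // QoS 0 needs no acknowledgement
        }
    }
}

func main() {
    out := make(chan string, 1)
    ack := ackFor(1, 42, out)
    // ... the message handler would run here ...
    ack()
    fmt.Println(<-out) // puback 42
}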
|
||||
|
||||
func errorWatch(c *client) {
|
||||
defer c.workers.Done()
|
||||
select {
|
||||
case <-c.stop:
|
||||
WARN.Println(NET, "errorWatch stopped")
|
||||
return
|
||||
case err := <-c.errors:
|
||||
ERROR.Println(NET, "error triggered, stopping")
|
||||
go c.internalConnLost(err)
|
||||
return
|
||||
}
|
||||
}
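errorWatch blocks on either the stop signal or the first error and, on error, starts the connection-lost path. A compact sketch of that watchdog select (hypothetical names, synchronous callback for brevity):

package main

import (
    "errors"
    "fmt"
)

// watch mirrors errorWatch above: it waits for either a stop signal or the
// first error, and on error it kicks off the teardown path.
func watch(stop <-chan struct{}, errs <-chan error, onLost func(error)) {
    select {
    case <-stop:
        fmt.Println("watcher stopped cleanly")
    case err := <-errs:
        fmt.Println("error triggered, starting teardown")
        onLost(err)
    }
}

func main() {
    stop := make(chan struct{})
    errs := make(chan error, 1)
    errs <- errors.New("connection reset")
    watch(stop, errs, func(err error) { fmt.Println("connection lost:", err) })
}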
|
||||
|
91  vendor/github.com/eclipse/paho.mqtt.golang/netconn.go  (generated, vendored, Normal file)
@ -0,0 +1,91 @@
/*
 * Copyright (c) 2013 IBM Corp.
 *
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *    Seth Hoenig
 *    Allan Stockdill-Mander
 *    Mike Robertson
 */

package mqtt

import (
    "crypto/tls"
    "errors"
    "net"
    "net/http"
    "net/url"
    "os"
    "time"

    "golang.org/x/net/proxy"
)

//
// This just establishes the network connection; once established the type of connection should be irrelevant
//

// openConnection opens a network connection using the protocol indicated in the URL. Does not carry out any MQTT specific handshakes
func openConnection(uri *url.URL, tlsc *tls.Config, timeout time.Duration, headers http.Header, websocketOptions *WebsocketOptions) (net.Conn, error) {
    switch uri.Scheme {
    case "ws":
        conn, err := NewWebsocket(uri.String(), nil, timeout, headers, websocketOptions)
        return conn, err
    case "wss":
        conn, err := NewWebsocket(uri.String(), tlsc, timeout, headers, websocketOptions)
        return conn, err
    case "mqtt", "tcp":
        allProxy := os.Getenv("all_proxy")
        if len(allProxy) == 0 {
            conn, err := net.DialTimeout("tcp", uri.Host, timeout)
            if err != nil {
                return nil, err
            }
            return conn, nil
        }
        proxyDialer := proxy.FromEnvironment()

        conn, err := proxyDialer.Dial("tcp", uri.Host)
        if err != nil {
            return nil, err
        }
        return conn, nil
    case "unix":
        conn, err := net.DialTimeout("unix", uri.Host, timeout)
        if err != nil {
            return nil, err
        }
        return conn, nil
    case "ssl", "tls", "mqtts", "mqtt+ssl", "tcps":
        allProxy := os.Getenv("all_proxy")
        if len(allProxy) == 0 {
            conn, err := tls.DialWithDialer(&net.Dialer{Timeout: timeout}, "tcp", uri.Host, tlsc)
            if err != nil {
                return nil, err
            }
            return conn, nil
        }
        proxyDialer := proxy.FromEnvironment()

        conn, err := proxyDialer.Dial("tcp", uri.Host)
        if err != nil {
            return nil, err
        }

        tlsConn := tls.Client(conn, tlsc)

        err = tlsConn.Handshake()
        if err != nil {
            conn.Close()
            return nil, err
        }

        return tlsConn, nil
    }
    return nil, errors.New("unknown protocol")
}
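openConnection picks the dialer from the URL scheme, so the scheme passed to ClientOptions.AddBroker decides whether the client uses plain TCP, TLS, a WebSocket, or a Unix socket. A hedged usage sketch (the broker addresses and client id are placeholders):

package main

import (
    "fmt"

    mqtt "github.com/eclipse/paho.mqtt.golang"
)

func main() {
    // Each scheme is routed by openConnection to a different dialer:
    // tcp:// (or mqtt://) uses plain TCP; ssl:// (or tls://, mqtts://, tcps://)
    // uses TLS; ws:// and wss:// use WebSockets; unix:// uses a Unix socket.
    opts := mqtt.NewClientOptions().
        AddBroker("tcp://broker.example.com:1883").
        AddBroker("ssl://broker.example.com:8883").
        AddBroker("ws://broker.example.com:80/mqtt").
        SetClientID("robocar-camera-example")

    for _, u := range opts.Servers {
        fmt.Println("will try", u.String())
    }
}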
|
67  vendor/github.com/eclipse/paho.mqtt.golang/options.go  (generated, vendored)
@ -10,6 +10,7 @@
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
* Måns Ansgariusson
|
||||
*/
|
||||
|
||||
// Portions copyright © 2018 TIBCO Software Inc.
|
||||
@ -20,6 +21,7 @@ import (
|
||||
"crypto/tls"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
@ -44,6 +46,10 @@ type ConnectionLostHandler func(Client, error)
|
||||
// at initial connection and on reconnection
|
||||
type OnConnectHandler func(Client)
|
||||
|
||||
// ReconnectHandler is invoked prior to reconnecting after
|
||||
// the initial connection is lost
|
||||
type ReconnectHandler func(Client, *ClientOptions)
|
||||
|
||||
// ClientOptions contains configurable options for an Client.
|
||||
type ClientOptions struct {
|
||||
Servers []*url.URL
|
||||
@ -66,14 +72,18 @@ type ClientOptions struct {
|
||||
ConnectTimeout time.Duration
|
||||
MaxReconnectInterval time.Duration
|
||||
AutoReconnect bool
|
||||
ConnectRetryInterval time.Duration
|
||||
ConnectRetry bool
|
||||
Store Store
|
||||
DefaultPublishHandler MessageHandler
|
||||
OnConnect OnConnectHandler
|
||||
OnConnectionLost ConnectionLostHandler
|
||||
OnReconnecting ReconnectHandler
|
||||
WriteTimeout time.Duration
|
||||
MessageChannelDepth uint
|
||||
ResumeSubs bool
|
||||
HTTPHeaders http.Header
|
||||
WebsocketOptions *WebsocketOptions
|
||||
}
|
||||
|
||||
// NewClientOptions will create a new ClientClientOptions type with some
|
||||
@ -105,13 +115,15 @@ func NewClientOptions() *ClientOptions {
|
||||
ConnectTimeout: 30 * time.Second,
|
||||
MaxReconnectInterval: 10 * time.Minute,
|
||||
AutoReconnect: true,
|
||||
ConnectRetryInterval: 30 * time.Second,
|
||||
ConnectRetry: false,
|
||||
Store: nil,
|
||||
OnConnect: nil,
|
||||
OnConnectionLost: DefaultConnectionLostHandler,
|
||||
WriteTimeout: 0, // 0 represents timeout disabled
|
||||
MessageChannelDepth: 100,
|
||||
ResumeSubs: false,
|
||||
HTTPHeaders: make(map[string][]string),
|
||||
WebsocketOptions: &WebsocketOptions{},
|
||||
}
|
||||
return o
|
||||
}
|
||||
@ -125,12 +137,14 @@ func NewClientOptions() *ClientOptions {
|
||||
//
|
||||
// An example broker URI would look like: tcp://foobar.com:1883
|
||||
func (o *ClientOptions) AddBroker(server string) *ClientOptions {
|
||||
re := regexp.MustCompile(`%(25)?`)
|
||||
if len(server) > 0 && server[0] == ':' {
|
||||
server = "127.0.0.1" + server
|
||||
}
|
||||
if !strings.Contains(server, "://") {
|
||||
server = "tcp://" + server
|
||||
}
|
||||
server = re.ReplaceAllLiteralString(server, "%25")
|
||||
brokerURI, err := url.Parse(server)
|
||||
if err != nil {
|
||||
ERROR.Println(CLI, "Failed to parse %q broker address: %s", server, err)
|
||||
@ -149,7 +163,7 @@ func (o *ClientOptions) SetResumeSubs(resume bool) *ClientOptions {
|
||||
|
||||
// SetClientID will set the client id to be used by this client when
|
||||
// connecting to the MQTT broker. According to the MQTT v3.1 specification,
|
||||
// a client id mus be no longer than 23 characters.
|
||||
// a client id must be no longer than 23 characters.
|
||||
func (o *ClientOptions) SetClientID(id string) *ClientOptions {
|
||||
o.ClientID = id
|
||||
return o
|
||||
@ -157,7 +171,7 @@ func (o *ClientOptions) SetClientID(id string) *ClientOptions {
|
||||
|
||||
// SetUsername will set the username to be used by this client when connecting
|
||||
// to the MQTT broker. Note: without the use of SSL/TLS, this information will
|
||||
// be sent in plaintext accross the wire.
|
||||
// be sent in plaintext across the wire.
|
||||
func (o *ClientOptions) SetUsername(u string) *ClientOptions {
|
||||
o.Username = u
|
||||
return o
|
||||
@ -165,7 +179,7 @@ func (o *ClientOptions) SetUsername(u string) *ClientOptions {
|
||||
|
||||
// SetPassword will set the password to be used by this client when connecting
|
||||
// to the MQTT broker. Note: without the use of SSL/TLS, this information will
|
||||
// be sent in plaintext accross the wire.
|
||||
// be sent in plaintext across the wire.
|
||||
func (o *ClientOptions) SetPassword(p string) *ClientOptions {
|
||||
o.Password = p
|
||||
return o
|
||||
@ -174,7 +188,7 @@ func (o *ClientOptions) SetPassword(p string) *ClientOptions {
|
||||
// SetCredentialsProvider will set a method to be called by this client when
|
||||
// connecting to the MQTT broker that provide the current username and password.
|
||||
// Note: without the use of SSL/TLS, this information will be sent
|
||||
// in plaintext accross the wire.
|
||||
// in plaintext across the wire.
|
||||
func (o *ClientOptions) SetCredentialsProvider(p CredentialsProvider) *ClientOptions {
|
||||
o.CredentialsProvider = p
|
||||
return o
|
||||
@ -184,7 +198,7 @@ func (o *ClientOptions) SetCredentialsProvider(p CredentialsProvider) *ClientOpt
|
||||
// when this client connects to an MQTT broker. By setting this flag, you are
|
||||
// indicating that no messages saved by the broker for this client should be
|
||||
// delivered. Any messages that were going to be sent by this client before
|
||||
// diconnecting previously but didn't will not be sent upon connecting to the
|
||||
// disconnecting previously but didn't will not be sent upon connecting to the
|
||||
// broker.
|
||||
func (o *ClientOptions) SetCleanSession(clean bool) *ClientOptions {
|
||||
o.CleanSession = clean
|
||||
@ -195,6 +209,7 @@ func (o *ClientOptions) SetCleanSession(clean bool) *ClientOptions {
|
||||
// each QoS level. By default, this value is true. If set to false,
|
||||
// this flag indicates that messages can be delivered asynchronously
|
||||
// from the client to the application and possibly arrive out of order.
|
||||
// Specifically, the message handler is called in its own go routine.
|
||||
func (o *ClientOptions) SetOrderMatters(order bool) *ClientOptions {
|
||||
o.Order = order
|
||||
return o
|
||||
@ -293,15 +308,22 @@ func (o *ClientOptions) SetConnectionLostHandler(onLost ConnectionLostHandler) *
|
||||
return o
|
||||
}
|
||||
|
||||
// SetReconnectingHandler sets the OnReconnecting callback to be executed prior
|
||||
// to the client attempting a reconnect to the MQTT broker.
|
||||
func (o *ClientOptions) SetReconnectingHandler(cb ReconnectHandler) *ClientOptions {
|
||||
o.OnReconnecting = cb
|
||||
return o
|
||||
}
|
||||
|
||||
// SetWriteTimeout puts a limit on how long a mqtt publish should block until it unblocks with a
|
||||
// timeout error. A duration of 0 never times out. Default 30 seconds
|
||||
// timeout error. A duration of 0 never times out. Default never times out
|
||||
func (o *ClientOptions) SetWriteTimeout(t time.Duration) *ClientOptions {
|
||||
o.WriteTimeout = t
|
||||
return o
|
||||
}
|
||||
|
||||
// SetConnectTimeout limits how long the client will wait when trying to open a connection
|
||||
// to an MQTT server before timeing out and erroring the attempt. A duration of 0 never times out.
|
||||
// to an MQTT server before timing out. A duration of 0 never times out.
|
||||
// Default 30 seconds. Currently only operational on TCP/TLS connections.
|
||||
func (o *ClientOptions) SetConnectTimeout(t time.Duration) *ClientOptions {
|
||||
o.ConnectTimeout = t
|
||||
@ -323,10 +345,25 @@ func (o *ClientOptions) SetAutoReconnect(a bool) *ClientOptions {
|
||||
return o
|
||||
}
|
||||
|
||||
// SetMessageChannelDepth sets the size of the internal queue that holds messages while the
|
||||
// client is temporairily offline, allowing the application to publish when the client is
|
||||
// reconnecting. This setting is only valid if AutoReconnect is set to true, it is otherwise
|
||||
// ignored.
|
||||
// SetConnectRetryInterval sets the time that will be waited between connection attempts
|
||||
// when initially connecting if ConnectRetry is TRUE
|
||||
func (o *ClientOptions) SetConnectRetryInterval(t time.Duration) *ClientOptions {
|
||||
o.ConnectRetryInterval = t
|
||||
return o
|
||||
}
|
||||
|
||||
// SetConnectRetry sets whether the connect function will automatically retry the connection
|
||||
// in the event of a failure (when true the token returned by the Connect function will
|
||||
// not complete until the connection is up or it is cancelled)
|
||||
// If ConnectRetry is true then subscriptions should be requested in OnConnect handler
|
||||
// Setting this to TRUE permits messages to be published before the connection is established
|
||||
func (o *ClientOptions) SetConnectRetry(a bool) *ClientOptions {
|
||||
o.ConnectRetry = a
|
||||
return o
|
||||
}
|
||||
|
||||
// SetMessageChannelDepth DEPRECATED The value set here no longer has any effect, this function
|
||||
// remains so the API is not altered.
|
||||
func (o *ClientOptions) SetMessageChannelDepth(s uint) *ClientOptions {
|
||||
o.MessageChannelDepth = s
|
||||
return o
|
||||
@ -338,3 +375,9 @@ func (o *ClientOptions) SetHTTPHeaders(h http.Header) *ClientOptions {
|
||||
o.HTTPHeaders = h
|
||||
return o
|
||||
}
|
||||
|
||||
// SetWebsocketOptions sets the additional websocket options used in a WebSocket connection
|
||||
func (o *ClientOptions) SetWebsocketOptions(w *WebsocketOptions) *ClientOptions {
|
||||
o.WebsocketOptions = w
|
||||
return o
|
||||
}
|
||||
|
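With the ConnectRetry options added above, the token returned by Connect does not complete until a connection is made, and subscriptions are best (re)issued from the OnConnect handler so they survive reconnections. A hedged usage sketch (broker URL, client id and topic are placeholders):

package main

import (
    "fmt"
    "time"

    mqtt "github.com/eclipse/paho.mqtt.golang"
)

func main() {
    // Retry the initial connection every 5 seconds until it succeeds.
    opts := mqtt.NewClientOptions().
        AddBroker("tcp://broker.example.com:1883").
        SetClientID("robocar-camera-example").
        SetConnectRetry(true).
        SetConnectRetryInterval(5 * time.Second).
        SetAutoReconnect(true).
        SetOnConnectHandler(func(c mqtt.Client) {
            // With ConnectRetry/AutoReconnect, (re)subscribe here so the
            // subscription is restored after every (re)connection.
            c.Subscribe("example/topic", 0, func(_ mqtt.Client, m mqtt.Message) {
                fmt.Printf("message on %s: %s\n", m.Topic(), m.Payload())
            })
        })

    client := mqtt.NewClient(opts)
    if t := client.Connect(); t.Wait() && t.Error() != nil {
        fmt.Println("connect failed:", t.Error())
    }
}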
30  vendor/github.com/eclipse/paho.mqtt.golang/options_reader.go  (generated, vendored)
@ -26,7 +26,7 @@ type ClientOptionsReader struct {
|
||||
options *ClientOptions
|
||||
}
|
||||
|
||||
//Servers returns a slice of the servers defined in the clientoptions
|
||||
// Servers returns a slice of the servers defined in the clientoptions
|
||||
func (r *ClientOptionsReader) Servers() []*url.URL {
|
||||
s := make([]*url.URL, len(r.options.Servers))
|
||||
|
||||
@ -38,31 +38,31 @@ func (r *ClientOptionsReader) Servers() []*url.URL {
|
||||
return s
|
||||
}
|
||||
|
||||
//ResumeSubs returns true if resuming stored (un)sub is enabled
|
||||
// ResumeSubs returns true if resuming stored (un)sub is enabled
|
||||
func (r *ClientOptionsReader) ResumeSubs() bool {
|
||||
s := r.options.ResumeSubs
|
||||
return s
|
||||
}
|
||||
|
||||
//ClientID returns the set client id
|
||||
// ClientID returns the set client id
|
||||
func (r *ClientOptionsReader) ClientID() string {
|
||||
s := r.options.ClientID
|
||||
return s
|
||||
}
|
||||
|
||||
//Username returns the set username
|
||||
// Username returns the set username
|
||||
func (r *ClientOptionsReader) Username() string {
|
||||
s := r.options.Username
|
||||
return s
|
||||
}
|
||||
|
||||
//Password returns the set password
|
||||
// Password returns the set password
|
||||
func (r *ClientOptionsReader) Password() string {
|
||||
s := r.options.Password
|
||||
return s
|
||||
}
|
||||
|
||||
//CleanSession returns whether Cleansession is set
|
||||
// CleanSession returns whether Cleansession is set
|
||||
func (r *ClientOptionsReader) CleanSession() bool {
|
||||
s := r.options.CleanSession
|
||||
return s
|
||||
@ -133,6 +133,18 @@ func (r *ClientOptionsReader) AutoReconnect() bool {
|
||||
return s
|
||||
}
|
||||
|
||||
// ConnectRetryInterval returns the delay between retries on the initial connection (if ConnectRetry true)
|
||||
func (r *ClientOptionsReader) ConnectRetryInterval() time.Duration {
|
||||
s := r.options.ConnectRetryInterval
|
||||
return s
|
||||
}
|
||||
|
||||
// ConnectRetry returns whether the initial connection request will be retried until connection established
|
||||
func (r *ClientOptionsReader) ConnectRetry() bool {
|
||||
s := r.options.ConnectRetry
|
||||
return s
|
||||
}
|
||||
|
||||
func (r *ClientOptionsReader) WriteTimeout() time.Duration {
|
||||
s := r.options.WriteTimeout
|
||||
return s
|
||||
@ -147,3 +159,9 @@ func (r *ClientOptionsReader) HTTPHeaders() http.Header {
|
||||
h := r.options.HTTPHeaders
|
||||
return h
|
||||
}
|
||||
|
||||
// WebsocketOptions returns the currently configured WebSocket options
|
||||
func (r *ClientOptionsReader) WebsocketOptions() *WebsocketOptions {
|
||||
s := r.options.WebsocketOptions
|
||||
return s
|
||||
}
|
||||
|
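The new ConnectRetry and ConnectRetryInterval getters let a handler inspect the options its client was built with via the read-only ClientOptionsReader. A hedged sketch (placeholder broker; Connect is omitted because only the reader API is being shown):

package main

import (
    "fmt"
    "time"

    mqtt "github.com/eclipse/paho.mqtt.golang"
)

func main() {
    opts := mqtt.NewClientOptions().
        AddBroker("tcp://broker.example.com:1883").
        SetClientID("robocar-camera-example").
        SetConnectRetry(true).
        SetConnectRetryInterval(10 * time.Second).
        SetOnConnectHandler(func(c mqtt.Client) {
            // OptionsReader exposes the effective options read-only.
            r := c.OptionsReader()
            fmt.Println("client id:", r.ClientID())
            fmt.Println("connect retry:", r.ConnectRetry(), "every", r.ConnectRetryInterval())
        })

    _ = mqtt.NewClient(opts) // Connect() omitted; this only demonstrates the reader API
}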
17  vendor/github.com/eclipse/paho.mqtt.golang/packets/connack.go  (generated, vendored)
@ -6,8 +6,8 @@ import (
|
||||
"io"
|
||||
)
|
||||
|
||||
//ConnackPacket is an internal representation of the fields of the
|
||||
//Connack MQTT packet
|
||||
// ConnackPacket is an internal representation of the fields of the
|
||||
// Connack MQTT packet
|
||||
type ConnackPacket struct {
|
||||
FixedHeader
|
||||
SessionPresent bool
|
||||
@ -15,10 +15,7 @@ type ConnackPacket struct {
|
||||
}
|
||||
|
||||
func (ca *ConnackPacket) String() string {
|
||||
str := fmt.Sprintf("%s", ca.FixedHeader)
|
||||
str += " "
|
||||
str += fmt.Sprintf("sessionpresent: %t returncode: %d", ca.SessionPresent, ca.ReturnCode)
|
||||
return str
|
||||
return fmt.Sprintf("%s sessionpresent: %t returncode: %d", ca.FixedHeader, ca.SessionPresent, ca.ReturnCode)
|
||||
}
|
||||
|
||||
func (ca *ConnackPacket) Write(w io.Writer) error {
|
||||
@ -35,8 +32,8 @@ func (ca *ConnackPacket) Write(w io.Writer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
//Unpack decodes the details of a ControlPacket after the fixed
|
||||
//header has been read
|
||||
// Unpack decodes the details of a ControlPacket after the fixed
|
||||
// header has been read
|
||||
func (ca *ConnackPacket) Unpack(b io.Reader) error {
|
||||
flags, err := decodeByte(b)
|
||||
if err != nil {
|
||||
@ -48,8 +45,8 @@ func (ca *ConnackPacket) Unpack(b io.Reader) error {
|
||||
return err
|
||||
}
|
||||
|
||||
//Details returns a Details struct containing the Qos and
|
||||
//MessageID of this ControlPacket
|
||||
// Details returns a Details struct containing the Qos and
|
||||
// MessageID of this ControlPacket
|
||||
func (ca *ConnackPacket) Details() Details {
|
||||
return Details{Qos: 0, MessageID: 0}
|
||||
}
|
||||
|
29  vendor/github.com/eclipse/paho.mqtt.golang/packets/connect.go  (generated, vendored)
@ -6,8 +6,8 @@ import (
|
||||
"io"
|
||||
)
|
||||
|
||||
//ConnectPacket is an internal representation of the fields of the
|
||||
//Connect MQTT packet
|
||||
// ConnectPacket is an internal representation of the fields of the
|
||||
// Connect MQTT packet
|
||||
type ConnectPacket struct {
|
||||
FixedHeader
|
||||
ProtocolName string
|
||||
@ -29,10 +29,7 @@ type ConnectPacket struct {
|
||||
}
|
||||
|
||||
func (c *ConnectPacket) String() string {
|
||||
str := fmt.Sprintf("%s", c.FixedHeader)
|
||||
str += " "
|
||||
str += fmt.Sprintf("protocolversion: %d protocolname: %s cleansession: %t willflag: %t WillQos: %d WillRetain: %t Usernameflag: %t Passwordflag: %t keepalive: %d clientId: %s willtopic: %s willmessage: %s Username: %s Password: %s", c.ProtocolVersion, c.ProtocolName, c.CleanSession, c.WillFlag, c.WillQos, c.WillRetain, c.UsernameFlag, c.PasswordFlag, c.Keepalive, c.ClientIdentifier, c.WillTopic, c.WillMessage, c.Username, c.Password)
|
||||
return str
|
||||
return fmt.Sprintf("%s protocolversion: %d protocolname: %s cleansession: %t willflag: %t WillQos: %d WillRetain: %t Usernameflag: %t Passwordflag: %t keepalive: %d clientId: %s willtopic: %s willmessage: %s Username: %s Password: %s", c.FixedHeader, c.ProtocolVersion, c.ProtocolName, c.CleanSession, c.WillFlag, c.WillQos, c.WillRetain, c.UsernameFlag, c.PasswordFlag, c.Keepalive, c.ClientIdentifier, c.WillTopic, c.WillMessage, c.Username, c.Password)
|
||||
}
|
||||
|
||||
func (c *ConnectPacket) Write(w io.Writer) error {
|
||||
@ -62,8 +59,8 @@ func (c *ConnectPacket) Write(w io.Writer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
//Unpack decodes the details of a ControlPacket after the fixed
|
||||
//header has been read
|
||||
// Unpack decodes the details of a ControlPacket after the fixed
|
||||
// header has been read
|
||||
func (c *ConnectPacket) Unpack(b io.Reader) error {
|
||||
var err error
|
||||
c.ProtocolName, err = decodeString(b)
|
||||
@ -119,36 +116,36 @@ func (c *ConnectPacket) Unpack(b io.Reader) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
//Validate performs validation of the fields of a Connect packet
|
||||
// Validate performs validation of the fields of a Connect packet
|
||||
func (c *ConnectPacket) Validate() byte {
|
||||
if c.PasswordFlag && !c.UsernameFlag {
|
||||
return ErrRefusedBadUsernameOrPassword
|
||||
}
|
||||
if c.ReservedBit != 0 {
|
||||
//Bad reserved bit
|
||||
// Bad reserved bit
|
||||
return ErrProtocolViolation
|
||||
}
|
||||
if (c.ProtocolName == "MQIsdp" && c.ProtocolVersion != 3) || (c.ProtocolName == "MQTT" && c.ProtocolVersion != 4) {
|
||||
//Mismatched or unsupported protocol version
|
||||
// Mismatched or unsupported protocol version
|
||||
return ErrRefusedBadProtocolVersion
|
||||
}
|
||||
if c.ProtocolName != "MQIsdp" && c.ProtocolName != "MQTT" {
|
||||
//Bad protocol name
|
||||
// Bad protocol name
|
||||
return ErrProtocolViolation
|
||||
}
|
||||
if len(c.ClientIdentifier) > 65535 || len(c.Username) > 65535 || len(c.Password) > 65535 {
|
||||
//Bad size field
|
||||
// Bad size field
|
||||
return ErrProtocolViolation
|
||||
}
|
||||
if len(c.ClientIdentifier) == 0 && !c.CleanSession {
|
||||
//Bad client identifier
|
||||
// Bad client identifier
|
||||
return ErrRefusedIDRejected
|
||||
}
|
||||
return Accepted
|
||||
}
|
||||
|
||||
//Details returns a Details struct containing the Qos and
|
||||
//MessageID of this ControlPacket
|
||||
// Details returns a Details struct containing the Qos and
|
||||
// MessageID of this ControlPacket
|
||||
func (c *ConnectPacket) Details() Details {
|
||||
return Details{Qos: 0, MessageID: 0}
|
||||
}
|
||||
|
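Validate, shown in connect.go above, maps a malformed CONNECT packet to one of the CONNACK return-code constants and returns Accepted otherwise. A small sketch that builds a ConnectPacket and checks it (the client identifier is a placeholder):

package main

import (
    "fmt"

    "github.com/eclipse/paho.mqtt.golang/packets"
)

func main() {
    cp := packets.NewControlPacket(packets.Connect).(*packets.ConnectPacket)
    cp.ProtocolName = "MQTT"
    cp.ProtocolVersion = 4
    cp.CleanSession = true
    cp.ClientIdentifier = "robocar-camera-example"
    cp.Keepalive = 30

    if code := cp.Validate(); code == packets.Accepted {
        fmt.Println("connect packet accepted")
    } else {
        fmt.Println("connect packet rejected:", packets.ConnackReturnCodes[code])
    }
}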
16  vendor/github.com/eclipse/paho.mqtt.golang/packets/disconnect.go  (generated, vendored)
@ -1,19 +1,17 @@
|
||||
package packets
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
//DisconnectPacket is an internal representation of the fields of the
|
||||
//Disconnect MQTT packet
|
||||
// DisconnectPacket is an internal representation of the fields of the
|
||||
// Disconnect MQTT packet
|
||||
type DisconnectPacket struct {
|
||||
FixedHeader
|
||||
}
|
||||
|
||||
func (d *DisconnectPacket) String() string {
|
||||
str := fmt.Sprintf("%s", d.FixedHeader)
|
||||
return str
|
||||
return d.FixedHeader.String()
|
||||
}
|
||||
|
||||
func (d *DisconnectPacket) Write(w io.Writer) error {
|
||||
@ -23,14 +21,14 @@ func (d *DisconnectPacket) Write(w io.Writer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
//Unpack decodes the details of a ControlPacket after the fixed
|
||||
//header has been read
|
||||
// Unpack decodes the details of a ControlPacket after the fixed
|
||||
// header has been read
|
||||
func (d *DisconnectPacket) Unpack(b io.Reader) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
//Details returns a Details struct containing the Qos and
|
||||
//MessageID of this ControlPacket
|
||||
// Details returns a Details struct containing the Qos and
|
||||
// MessageID of this ControlPacket
|
||||
func (d *DisconnectPacket) Details() Details {
|
||||
return Details{Qos: 0, MessageID: 0}
|
||||
}
|
||||
|
80  vendor/github.com/eclipse/paho.mqtt.golang/packets/packets.go  (generated, vendored)
@ -8,9 +8,9 @@ import (
|
||||
"io"
|
||||
)
|
||||
|
||||
//ControlPacket defines the interface for structs intended to hold
|
||||
//decoded MQTT packets, either from being read or before being
|
||||
//written
|
||||
// ControlPacket defines the interface for structs intended to hold
|
||||
// decoded MQTT packets, either from being read or before being
|
||||
// written
|
||||
type ControlPacket interface {
|
||||
Write(io.Writer) error
|
||||
Unpack(io.Reader) error
|
||||
@ -18,8 +18,8 @@ type ControlPacket interface {
|
||||
Details() Details
|
||||
}
|
||||
|
||||
//PacketNames maps the constants for each of the MQTT packet types
|
||||
//to a string representation of their name.
|
||||
// PacketNames maps the constants for each of the MQTT packet types
|
||||
// to a string representation of their name.
|
||||
var PacketNames = map[uint8]string{
|
||||
1: "CONNECT",
|
||||
2: "CONNACK",
|
||||
@ -37,7 +37,7 @@ var PacketNames = map[uint8]string{
|
||||
14: "DISCONNECT",
|
||||
}
|
||||
|
||||
//Below are the constants assigned to each of the MQTT packet types
|
||||
// Below are the constants assigned to each of the MQTT packet types
|
||||
const (
|
||||
Connect = 1
|
||||
Connack = 2
|
||||
@ -55,8 +55,8 @@ const (
|
||||
Disconnect = 14
|
||||
)
|
||||
|
||||
//Below are the const definitions for error codes returned by
|
||||
//Connect()
|
||||
// Below are the const definitions for error codes returned by
|
||||
// Connect()
|
||||
const (
|
||||
Accepted = 0x00
|
||||
ErrRefusedBadProtocolVersion = 0x01
|
||||
@ -68,8 +68,8 @@ const (
|
||||
ErrProtocolViolation = 0xFF
|
||||
)
|
||||
|
||||
//ConnackReturnCodes is a map of the error codes constants for Connect()
|
||||
//to a string representation of the error
|
||||
// ConnackReturnCodes is a map of the error codes constants for Connect()
|
||||
// to a string representation of the error
|
||||
var ConnackReturnCodes = map[uint8]string{
|
||||
0: "Connection Accepted",
|
||||
1: "Connection Refused: Bad Protocol Version",
|
||||
@ -81,23 +81,23 @@ var ConnackReturnCodes = map[uint8]string{
|
||||
255: "Connection Refused: Protocol Violation",
|
||||
}
|
||||
|
||||
//ConnErrors is a map of the errors codes constants for Connect()
|
||||
//to a Go error
|
||||
// ConnErrors is a map of the errors codes constants for Connect()
|
||||
// to a Go error
|
||||
var ConnErrors = map[byte]error{
|
||||
Accepted: nil,
|
||||
ErrRefusedBadProtocolVersion: errors.New("Unnacceptable protocol version"),
|
||||
ErrRefusedIDRejected: errors.New("Identifier rejected"),
|
||||
ErrRefusedServerUnavailable: errors.New("Server Unavailable"),
|
||||
ErrRefusedBadUsernameOrPassword: errors.New("Bad user name or password"),
|
||||
ErrRefusedNotAuthorised: errors.New("Not Authorized"),
|
||||
ErrNetworkError: errors.New("Network Error"),
|
||||
ErrProtocolViolation: errors.New("Protocol Violation"),
|
||||
ErrRefusedBadProtocolVersion: errors.New("unacceptable protocol version"),
|
||||
ErrRefusedIDRejected: errors.New("identifier rejected"),
|
||||
ErrRefusedServerUnavailable: errors.New("server Unavailable"),
|
||||
ErrRefusedBadUsernameOrPassword: errors.New("bad user name or password"),
|
||||
ErrRefusedNotAuthorised: errors.New("not Authorized"),
|
||||
ErrNetworkError: errors.New("network Error"),
|
||||
ErrProtocolViolation: errors.New("protocol Violation"),
|
||||
}
|
||||
|
||||
//ReadPacket takes an instance of an io.Reader (such as net.Conn) and attempts
|
||||
//to read an MQTT packet from the stream. It returns a ControlPacket
|
||||
//representing the decoded MQTT packet and an error. One of these returns will
|
||||
//always be nil, a nil ControlPacket indicating an error occurred.
|
||||
// ReadPacket takes an instance of an io.Reader (such as net.Conn) and attempts
|
||||
// to read an MQTT packet from the stream. It returns a ControlPacket
|
||||
// representing the decoded MQTT packet and an error. One of these returns will
|
||||
// always be nil, a nil ControlPacket indicating an error occurred.
|
||||
func ReadPacket(r io.Reader) (ControlPacket, error) {
|
||||
var fh FixedHeader
|
||||
b := make([]byte, 1)
|
||||
@ -123,17 +123,17 @@ func ReadPacket(r io.Reader) (ControlPacket, error) {
|
||||
return nil, err
|
||||
}
|
||||
if n != fh.RemainingLength {
|
||||
return nil, errors.New("Failed to read expected data")
|
||||
return nil, errors.New("failed to read expected data")
|
||||
}
|
||||
|
||||
err = cp.Unpack(bytes.NewBuffer(packetBytes))
|
||||
return cp, err
|
||||
}
|
||||
|
||||
//NewControlPacket is used to create a new ControlPacket of the type specified
|
||||
//by packetType, this is usually done by reference to the packet type constants
|
||||
//defined in packets.go. The newly created ControlPacket is empty and a pointer
|
||||
//is returned.
|
||||
// NewControlPacket is used to create a new ControlPacket of the type specified
|
||||
// by packetType, this is usually done by reference to the packet type constants
|
||||
// defined in packets.go. The newly created ControlPacket is empty and a pointer
|
||||
// is returned.
|
||||
func NewControlPacket(packetType byte) ControlPacket {
|
||||
switch packetType {
|
||||
case Connect:
|
||||
@ -168,9 +168,9 @@ func NewControlPacket(packetType byte) ControlPacket {
|
||||
return nil
|
||||
}
|
||||
|
||||
//NewControlPacketWithHeader is used to create a new ControlPacket of the type
|
||||
//specified within the FixedHeader that is passed to the function.
|
||||
//The newly created ControlPacket is empty and a pointer is returned.
|
||||
// NewControlPacketWithHeader is used to create a new ControlPacket of the type
|
||||
// specified within the FixedHeader that is passed to the function.
|
||||
// The newly created ControlPacket is empty and a pointer is returned.
|
||||
func NewControlPacketWithHeader(fh FixedHeader) (ControlPacket, error) {
|
||||
switch fh.MessageType {
|
||||
case Connect:
|
||||
@ -205,16 +205,16 @@ func NewControlPacketWithHeader(fh FixedHeader) (ControlPacket, error) {
|
||||
return nil, fmt.Errorf("unsupported packet type 0x%x", fh.MessageType)
|
||||
}
|
||||
|
||||
//Details struct returned by the Details() function called on
|
||||
//ControlPackets to present details of the Qos and MessageID
|
||||
//of the ControlPacket
|
||||
// Details struct returned by the Details() function called on
|
||||
// ControlPackets to present details of the Qos and MessageID
|
||||
// of the ControlPacket
|
||||
type Details struct {
|
||||
Qos byte
|
||||
MessageID uint16
|
||||
}
|
||||
|
||||
//FixedHeader is a struct to hold the decoded information from
|
||||
//the fixed header of an MQTT ControlPacket
|
||||
// FixedHeader is a struct to hold the decoded information from
|
||||
// the fixed header of an MQTT ControlPacket
|
||||
type FixedHeader struct {
|
||||
MessageType byte
|
||||
Dup bool
|
||||
@ -274,9 +274,9 @@ func decodeUint16(b io.Reader) (uint16, error) {
|
||||
}
|
||||
|
||||
func encodeUint16(num uint16) []byte {
|
||||
bytes := make([]byte, 2)
|
||||
binary.BigEndian.PutUint16(bytes, num)
|
||||
return bytes
|
||||
bytesResult := make([]byte, 2)
|
||||
binary.BigEndian.PutUint16(bytesResult, num)
|
||||
return bytesResult
|
||||
}
|
||||
|
||||
func encodeString(field string) []byte {
|
||||
@ -329,7 +329,7 @@ func decodeLength(r io.Reader) (int, error) {
|
||||
var rLength uint32
|
||||
var multiplier uint32
|
||||
b := make([]byte, 1)
|
||||
for multiplier < 27 { //fix: Infinite '(digit & 128) == 1' will cause the dead loop
|
||||
for multiplier < 27 { // fix: Infinite '(digit & 128) == 1' will cause the dead loop
|
||||
_, err := io.ReadFull(r, b)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
|
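packets.ReadPacket and the per-packet Write methods shown in packets.go above are symmetric, so a packet serialised to a buffer can be decoded straight back; as the comment notes, exactly one of ReadPacket's two return values is nil. A hedged round-trip sketch with a QoS 1 publish (topic and payload are placeholders):

package main

import (
    "bytes"
    "fmt"

    "github.com/eclipse/paho.mqtt.golang/packets"
)

func main() {
    // Build a QoS 1 publish and serialise it to the MQTT wire format.
    pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
    pub.TopicName = "example/topic"
    pub.Qos = 1
    pub.MessageID = 7
    pub.Payload = []byte("hello")

    var buf bytes.Buffer
    if err := pub.Write(&buf); err != nil {
        fmt.Println("write failed:", err)
        return
    }

    // Decode it back from the buffer, exactly as the client does from net.Conn.
    cp, err := packets.ReadPacket(&buf)
    if err != nil {
        fmt.Println("read failed:", err)
        return
    }
    decoded := cp.(*packets.PublishPacket)
    fmt.Printf("topic=%s id=%d payload=%s\n", decoded.TopicName, decoded.MessageID, decoded.Payload)
}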
16  vendor/github.com/eclipse/paho.mqtt.golang/packets/pingreq.go  (generated, vendored)
@ -1,19 +1,17 @@
|
||||
package packets
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
//PingreqPacket is an internal representation of the fields of the
|
||||
//Pingreq MQTT packet
|
||||
// PingreqPacket is an internal representation of the fields of the
|
||||
// Pingreq MQTT packet
|
||||
type PingreqPacket struct {
|
||||
FixedHeader
|
||||
}
|
||||
|
||||
func (pr *PingreqPacket) String() string {
|
||||
str := fmt.Sprintf("%s", pr.FixedHeader)
|
||||
return str
|
||||
return pr.FixedHeader.String()
|
||||
}
|
||||
|
||||
func (pr *PingreqPacket) Write(w io.Writer) error {
|
||||
@ -23,14 +21,14 @@ func (pr *PingreqPacket) Write(w io.Writer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
//Unpack decodes the details of a ControlPacket after the fixed
|
||||
//header has been read
|
||||
// Unpack decodes the details of a ControlPacket after the fixed
|
||||
// header has been read
|
||||
func (pr *PingreqPacket) Unpack(b io.Reader) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
//Details returns a Details struct containing the Qos and
|
||||
//MessageID of this ControlPacket
|
||||
// Details returns a Details struct containing the Qos and
|
||||
// MessageID of this ControlPacket
|
||||
func (pr *PingreqPacket) Details() Details {
|
||||
return Details{Qos: 0, MessageID: 0}
|
||||
}
|
||||
|
16  vendor/github.com/eclipse/paho.mqtt.golang/packets/pingresp.go  (generated, vendored)
@ -1,19 +1,17 @@
|
||||
package packets
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
//PingrespPacket is an internal representation of the fields of the
|
||||
//Pingresp MQTT packet
|
||||
// PingrespPacket is an internal representation of the fields of the
|
||||
// Pingresp MQTT packet
|
||||
type PingrespPacket struct {
|
||||
FixedHeader
|
||||
}
|
||||
|
||||
func (pr *PingrespPacket) String() string {
|
||||
str := fmt.Sprintf("%s", pr.FixedHeader)
|
||||
return str
|
||||
return pr.FixedHeader.String()
|
||||
}
|
||||
|
||||
func (pr *PingrespPacket) Write(w io.Writer) error {
|
||||
@ -23,14 +21,14 @@ func (pr *PingrespPacket) Write(w io.Writer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
//Unpack decodes the details of a ControlPacket after the fixed
|
||||
//header has been read
|
||||
// Unpack decodes the details of a ControlPacket after the fixed
|
||||
// header has been read
|
||||
func (pr *PingrespPacket) Unpack(b io.Reader) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
//Details returns a Details struct containing the Qos and
|
||||
//MessageID of this ControlPacket
|
||||
// Details returns a Details struct containing the Qos and
|
||||
// MessageID of this ControlPacket
|
||||
func (pr *PingrespPacket) Details() Details {
|
||||
return Details{Qos: 0, MessageID: 0}
|
||||
}
|
||||
|
17  vendor/github.com/eclipse/paho.mqtt.golang/packets/puback.go  (generated, vendored)
@ -5,18 +5,15 @@ import (
|
||||
"io"
|
||||
)
|
||||
|
||||
//PubackPacket is an internal representation of the fields of the
|
||||
//Puback MQTT packet
|
||||
// PubackPacket is an internal representation of the fields of the
|
||||
// Puback MQTT packet
|
||||
type PubackPacket struct {
|
||||
FixedHeader
|
||||
MessageID uint16
|
||||
}
|
||||
|
||||
func (pa *PubackPacket) String() string {
|
||||
str := fmt.Sprintf("%s", pa.FixedHeader)
|
||||
str += " "
|
||||
str += fmt.Sprintf("MessageID: %d", pa.MessageID)
|
||||
return str
|
||||
return fmt.Sprintf("%s MessageID: %d", pa.FixedHeader, pa.MessageID)
|
||||
}
|
||||
|
||||
func (pa *PubackPacket) Write(w io.Writer) error {
|
||||
@ -29,8 +26,8 @@ func (pa *PubackPacket) Write(w io.Writer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
//Unpack decodes the details of a ControlPacket after the fixed
|
||||
//header has been read
|
||||
// Unpack decodes the details of a ControlPacket after the fixed
|
||||
// header has been read
|
||||
func (pa *PubackPacket) Unpack(b io.Reader) error {
|
||||
var err error
|
||||
pa.MessageID, err = decodeUint16(b)
|
||||
@ -38,8 +35,8 @@ func (pa *PubackPacket) Unpack(b io.Reader) error {
|
||||
return err
|
||||
}
|
||||
|
||||
//Details returns a Details struct containing the Qos and
|
||||
//MessageID of this ControlPacket
|
||||
// Details returns a Details struct containing the Qos and
|
||||
// MessageID of this ControlPacket
|
||||
func (pa *PubackPacket) Details() Details {
|
||||
return Details{Qos: pa.Qos, MessageID: pa.MessageID}
|
||||
}
|
||||
|
17  vendor/github.com/eclipse/paho.mqtt.golang/packets/pubcomp.go  (generated, vendored)
@ -5,18 +5,15 @@ import (
|
||||
"io"
|
||||
)
|
||||
|
||||
//PubcompPacket is an internal representation of the fields of the
|
||||
//Pubcomp MQTT packet
|
||||
// PubcompPacket is an internal representation of the fields of the
|
||||
// Pubcomp MQTT packet
|
||||
type PubcompPacket struct {
|
||||
FixedHeader
|
||||
MessageID uint16
|
||||
}
|
||||
|
||||
func (pc *PubcompPacket) String() string {
|
||||
str := fmt.Sprintf("%s", pc.FixedHeader)
|
||||
str += " "
|
||||
str += fmt.Sprintf("MessageID: %d", pc.MessageID)
|
||||
return str
|
||||
return fmt.Sprintf("%s MessageID: %d", pc.FixedHeader, pc.MessageID)
|
||||
}
|
||||
|
||||
func (pc *PubcompPacket) Write(w io.Writer) error {
|
||||
@ -29,8 +26,8 @@ func (pc *PubcompPacket) Write(w io.Writer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
//Unpack decodes the details of a ControlPacket after the fixed
|
||||
//header has been read
|
||||
// Unpack decodes the details of a ControlPacket after the fixed
|
||||
// header has been read
|
||||
func (pc *PubcompPacket) Unpack(b io.Reader) error {
|
||||
var err error
|
||||
pc.MessageID, err = decodeUint16(b)
|
||||
@ -38,8 +35,8 @@ func (pc *PubcompPacket) Unpack(b io.Reader) error {
|
||||
return err
|
||||
}
|
||||
|
||||
//Details returns a Details struct containing the Qos and
|
||||
//MessageID of this ControlPacket
|
||||
// Details returns a Details struct containing the Qos and
|
||||
// MessageID of this ControlPacket
|
||||
func (pc *PubcompPacket) Details() Details {
|
||||
return Details{Qos: pc.Qos, MessageID: pc.MessageID}
|
||||
}
|
||||
|
29
vendor/github.com/eclipse/paho.mqtt.golang/packets/publish.go
generated
vendored
@ -6,8 +6,8 @@ import (
|
||||
"io"
|
||||
)
|
||||
|
||||
//PublishPacket is an internal representation of the fields of the
|
||||
//Publish MQTT packet
|
||||
// PublishPacket is an internal representation of the fields of the
|
||||
// Publish MQTT packet
|
||||
type PublishPacket struct {
|
||||
FixedHeader
|
||||
TopicName string
|
||||
@ -16,12 +16,7 @@ type PublishPacket struct {
|
||||
}
|
||||
|
||||
func (p *PublishPacket) String() string {
|
||||
str := fmt.Sprintf("%s", p.FixedHeader)
|
||||
str += " "
|
||||
str += fmt.Sprintf("topicName: %s MessageID: %d", p.TopicName, p.MessageID)
|
||||
str += " "
|
||||
str += fmt.Sprintf("payload: %s", string(p.Payload))
|
||||
return str
|
||||
return fmt.Sprintf("%s topicName: %s MessageID: %d payload: %s", p.FixedHeader, p.TopicName, p.MessageID, string(p.Payload))
|
||||
}
|
||||
|
||||
func (p *PublishPacket) Write(w io.Writer) error {
|
||||
@ -41,8 +36,8 @@ func (p *PublishPacket) Write(w io.Writer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
//Unpack decodes the details of a ControlPacket after the fixed
|
||||
//header has been read
|
||||
// Unpack decodes the details of a ControlPacket after the fixed
|
||||
// header has been read
|
||||
func (p *PublishPacket) Unpack(b io.Reader) error {
|
||||
var payloadLength = p.FixedHeader.RemainingLength
|
||||
var err error
|
||||
@ -61,7 +56,7 @@ func (p *PublishPacket) Unpack(b io.Reader) error {
|
||||
payloadLength -= len(p.TopicName) + 2
|
||||
}
|
||||
if payloadLength < 0 {
|
||||
return fmt.Errorf("Error unpacking publish, payload length < 0")
|
||||
return fmt.Errorf("error unpacking publish, payload length < 0")
|
||||
}
|
||||
p.Payload = make([]byte, payloadLength)
|
||||
_, err = b.Read(p.Payload)
|
||||
@ -69,10 +64,10 @@ func (p *PublishPacket) Unpack(b io.Reader) error {
|
||||
return err
|
||||
}
|
||||
|
||||
//Copy creates a new PublishPacket with the same topic and payload
|
||||
//but an empty fixed header, useful for when you want to deliver
|
||||
//a message with different properties such as Qos but the same
|
||||
//content
|
||||
// Copy creates a new PublishPacket with the same topic and payload
|
||||
// but an empty fixed header, useful for when you want to deliver
|
||||
// a message with different properties such as Qos but the same
|
||||
// content
|
||||
func (p *PublishPacket) Copy() *PublishPacket {
|
||||
newP := NewControlPacket(Publish).(*PublishPacket)
|
||||
newP.TopicName = p.TopicName
|
||||
@ -81,8 +76,8 @@ func (p *PublishPacket) Copy() *PublishPacket {
|
||||
return newP
|
||||
}
|
||||
|
||||
//Details returns a Details struct containing the Qos and
|
||||
//MessageID of this ControlPacket
|
||||
// Details returns a Details struct containing the Qos and
|
||||
// MessageID of this ControlPacket
|
||||
func (p *PublishPacket) Details() Details {
|
||||
return Details{Qos: p.Qos, MessageID: p.MessageID}
|
||||
}
|
||||
|
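Besides the String() cleanup, the publish packet's error message is lowercased ("Error unpacking publish…" becomes "error unpacking publish…"). This follows the Go convention that error strings are not capitalised, because they are usually wrapped or printed mid-sentence; linters such as staticcheck (check ST1005) flag the capitalised form. A small illustration of why the convention exists:

package main

import (
    "errors"
    "fmt"
)

func main() {
    // Capitalised error strings read badly once wrapped with context.
    bad := errors.New("Error unpacking publish, payload length < 0")
    good := errors.New("error unpacking publish, payload length < 0")

    fmt.Println(fmt.Errorf("read failed: %w", bad))  // read failed: Error unpacking publish, ...
    fmt.Println(fmt.Errorf("read failed: %w", good)) // read failed: error unpacking publish, ...
}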
17
vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrec.go
generated
vendored
@ -5,18 +5,15 @@ import (
|
||||
"io"
|
||||
)
|
||||
|
||||
//PubrecPacket is an internal representation of the fields of the
|
||||
//Pubrec MQTT packet
|
||||
// PubrecPacket is an internal representation of the fields of the
|
||||
// Pubrec MQTT packet
|
||||
type PubrecPacket struct {
|
||||
FixedHeader
|
||||
MessageID uint16
|
||||
}
|
||||
|
||||
func (pr *PubrecPacket) String() string {
|
||||
str := fmt.Sprintf("%s", pr.FixedHeader)
|
||||
str += " "
|
||||
str += fmt.Sprintf("MessageID: %d", pr.MessageID)
|
||||
return str
|
||||
return fmt.Sprintf("%s MessageID: %d", pr.FixedHeader, pr.MessageID)
|
||||
}
|
||||
|
||||
func (pr *PubrecPacket) Write(w io.Writer) error {
|
||||
@ -29,8 +26,8 @@ func (pr *PubrecPacket) Write(w io.Writer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
//Unpack decodes the details of a ControlPacket after the fixed
|
||||
//header has been read
|
||||
// Unpack decodes the details of a ControlPacket after the fixed
|
||||
// header has been read
|
||||
func (pr *PubrecPacket) Unpack(b io.Reader) error {
|
||||
var err error
|
||||
pr.MessageID, err = decodeUint16(b)
|
||||
@ -38,8 +35,8 @@ func (pr *PubrecPacket) Unpack(b io.Reader) error {
|
||||
return err
|
||||
}
|
||||
|
||||
//Details returns a Details struct containing the Qos and
|
||||
//MessageID of this ControlPacket
|
||||
// Details returns a Details struct containing the Qos and
|
||||
// MessageID of this ControlPacket
|
||||
func (pr *PubrecPacket) Details() Details {
|
||||
return Details{Qos: pr.Qos, MessageID: pr.MessageID}
|
||||
}
|
||||
|
17
vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrel.go
generated
vendored
@ -5,18 +5,15 @@ import (
|
||||
"io"
|
||||
)
|
||||
|
||||
//PubrelPacket is an internal representation of the fields of the
|
||||
//Pubrel MQTT packet
|
||||
// PubrelPacket is an internal representation of the fields of the
|
||||
// Pubrel MQTT packet
|
||||
type PubrelPacket struct {
|
||||
FixedHeader
|
||||
MessageID uint16
|
||||
}
|
||||
|
||||
func (pr *PubrelPacket) String() string {
|
||||
str := fmt.Sprintf("%s", pr.FixedHeader)
|
||||
str += " "
|
||||
str += fmt.Sprintf("MessageID: %d", pr.MessageID)
|
||||
return str
|
||||
return fmt.Sprintf("%s MessageID: %d", pr.FixedHeader, pr.MessageID)
|
||||
}
|
||||
|
||||
func (pr *PubrelPacket) Write(w io.Writer) error {
|
||||
@ -29,8 +26,8 @@ func (pr *PubrelPacket) Write(w io.Writer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
//Unpack decodes the details of a ControlPacket after the fixed
|
||||
//header has been read
|
||||
// Unpack decodes the details of a ControlPacket after the fixed
|
||||
// header has been read
|
||||
func (pr *PubrelPacket) Unpack(b io.Reader) error {
|
||||
var err error
|
||||
pr.MessageID, err = decodeUint16(b)
|
||||
@ -38,8 +35,8 @@ func (pr *PubrelPacket) Unpack(b io.Reader) error {
|
||||
return err
|
||||
}
|
||||
|
||||
//Details returns a Details struct containing the Qos and
|
||||
//MessageID of this ControlPacket
|
||||
// Details returns a Details struct containing the Qos and
|
||||
// MessageID of this ControlPacket
|
||||
func (pr *PubrelPacket) Details() Details {
|
||||
return Details{Qos: pr.Qos, MessageID: pr.MessageID}
|
||||
}
|
||||
|
17
vendor/github.com/eclipse/paho.mqtt.golang/packets/suback.go
generated
vendored
@ -6,8 +6,8 @@ import (
|
||||
"io"
|
||||
)
|
||||
|
||||
//SubackPacket is an internal representation of the fields of the
|
||||
//Suback MQTT packet
|
||||
// SubackPacket is an internal representation of the fields of the
|
||||
// Suback MQTT packet
|
||||
type SubackPacket struct {
|
||||
FixedHeader
|
||||
MessageID uint16
|
||||
@ -15,10 +15,7 @@ type SubackPacket struct {
|
||||
}
|
||||
|
||||
func (sa *SubackPacket) String() string {
|
||||
str := fmt.Sprintf("%s", sa.FixedHeader)
|
||||
str += " "
|
||||
str += fmt.Sprintf("MessageID: %d", sa.MessageID)
|
||||
return str
|
||||
return fmt.Sprintf("%s MessageID: %d", sa.FixedHeader, sa.MessageID)
|
||||
}
|
||||
|
||||
func (sa *SubackPacket) Write(w io.Writer) error {
|
||||
@ -34,8 +31,8 @@ func (sa *SubackPacket) Write(w io.Writer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
//Unpack decodes the details of a ControlPacket after the fixed
|
||||
//header has been read
|
||||
// Unpack decodes the details of a ControlPacket after the fixed
|
||||
// header has been read
|
||||
func (sa *SubackPacket) Unpack(b io.Reader) error {
|
||||
var qosBuffer bytes.Buffer
|
||||
var err error
|
||||
@ -53,8 +50,8 @@ func (sa *SubackPacket) Unpack(b io.Reader) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
//Details returns a Details struct containing the Qos and
|
||||
//MessageID of this ControlPacket
|
||||
// Details returns a Details struct containing the Qos and
|
||||
// MessageID of this ControlPacket
|
||||
func (sa *SubackPacket) Details() Details {
|
||||
return Details{Qos: 0, MessageID: sa.MessageID}
|
||||
}
|
||||
|
19
vendor/github.com/eclipse/paho.mqtt.golang/packets/subscribe.go
generated
vendored
@ -6,8 +6,8 @@ import (
|
||||
"io"
|
||||
)
|
||||
|
||||
//SubscribePacket is an internal representation of the fields of the
|
||||
//Subscribe MQTT packet
|
||||
// SubscribePacket is an internal representation of the fields of the
|
||||
// Subscribe MQTT packet
|
||||
type SubscribePacket struct {
|
||||
FixedHeader
|
||||
MessageID uint16
|
||||
@ -16,10 +16,7 @@ type SubscribePacket struct {
|
||||
}
|
||||
|
||||
func (s *SubscribePacket) String() string {
|
||||
str := fmt.Sprintf("%s", s.FixedHeader)
|
||||
str += " "
|
||||
str += fmt.Sprintf("MessageID: %d topics: %s", s.MessageID, s.Topics)
|
||||
return str
|
||||
return fmt.Sprintf("%s MessageID: %d topics: %s", s.FixedHeader, s.MessageID, s.Topics)
|
||||
}
|
||||
|
||||
func (s *SubscribePacket) Write(w io.Writer) error {
|
||||
@ -39,8 +36,8 @@ func (s *SubscribePacket) Write(w io.Writer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
//Unpack decodes the details of a ControlPacket after the fixed
|
||||
//header has been read
|
||||
// Unpack decodes the details of a ControlPacket after the fixed
|
||||
// header has been read
|
||||
func (s *SubscribePacket) Unpack(b io.Reader) error {
|
||||
var err error
|
||||
s.MessageID, err = decodeUint16(b)
|
||||
@ -59,14 +56,14 @@ func (s *SubscribePacket) Unpack(b io.Reader) error {
|
||||
return err
|
||||
}
|
||||
s.Qoss = append(s.Qoss, qos)
|
||||
payloadLength -= 2 + len(topic) + 1 //2 bytes of string length, plus string, plus 1 byte for Qos
|
||||
payloadLength -= 2 + len(topic) + 1 // 2 bytes of string length, plus string, plus 1 byte for Qos
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
//Details returns a Details struct containing the Qos and
|
||||
//MessageID of this ControlPacket
|
||||
// Details returns a Details struct containing the Qos and
|
||||
// MessageID of this ControlPacket
|
||||
func (s *SubscribePacket) Details() Details {
|
||||
return Details{Qos: 1, MessageID: s.MessageID}
|
||||
}
|
||||
|
17
vendor/github.com/eclipse/paho.mqtt.golang/packets/unsuback.go
generated
vendored
@ -5,18 +5,15 @@ import (
|
||||
"io"
|
||||
)
|
||||
|
||||
//UnsubackPacket is an internal representation of the fields of the
|
||||
//Unsuback MQTT packet
|
||||
// UnsubackPacket is an internal representation of the fields of the
|
||||
// Unsuback MQTT packet
|
||||
type UnsubackPacket struct {
|
||||
FixedHeader
|
||||
MessageID uint16
|
||||
}
|
||||
|
||||
func (ua *UnsubackPacket) String() string {
|
||||
str := fmt.Sprintf("%s", ua.FixedHeader)
|
||||
str += " "
|
||||
str += fmt.Sprintf("MessageID: %d", ua.MessageID)
|
||||
return str
|
||||
return fmt.Sprintf("%s MessageID: %d", ua.FixedHeader, ua.MessageID)
|
||||
}
|
||||
|
||||
func (ua *UnsubackPacket) Write(w io.Writer) error {
|
||||
@ -29,8 +26,8 @@ func (ua *UnsubackPacket) Write(w io.Writer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
//Unpack decodes the details of a ControlPacket after the fixed
|
||||
//header has been read
|
||||
// Unpack decodes the details of a ControlPacket after the fixed
|
||||
// header has been read
|
||||
func (ua *UnsubackPacket) Unpack(b io.Reader) error {
|
||||
var err error
|
||||
ua.MessageID, err = decodeUint16(b)
|
||||
@ -38,8 +35,8 @@ func (ua *UnsubackPacket) Unpack(b io.Reader) error {
|
||||
return err
|
||||
}
|
||||
|
||||
//Details returns a Details struct containing the Qos and
|
||||
//MessageID of this ControlPacket
|
||||
// Details returns a Details struct containing the Qos and
|
||||
// MessageID of this ControlPacket
|
||||
func (ua *UnsubackPacket) Details() Details {
|
||||
return Details{Qos: 0, MessageID: ua.MessageID}
|
||||
}
|
||||
|
17
vendor/github.com/eclipse/paho.mqtt.golang/packets/unsubscribe.go
generated
vendored
@ -6,8 +6,8 @@ import (
|
||||
"io"
|
||||
)
|
||||
|
||||
//UnsubscribePacket is an internal representation of the fields of the
|
||||
//Unsubscribe MQTT packet
|
||||
// UnsubscribePacket is an internal representation of the fields of the
|
||||
// Unsubscribe MQTT packet
|
||||
type UnsubscribePacket struct {
|
||||
FixedHeader
|
||||
MessageID uint16
|
||||
@ -15,10 +15,7 @@ type UnsubscribePacket struct {
|
||||
}
|
||||
|
||||
func (u *UnsubscribePacket) String() string {
|
||||
str := fmt.Sprintf("%s", u.FixedHeader)
|
||||
str += " "
|
||||
str += fmt.Sprintf("MessageID: %d", u.MessageID)
|
||||
return str
|
||||
return fmt.Sprintf("%s MessageID: %d", u.FixedHeader, u.MessageID)
|
||||
}
|
||||
|
||||
func (u *UnsubscribePacket) Write(w io.Writer) error {
|
||||
@ -36,8 +33,8 @@ func (u *UnsubscribePacket) Write(w io.Writer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
//Unpack decodes the details of a ControlPacket after the fixed
|
||||
//header has been read
|
||||
// Unpack decodes the details of a ControlPacket after the fixed
|
||||
// header has been read
|
||||
func (u *UnsubscribePacket) Unpack(b io.Reader) error {
|
||||
var err error
|
||||
u.MessageID, err = decodeUint16(b)
|
||||
@ -52,8 +49,8 @@ func (u *UnsubscribePacket) Unpack(b io.Reader) error {
|
||||
return err
|
||||
}
|
||||
|
||||
//Details returns a Details struct containing the Qos and
|
||||
//MessageID of this ControlPacket
|
||||
// Details returns a Details struct containing the Qos and
|
||||
// MessageID of this ControlPacket
|
||||
func (u *UnsubscribePacket) Details() Details {
|
||||
return Details{Qos: 1, MessageID: u.MessageID}
|
||||
}
|
||||
|
17
vendor/github.com/eclipse/paho.mqtt.golang/ping.go
generated
vendored
@ -16,13 +16,16 @@ package mqtt
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/eclipse/paho.mqtt.golang/packets"
|
||||
)
|
||||
|
||||
func keepalive(c *client) {
|
||||
// keepalive - Send ping when connection unused for set period
|
||||
// connection passed in to avoid race condition on shutdown
|
||||
func keepalive(c *client, conn io.Writer) {
|
||||
defer c.workers.Done()
|
||||
DEBUG.Println(PNG, "keepalive starting")
|
||||
var checkInterval int64
|
||||
@ -51,17 +54,19 @@ func keepalive(c *client) {
|
||||
if atomic.LoadInt32(&c.pingOutstanding) == 0 {
|
||||
DEBUG.Println(PNG, "keepalive sending ping")
|
||||
ping := packets.NewControlPacket(packets.Pingreq).(*packets.PingreqPacket)
|
||||
//We don't want to wait behind large messages being sent, the Write call
|
||||
//will block until it it able to send the packet.
|
||||
// We don't want to wait behind large messages being sent, the Write call
|
||||
// will block until it it able to send the packet.
|
||||
atomic.StoreInt32(&c.pingOutstanding, 1)
|
||||
ping.Write(c.conn)
|
||||
if err := ping.Write(conn); err != nil {
|
||||
ERROR.Println(PNG, err)
|
||||
}
|
||||
c.lastSent.Store(time.Now())
|
||||
pingSent = time.Now()
|
||||
}
|
||||
}
|
||||
if atomic.LoadInt32(&c.pingOutstanding) > 0 && time.Now().Sub(pingSent) >= c.options.PingTimeout {
|
||||
if atomic.LoadInt32(&c.pingOutstanding) > 0 && time.Since(pingSent) >= c.options.PingTimeout {
|
||||
CRITICAL.Println(PNG, "pingresp not received, disconnecting")
|
||||
c.errors <- errors.New("pingresp not received, disconnecting")
|
||||
c.internalConnLost(errors.New("pingresp not received, disconnecting")) // no harm in calling this if the connection is already down (or shutdown is in progress)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
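Two small changes stand out in keepalive: the write target is passed in as an io.Writer and the return value of ping.Write is now checked, and time.Now().Sub(pingSent) is replaced by the equivalent but more idiomatic time.Since(pingSent). A minimal sketch of the time.Since idiom, with no paho types involved and an illustrative timeout value:

package main

import (
    "fmt"
    "time"
)

func main() {
    pingSent := time.Now()
    time.Sleep(10 * time.Millisecond)

    // time.Since(t) is shorthand for time.Now().Sub(t); the shorter form
    // is the one the upgraded vendored code uses.
    elapsed := time.Since(pingSent)

    pingTimeout := 5 * time.Millisecond
    if elapsed >= pingTimeout {
        fmt.Println("pingresp not received in time, would disconnect")
    }
}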
85
vendor/github.com/eclipse/paho.mqtt.golang/router.go
generated
vendored
@ -37,17 +37,11 @@ type route struct {
|
||||
// and returns a boolean of the outcome
|
||||
func match(route []string, topic []string) bool {
|
||||
if len(route) == 0 {
|
||||
if len(topic) == 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
return len(topic) == 0
|
||||
}
|
||||
|
||||
if len(topic) == 0 {
|
||||
if route[0] == "#" {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
return route[0] == "#"
|
||||
}
|
||||
|
||||
if route[0] == "#" {
|
||||
@ -87,15 +81,13 @@ type router struct {
|
||||
routes *list.List
|
||||
defaultHandler MessageHandler
|
||||
messages chan *packets.PublishPacket
|
||||
stop chan bool
|
||||
}
|
||||
|
||||
// newRouter returns a new instance of a Router and channel which can be used to tell the Router
|
||||
// to stop
|
||||
func newRouter() (*router, chan bool) {
|
||||
router := &router{routes: list.New(), messages: make(chan *packets.PublishPacket), stop: make(chan bool)}
|
||||
stop := router.stop
|
||||
return router, stop
|
||||
func newRouter() *router {
|
||||
router := &router{routes: list.New(), messages: make(chan *packets.PublishPacket)}
|
||||
return router
|
||||
}
|
||||
|
||||
// addRoute takes a topic string and MessageHandler callback. It looks in the current list of
|
||||
@ -105,7 +97,7 @@ func (r *router) addRoute(topic string, callback MessageHandler) {
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
for e := r.routes.Front(); e != nil; e = e.Next() {
|
||||
if e.Value.(*route).match(topic) {
|
||||
if e.Value.(*route).topic == topic {
|
||||
r := e.Value.(*route)
|
||||
r.callback = callback
|
||||
return
|
||||
@ -120,7 +112,7 @@ func (r *router) deleteRoute(topic string) {
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
for e := r.routes.Front(); e != nil; e = e.Next() {
|
||||
if e.Value.(*route).match(topic) {
|
||||
if e.Value.(*route).topic == topic {
|
||||
r.routes.Remove(e)
|
||||
return
|
||||
}
|
||||
@ -139,30 +131,31 @@ func (r *router) setDefaultHandler(handler MessageHandler) {
|
||||
// takes messages off the channel, matches them against the internal route list and calls the
|
||||
// associated callback (or the defaultHandler, if one exists and no other route matched). If
|
||||
// anything is sent down the stop channel the function will end.
|
||||
func (r *router) matchAndDispatch(messages <-chan *packets.PublishPacket, order bool, client *client) {
|
||||
func (r *router) matchAndDispatch(messages <-chan *packets.PublishPacket, order bool, client *client) <-chan *PacketAndToken {
|
||||
ackChan := make(chan *PacketAndToken)
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case message := <-messages:
|
||||
sent := false
|
||||
r.RLock()
|
||||
m := messageFromPublish(message, client.ackFunc(message))
|
||||
handlers := []MessageHandler{}
|
||||
for e := r.routes.Front(); e != nil; e = e.Next() {
|
||||
if e.Value.(*route).match(message.TopicName) {
|
||||
if order {
|
||||
handlers = append(handlers, e.Value.(*route).callback)
|
||||
} else {
|
||||
hd := e.Value.(*route).callback
|
||||
go func() {
|
||||
hd(client, m)
|
||||
m.Ack()
|
||||
}()
|
||||
}
|
||||
sent = true
|
||||
for message := range messages {
|
||||
// DEBUG.Println(ROU, "matchAndDispatch received message")
|
||||
sent := false
|
||||
r.RLock()
|
||||
m := messageFromPublish(message, ackFunc(ackChan, client.persist, message))
|
||||
var handlers []MessageHandler
|
||||
for e := r.routes.Front(); e != nil; e = e.Next() {
|
||||
if e.Value.(*route).match(message.TopicName) {
|
||||
if order {
|
||||
handlers = append(handlers, e.Value.(*route).callback)
|
||||
} else {
|
||||
hd := e.Value.(*route).callback
|
||||
go func() {
|
||||
hd(client, m)
|
||||
m.Ack()
|
||||
}()
|
||||
}
|
||||
sent = true
|
||||
}
|
||||
if !sent && r.defaultHandler != nil {
|
||||
}
|
||||
if !sent {
|
||||
if r.defaultHandler != nil {
|
||||
if order {
|
||||
handlers = append(handlers, r.defaultHandler)
|
||||
} else {
|
||||
@ -171,17 +164,19 @@ func (r *router) matchAndDispatch(messages <-chan *packets.PublishPacket, order
|
||||
m.Ack()
|
||||
}()
|
||||
}
|
||||
} else {
|
||||
DEBUG.Println(ROU, "matchAndDispatch received message and no handler was available. Message will NOT be acknowledged.")
|
||||
}
|
||||
r.RUnlock()
|
||||
for _, handler := range handlers {
|
||||
func() {
|
||||
handler(client, m)
|
||||
m.Ack()
|
||||
}()
|
||||
}
|
||||
case <-r.stop:
|
||||
return
|
||||
}
|
||||
r.RUnlock()
|
||||
for _, handler := range handlers {
|
||||
handler(client, m)
|
||||
m.Ack()
|
||||
}
|
||||
// DEBUG.Println(ROU, "matchAndDispatch handled message")
|
||||
}
|
||||
close(ackChan)
|
||||
DEBUG.Println(ROU, "matchAndDispatch exiting")
|
||||
}()
|
||||
return ackChan
|
||||
}
|
||||
|
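The router refactor replaces the explicit stop channel with the usual Go pipeline shape: matchAndDispatch now ranges over the incoming message channel until the sender closes it, and returns its own output channel (ackChan), which it closes on exit. A generic sketch of that pattern, independent of the paho types (names are illustrative):

package main

import "fmt"

// dispatch consumes in until it is closed and returns an output channel that
// is closed when the worker goroutine exits, mirroring the shape of the
// refactored matchAndDispatch.
func dispatch(in <-chan string) <-chan string {
    out := make(chan string)
    go func() {
        defer close(out) // completion signal instead of a separate stop channel
        for msg := range in {
            out <- "handled: " + msg
        }
    }()
    return out
}

func main() {
    in := make(chan string)
    out := dispatch(in)
    go func() {
        in <- "sensor/frame"
        close(in) // closing the input shuts the worker down
    }()
    for r := range out {
        fmt.Println(r)
    }
}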
36
vendor/github.com/eclipse/paho.mqtt.golang/token.go
generated
vendored
@ -31,8 +31,24 @@ type PacketAndToken struct {
|
||||
// Token defines the interface for the tokens used to indicate when
|
||||
// actions have completed.
|
||||
type Token interface {
|
||||
// Wait will wait indefinitely for the Token to complete, ie the Publish
|
||||
// to be sent and confirmed receipt from the broker.
|
||||
Wait() bool
|
||||
|
||||
// WaitTimeout takes a time.Duration to wait for the flow associated with the
|
||||
// Token to complete, returns true if it returned before the timeout or
|
||||
// returns false if the timeout occurred. In the case of a timeout the Token
|
||||
// does not have an error set in case the caller wishes to wait again.
|
||||
WaitTimeout(time.Duration) bool
|
||||
|
||||
// Done returns a channel that is closed when the flow associated
|
||||
// with the Token completes. Clients should call Error after the
|
||||
// channel is closed to check if the flow completed successfully.
|
||||
//
|
||||
// Done is provided for use in select statements. Simple use cases may
|
||||
// use Wait or WaitTimeout.
|
||||
Done() <-chan struct{}
|
||||
|
||||
Error() error
|
||||
}
|
||||
|
||||
@ -52,21 +68,14 @@ type baseToken struct {
|
||||
err error
|
||||
}
|
||||
|
||||
// Wait will wait indefinitely for the Token to complete, ie the Publish
|
||||
// to be sent and confirmed receipt from the broker
|
||||
// Wait implements the Token Wait method.
|
||||
func (b *baseToken) Wait() bool {
|
||||
<-b.complete
|
||||
return true
|
||||
}
|
||||
|
||||
// WaitTimeout takes a time.Duration to wait for the flow associated with the
|
||||
// Token to complete, returns true if it returned before the timeout or
|
||||
// returns false if the timeout occurred. In the case of a timeout the Token
|
||||
// does not have an error set in case the caller wishes to wait again
|
||||
// WaitTimeout implements the Token WaitTimeout method.
|
||||
func (b *baseToken) WaitTimeout(d time.Duration) bool {
|
||||
b.m.Lock()
|
||||
defer b.m.Unlock()
|
||||
|
||||
timer := time.NewTimer(d)
|
||||
select {
|
||||
case <-b.complete:
|
||||
@ -80,6 +89,11 @@ func (b *baseToken) WaitTimeout(d time.Duration) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Done implements the Token Done method.
|
||||
func (b *baseToken) Done() <-chan struct{} {
|
||||
return b.complete
|
||||
}
|
||||
|
||||
func (b *baseToken) flowComplete() {
|
||||
select {
|
||||
case <-b.complete:
|
||||
@ -125,7 +139,7 @@ type ConnectToken struct {
|
||||
sessionPresent bool
|
||||
}
|
||||
|
||||
// ReturnCode returns the acknowlegement code in the connack sent
|
||||
// ReturnCode returns the acknowledgement code in the connack sent
|
||||
// in response to a Connect()
|
||||
func (c *ConnectToken) ReturnCode() byte {
|
||||
c.m.RLock()
|
||||
@ -160,6 +174,7 @@ type SubscribeToken struct {
|
||||
baseToken
|
||||
subs []string
|
||||
subResult map[string]byte
|
||||
messageID uint16
|
||||
}
|
||||
|
||||
// Result returns a map of topics that were subscribed to along with
|
||||
@ -175,6 +190,7 @@ func (s *SubscribeToken) Result() map[string]byte {
|
||||
// required to provide information about calls to Unsubscribe()
|
||||
type UnsubscribeToken struct {
|
||||
baseToken
|
||||
messageID uint16
|
||||
}
|
||||
|
||||
// DisconnectToken is an extension of Token containing the extra fields
|
||||
|
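The new Done() accessor on Token is the notable API addition here: it exposes the completion channel so callers can wait in a select instead of blocking in Wait(). A hedged usage sketch against the documented interface; the broker address, topic and payload are illustrative, and the client construction uses the standard paho options API rather than anything shown in this diff:

package main

import (
    "log"
    "time"

    mqtt "github.com/eclipse/paho.mqtt.golang"
)

func main() {
    opts := mqtt.NewClientOptions().AddBroker("tcp://localhost:1883") // illustrative broker
    c := mqtt.NewClient(opts)
    if t := c.Connect(); t.Wait() && t.Error() != nil {
        log.Fatal(t.Error())
    }

    token := c.Publish("robocar/camera", 0, false, []byte("frame")) // illustrative topic/payload

    // Done() is meant for select statements; Wait/WaitTimeout remain for
    // the simple blocking cases.
    select {
    case <-token.Done():
        if err := token.Error(); err != nil {
            log.Printf("publish failed: %v", err)
        }
    case <-time.After(2 * time.Second):
        log.Println("publish still pending after 2s; not blocking further")
    }
}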
28
vendor/github.com/eclipse/paho.mqtt.golang/topic.go
generated
vendored
@ -19,18 +19,18 @@ import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
//ErrInvalidQos is the error returned when an packet is to be sent
|
||||
//with an invalid Qos value
|
||||
var ErrInvalidQos = errors.New("Invalid QoS")
|
||||
// ErrInvalidQos is the error returned when an packet is to be sent
|
||||
// with an invalid Qos value
|
||||
var ErrInvalidQos = errors.New("invalid QoS")
|
||||
|
||||
//ErrInvalidTopicEmptyString is the error returned when a topic string
|
||||
//is passed in that is 0 length
|
||||
var ErrInvalidTopicEmptyString = errors.New("Invalid Topic; empty string")
|
||||
// ErrInvalidTopicEmptyString is the error returned when a topic string
|
||||
// is passed in that is 0 length
|
||||
var ErrInvalidTopicEmptyString = errors.New("invalid Topic; empty string")
|
||||
|
||||
//ErrInvalidTopicMultilevel is the error returned when a topic string
|
||||
//is passed in that has the multi level wildcard in any position but
|
||||
//the last
|
||||
var ErrInvalidTopicMultilevel = errors.New("Invalid Topic; multi-level wildcard must be last level")
|
||||
// ErrInvalidTopicMultilevel is the error returned when a topic string
|
||||
// is passed in that has the multi level wildcard in any position but
|
||||
// the last
|
||||
var ErrInvalidTopicMultilevel = errors.New("invalid Topic; multi-level wildcard must be last level")
|
||||
|
||||
// Topic Names and Topic Filters
|
||||
// The MQTT v3.1.1 spec clarifies a number of ambiguities with regard
|
||||
@ -46,10 +46,14 @@ var ErrInvalidTopicMultilevel = errors.New("Invalid Topic; multi-level wildcard
|
||||
// - A TopicName may not contain a wildcard.
|
||||
// - A TopicFilter may only have a # (multi-level) wildcard as the last level.
|
||||
// - A TopicFilter may contain any number of + (single-level) wildcards.
|
||||
// - A TopicFilter with a # will match the absense of a level
|
||||
// - A TopicFilter with a # will match the absence of a level
|
||||
// Example: a subscription to "foo/#" will match messages published to "foo".
|
||||
|
||||
func validateSubscribeMap(subs map[string]byte) ([]string, []byte, error) {
|
||||
if len(subs) == 0 {
|
||||
return nil, nil, errors.New("invalid subscription; subscribe map must not be empty")
|
||||
}
|
||||
|
||||
var topics []string
|
||||
var qoss []byte
|
||||
for topic, qos := range subs {
|
||||
@ -75,7 +79,7 @@ func validateTopicAndQos(topic string, qos byte) error {
|
||||
}
|
||||
}
|
||||
|
||||
if qos < 0 || qos > 2 {
|
||||
if qos > 2 {
|
||||
return ErrInvalidQos
|
||||
}
|
||||
return nil
|
||||
|
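The QoS check drops the `qos < 0` half of the condition because qos is a byte, an unsigned type, so it can never be negative; linters flag that comparison as always false. A tiny illustration of the simplified check (function name is illustrative):

package main

import "fmt"

// validateQos mirrors the simplified check in the vendored topic.go:
// with an unsigned byte, only the upper bound needs testing.
func validateQos(qos byte) error {
    if qos > 2 {
        return fmt.Errorf("invalid QoS %d", qos)
    }
    return nil
}

func main() {
    fmt.Println(validateQos(1)) // <nil>
    fmt.Println(validateQos(3)) // invalid QoS 3
}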
2
vendor/github.com/eclipse/paho.mqtt.golang/trace.go
generated
vendored
@ -27,7 +27,7 @@ type (
|
||||
NOOPLogger struct{}
|
||||
)
|
||||
|
||||
func (NOOPLogger) Println(v ...interface{}) {}
|
||||
func (NOOPLogger) Println(v ...interface{}) {}
|
||||
func (NOOPLogger) Printf(format string, v ...interface{}) {}
|
||||
|
||||
// Internal levels of library output that are initialised to not print
|
||||
|
109
vendor/github.com/eclipse/paho.mqtt.golang/websocket.go
generated
vendored
Normal file
@ -0,0 +1,109 @@
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/websocket"
|
||||
)
|
||||
|
||||
// WebsocketOptions are config options for a websocket dialer
|
||||
type WebsocketOptions struct {
|
||||
ReadBufferSize int
|
||||
WriteBufferSize int
|
||||
}
|
||||
|
||||
// NewWebsocket returns a new websocket and returns a net.Conn compatible interface using the gorilla/websocket package
|
||||
func NewWebsocket(host string, tlsc *tls.Config, timeout time.Duration, requestHeader http.Header, options *WebsocketOptions) (net.Conn, error) {
|
||||
if timeout == 0 {
|
||||
timeout = 10 * time.Second
|
||||
}
|
||||
|
||||
if options == nil {
|
||||
// Apply default options
|
||||
options = &WebsocketOptions{}
|
||||
}
|
||||
|
||||
dialer := &websocket.Dialer{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
HandshakeTimeout: timeout,
|
||||
EnableCompression: false,
|
||||
TLSClientConfig: tlsc,
|
||||
Subprotocols: []string{"mqtt"},
|
||||
ReadBufferSize: options.ReadBufferSize,
|
||||
WriteBufferSize: options.WriteBufferSize,
|
||||
}
|
||||
|
||||
ws, _, err := dialer.Dial(host, requestHeader)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
wrapper := &websocketConnector{
|
||||
Conn: ws,
|
||||
}
|
||||
return wrapper, err
|
||||
}
|
||||
|
||||
// websocketConnector is a websocket wrapper so it satisfies the net.Conn interface so it is a
|
||||
// drop in replacement of the golang.org/x/net/websocket package.
|
||||
// Implementation guide taken from https://github.com/gorilla/websocket/issues/282
|
||||
type websocketConnector struct {
|
||||
*websocket.Conn
|
||||
r io.Reader
|
||||
rio sync.Mutex
|
||||
wio sync.Mutex
|
||||
}
|
||||
|
||||
// SetDeadline sets both the read and write deadlines
|
||||
func (c *websocketConnector) SetDeadline(t time.Time) error {
|
||||
if err := c.SetReadDeadline(t); err != nil {
|
||||
return err
|
||||
}
|
||||
err := c.SetWriteDeadline(t)
|
||||
return err
|
||||
}
|
||||
|
||||
// Write writes data to the websocket
|
||||
func (c *websocketConnector) Write(p []byte) (int, error) {
|
||||
c.wio.Lock()
|
||||
defer c.wio.Unlock()
|
||||
|
||||
err := c.WriteMessage(websocket.BinaryMessage, p)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
// Read reads the current websocket frame
|
||||
func (c *websocketConnector) Read(p []byte) (int, error) {
|
||||
c.rio.Lock()
|
||||
defer c.rio.Unlock()
|
||||
for {
|
||||
if c.r == nil {
|
||||
// Advance to next message.
|
||||
var err error
|
||||
_, c.r, err = c.NextReader()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
n, err := c.r.Read(p)
|
||||
if err == io.EOF {
|
||||
// At end of message.
|
||||
c.r = nil
|
||||
if n > 0 {
|
||||
return n, nil
|
||||
}
|
||||
// No data read, continue to next message.
|
||||
continue
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
}
|
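The new websocket.go switches the websocket transport to gorilla/websocket and wraps the connection so it satisfies net.Conn. NewWebsocket is exported, so a hedged usage sketch against the signature shown above is possible; the endpoint URL, buffer sizes and timeout are illustrative:

package main

import (
    "log"
    "net/http"
    "time"

    mqtt "github.com/eclipse/paho.mqtt.golang"
)

func main() {
    // Dial an MQTT-over-websocket endpoint; the returned value is a plain
    // net.Conn thanks to the websocketConnector wrapper.
    conn, err := mqtt.NewWebsocket(
        "ws://broker.local:9001/mqtt", // illustrative endpoint
        nil,                           // no TLS config for ws://
        5*time.Second,                 // handshake timeout (0 falls back to 10s)
        http.Header{},                 // extra request headers
        &mqtt.WebsocketOptions{ReadBufferSize: 4096, WriteBufferSize: 4096},
    )
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()
    log.Println("websocket connected:", conn.RemoteAddr())
}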
2
vendor/github.com/golang/protobuf/proto/text_decode.go
generated
vendored
@ -765,7 +765,7 @@ func unescape(s string) (ch string, tail string, err error) {
 		if i > utf8.MaxRune {
 			return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
 		}
-		return string(i), s, nil
+		return string(rune(i)), s, nil
 	}
 	return "", "", fmt.Errorf(`unknown escape \%c`, r)
 }
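The protobuf change converts the integer to a rune before converting it to a string. string(i) on an integer type produces the UTF-8 encoding of that code point, but since Go 1.15 `go vet` warns on the conversion unless the rune is explicit, and the explicit form makes the intent unambiguous. A small illustration:

package main

import "fmt"

func main() {
    var i int64 = 0x1F680 // a Unicode code point held in an integer

    // string(rune(i)) encodes the code point as UTF-8; the explicit rune
    // conversion is what go vet (Go 1.15+) asks for.
    fmt.Println(string(rune(i)))

    // Note this is a different operation from formatting the number itself:
    fmt.Println(string(rune(65))) // "A", not "65"
}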
25
vendor/github.com/gorilla/websocket/.gitignore
generated
vendored
Normal file
@ -0,0 +1,25 @@
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
|
||||
.idea/
|
||||
*.iml
|
9
vendor/github.com/gorilla/websocket/AUTHORS
generated
vendored
Normal file
@ -0,0 +1,9 @@
|
||||
# This is the official list of Gorilla WebSocket authors for copyright
|
||||
# purposes.
|
||||
#
|
||||
# Please keep the list sorted.
|
||||
|
||||
Gary Burd <gary@beagledreams.com>
|
||||
Google LLC (https://opensource.google.com/)
|
||||
Joachim Bauch <mail@joachim-bauch.de>
|
||||
|
22
vendor/github.com/gorilla/websocket/LICENSE
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
||||
Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
64
vendor/github.com/gorilla/websocket/README.md
generated
vendored
Normal file
@ -0,0 +1,64 @@
|
||||
# Gorilla WebSocket
|
||||
|
||||
[](https://godoc.org/github.com/gorilla/websocket)
|
||||
[](https://circleci.com/gh/gorilla/websocket)
|
||||
|
||||
Gorilla WebSocket is a [Go](http://golang.org/) implementation of the
|
||||
[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol.
|
||||
|
||||
### Documentation
|
||||
|
||||
* [API Reference](https://pkg.go.dev/github.com/gorilla/websocket?tab=doc)
|
||||
* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat)
|
||||
* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command)
|
||||
* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo)
|
||||
* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch)
|
||||
|
||||
### Status
|
||||
|
||||
The Gorilla WebSocket package provides a complete and tested implementation of
|
||||
the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The
|
||||
package API is stable.
|
||||
|
||||
### Installation
|
||||
|
||||
go get github.com/gorilla/websocket
|
||||
|
||||
### Protocol Compliance
|
||||
|
||||
The Gorilla WebSocket package passes the server tests in the [Autobahn Test
|
||||
Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn
|
||||
subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn).
|
||||
|
||||
### Gorilla WebSocket compared with other packages
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<th></th>
|
||||
<th><a href="http://godoc.org/github.com/gorilla/websocket">github.com/gorilla</a></th>
|
||||
<th><a href="http://godoc.org/golang.org/x/net/websocket">golang.org/x/net</a></th>
|
||||
</tr>
|
||||
<tr>
|
||||
<tr><td colspan="3"><a href="http://tools.ietf.org/html/rfc6455">RFC 6455</a> Features</td></tr>
|
||||
<tr><td>Passes <a href="https://github.com/crossbario/autobahn-testsuite">Autobahn Test Suite</a></td><td><a href="https://github.com/gorilla/websocket/tree/master/examples/autobahn">Yes</a></td><td>No</td></tr>
|
||||
<tr><td>Receive <a href="https://tools.ietf.org/html/rfc6455#section-5.4">fragmented</a> message<td>Yes</td><td><a href="https://code.google.com/p/go/issues/detail?id=7632">No</a>, see note 1</td></tr>
|
||||
<tr><td>Send <a href="https://tools.ietf.org/html/rfc6455#section-5.5.1">close</a> message</td><td><a href="http://godoc.org/github.com/gorilla/websocket#hdr-Control_Messages">Yes</a></td><td><a href="https://code.google.com/p/go/issues/detail?id=4588">No</a></td></tr>
|
||||
<tr><td>Send <a href="https://tools.ietf.org/html/rfc6455#section-5.5.2">pings</a> and receive <a href="https://tools.ietf.org/html/rfc6455#section-5.5.3">pongs</a></td><td><a href="http://godoc.org/github.com/gorilla/websocket#hdr-Control_Messages">Yes</a></td><td>No</td></tr>
|
||||
<tr><td>Get the <a href="https://tools.ietf.org/html/rfc6455#section-5.6">type</a> of a received data message</td><td>Yes</td><td>Yes, see note 2</td></tr>
|
||||
<tr><td colspan="3">Other Features</tr></td>
|
||||
<tr><td><a href="https://tools.ietf.org/html/rfc7692">Compression Extensions</a></td><td>Experimental</td><td>No</td></tr>
|
||||
<tr><td>Read message using io.Reader</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextReader">Yes</a></td><td>No, see note 3</td></tr>
|
||||
<tr><td>Write message using io.WriteCloser</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextWriter">Yes</a></td><td>No, see note 3</td></tr>
|
||||
</table>
|
||||
|
||||
Notes:
|
||||
|
||||
1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html).
|
||||
2. The application can get the type of a received data message by implementing
|
||||
a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal)
|
||||
function.
|
||||
3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries.
|
||||
Read returns when the input buffer is full or a frame boundary is
|
||||
encountered. Each call to Write sends a single frame message. The Gorilla
|
||||
io.Reader and io.WriteCloser operate on a single WebSocket message.
|
||||
|
395
vendor/github.com/gorilla/websocket/client.go
generated
vendored
Normal file
@ -0,0 +1,395 @@
|
||||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptrace"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ErrBadHandshake is returned when the server response to opening handshake is
|
||||
// invalid.
|
||||
var ErrBadHandshake = errors.New("websocket: bad handshake")
|
||||
|
||||
var errInvalidCompression = errors.New("websocket: invalid compression negotiation")
|
||||
|
||||
// NewClient creates a new client connection using the given net connection.
|
||||
// The URL u specifies the host and request URI. Use requestHeader to specify
|
||||
// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies
|
||||
// (Cookie). Use the response.Header to get the selected subprotocol
|
||||
// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
|
||||
//
|
||||
// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
|
||||
// non-nil *http.Response so that callers can handle redirects, authentication,
|
||||
// etc.
|
||||
//
|
||||
// Deprecated: Use Dialer instead.
|
||||
func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) {
|
||||
d := Dialer{
|
||||
ReadBufferSize: readBufSize,
|
||||
WriteBufferSize: writeBufSize,
|
||||
NetDial: func(net, addr string) (net.Conn, error) {
|
||||
return netConn, nil
|
||||
},
|
||||
}
|
||||
return d.Dial(u.String(), requestHeader)
|
||||
}
|
||||
|
||||
// A Dialer contains options for connecting to WebSocket server.
|
||||
type Dialer struct {
|
||||
// NetDial specifies the dial function for creating TCP connections. If
|
||||
// NetDial is nil, net.Dial is used.
|
||||
NetDial func(network, addr string) (net.Conn, error)
|
||||
|
||||
// NetDialContext specifies the dial function for creating TCP connections. If
|
||||
// NetDialContext is nil, net.DialContext is used.
|
||||
NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error)
|
||||
|
||||
// Proxy specifies a function to return a proxy for a given
|
||||
// Request. If the function returns a non-nil error, the
|
||||
// request is aborted with the provided error.
|
||||
// If Proxy is nil or returns a nil *URL, no proxy is used.
|
||||
Proxy func(*http.Request) (*url.URL, error)
|
||||
|
||||
// TLSClientConfig specifies the TLS configuration to use with tls.Client.
|
||||
// If nil, the default configuration is used.
|
||||
TLSClientConfig *tls.Config
|
||||
|
||||
// HandshakeTimeout specifies the duration for the handshake to complete.
|
||||
HandshakeTimeout time.Duration
|
||||
|
||||
// ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer
|
||||
// size is zero, then a useful default size is used. The I/O buffer sizes
|
||||
// do not limit the size of the messages that can be sent or received.
|
||||
ReadBufferSize, WriteBufferSize int
|
||||
|
||||
// WriteBufferPool is a pool of buffers for write operations. If the value
|
||||
// is not set, then write buffers are allocated to the connection for the
|
||||
// lifetime of the connection.
|
||||
//
|
||||
// A pool is most useful when the application has a modest volume of writes
|
||||
// across a large number of connections.
|
||||
//
|
||||
// Applications should use a single pool for each unique value of
|
||||
// WriteBufferSize.
|
||||
WriteBufferPool BufferPool
|
||||
|
||||
// Subprotocols specifies the client's requested subprotocols.
|
||||
Subprotocols []string
|
||||
|
||||
// EnableCompression specifies if the client should attempt to negotiate
|
||||
// per message compression (RFC 7692). Setting this value to true does not
|
||||
// guarantee that compression will be supported. Currently only "no context
|
||||
// takeover" modes are supported.
|
||||
EnableCompression bool
|
||||
|
||||
// Jar specifies the cookie jar.
|
||||
// If Jar is nil, cookies are not sent in requests and ignored
|
||||
// in responses.
|
||||
Jar http.CookieJar
|
||||
}
|
||||
|
||||
// Dial creates a new client connection by calling DialContext with a background context.
|
||||
func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
|
||||
return d.DialContext(context.Background(), urlStr, requestHeader)
|
||||
}
|
||||
|
||||
var errMalformedURL = errors.New("malformed ws or wss URL")
|
||||
|
||||
func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
|
||||
hostPort = u.Host
|
||||
hostNoPort = u.Host
|
||||
if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") {
|
||||
hostNoPort = hostNoPort[:i]
|
||||
} else {
|
||||
switch u.Scheme {
|
||||
case "wss":
|
||||
hostPort += ":443"
|
||||
case "https":
|
||||
hostPort += ":443"
|
||||
default:
|
||||
hostPort += ":80"
|
||||
}
|
||||
}
|
||||
return hostPort, hostNoPort
|
||||
}
|
||||
|
||||
// DefaultDialer is a dialer with all fields set to the default values.
|
||||
var DefaultDialer = &Dialer{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
HandshakeTimeout: 45 * time.Second,
|
||||
}
|
||||
|
||||
// nilDialer is dialer to use when receiver is nil.
|
||||
var nilDialer = *DefaultDialer
|
||||
|
||||
// DialContext creates a new client connection. Use requestHeader to specify the
|
||||
// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie).
|
||||
// Use the response.Header to get the selected subprotocol
|
||||
// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
|
||||
//
|
||||
// The context will be used in the request and in the Dialer.
|
||||
//
|
||||
// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
|
||||
// non-nil *http.Response so that callers can handle redirects, authentication,
|
||||
// etcetera. The response body may not contain the entire response and does not
|
||||
// need to be closed by the application.
|
||||
func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
|
||||
if d == nil {
|
||||
d = &nilDialer
|
||||
}
|
||||
|
||||
challengeKey, err := generateChallengeKey()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
u, err := url.Parse(urlStr)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
switch u.Scheme {
|
||||
case "ws":
|
||||
u.Scheme = "http"
|
||||
case "wss":
|
||||
u.Scheme = "https"
|
||||
default:
|
||||
return nil, nil, errMalformedURL
|
||||
}
|
||||
|
||||
if u.User != nil {
|
||||
// User name and password are not allowed in websocket URIs.
|
||||
return nil, nil, errMalformedURL
|
||||
}
|
||||
|
||||
req := &http.Request{
|
||||
Method: "GET",
|
||||
URL: u,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
Header: make(http.Header),
|
||||
Host: u.Host,
|
||||
}
|
||||
req = req.WithContext(ctx)
|
||||
|
||||
// Set the cookies present in the cookie jar of the dialer
|
||||
if d.Jar != nil {
|
||||
for _, cookie := range d.Jar.Cookies(u) {
|
||||
req.AddCookie(cookie)
|
||||
}
|
||||
}
|
||||
|
||||
// Set the request headers using the capitalization for names and values in
|
||||
// RFC examples. Although the capitalization shouldn't matter, there are
|
||||
// servers that depend on it. The Header.Set method is not used because the
|
||||
// method canonicalizes the header names.
|
||||
req.Header["Upgrade"] = []string{"websocket"}
|
||||
req.Header["Connection"] = []string{"Upgrade"}
|
||||
req.Header["Sec-WebSocket-Key"] = []string{challengeKey}
|
||||
req.Header["Sec-WebSocket-Version"] = []string{"13"}
|
||||
if len(d.Subprotocols) > 0 {
|
||||
req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")}
|
||||
}
|
||||
for k, vs := range requestHeader {
|
||||
switch {
|
||||
case k == "Host":
|
||||
if len(vs) > 0 {
|
||||
req.Host = vs[0]
|
||||
}
|
||||
case k == "Upgrade" ||
|
||||
k == "Connection" ||
|
||||
k == "Sec-Websocket-Key" ||
|
||||
k == "Sec-Websocket-Version" ||
|
||||
k == "Sec-Websocket-Extensions" ||
|
||||
(k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0):
|
||||
return nil, nil, errors.New("websocket: duplicate header not allowed: " + k)
|
||||
case k == "Sec-Websocket-Protocol":
|
||||
req.Header["Sec-WebSocket-Protocol"] = vs
|
||||
default:
|
||||
req.Header[k] = vs
|
||||
}
|
||||
}
|
||||
|
||||
if d.EnableCompression {
|
||||
req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"}
|
||||
}
|
||||
|
||||
if d.HandshakeTimeout != 0 {
|
||||
var cancel func()
|
||||
ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout)
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
// Get network dial function.
|
||||
var netDial func(network, add string) (net.Conn, error)
|
||||
|
||||
if d.NetDialContext != nil {
|
||||
netDial = func(network, addr string) (net.Conn, error) {
|
||||
return d.NetDialContext(ctx, network, addr)
|
||||
}
|
||||
} else if d.NetDial != nil {
|
||||
netDial = d.NetDial
|
||||
} else {
|
||||
netDialer := &net.Dialer{}
|
||||
netDial = func(network, addr string) (net.Conn, error) {
|
||||
return netDialer.DialContext(ctx, network, addr)
|
||||
}
|
||||
}
|
||||
|
||||
// If needed, wrap the dial function to set the connection deadline.
|
||||
if deadline, ok := ctx.Deadline(); ok {
|
||||
forwardDial := netDial
|
||||
netDial = func(network, addr string) (net.Conn, error) {
|
||||
c, err := forwardDial(network, addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = c.SetDeadline(deadline)
|
||||
if err != nil {
|
||||
c.Close()
|
||||
return nil, err
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
}
|
||||
|
||||
// If needed, wrap the dial function to connect through a proxy.
|
||||
if d.Proxy != nil {
|
||||
proxyURL, err := d.Proxy(req)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if proxyURL != nil {
|
||||
dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
netDial = dialer.Dial
|
||||
}
|
||||
}
|
||||
|
||||
hostPort, hostNoPort := hostPortNoPort(u)
|
||||
trace := httptrace.ContextClientTrace(ctx)
|
||||
if trace != nil && trace.GetConn != nil {
|
||||
trace.GetConn(hostPort)
|
||||
}
|
||||
|
||||
netConn, err := netDial("tcp", hostPort)
|
||||
if trace != nil && trace.GotConn != nil {
|
||||
trace.GotConn(httptrace.GotConnInfo{
|
||||
Conn: netConn,
|
||||
})
|
||||
}
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if netConn != nil {
|
||||
netConn.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
if u.Scheme == "https" {
|
||||
cfg := cloneTLSConfig(d.TLSClientConfig)
|
||||
if cfg.ServerName == "" {
|
||||
cfg.ServerName = hostNoPort
|
||||
}
|
||||
tlsConn := tls.Client(netConn, cfg)
|
||||
netConn = tlsConn
|
||||
|
||||
var err error
|
||||
if trace != nil {
|
||||
err = doHandshakeWithTrace(trace, tlsConn, cfg)
|
||||
} else {
|
||||
err = doHandshake(tlsConn, cfg)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil)
|
||||
|
||||
if err := req.Write(netConn); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if trace != nil && trace.GotFirstResponseByte != nil {
|
||||
if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 {
|
||||
trace.GotFirstResponseByte()
|
||||
}
|
||||
}
|
||||
|
||||
resp, err := http.ReadResponse(conn.br, req)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if d.Jar != nil {
|
||||
if rc := resp.Cookies(); len(rc) > 0 {
|
||||
d.Jar.SetCookies(u, rc)
|
||||
}
|
||||
}
|
||||
|
||||
if resp.StatusCode != 101 ||
|
||||
!strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") ||
|
||||
!strings.EqualFold(resp.Header.Get("Connection"), "upgrade") ||
|
||||
resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) {
|
||||
// Before closing the network connection on return from this
|
||||
// function, slurp up some of the response to aid application
|
||||
// debugging.
|
||||
buf := make([]byte, 1024)
|
||||
n, _ := io.ReadFull(resp.Body, buf)
|
||||
resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n]))
|
||||
return nil, resp, ErrBadHandshake
|
||||
}
|
||||
|
||||
for _, ext := range parseExtensions(resp.Header) {
|
||||
if ext[""] != "permessage-deflate" {
|
||||
continue
|
||||
}
|
||||
_, snct := ext["server_no_context_takeover"]
|
||||
_, cnct := ext["client_no_context_takeover"]
|
||||
if !snct || !cnct {
|
||||
return nil, resp, errInvalidCompression
|
||||
}
|
||||
conn.newCompressionWriter = compressNoContextTakeover
|
||||
conn.newDecompressionReader = decompressNoContextTakeover
|
||||
break
|
||||
}
|
||||
|
||||
resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
|
||||
conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol")
|
||||
|
||||
netConn.SetDeadline(time.Time{})
|
||||
netConn = nil // to avoid close in defer.
|
||||
return conn, resp, nil
|
||||
}
|
||||
|
||||
func doHandshake(tlsConn *tls.Conn, cfg *tls.Config) error {
|
||||
if err := tlsConn.Handshake(); err != nil {
|
||||
return err
|
||||
}
|
||||
if !cfg.InsecureSkipVerify {
|
||||
if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
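client.go is the dialing half of the newly vendored gorilla/websocket dependency. For reference, a minimal client sketch using the Dialer shown above; the URL is illustrative and error handling is trimmed to the essentials:

package main

import (
    "log"

    "github.com/gorilla/websocket"
)

func main() {
    // DefaultDialer applies http.ProxyFromEnvironment and a 45s handshake
    // timeout, as defined in the vendored client.go.
    conn, resp, err := websocket.DefaultDialer.Dial("ws://echo.local/ws", nil)
    if err != nil {
        // On a failed handshake, resp (when non-nil) helps debugging.
        log.Fatalf("dial failed: %v (http response: %v)", err, resp)
    }
    defer conn.Close()

    if err := conn.WriteMessage(websocket.TextMessage, []byte("hello")); err != nil {
        log.Fatal(err)
    }
    _, msg, err := conn.ReadMessage()
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("received: %s", msg)
}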
16
vendor/github.com/gorilla/websocket/client_clone.go
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
||||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build go1.8
|
||||
|
||||
package websocket
|
||||
|
||||
import "crypto/tls"
|
||||
|
||||
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
|
||||
if cfg == nil {
|
||||
return &tls.Config{}
|
||||
}
|
||||
return cfg.Clone()
|
||||
}
|
38
vendor/github.com/gorilla/websocket/client_clone_legacy.go
generated
vendored
Normal file
@ -0,0 +1,38 @@
|
||||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !go1.8
|
||||
|
||||
package websocket
|
||||
|
||||
import "crypto/tls"
|
||||
|
||||
// cloneTLSConfig clones all public fields except the fields
|
||||
// SessionTicketsDisabled and SessionTicketKey. This avoids copying the
|
||||
// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a
|
||||
// config in active use.
|
||||
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
|
||||
if cfg == nil {
|
||||
return &tls.Config{}
|
||||
}
|
||||
return &tls.Config{
|
||||
Rand: cfg.Rand,
|
||||
Time: cfg.Time,
|
||||
Certificates: cfg.Certificates,
|
||||
NameToCertificate: cfg.NameToCertificate,
|
||||
GetCertificate: cfg.GetCertificate,
|
||||
RootCAs: cfg.RootCAs,
|
||||
NextProtos: cfg.NextProtos,
|
||||
ServerName: cfg.ServerName,
|
||||
ClientAuth: cfg.ClientAuth,
|
||||
ClientCAs: cfg.ClientCAs,
|
||||
InsecureSkipVerify: cfg.InsecureSkipVerify,
|
||||
CipherSuites: cfg.CipherSuites,
|
||||
PreferServerCipherSuites: cfg.PreferServerCipherSuites,
|
||||
ClientSessionCache: cfg.ClientSessionCache,
|
||||
MinVersion: cfg.MinVersion,
|
||||
MaxVersion: cfg.MaxVersion,
|
||||
CurvePreferences: cfg.CurvePreferences,
|
||||
}
|
||||
}
|
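client_clone.go and client_clone_legacy.go are a pair selected by build constraints: the `// +build go1.8` file uses tls.Config.Clone, while the `// +build !go1.8` file copies the public fields by hand for older toolchains. A minimal sketch of the same pattern under assumed file and package names (newer toolchains would also carry a //go:build line, omitted here to match the vendored style):

// file: clone_modern.go
// +build go1.8

package mypkg

import "crypto/tls"

func cloneConfig(c *tls.Config) *tls.Config {
    if c == nil {
        return &tls.Config{}
    }
    return c.Clone() // Config.Clone exists from Go 1.8 onwards
}

// file: clone_legacy.go
// +build !go1.8

package mypkg

import "crypto/tls"

func cloneConfig(c *tls.Config) *tls.Config {
    if c == nil {
        return &tls.Config{}
    }
    // Manual copy of the fields this sketch cares about, for toolchains
    // without Config.Clone.
    return &tls.Config{ServerName: c.ServerName, InsecureSkipVerify: c.InsecureSkipVerify}
}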
148
vendor/github.com/gorilla/websocket/compression.go
generated
vendored
Normal file
@ -0,0 +1,148 @@
|
||||
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"compress/flate"
|
||||
"errors"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const (
|
||||
minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6
|
||||
maxCompressionLevel = flate.BestCompression
|
||||
defaultCompressionLevel = 1
|
||||
)
|
||||
|
||||
var (
|
||||
flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool
|
||||
flateReaderPool = sync.Pool{New: func() interface{} {
|
||||
return flate.NewReader(nil)
|
||||
}}
|
||||
)
|
||||
|
||||
func decompressNoContextTakeover(r io.Reader) io.ReadCloser {
|
||||
const tail =
|
||||
// Add four bytes as specified in RFC
|
||||
"\x00\x00\xff\xff" +
|
||||
// Add final block to squelch unexpected EOF error from flate reader.
|
||||
"\x01\x00\x00\xff\xff"
|
||||
|
||||
fr, _ := flateReaderPool.Get().(io.ReadCloser)
|
||||
fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil)
|
||||
return &flateReadWrapper{fr}
|
||||
}
|
||||
|
||||
func isValidCompressionLevel(level int) bool {
|
||||
return minCompressionLevel <= level && level <= maxCompressionLevel
|
||||
}
|
||||
|
||||
func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser {
|
||||
p := &flateWriterPools[level-minCompressionLevel]
|
||||
tw := &truncWriter{w: w}
|
||||
fw, _ := p.Get().(*flate.Writer)
|
||||
if fw == nil {
|
||||
fw, _ = flate.NewWriter(tw, level)
|
||||
} else {
|
||||
fw.Reset(tw)
|
||||
}
|
||||
return &flateWriteWrapper{fw: fw, tw: tw, p: p}
|
||||
}
|
||||
|
||||
// truncWriter is an io.Writer that writes all but the last four bytes of the
|
||||
// stream to another io.Writer.
|
||||
type truncWriter struct {
|
||||
w io.WriteCloser
|
||||
n int
|
||||
p [4]byte
|
||||
}
|
||||
|
||||
func (w *truncWriter) Write(p []byte) (int, error) {
|
||||
n := 0
|
||||
|
||||
// fill buffer first for simplicity.
|
||||
if w.n < len(w.p) {
|
||||
n = copy(w.p[w.n:], p)
|
||||
p = p[n:]
|
||||
w.n += n
|
||||
if len(p) == 0 {
|
||||
return n, nil
|
||||
}
|
||||
}
|
||||
|
||||
m := len(p)
|
||||
if m > len(w.p) {
|
||||
m = len(w.p)
|
||||
}
|
||||
|
||||
if nn, err := w.w.Write(w.p[:m]); err != nil {
|
||||
return n + nn, err
|
||||
}
|
||||
|
||||
copy(w.p[:], w.p[m:])
|
||||
copy(w.p[len(w.p)-m:], p[len(p)-m:])
|
||||
nn, err := w.w.Write(p[:len(p)-m])
|
||||
return n + nn, err
|
||||
}
|
||||
|
||||
type flateWriteWrapper struct {
|
||||
fw *flate.Writer
|
||||
tw *truncWriter
|
||||
p *sync.Pool
|
||||
}
|
||||
|
||||
func (w *flateWriteWrapper) Write(p []byte) (int, error) {
|
||||
if w.fw == nil {
|
||||
return 0, errWriteClosed
|
||||
}
|
||||
return w.fw.Write(p)
|
||||
}
|
||||
|
||||
func (w *flateWriteWrapper) Close() error {
|
||||
if w.fw == nil {
|
||||
return errWriteClosed
|
||||
}
|
||||
err1 := w.fw.Flush()
|
||||
w.p.Put(w.fw)
|
||||
w.fw = nil
|
||||
if w.tw.p != [4]byte{0, 0, 0xff, 0xff} {
|
||||
return errors.New("websocket: internal error, unexpected bytes at end of flate stream")
|
||||
}
|
||||
err2 := w.tw.w.Close()
|
||||
if err1 != nil {
|
||||
return err1
|
||||
}
|
||||
return err2
|
||||
}
|
||||
|
||||
type flateReadWrapper struct {
|
||||
fr io.ReadCloser
|
||||
}
|
||||
|
||||
func (r *flateReadWrapper) Read(p []byte) (int, error) {
|
||||
if r.fr == nil {
|
||||
return 0, io.ErrClosedPipe
|
||||
}
|
||||
n, err := r.fr.Read(p)
|
||||
if err == io.EOF {
|
||||
// Preemptively place the reader back in the pool. This helps with
|
||||
// scenarios where the application does not call NextReader() soon after
|
||||
// this final read.
|
||||
r.Close()
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (r *flateReadWrapper) Close() error {
|
||||
if r.fr == nil {
|
||||
return io.ErrClosedPipe
|
||||
}
|
||||
err := r.fr.Close()
|
||||
flateReaderPool.Put(r.fr)
|
||||
r.fr = nil
|
||||
return err
|
||||
}
|
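compressNoContextTakeover and decompressNoContextTakeover above are wired in automatically once permessage-deflate is negotiated; application code only touches the exported knobs. A hedged sketch of those knobs (assumes an established *websocket.Conn named conn; not part of the vendored code):

package main

import (
	"compress/flate"

	"github.com/gorilla/websocket"
)

// Request permessage-deflate during the handshake.
var upgrader = websocket.Upgrader{EnableCompression: true}

// tuneCompression adjusts write-side compression after the handshake.
func tuneCompression(conn *websocket.Conn) error {
	conn.EnableWriteCompression(true)
	// Levels follow compress/flate: -2 (Huffman only) through 9 (best compression).
	return conn.SetCompressionLevel(flate.BestSpeed)
}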
1201 vendor/github.com/gorilla/websocket/conn.go generated vendored Normal file
File diff suppressed because it is too large
15 vendor/github.com/gorilla/websocket/conn_write.go generated vendored Normal file
@ -0,0 +1,15 @@
|
||||
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build go1.8
|
||||
|
||||
package websocket
|
||||
|
||||
import "net"
|
||||
|
||||
func (c *Conn) writeBufs(bufs ...[]byte) error {
|
||||
b := net.Buffers(bufs)
|
||||
_, err := b.WriteTo(c.conn)
|
||||
return err
|
||||
}
|
18 vendor/github.com/gorilla/websocket/conn_write_legacy.go generated vendored Normal file
@ -0,0 +1,18 @@
|
||||
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !go1.8
|
||||
|
||||
package websocket
|
||||
|
||||
func (c *Conn) writeBufs(bufs ...[]byte) error {
|
||||
for _, buf := range bufs {
|
||||
if len(buf) > 0 {
|
||||
if _, err := c.conn.Write(buf); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
227 vendor/github.com/gorilla/websocket/doc.go generated vendored Normal file
@ -0,0 +1,227 @@
|
||||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package websocket implements the WebSocket protocol defined in RFC 6455.
|
||||
//
|
||||
// Overview
|
||||
//
|
||||
// The Conn type represents a WebSocket connection. A server application calls
|
||||
// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn:
|
||||
//
|
||||
// var upgrader = websocket.Upgrader{
|
||||
// ReadBufferSize: 1024,
|
||||
// WriteBufferSize: 1024,
|
||||
// }
|
||||
//
|
||||
// func handler(w http.ResponseWriter, r *http.Request) {
|
||||
// conn, err := upgrader.Upgrade(w, r, nil)
|
||||
// if err != nil {
|
||||
// log.Println(err)
|
||||
// return
|
||||
// }
|
||||
// ... Use conn to send and receive messages.
|
||||
// }
|
||||
//
|
||||
// Call the connection's WriteMessage and ReadMessage methods to send and
|
||||
// receive messages as a slice of bytes. This snippet of code shows how to echo
|
||||
// messages using these methods:
|
||||
//
|
||||
// for {
|
||||
// messageType, p, err := conn.ReadMessage()
|
||||
// if err != nil {
|
||||
// log.Println(err)
|
||||
// return
|
||||
// }
|
||||
// if err := conn.WriteMessage(messageType, p); err != nil {
|
||||
// log.Println(err)
|
||||
// return
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// In the above snippet, p is a []byte and messageType is an int with value
|
||||
// websocket.BinaryMessage or websocket.TextMessage.
|
||||
//
|
||||
// An application can also send and receive messages using the io.WriteCloser
|
||||
// and io.Reader interfaces. To send a message, call the connection NextWriter
|
||||
// method to get an io.WriteCloser, write the message to the writer and close
|
||||
// the writer when done. To receive a message, call the connection NextReader
|
||||
// method to get an io.Reader and read until io.EOF is returned. This snippet
|
||||
// shows how to echo messages using the NextWriter and NextReader methods:
|
||||
//
|
||||
// for {
|
||||
// messageType, r, err := conn.NextReader()
|
||||
// if err != nil {
|
||||
// return
|
||||
// }
|
||||
// w, err := conn.NextWriter(messageType)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// if _, err := io.Copy(w, r); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// if err := w.Close(); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// Data Messages
|
||||
//
|
||||
// The WebSocket protocol distinguishes between text and binary data messages.
|
||||
// Text messages are interpreted as UTF-8 encoded text. The interpretation of
|
||||
// binary messages is left to the application.
|
||||
//
|
||||
// This package uses the TextMessage and BinaryMessage integer constants to
|
||||
// identify the two data message types. The ReadMessage and NextReader methods
|
||||
// return the type of the received message. The messageType argument to the
|
||||
// WriteMessage and NextWriter methods specifies the type of a sent message.
|
||||
//
|
||||
// It is the application's responsibility to ensure that text messages are
|
||||
// valid UTF-8 encoded text.
|
||||
//
|
||||
// Control Messages
|
||||
//
|
||||
// The WebSocket protocol defines three types of control messages: close, ping
|
||||
// and pong. Call the connection WriteControl, WriteMessage or NextWriter
|
||||
// methods to send a control message to the peer.
|
||||
//
|
||||
// Connections handle received close messages by calling the handler function
|
||||
// set with the SetCloseHandler method and by returning a *CloseError from the
|
||||
// NextReader, ReadMessage or the message Read method. The default close
|
||||
// handler sends a close message to the peer.
|
||||
//
|
||||
// Connections handle received ping messages by calling the handler function
|
||||
// set with the SetPingHandler method. The default ping handler sends a pong
|
||||
// message to the peer.
|
||||
//
|
||||
// Connections handle received pong messages by calling the handler function
|
||||
// set with the SetPongHandler method. The default pong handler does nothing.
|
||||
// If an application sends ping messages, then the application should set a
|
||||
// pong handler to receive the corresponding pong.
|
||||
//
|
||||
// The control message handler functions are called from the NextReader,
|
||||
// ReadMessage and message reader Read methods. The default close and ping
|
||||
// handlers can block these methods for a short time when the handler writes to
|
||||
// the connection.
|
||||
//
|
||||
// The application must read the connection to process close, ping and pong
|
||||
// messages sent from the peer. If the application is not otherwise interested
|
||||
// in messages from the peer, then the application should start a goroutine to
|
||||
// read and discard messages from the peer. A simple example is:
|
||||
//
|
||||
// func readLoop(c *websocket.Conn) {
|
||||
// for {
|
||||
// if _, _, err := c.NextReader(); err != nil {
|
||||
// c.Close()
|
||||
// break
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// Concurrency
|
||||
//
|
||||
// Connections support one concurrent reader and one concurrent writer.
|
||||
//
|
||||
// Applications are responsible for ensuring that no more than one goroutine
|
||||
// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage,
|
||||
// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and
|
||||
// that no more than one goroutine calls the read methods (NextReader,
|
||||
// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler)
|
||||
// concurrently.
|
||||
//
|
||||
// The Close and WriteControl methods can be called concurrently with all other
|
||||
// methods.
|
||||
//
|
||||
// Origin Considerations
|
||||
//
|
||||
// Web browsers allow Javascript applications to open a WebSocket connection to
|
||||
// any host. It's up to the server to enforce an origin policy using the Origin
|
||||
// request header sent by the browser.
|
||||
//
|
||||
// The Upgrader calls the function specified in the CheckOrigin field to check
|
||||
// the origin. If the CheckOrigin function returns false, then the Upgrade
|
||||
// method fails the WebSocket handshake with HTTP status 403.
|
||||
//
|
||||
// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail
|
||||
// the handshake if the Origin request header is present and the Origin host is
|
||||
// not equal to the Host request header.
|
||||
//
|
||||
// The deprecated package-level Upgrade function does not perform origin
|
||||
// checking. The application is responsible for checking the Origin header
|
||||
// before calling the Upgrade function.
|
||||
//
|
||||
// Buffers
|
||||
//
|
||||
// Connections buffer network input and output to reduce the number
|
||||
// of system calls when reading or writing messages.
|
||||
//
|
||||
// Write buffers are also used for constructing WebSocket frames. See RFC 6455,
|
||||
// Section 5 for a discussion of message framing. A WebSocket frame header is
|
||||
// written to the network each time a write buffer is flushed to the network.
|
||||
// Decreasing the size of the write buffer can increase the amount of framing
|
||||
// overhead on the connection.
|
||||
//
|
||||
// The buffer sizes in bytes are specified by the ReadBufferSize and
|
||||
// WriteBufferSize fields in the Dialer and Upgrader. The Dialer uses a default
|
||||
// size of 4096 when a buffer size field is set to zero. The Upgrader reuses
|
||||
// buffers created by the HTTP server when a buffer size field is set to zero.
|
||||
// The HTTP server buffers have a size of 4096 at the time of this writing.
|
||||
//
|
||||
// The buffer sizes do not limit the size of a message that can be read or
|
||||
// written by a connection.
|
||||
//
|
||||
// Buffers are held for the lifetime of the connection by default. If the
|
||||
// Dialer or Upgrader WriteBufferPool field is set, then a connection holds the
|
||||
// write buffer only when writing a message.
|
||||
//
|
||||
// Applications should tune the buffer sizes to balance memory use and
|
||||
// performance. Increasing the buffer size uses more memory, but can reduce the
|
||||
// number of system calls to read or write the network. In the case of writing,
|
||||
// increasing the buffer size can reduce the number of frame headers written to
|
||||
// the network.
|
||||
//
|
||||
// Some guidelines for setting buffer parameters are:
|
||||
//
|
||||
// Limit the buffer sizes to the maximum expected message size. Buffers larger
|
||||
// than the largest message do not provide any benefit.
|
||||
//
|
||||
// Depending on the distribution of message sizes, setting the buffer size to
|
||||
// a value less than the maximum expected message size can greatly reduce memory
|
||||
// use with a small impact on performance. Here's an example: If 99% of the
|
||||
// messages are smaller than 256 bytes and the maximum message size is 512
|
||||
// bytes, then a buffer size of 256 bytes will result in 1.01 more system calls
|
||||
// than a buffer size of 512 bytes. The memory savings is 50%.
|
||||
//
|
||||
// A write buffer pool is useful when the application has a modest number of
// writes over a large number of connections. When buffers are pooled, a larger
|
||||
// buffer size has a reduced impact on total memory use and has the benefit of
|
||||
// reducing system calls and frame overhead.
|
||||
//
|
||||
// Compression EXPERIMENTAL
|
||||
//
|
||||
// Per message compression extensions (RFC 7692) are experimentally supported
|
||||
// by this package in a limited capacity. Setting the EnableCompression option
|
||||
// to true in Dialer or Upgrader will attempt to negotiate per message deflate
|
||||
// support.
|
||||
//
|
||||
// var upgrader = websocket.Upgrader{
|
||||
// EnableCompression: true,
|
||||
// }
|
||||
//
|
||||
// If compression was successfully negotiated with the connection's peer, any
|
||||
// message received in compressed form will be automatically decompressed.
|
||||
// All Read methods will return uncompressed bytes.
|
||||
//
|
||||
// Per message compression of messages written to a connection can be enabled
|
||||
// or disabled by calling the corresponding Conn method:
|
||||
//
|
||||
// conn.EnableWriteCompression(false)
|
||||
//
|
||||
// Currently this package does not support compression with "context takeover".
|
||||
// This means that messages must be compressed and decompressed in isolation,
|
||||
// without retaining sliding window or dictionary state across messages. For
|
||||
// more details refer to RFC 7692.
|
||||
//
|
||||
// Use of compression is experimental and may result in decreased performance.
|
||||
package websocket
|
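Putting the package comment's fragments together, a complete echo server built on this vendored package could look like the sketch below (listen address and route are illustrative, not taken from this repository):

package main

import (
	"log"
	"net/http"

	"github.com/gorilla/websocket"
)

var upgrader = websocket.Upgrader{
	ReadBufferSize:  1024,
	WriteBufferSize: 1024,
}

// echo upgrades the HTTP request and echoes every message back to the client.
func echo(w http.ResponseWriter, r *http.Request) {
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Println("upgrade:", err)
		return
	}
	defer conn.Close()
	for {
		messageType, p, err := conn.ReadMessage()
		if err != nil {
			log.Println("read:", err)
			return
		}
		if err := conn.WriteMessage(messageType, p); err != nil {
			log.Println("write:", err)
			return
		}
	}
}

func main() {
	http.HandleFunc("/echo", echo)
	log.Fatal(http.ListenAndServe(":8080", nil))
}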
3 vendor/github.com/gorilla/websocket/go.mod generated vendored Normal file
@ -0,0 +1,3 @@
|
||||
module github.com/gorilla/websocket
|
||||
|
||||
go 1.12
|
0 vendor/github.com/gorilla/websocket/go.sum generated vendored Normal file
42 vendor/github.com/gorilla/websocket/join.go generated vendored Normal file
@ -0,0 +1,42 @@
|
||||
// Copyright 2019 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"io"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// JoinMessages concatenates received messages to create a single io.Reader.
|
||||
// The string term is appended to each message. The returned reader does not
|
||||
// support concurrent calls to the Read method.
|
||||
func JoinMessages(c *Conn, term string) io.Reader {
|
||||
return &joinReader{c: c, term: term}
|
||||
}
|
||||
|
||||
type joinReader struct {
|
||||
c *Conn
|
||||
term string
|
||||
r io.Reader
|
||||
}
|
||||
|
||||
func (r *joinReader) Read(p []byte) (int, error) {
|
||||
if r.r == nil {
|
||||
var err error
|
||||
_, r.r, err = r.c.NextReader()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if r.term != "" {
|
||||
r.r = io.MultiReader(r.r, strings.NewReader(r.term))
|
||||
}
|
||||
}
|
||||
n, err := r.r.Read(p)
|
||||
if err == io.EOF {
|
||||
err = nil
|
||||
r.r = nil
|
||||
}
|
||||
return n, err
|
||||
}
|
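JoinMessages is convenient when each message carries one record and the consumer wants a plain stream, for example newline-delimited text. A sketch assuming an established client connection (names are illustrative):

package main

import (
	"bufio"
	"log"

	"github.com/gorilla/websocket"
)

// scanMessages reads every incoming message as one newline-terminated record.
func scanMessages(conn *websocket.Conn) error {
	r := websocket.JoinMessages(conn, "\n") // "\n" is appended after each message
	sc := bufio.NewScanner(r)
	for sc.Scan() {
		log.Printf("record: %s", sc.Text())
	}
	return sc.Err()
}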
60 vendor/github.com/gorilla/websocket/json.go generated vendored Normal file
@ -0,0 +1,60 @@
|
||||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
)
|
||||
|
||||
// WriteJSON writes the JSON encoding of v as a message.
|
||||
//
|
||||
// Deprecated: Use c.WriteJSON instead.
|
||||
func WriteJSON(c *Conn, v interface{}) error {
|
||||
return c.WriteJSON(v)
|
||||
}
|
||||
|
||||
// WriteJSON writes the JSON encoding of v as a message.
|
||||
//
|
||||
// See the documentation for encoding/json Marshal for details about the
|
||||
// conversion of Go values to JSON.
|
||||
func (c *Conn) WriteJSON(v interface{}) error {
|
||||
w, err := c.NextWriter(TextMessage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err1 := json.NewEncoder(w).Encode(v)
|
||||
err2 := w.Close()
|
||||
if err1 != nil {
|
||||
return err1
|
||||
}
|
||||
return err2
|
||||
}
|
||||
|
||||
// ReadJSON reads the next JSON-encoded message from the connection and stores
|
||||
// it in the value pointed to by v.
|
||||
//
|
||||
// Deprecated: Use c.ReadJSON instead.
|
||||
func ReadJSON(c *Conn, v interface{}) error {
|
||||
return c.ReadJSON(v)
|
||||
}
|
||||
|
||||
// ReadJSON reads the next JSON-encoded message from the connection and stores
|
||||
// it in the value pointed to by v.
|
||||
//
|
||||
// See the documentation for the encoding/json Unmarshal function for details
|
||||
// about the conversion of JSON to a Go value.
|
||||
func (c *Conn) ReadJSON(v interface{}) error {
|
||||
_, r, err := c.NextReader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = json.NewDecoder(r).Decode(v)
|
||||
if err == io.EOF {
|
||||
// One value is expected in the message.
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
return err
|
||||
}
|
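A short usage sketch for the two Conn helpers above; the Message type and its fields are illustrative only:

package main

import "github.com/gorilla/websocket"

// Message is a hypothetical payload type used only for this sketch.
type Message struct {
	Op   string `json:"op"`
	Body string `json:"body"`
}

// roundTrip sends one JSON message and decodes the peer's JSON reply.
func roundTrip(conn *websocket.Conn) (*Message, error) {
	if err := conn.WriteJSON(&Message{Op: "ping"}); err != nil {
		return nil, err
	}
	var reply Message
	if err := conn.ReadJSON(&reply); err != nil {
		return nil, err
	}
	return &reply, nil
}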
54 vendor/github.com/gorilla/websocket/mask.go generated vendored Normal file
@ -0,0 +1,54 @@
|
||||
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
|
||||
// this source code is governed by a BSD-style license that can be found in the
|
||||
// LICENSE file.
|
||||
|
||||
// +build !appengine
|
||||
|
||||
package websocket
|
||||
|
||||
import "unsafe"
|
||||
|
||||
const wordSize = int(unsafe.Sizeof(uintptr(0)))
|
||||
|
||||
func maskBytes(key [4]byte, pos int, b []byte) int {
|
||||
// Mask one byte at a time for small buffers.
|
||||
if len(b) < 2*wordSize {
|
||||
for i := range b {
|
||||
b[i] ^= key[pos&3]
|
||||
pos++
|
||||
}
|
||||
return pos & 3
|
||||
}
|
||||
|
||||
// Mask one byte at a time to word boundary.
|
||||
if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 {
|
||||
n = wordSize - n
|
||||
for i := range b[:n] {
|
||||
b[i] ^= key[pos&3]
|
||||
pos++
|
||||
}
|
||||
b = b[n:]
|
||||
}
|
||||
|
||||
// Create aligned word size key.
|
||||
var k [wordSize]byte
|
||||
for i := range k {
|
||||
k[i] = key[(pos+i)&3]
|
||||
}
|
||||
kw := *(*uintptr)(unsafe.Pointer(&k))
|
||||
|
||||
// Mask one word at a time.
|
||||
n := (len(b) / wordSize) * wordSize
|
||||
for i := 0; i < n; i += wordSize {
|
||||
*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw
|
||||
}
|
||||
|
||||
// Mask one byte at a time for remaining bytes.
|
||||
b = b[n:]
|
||||
for i := range b {
|
||||
b[i] ^= key[pos&3]
|
||||
pos++
|
||||
}
|
||||
|
||||
return pos & 3
|
||||
}
|
15 vendor/github.com/gorilla/websocket/mask_safe.go generated vendored Normal file
@ -0,0 +1,15 @@
|
||||
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
|
||||
// this source code is governed by a BSD-style license that can be found in the
|
||||
// LICENSE file.
|
||||
|
||||
// +build appengine
|
||||
|
||||
package websocket
|
||||
|
||||
func maskBytes(key [4]byte, pos int, b []byte) int {
|
||||
for i := range b {
|
||||
b[i] ^= key[pos&3]
|
||||
pos++
|
||||
}
|
||||
return pos & 3
|
||||
}
|
102 vendor/github.com/gorilla/websocket/prepared.go generated vendored Normal file
@ -0,0 +1,102 @@
|
||||
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// PreparedMessage caches on the wire representations of a message payload.
|
||||
// Use PreparedMessage to efficiently send a message payload to multiple
|
||||
// connections. PreparedMessage is especially useful when compression is used
|
||||
// because the CPU and memory expensive compression operation can be executed
|
||||
// once for a given set of compression options.
|
||||
type PreparedMessage struct {
|
||||
messageType int
|
||||
data []byte
|
||||
mu sync.Mutex
|
||||
frames map[prepareKey]*preparedFrame
|
||||
}
|
||||
|
||||
// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage.
|
||||
type prepareKey struct {
|
||||
isServer bool
|
||||
compress bool
|
||||
compressionLevel int
|
||||
}
|
||||
|
||||
// preparedFrame contains data in wire representation.
|
||||
type preparedFrame struct {
|
||||
once sync.Once
|
||||
data []byte
|
||||
}
|
||||
|
||||
// NewPreparedMessage returns an initialized PreparedMessage. You can then send
// it to a connection using the WritePreparedMessage method. The wire
// representation is calculated lazily, only once for a given set of connection
// options.
|
||||
func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) {
|
||||
pm := &PreparedMessage{
|
||||
messageType: messageType,
|
||||
frames: make(map[prepareKey]*preparedFrame),
|
||||
data: data,
|
||||
}
|
||||
|
||||
// Prepare a plain server frame.
|
||||
_, frameData, err := pm.frame(prepareKey{isServer: true, compress: false})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// To protect against caller modifying the data argument, remember the data
|
||||
// copied to the plain server frame.
|
||||
pm.data = frameData[len(frameData)-len(data):]
|
||||
return pm, nil
|
||||
}
|
||||
|
||||
func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) {
|
||||
pm.mu.Lock()
|
||||
frame, ok := pm.frames[key]
|
||||
if !ok {
|
||||
frame = &preparedFrame{}
|
||||
pm.frames[key] = frame
|
||||
}
|
||||
pm.mu.Unlock()
|
||||
|
||||
var err error
|
||||
frame.once.Do(func() {
|
||||
// Prepare a frame using a 'fake' connection.
|
||||
// TODO: Refactor code in conn.go to allow more direct construction of
|
||||
// the frame.
|
||||
mu := make(chan struct{}, 1)
|
||||
mu <- struct{}{}
|
||||
var nc prepareConn
|
||||
c := &Conn{
|
||||
conn: &nc,
|
||||
mu: mu,
|
||||
isServer: key.isServer,
|
||||
compressionLevel: key.compressionLevel,
|
||||
enableWriteCompression: true,
|
||||
writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize),
|
||||
}
|
||||
if key.compress {
|
||||
c.newCompressionWriter = compressNoContextTakeover
|
||||
}
|
||||
err = c.WriteMessage(pm.messageType, pm.data)
|
||||
frame.data = nc.buf.Bytes()
|
||||
})
|
||||
return pm.messageType, frame.data, err
|
||||
}
|
||||
|
||||
type prepareConn struct {
|
||||
buf bytes.Buffer
|
||||
net.Conn
|
||||
}
|
||||
|
||||
func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) }
|
||||
func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil }
|
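The usual pattern is to prepare a broadcast payload once and fan it out with Conn.WritePreparedMessage (defined in conn.go, whose diff is suppressed above). A sketch with an illustrative slice of connections:

package main

import (
	"log"

	"github.com/gorilla/websocket"
)

// broadcast frames (and, if negotiated, compresses) payload once, then
// writes the cached representation to every connection.
func broadcast(conns []*websocket.Conn, payload []byte) error {
	pm, err := websocket.NewPreparedMessage(websocket.TextMessage, payload)
	if err != nil {
		return err
	}
	for _, c := range conns {
		if err := c.WritePreparedMessage(pm); err != nil {
			log.Println("write:", err)
		}
	}
	return nil
}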
77 vendor/github.com/gorilla/websocket/proxy.go generated vendored Normal file
@ -0,0 +1,77 @@
|
||||
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type netDialerFunc func(network, addr string) (net.Conn, error)
|
||||
|
||||
func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) {
|
||||
return fn(network, addr)
|
||||
}
|
||||
|
||||
func init() {
|
||||
proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) {
|
||||
return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil
|
||||
})
|
||||
}
|
||||
|
||||
type httpProxyDialer struct {
|
||||
proxyURL *url.URL
|
||||
forwardDial func(network, addr string) (net.Conn, error)
|
||||
}
|
||||
|
||||
func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) {
|
||||
hostPort, _ := hostPortNoPort(hpd.proxyURL)
|
||||
conn, err := hpd.forwardDial(network, hostPort)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
connectHeader := make(http.Header)
|
||||
if user := hpd.proxyURL.User; user != nil {
|
||||
proxyUser := user.Username()
|
||||
if proxyPassword, passwordSet := user.Password(); passwordSet {
|
||||
credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
|
||||
connectHeader.Set("Proxy-Authorization", "Basic "+credential)
|
||||
}
|
||||
}
|
||||
|
||||
connectReq := &http.Request{
|
||||
Method: "CONNECT",
|
||||
URL: &url.URL{Opaque: addr},
|
||||
Host: addr,
|
||||
Header: connectHeader,
|
||||
}
|
||||
|
||||
if err := connectReq.Write(conn); err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Read response. It's OK to use and discard the buffered reader here because
|
||||
// the remote server does not speak until spoken to.
|
||||
br := bufio.NewReader(conn)
|
||||
resp, err := http.ReadResponse(br, connectReq)
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.StatusCode != 200 {
|
||||
conn.Close()
|
||||
f := strings.SplitN(resp.Status, " ", 2)
|
||||
return nil, errors.New(f[1])
|
||||
}
|
||||
return conn, nil
|
||||
}
|
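The httpProxyDialer above is registered for the "http" scheme; in practice applications reach it by setting Dialer.Proxy, typically from the standard proxy environment variables. A hedged sketch (the connection URL is passed in by the caller):

package main

import (
	"net/http"

	"github.com/gorilla/websocket"
)

// dialViaProxy opens a client connection, honouring HTTP_PROXY/HTTPS_PROXY.
func dialViaProxy(wsURL string) (*websocket.Conn, error) {
	d := websocket.Dialer{Proxy: http.ProxyFromEnvironment}
	conn, _, err := d.Dial(wsURL, nil)
	return conn, err
}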
363 vendor/github.com/gorilla/websocket/server.go generated vendored Normal file
@ -0,0 +1,363 @@
|
||||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// HandshakeError describes an error with the handshake from the peer.
|
||||
type HandshakeError struct {
|
||||
message string
|
||||
}
|
||||
|
||||
func (e HandshakeError) Error() string { return e.message }
|
||||
|
||||
// Upgrader specifies parameters for upgrading an HTTP connection to a
|
||||
// WebSocket connection.
|
||||
type Upgrader struct {
|
||||
// HandshakeTimeout specifies the duration for the handshake to complete.
|
||||
HandshakeTimeout time.Duration
|
||||
|
||||
// ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer
|
||||
// size is zero, then buffers allocated by the HTTP server are used. The
|
||||
// I/O buffer sizes do not limit the size of the messages that can be sent
|
||||
// or received.
|
||||
ReadBufferSize, WriteBufferSize int
|
||||
|
||||
// WriteBufferPool is a pool of buffers for write operations. If the value
|
||||
// is not set, then write buffers are allocated to the connection for the
|
||||
// lifetime of the connection.
|
||||
//
|
||||
// A pool is most useful when the application has a modest volume of writes
|
||||
// across a large number of connections.
|
||||
//
|
||||
// Applications should use a single pool for each unique value of
|
||||
// WriteBufferSize.
|
||||
WriteBufferPool BufferPool
|
||||
|
||||
// Subprotocols specifies the server's supported protocols in order of
|
||||
// preference. If this field is not nil, then the Upgrade method negotiates a
|
||||
// subprotocol by selecting the first match in this list with a protocol
|
||||
// requested by the client. If there's no match, then no protocol is
|
||||
// negotiated (the Sec-Websocket-Protocol header is not included in the
|
||||
// handshake response).
|
||||
Subprotocols []string
|
||||
|
||||
// Error specifies the function for generating HTTP error responses. If Error
|
||||
// is nil, then http.Error is used to generate the HTTP response.
|
||||
Error func(w http.ResponseWriter, r *http.Request, status int, reason error)
|
||||
|
||||
// CheckOrigin returns true if the request Origin header is acceptable. If
|
||||
// CheckOrigin is nil, then a safe default is used: return false if the
|
||||
// Origin request header is present and the origin host is not equal to
|
||||
// request Host header.
|
||||
//
|
||||
// A CheckOrigin function should carefully validate the request origin to
|
||||
// prevent cross-site request forgery.
|
||||
CheckOrigin func(r *http.Request) bool
|
||||
|
||||
// EnableCompression specifies whether the server should attempt to negotiate per
|
||||
// message compression (RFC 7692). Setting this value to true does not
|
||||
// guarantee that compression will be supported. Currently only "no context
|
||||
// takeover" modes are supported.
|
||||
EnableCompression bool
|
||||
}
|
||||
|
||||
func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) {
|
||||
err := HandshakeError{reason}
|
||||
if u.Error != nil {
|
||||
u.Error(w, r, status, err)
|
||||
} else {
|
||||
w.Header().Set("Sec-Websocket-Version", "13")
|
||||
http.Error(w, http.StatusText(status), status)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// checkSameOrigin returns true if the origin is not set or is equal to the request host.
|
||||
func checkSameOrigin(r *http.Request) bool {
|
||||
origin := r.Header["Origin"]
|
||||
if len(origin) == 0 {
|
||||
return true
|
||||
}
|
||||
u, err := url.Parse(origin[0])
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return equalASCIIFold(u.Host, r.Host)
|
||||
}
|
||||
|
||||
func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string {
|
||||
if u.Subprotocols != nil {
|
||||
clientProtocols := Subprotocols(r)
|
||||
for _, serverProtocol := range u.Subprotocols {
|
||||
for _, clientProtocol := range clientProtocols {
|
||||
if clientProtocol == serverProtocol {
|
||||
return clientProtocol
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if responseHeader != nil {
|
||||
return responseHeader.Get("Sec-Websocket-Protocol")
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
|
||||
//
|
||||
// The responseHeader is included in the response to the client's upgrade
|
||||
// request. Use the responseHeader to specify cookies (Set-Cookie) and the
|
||||
// application negotiated subprotocol (Sec-WebSocket-Protocol).
|
||||
//
|
||||
// If the upgrade fails, then Upgrade replies to the client with an HTTP error
|
||||
// response.
|
||||
func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) {
|
||||
const badHandshake = "websocket: the client is not using the websocket protocol: "
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Connection", "upgrade") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header")
|
||||
}
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Upgrade", "websocket") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header")
|
||||
}
|
||||
|
||||
if r.Method != "GET" {
|
||||
return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET")
|
||||
}
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header")
|
||||
}
|
||||
|
||||
if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported")
|
||||
}
|
||||
|
||||
checkOrigin := u.CheckOrigin
|
||||
if checkOrigin == nil {
|
||||
checkOrigin = checkSameOrigin
|
||||
}
|
||||
if !checkOrigin(r) {
|
||||
return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin")
|
||||
}
|
||||
|
||||
challengeKey := r.Header.Get("Sec-Websocket-Key")
|
||||
if challengeKey == "" {
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header is missing or blank")
|
||||
}
|
||||
|
||||
subprotocol := u.selectSubprotocol(r, responseHeader)
|
||||
|
||||
// Negotiate PMCE
|
||||
var compress bool
|
||||
if u.EnableCompression {
|
||||
for _, ext := range parseExtensions(r.Header) {
|
||||
if ext[""] != "permessage-deflate" {
|
||||
continue
|
||||
}
|
||||
compress = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
h, ok := w.(http.Hijacker)
|
||||
if !ok {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker")
|
||||
}
|
||||
var brw *bufio.ReadWriter
|
||||
netConn, brw, err := h.Hijack()
|
||||
if err != nil {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, err.Error())
|
||||
}
|
||||
|
||||
if brw.Reader.Buffered() > 0 {
|
||||
netConn.Close()
|
||||
return nil, errors.New("websocket: client sent data before handshake is complete")
|
||||
}
|
||||
|
||||
var br *bufio.Reader
|
||||
if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 {
|
||||
// Reuse hijacked buffered reader as connection reader.
|
||||
br = brw.Reader
|
||||
}
|
||||
|
||||
buf := bufioWriterBuffer(netConn, brw.Writer)
|
||||
|
||||
var writeBuf []byte
|
||||
if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 {
|
||||
// Reuse hijacked write buffer as connection buffer.
|
||||
writeBuf = buf
|
||||
}
|
||||
|
||||
c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf)
|
||||
c.subprotocol = subprotocol
|
||||
|
||||
if compress {
|
||||
c.newCompressionWriter = compressNoContextTakeover
|
||||
c.newDecompressionReader = decompressNoContextTakeover
|
||||
}
|
||||
|
||||
// Use larger of hijacked buffer and connection write buffer for header.
|
||||
p := buf
|
||||
if len(c.writeBuf) > len(p) {
|
||||
p = c.writeBuf
|
||||
}
|
||||
p = p[:0]
|
||||
|
||||
p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...)
|
||||
p = append(p, computeAcceptKey(challengeKey)...)
|
||||
p = append(p, "\r\n"...)
|
||||
if c.subprotocol != "" {
|
||||
p = append(p, "Sec-WebSocket-Protocol: "...)
|
||||
p = append(p, c.subprotocol...)
|
||||
p = append(p, "\r\n"...)
|
||||
}
|
||||
if compress {
|
||||
p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...)
|
||||
}
|
||||
for k, vs := range responseHeader {
|
||||
if k == "Sec-Websocket-Protocol" {
|
||||
continue
|
||||
}
|
||||
for _, v := range vs {
|
||||
p = append(p, k...)
|
||||
p = append(p, ": "...)
|
||||
for i := 0; i < len(v); i++ {
|
||||
b := v[i]
|
||||
if b <= 31 {
|
||||
// prevent response splitting.
|
||||
b = ' '
|
||||
}
|
||||
p = append(p, b)
|
||||
}
|
||||
p = append(p, "\r\n"...)
|
||||
}
|
||||
}
|
||||
p = append(p, "\r\n"...)
|
||||
|
||||
// Clear deadlines set by HTTP server.
|
||||
netConn.SetDeadline(time.Time{})
|
||||
|
||||
if u.HandshakeTimeout > 0 {
|
||||
netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout))
|
||||
}
|
||||
if _, err = netConn.Write(p); err != nil {
|
||||
netConn.Close()
|
||||
return nil, err
|
||||
}
|
||||
if u.HandshakeTimeout > 0 {
|
||||
netConn.SetWriteDeadline(time.Time{})
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
|
||||
//
|
||||
// Deprecated: Use websocket.Upgrader instead.
|
||||
//
|
||||
// Upgrade does not perform origin checking. The application is responsible for
|
||||
// checking the Origin header before calling Upgrade. An example implementation
|
||||
// of the same origin policy check is:
|
||||
//
|
||||
// if req.Header.Get("Origin") != "http://"+req.Host {
|
||||
// http.Error(w, "Origin not allowed", http.StatusForbidden)
|
||||
// return
|
||||
// }
|
||||
//
|
||||
// If the endpoint supports subprotocols, then the application is responsible
|
||||
// for negotiating the protocol used on the connection. Use the Subprotocols()
|
||||
// function to get the subprotocols requested by the client. Use the
|
||||
// Sec-Websocket-Protocol response header to specify the subprotocol selected
|
||||
// by the application.
|
||||
//
|
||||
// The responseHeader is included in the response to the client's upgrade
|
||||
// request. Use the responseHeader to specify cookies (Set-Cookie) and the
|
||||
// negotiated subprotocol (Sec-Websocket-Protocol).
|
||||
//
|
||||
// The connection buffers IO to the underlying network connection. The
|
||||
// readBufSize and writeBufSize parameters specify the size of the buffers to
|
||||
// use. Messages can be larger than the buffers.
|
||||
//
|
||||
// If the request is not a valid WebSocket handshake, then Upgrade returns an
|
||||
// error of type HandshakeError. Applications should handle this error by
|
||||
// replying to the client with an HTTP error response.
|
||||
func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) {
|
||||
u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize}
|
||||
u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) {
|
||||
// don't return errors to maintain backwards compatibility
|
||||
}
|
||||
u.CheckOrigin = func(r *http.Request) bool {
|
||||
// allow all connections by default
|
||||
return true
|
||||
}
|
||||
return u.Upgrade(w, r, responseHeader)
|
||||
}
|
||||
|
||||
// Subprotocols returns the subprotocols requested by the client in the
|
||||
// Sec-Websocket-Protocol header.
|
||||
func Subprotocols(r *http.Request) []string {
|
||||
h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol"))
|
||||
if h == "" {
|
||||
return nil
|
||||
}
|
||||
protocols := strings.Split(h, ",")
|
||||
for i := range protocols {
|
||||
protocols[i] = strings.TrimSpace(protocols[i])
|
||||
}
|
||||
return protocols
|
||||
}
|
||||
|
||||
// IsWebSocketUpgrade returns true if the client requested upgrade to the
|
||||
// WebSocket protocol.
|
||||
func IsWebSocketUpgrade(r *http.Request) bool {
|
||||
return tokenListContainsValue(r.Header, "Connection", "upgrade") &&
|
||||
tokenListContainsValue(r.Header, "Upgrade", "websocket")
|
||||
}
|
||||
|
||||
// bufioReaderSize returns the size of a bufio.Reader.
|
||||
func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int {
|
||||
// This code assumes that peek on a reset reader returns
|
||||
// bufio.Reader.buf[:0].
|
||||
// TODO: Use bufio.Reader.Size() after Go 1.10
|
||||
br.Reset(originalReader)
|
||||
if p, err := br.Peek(0); err == nil {
|
||||
return cap(p)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// writeHook is an io.Writer that records the last slice passed to it via
|
||||
// io.Writer.Write.
|
||||
type writeHook struct {
|
||||
p []byte
|
||||
}
|
||||
|
||||
func (wh *writeHook) Write(p []byte) (int, error) {
|
||||
wh.p = p
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
// bufioWriterBuffer grabs the buffer from a bufio.Writer.
|
||||
func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte {
|
||||
// This code assumes that bufio.Writer.buf[:1] is passed to the
|
||||
// bufio.Writer's underlying writer.
|
||||
var wh writeHook
|
||||
bw.Reset(&wh)
|
||||
bw.WriteByte(0)
|
||||
bw.Flush()
|
||||
|
||||
bw.Reset(originalWriter)
|
||||
|
||||
return wh.p[:cap(wh.p)]
|
||||
}
|
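A server-side sketch tying the Upgrader fields above together: restrict the Origin check and offer a subprotocol (the origin and protocol names are illustrative, not part of this repository):

package main

import (
	"log"
	"net/http"

	"github.com/gorilla/websocket"
)

var upgrader = websocket.Upgrader{
	Subprotocols: []string{"robocar.v1"}, // illustrative protocol name
	CheckOrigin: func(r *http.Request) bool {
		origin := r.Header.Get("Origin")
		return origin == "" || origin == "https://example.org"
	},
}

func handler(w http.ResponseWriter, r *http.Request) {
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		return // Upgrade has already written an HTTP error response
	}
	defer conn.Close()
	log.Println("negotiated subprotocol:", conn.Subprotocol())
}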
19 vendor/github.com/gorilla/websocket/trace.go generated vendored Normal file
@ -0,0 +1,19 @@
|
||||
// +build go1.8
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net/http/httptrace"
|
||||
)
|
||||
|
||||
func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error {
|
||||
if trace.TLSHandshakeStart != nil {
|
||||
trace.TLSHandshakeStart()
|
||||
}
|
||||
err := doHandshake(tlsConn, cfg)
|
||||
if trace.TLSHandshakeDone != nil {
|
||||
trace.TLSHandshakeDone(tlsConn.ConnectionState(), err)
|
||||
}
|
||||
return err
|
||||
}
|
12 vendor/github.com/gorilla/websocket/trace_17.go generated vendored Normal file
@ -0,0 +1,12 @@
|
||||
// +build !go1.8
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net/http/httptrace"
|
||||
)
|
||||
|
||||
func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error {
|
||||
return doHandshake(tlsConn, cfg)
|
||||
}
|
283 vendor/github.com/gorilla/websocket/util.go generated vendored Normal file
@ -0,0 +1,283 @@
|
||||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/sha1"
|
||||
"encoding/base64"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
|
||||
|
||||
func computeAcceptKey(challengeKey string) string {
|
||||
h := sha1.New()
|
||||
h.Write([]byte(challengeKey))
|
||||
h.Write(keyGUID)
|
||||
return base64.StdEncoding.EncodeToString(h.Sum(nil))
|
||||
}
|
||||
|
||||
func generateChallengeKey() (string, error) {
|
||||
p := make([]byte, 16)
|
||||
if _, err := io.ReadFull(rand.Reader, p); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return base64.StdEncoding.EncodeToString(p), nil
|
||||
}
|
||||
|
||||
// Token octets per RFC 2616.
|
||||
var isTokenOctet = [256]bool{
|
||||
'!': true,
|
||||
'#': true,
|
||||
'$': true,
|
||||
'%': true,
|
||||
'&': true,
|
||||
'\'': true,
|
||||
'*': true,
|
||||
'+': true,
|
||||
'-': true,
|
||||
'.': true,
|
||||
'0': true,
|
||||
'1': true,
|
||||
'2': true,
|
||||
'3': true,
|
||||
'4': true,
|
||||
'5': true,
|
||||
'6': true,
|
||||
'7': true,
|
||||
'8': true,
|
||||
'9': true,
|
||||
'A': true,
|
||||
'B': true,
|
||||
'C': true,
|
||||
'D': true,
|
||||
'E': true,
|
||||
'F': true,
|
||||
'G': true,
|
||||
'H': true,
|
||||
'I': true,
|
||||
'J': true,
|
||||
'K': true,
|
||||
'L': true,
|
||||
'M': true,
|
||||
'N': true,
|
||||
'O': true,
|
||||
'P': true,
|
||||
'Q': true,
|
||||
'R': true,
|
||||
'S': true,
|
||||
'T': true,
|
||||
'U': true,
|
||||
'W': true,
|
||||
'V': true,
|
||||
'X': true,
|
||||
'Y': true,
|
||||
'Z': true,
|
||||
'^': true,
|
||||
'_': true,
|
||||
'`': true,
|
||||
'a': true,
|
||||
'b': true,
|
||||
'c': true,
|
||||
'd': true,
|
||||
'e': true,
|
||||
'f': true,
|
||||
'g': true,
|
||||
'h': true,
|
||||
'i': true,
|
||||
'j': true,
|
||||
'k': true,
|
||||
'l': true,
|
||||
'm': true,
|
||||
'n': true,
|
||||
'o': true,
|
||||
'p': true,
|
||||
'q': true,
|
||||
'r': true,
|
||||
's': true,
|
||||
't': true,
|
||||
'u': true,
|
||||
'v': true,
|
||||
'w': true,
|
||||
'x': true,
|
||||
'y': true,
|
||||
'z': true,
|
||||
'|': true,
|
||||
'~': true,
|
||||
}
|
||||
|
||||
// skipSpace returns a slice of the string s with all leading RFC 2616 linear
|
||||
// whitespace removed.
|
||||
func skipSpace(s string) (rest string) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
if b := s[i]; b != ' ' && b != '\t' {
|
||||
break
|
||||
}
|
||||
}
|
||||
return s[i:]
|
||||
}
|
||||
|
||||
// nextToken returns the leading RFC 2616 token of s and the string following
|
||||
// the token.
|
||||
func nextToken(s string) (token, rest string) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
if !isTokenOctet[s[i]] {
|
||||
break
|
||||
}
|
||||
}
|
||||
return s[:i], s[i:]
|
||||
}
|
||||
|
||||
// nextTokenOrQuoted returns the leading token or quoted string per RFC 2616
|
||||
// and the string following the token or quoted string.
|
||||
func nextTokenOrQuoted(s string) (value string, rest string) {
|
||||
if !strings.HasPrefix(s, "\"") {
|
||||
return nextToken(s)
|
||||
}
|
||||
s = s[1:]
|
||||
for i := 0; i < len(s); i++ {
|
||||
switch s[i] {
|
||||
case '"':
|
||||
return s[:i], s[i+1:]
|
||||
case '\\':
|
||||
p := make([]byte, len(s)-1)
|
||||
j := copy(p, s[:i])
|
||||
escape := true
|
||||
for i = i + 1; i < len(s); i++ {
|
||||
b := s[i]
|
||||
switch {
|
||||
case escape:
|
||||
escape = false
|
||||
p[j] = b
|
||||
j++
|
||||
case b == '\\':
|
||||
escape = true
|
||||
case b == '"':
|
||||
return string(p[:j]), s[i+1:]
|
||||
default:
|
||||
p[j] = b
|
||||
j++
|
||||
}
|
||||
}
|
||||
return "", ""
|
||||
}
|
||||
}
|
||||
return "", ""
|
||||
}
|
||||
|
||||
// equalASCIIFold returns true if s is equal to t with ASCII case folding as
|
||||
// defined in RFC 4790.
|
||||
func equalASCIIFold(s, t string) bool {
|
||||
for s != "" && t != "" {
|
||||
sr, size := utf8.DecodeRuneInString(s)
|
||||
s = s[size:]
|
||||
tr, size := utf8.DecodeRuneInString(t)
|
||||
t = t[size:]
|
||||
if sr == tr {
|
||||
continue
|
||||
}
|
||||
if 'A' <= sr && sr <= 'Z' {
|
||||
sr = sr + 'a' - 'A'
|
||||
}
|
||||
if 'A' <= tr && tr <= 'Z' {
|
||||
tr = tr + 'a' - 'A'
|
||||
}
|
||||
if sr != tr {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return s == t
|
||||
}
|
||||
|
||||
// tokenListContainsValue returns true if the 1#token header with the given
|
||||
// name contains a token equal to value with ASCII case folding.
|
||||
func tokenListContainsValue(header http.Header, name string, value string) bool {
|
||||
headers:
|
||||
for _, s := range header[name] {
|
||||
for {
|
||||
var t string
|
||||
t, s = nextToken(skipSpace(s))
|
||||
if t == "" {
|
||||
continue headers
|
||||
}
|
||||
s = skipSpace(s)
|
||||
if s != "" && s[0] != ',' {
|
||||
continue headers
|
||||
}
|
||||
if equalASCIIFold(t, value) {
|
||||
return true
|
||||
}
|
||||
if s == "" {
|
||||
continue headers
|
||||
}
|
||||
s = s[1:]
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// parseExtensions parses WebSocket extensions from a header.
|
||||
func parseExtensions(header http.Header) []map[string]string {
|
||||
// From RFC 6455:
|
||||
//
|
||||
// Sec-WebSocket-Extensions = extension-list
|
||||
// extension-list = 1#extension
|
||||
// extension = extension-token *( ";" extension-param )
|
||||
// extension-token = registered-token
|
||||
// registered-token = token
|
||||
// extension-param = token [ "=" (token | quoted-string) ]
|
||||
// ;When using the quoted-string syntax variant, the value
|
||||
// ;after quoted-string unescaping MUST conform to the
|
||||
// ;'token' ABNF.
|
||||
|
||||
var result []map[string]string
|
||||
headers:
|
||||
for _, s := range header["Sec-Websocket-Extensions"] {
|
||||
for {
|
||||
var t string
|
||||
t, s = nextToken(skipSpace(s))
|
||||
if t == "" {
|
||||
continue headers
|
||||
}
|
||||
ext := map[string]string{"": t}
|
||||
for {
|
||||
s = skipSpace(s)
|
||||
if !strings.HasPrefix(s, ";") {
|
||||
break
|
||||
}
|
||||
var k string
|
||||
k, s = nextToken(skipSpace(s[1:]))
|
||||
if k == "" {
|
||||
continue headers
|
||||
}
|
||||
s = skipSpace(s)
|
||||
var v string
|
||||
if strings.HasPrefix(s, "=") {
|
||||
v, s = nextTokenOrQuoted(skipSpace(s[1:]))
|
||||
s = skipSpace(s)
|
||||
}
|
||||
if s != "" && s[0] != ',' && s[0] != ';' {
|
||||
continue headers
|
||||
}
|
||||
ext[k] = v
|
||||
}
|
||||
if s != "" && s[0] != ',' {
|
||||
continue headers
|
||||
}
|
||||
result = append(result, ext)
|
||||
if s == "" {
|
||||
continue headers
|
||||
}
|
||||
s = s[1:]
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
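computeAcceptKey above implements the Sec-WebSocket-Accept derivation from RFC 6455, Section 1.3: SHA-1 over the client key concatenated with the fixed GUID, then base64. A standalone sketch checking it against the RFC's sample handshake values:

package main

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

// acceptKey re-derives Sec-WebSocket-Accept the same way computeAcceptKey does.
func acceptKey(challengeKey string) string {
	h := sha1.New()
	h.Write([]byte(challengeKey + "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"))
	return base64.StdEncoding.EncodeToString(h.Sum(nil))
}

func main() {
	// Sample key and expected accept value from RFC 6455, Section 1.3.
	fmt.Println(acceptKey("dGhlIHNhbXBsZSBub25jZQ==") == "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=") // prints true
}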
473 vendor/github.com/gorilla/websocket/x_net_proxy.go generated vendored Normal file
@ -0,0 +1,473 @@
|
||||
// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.
|
||||
//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy
|
||||
|
||||
// Package proxy provides support for a variety of protocols to proxy network
|
||||
// data.
|
||||
//
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type proxy_direct struct{}
|
||||
|
||||
// Direct is a direct proxy: one that makes network connections directly.
|
||||
var proxy_Direct = proxy_direct{}
|
||||
|
||||
func (proxy_direct) Dial(network, addr string) (net.Conn, error) {
|
||||
return net.Dial(network, addr)
|
||||
}
|
||||
|
||||
// A PerHost directs connections to a default Dialer unless the host name
|
||||
// requested matches one of a number of exceptions.
|
||||
type proxy_PerHost struct {
|
||||
def, bypass proxy_Dialer
|
||||
|
||||
bypassNetworks []*net.IPNet
|
||||
bypassIPs []net.IP
|
||||
bypassZones []string
|
||||
bypassHosts []string
|
||||
}
|
||||
|
||||
// NewPerHost returns a PerHost Dialer that directs connections to either
|
||||
// defaultDialer or bypass, depending on whether the connection matches one of
|
||||
// the configured rules.
|
||||
func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost {
|
||||
return &proxy_PerHost{
|
||||
def: defaultDialer,
|
||||
bypass: bypass,
|
||||
}
|
||||
}
|
||||
|
||||
// Dial connects to the address addr on the given network through either
|
||||
// defaultDialer or bypass.
|
||||
func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) {
|
||||
host, _, err := net.SplitHostPort(addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return p.dialerForRequest(host).Dial(network, addr)
|
||||
}
|
||||
|
||||
func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer {
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
for _, net := range p.bypassNetworks {
|
||||
if net.Contains(ip) {
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
for _, bypassIP := range p.bypassIPs {
|
||||
if bypassIP.Equal(ip) {
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
return p.def
|
||||
}
|
||||
|
||||
for _, zone := range p.bypassZones {
|
||||
if strings.HasSuffix(host, zone) {
|
||||
return p.bypass
|
||||
}
|
||||
if host == zone[1:] {
|
||||
// For a zone ".example.com", we match "example.com"
|
||||
// too.
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
for _, bypassHost := range p.bypassHosts {
|
||||
if bypassHost == host {
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
return p.def
|
||||
}
|
||||
|
||||
// AddFromString parses a string that contains comma-separated values
|
||||
// specifying hosts that should use the bypass proxy. Each value is either an
|
||||
// IP address, a CIDR range, a zone (*.example.com) or a host name
|
||||
// (localhost). A best effort is made to parse the string and errors are
|
||||
// ignored.
|
||||
func (p *proxy_PerHost) AddFromString(s string) {
|
||||
hosts := strings.Split(s, ",")
|
||||
for _, host := range hosts {
|
||||
host = strings.TrimSpace(host)
|
||||
if len(host) == 0 {
|
||||
continue
|
||||
}
|
||||
if strings.Contains(host, "/") {
|
||||
// We assume that it's a CIDR address like 127.0.0.0/8
|
||||
if _, net, err := net.ParseCIDR(host); err == nil {
|
||||
p.AddNetwork(net)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
p.AddIP(ip)
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(host, "*.") {
|
||||
p.AddZone(host[1:])
|
||||
continue
|
||||
}
|
||||
		p.AddHost(host)
	}
}

// AddIP specifies an IP address that will use the bypass proxy. Note that
// this will only take effect if a literal IP address is dialed. A connection
// to a named host will never match an IP.
func (p *proxy_PerHost) AddIP(ip net.IP) {
	p.bypassIPs = append(p.bypassIPs, ip)
}

// AddNetwork specifies an IP range that will use the bypass proxy. Note that
// this will only take effect if a literal IP address is dialed. A connection
// to a named host will never match.
func (p *proxy_PerHost) AddNetwork(net *net.IPNet) {
	p.bypassNetworks = append(p.bypassNetworks, net)
}

// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
// "example.com" matches "example.com" and all of its subdomains.
func (p *proxy_PerHost) AddZone(zone string) {
	if strings.HasSuffix(zone, ".") {
		zone = zone[:len(zone)-1]
	}
	if !strings.HasPrefix(zone, ".") {
		zone = "." + zone
	}
	p.bypassZones = append(p.bypassZones, zone)
}

// AddHost specifies a host name that will use the bypass proxy.
func (p *proxy_PerHost) AddHost(host string) {
	if strings.HasSuffix(host, ".") {
		host = host[:len(host)-1]
	}
	p.bypassHosts = append(p.bypassHosts, host)
}

// A Dialer is a means to establish a connection.
type proxy_Dialer interface {
	// Dial connects to the given address via the proxy.
	Dial(network, addr string) (c net.Conn, err error)
}

// Auth contains authentication parameters that specific Dialers may require.
type proxy_Auth struct {
	User, Password string
}

// FromEnvironment returns the dialer specified by the proxy related variables in
// the environment.
func proxy_FromEnvironment() proxy_Dialer {
	allProxy := proxy_allProxyEnv.Get()
	if len(allProxy) == 0 {
		return proxy_Direct
	}

	proxyURL, err := url.Parse(allProxy)
	if err != nil {
		return proxy_Direct
	}
	proxy, err := proxy_FromURL(proxyURL, proxy_Direct)
	if err != nil {
		return proxy_Direct
	}

	noProxy := proxy_noProxyEnv.Get()
	if len(noProxy) == 0 {
		return proxy
	}

	perHost := proxy_NewPerHost(proxy, proxy_Direct)
	perHost.AddFromString(noProxy)
	return perHost
}

// proxySchemes is a map from URL schemes to a function that creates a Dialer
// from a URL with such a scheme.
var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)

// RegisterDialerType takes a URL scheme and a function to generate Dialers from
// a URL with that scheme and a forwarding Dialer. Registered schemes are used
// by FromURL.
func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) {
	if proxy_proxySchemes == nil {
		proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error))
	}
	proxy_proxySchemes[scheme] = f
}

// FromURL returns a Dialer given a URL specification and an underlying
// Dialer for it to make network requests.
func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) {
	var auth *proxy_Auth
	if u.User != nil {
		auth = new(proxy_Auth)
		auth.User = u.User.Username()
		if p, ok := u.User.Password(); ok {
			auth.Password = p
		}
	}

	switch u.Scheme {
	case "socks5":
		return proxy_SOCKS5("tcp", u.Host, auth, forward)
	}

	// If the scheme doesn't match any of the built-in schemes, see if it
	// was registered by another package.
	if proxy_proxySchemes != nil {
		if f, ok := proxy_proxySchemes[u.Scheme]; ok {
			return f(u, forward)
		}
	}

	return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
}

var (
	proxy_allProxyEnv = &proxy_envOnce{
		names: []string{"ALL_PROXY", "all_proxy"},
	}
	proxy_noProxyEnv = &proxy_envOnce{
		names: []string{"NO_PROXY", "no_proxy"},
	}
)

// envOnce looks up an environment variable (optionally by multiple
// names) once. It mitigates expensive lookups on some platforms
// (e.g. Windows).
// (Borrowed from net/http/transport.go)
type proxy_envOnce struct {
	names []string
	once  sync.Once
	val   string
}

func (e *proxy_envOnce) Get() string {
	e.once.Do(e.init)
	return e.val
}

func (e *proxy_envOnce) init() {
	for _, n := range e.names {
		e.val = os.Getenv(n)
		if e.val != "" {
			return
		}
	}
}

// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address
// with an optional username and password. See RFC 1928 and RFC 1929.
func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) {
	s := &proxy_socks5{
		network: network,
		addr:    addr,
		forward: forward,
	}
	if auth != nil {
		s.user = auth.User
		s.password = auth.Password
	}

	return s, nil
}

type proxy_socks5 struct {
	user, password string
	network, addr  string
	forward        proxy_Dialer
}

const proxy_socks5Version = 5

const (
	proxy_socks5AuthNone     = 0
	proxy_socks5AuthPassword = 2
)

const proxy_socks5Connect = 1

const (
	proxy_socks5IP4    = 1
	proxy_socks5Domain = 3
	proxy_socks5IP6    = 4
)

var proxy_socks5Errors = []string{
	"",
	"general failure",
	"connection forbidden",
	"network unreachable",
	"host unreachable",
	"connection refused",
	"TTL expired",
	"command not supported",
	"address type not supported",
}

// Dial connects to the address addr on the given network via the SOCKS5 proxy.
func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) {
	switch network {
	case "tcp", "tcp6", "tcp4":
	default:
		return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network)
	}

	conn, err := s.forward.Dial(s.network, s.addr)
	if err != nil {
		return nil, err
	}
	if err := s.connect(conn, addr); err != nil {
		conn.Close()
		return nil, err
	}
	return conn, nil
}

// connect takes an existing connection to a socks5 proxy server,
// and commands the server to extend that connection to target,
// which must be a canonical address with a host and port.
func (s *proxy_socks5) connect(conn net.Conn, target string) error {
	host, portStr, err := net.SplitHostPort(target)
	if err != nil {
		return err
	}

	port, err := strconv.Atoi(portStr)
	if err != nil {
		return errors.New("proxy: failed to parse port number: " + portStr)
	}
	if port < 1 || port > 0xffff {
		return errors.New("proxy: port number out of range: " + portStr)
	}

	// the size here is just an estimate
	buf := make([]byte, 0, 6+len(host))

	buf = append(buf, proxy_socks5Version)
	if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 {
		buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword)
	} else {
		buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone)
	}

	if _, err := conn.Write(buf); err != nil {
		return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error())
	}

	if _, err := io.ReadFull(conn, buf[:2]); err != nil {
		return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error())
	}
	if buf[0] != 5 {
		return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0])))
	}
	if buf[1] == 0xff {
		return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication")
	}

	// See RFC 1929
	if buf[1] == proxy_socks5AuthPassword {
		buf = buf[:0]
		buf = append(buf, 1 /* password protocol version */)
		buf = append(buf, uint8(len(s.user)))
		buf = append(buf, s.user...)
		buf = append(buf, uint8(len(s.password)))
		buf = append(buf, s.password...)

		if _, err := conn.Write(buf); err != nil {
			return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
		}

		if _, err := io.ReadFull(conn, buf[:2]); err != nil {
			return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
		}

		if buf[1] != 0 {
			return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password")
		}
	}

	buf = buf[:0]
	buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */)

	if ip := net.ParseIP(host); ip != nil {
		if ip4 := ip.To4(); ip4 != nil {
			buf = append(buf, proxy_socks5IP4)
			ip = ip4
		} else {
			buf = append(buf, proxy_socks5IP6)
		}
		buf = append(buf, ip...)
	} else {
		if len(host) > 255 {
			return errors.New("proxy: destination host name too long: " + host)
		}
		buf = append(buf, proxy_socks5Domain)
		buf = append(buf, byte(len(host)))
		buf = append(buf, host...)
	}
	buf = append(buf, byte(port>>8), byte(port))

	if _, err := conn.Write(buf); err != nil {
		return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
	}

	if _, err := io.ReadFull(conn, buf[:4]); err != nil {
		return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
	}

	failure := "unknown error"
	if int(buf[1]) < len(proxy_socks5Errors) {
		failure = proxy_socks5Errors[buf[1]]
	}

	if len(failure) > 0 {
		return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure)
	}

	bytesToDiscard := 0
	switch buf[3] {
	case proxy_socks5IP4:
		bytesToDiscard = net.IPv4len
	case proxy_socks5IP6:
		bytesToDiscard = net.IPv6len
	case proxy_socks5Domain:
		_, err := io.ReadFull(conn, buf[:1])
		if err != nil {
			return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error())
		}
		bytesToDiscard = int(buf[0])
	default:
		return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr)
	}

	if cap(buf) < bytesToDiscard {
		buf = make([]byte, bytesToDiscard)
	} else {
		buf = buf[:bytesToDiscard]
	}
	if _, err := io.ReadFull(conn, buf); err != nil {
		return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error())
	}

	// Also need to discard the port number
	if _, err := io.ReadFull(conn, buf[:2]); err != nil {
		return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error())
	}

	return nil
}
9
vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
generated
vendored
9
vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
generated
vendored
@ -1,9 +0,0 @@
(The MIT License)

Copyright (c) 2017 marvin + konsorten GmbH (open-source@konsorten.de)

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
42
vendor/github.com/konsorten/go-windows-terminal-sequences/README.md
generated
vendored
42
vendor/github.com/konsorten/go-windows-terminal-sequences/README.md
generated
vendored
@ -1,42 +0,0 @@
# Windows Terminal Sequences

This library allow for enabling Windows terminal color support for Go.

See [Console Virtual Terminal Sequences](https://docs.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences) for details.

## Usage

```go
import (
	"syscall"

	sequences "github.com/konsorten/go-windows-terminal-sequences"
)

func main() {
	sequences.EnableVirtualTerminalProcessing(syscall.Stdout, true)
}

```

## Authors

The tool is sponsored by the [marvin + konsorten GmbH](http://www.konsorten.de).

We thank all the authors who provided code to this library:

* Felix Kollmann
* Nicolas Perraut
* @dirty49374

## License

(The MIT License)

Copyright (c) 2018 marvin + konsorten GmbH (open-source@konsorten.de)

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
1
vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod
generated
vendored
1
vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod
generated
vendored
@ -1 +0,0 @@
module github.com/konsorten/go-windows-terminal-sequences
35
vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go
generated
vendored
35
vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go
generated
vendored
@ -1,35 +0,0 @@
// +build windows

package sequences

import (
	"syscall"
)

var (
	kernel32Dll    *syscall.LazyDLL  = syscall.NewLazyDLL("Kernel32.dll")
	setConsoleMode *syscall.LazyProc = kernel32Dll.NewProc("SetConsoleMode")
)

func EnableVirtualTerminalProcessing(stream syscall.Handle, enable bool) error {
	const ENABLE_VIRTUAL_TERMINAL_PROCESSING uint32 = 0x4

	var mode uint32
	err := syscall.GetConsoleMode(syscall.Stdout, &mode)
	if err != nil {
		return err
	}

	if enable {
		mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING
	} else {
		mode &^= ENABLE_VIRTUAL_TERMINAL_PROCESSING
	}

	ret, _, err := setConsoleMode.Call(uintptr(stream), uintptr(mode))
	if ret == 0 {
		return err
	}

	return nil
}
11
vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go
generated
vendored
11
vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go
generated
vendored
@ -1,11 +0,0 @@
// +build linux darwin

package sequences

import (
	"fmt"
)

func EnableVirtualTerminalProcessing(stream uintptr, enable bool) error {
	return fmt.Errorf("windows only package")
}
2
vendor/github.com/sirupsen/logrus/.gitignore
generated
vendored
2
vendor/github.com/sirupsen/logrus/.gitignore
generated
vendored
@ -1,2 +1,4 @@
logrus
vendor

.idea/
52
vendor/github.com/sirupsen/logrus/buffer_pool.go
generated
vendored
Normal file
52
vendor/github.com/sirupsen/logrus/buffer_pool.go
generated
vendored
Normal file
@ -0,0 +1,52 @@
package logrus

import (
	"bytes"
	"sync"
)

var (
	bufferPool BufferPool
)

type BufferPool interface {
	Put(*bytes.Buffer)
	Get() *bytes.Buffer
}

type defaultPool struct {
	pool *sync.Pool
}

func (p *defaultPool) Put(buf *bytes.Buffer) {
	p.pool.Put(buf)
}

func (p *defaultPool) Get() *bytes.Buffer {
	return p.pool.Get().(*bytes.Buffer)
}

func getBuffer() *bytes.Buffer {
	return bufferPool.Get()
}

func putBuffer(buf *bytes.Buffer) {
	buf.Reset()
	bufferPool.Put(buf)
}

// SetBufferPool allows to replace the default logrus buffer pool
// to better meets the specific needs of an application.
func SetBufferPool(bp BufferPool) {
	bufferPool = bp
}

func init() {
	SetBufferPool(&defaultPool{
		pool: &sync.Pool{
			New: func() interface{} {
				return new(bytes.Buffer)
			},
		},
	})
}
14
vendor/github.com/sirupsen/logrus/entry.go
generated
vendored
14
vendor/github.com/sirupsen/logrus/entry.go
generated
vendored
@ -13,7 +13,6 @@ import (
)

var (
	bufferPool *sync.Pool

	// qualified package name, cached at first use
	logrusPackage string
@ -31,12 +30,6 @@ const (
)

func init() {
	bufferPool = &sync.Pool{
		New: func() interface{} {
			return new(bytes.Buffer)
		},
	}

	// start at the bottom of the stack before the package-name cache is primed
	minimumCallerDepth = 1
}
@ -243,9 +236,12 @@ func (entry Entry) log(level Level, msg string) {

	entry.fireHooks()

	buffer = bufferPool.Get().(*bytes.Buffer)
	buffer = getBuffer()
	defer func() {
		entry.Buffer = nil
		putBuffer(buffer)
	}()
	buffer.Reset()
	defer bufferPool.Put(buffer)
	entry.Buffer = buffer

	entry.write()
45
vendor/github.com/sirupsen/logrus/exported.go
generated
vendored
45
vendor/github.com/sirupsen/logrus/exported.go
generated
vendored
@ -134,6 +134,51 @@ func Fatal(args ...interface{}) {
	std.Fatal(args...)
}

// TraceFn logs a message from a func at level Trace on the standard logger.
func TraceFn(fn LogFunction) {
	std.TraceFn(fn)
}

// DebugFn logs a message from a func at level Debug on the standard logger.
func DebugFn(fn LogFunction) {
	std.DebugFn(fn)
}

// PrintFn logs a message from a func at level Info on the standard logger.
func PrintFn(fn LogFunction) {
	std.PrintFn(fn)
}

// InfoFn logs a message from a func at level Info on the standard logger.
func InfoFn(fn LogFunction) {
	std.InfoFn(fn)
}

// WarnFn logs a message from a func at level Warn on the standard logger.
func WarnFn(fn LogFunction) {
	std.WarnFn(fn)
}

// WarningFn logs a message from a func at level Warn on the standard logger.
func WarningFn(fn LogFunction) {
	std.WarningFn(fn)
}

// ErrorFn logs a message from a func at level Error on the standard logger.
func ErrorFn(fn LogFunction) {
	std.ErrorFn(fn)
}

// PanicFn logs a message from a func at level Panic on the standard logger.
func PanicFn(fn LogFunction) {
	std.PanicFn(fn)
}

// FatalFn logs a message from a func at level Fatal on the standard logger then the process will exit with status set to 1.
func FatalFn(fn LogFunction) {
	std.FatalFn(fn)
}

// Tracef logs a message at level Trace on the standard logger.
func Tracef(format string, args ...interface{}) {
	std.Tracef(format, args...)
3
vendor/github.com/sirupsen/logrus/go.mod
generated
vendored
3
vendor/github.com/sirupsen/logrus/go.mod
generated
vendored
@ -2,10 +2,9 @@ module github.com/sirupsen/logrus

require (
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/konsorten/go-windows-terminal-sequences v1.0.3
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/stretchr/testify v1.2.2
	golang.org/x/sys v0.0.0-20190422165155-953cdadca894
	golang.org/x/sys v0.0.0-20191026070338-33540a1f6037
)

go 1.13
6
vendor/github.com/sirupsen/logrus/go.sum
generated
vendored
6
vendor/github.com/sirupsen/logrus/go.sum
generated
vendored
@ -1,12 +1,10 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
54
vendor/github.com/sirupsen/logrus/logger.go
generated
vendored
54
vendor/github.com/sirupsen/logrus/logger.go
generated
vendored
@ -9,6 +9,11 @@ import (
	"time"
)

// LogFunction For big messages, it can be more efficient to pass a function
// and only call it if the log level is actually enables rather than
// generating the log message and then checking if the level is enabled
type LogFunction func() []interface{}

type Logger struct {
	// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
	// file, or leave it default which is `os.Stderr`. You can also set this to
@ -70,7 +75,7 @@ func (mw *MutexWrap) Disable() {
//
// var log = &logrus.Logger{
//   Out: os.Stderr,
//   Formatter: new(logrus.JSONFormatter),
//   Formatter: new(logrus.TextFormatter),
//   Hooks: make(logrus.LevelHooks),
//   Level: logrus.DebugLevel,
// }
@ -195,6 +200,14 @@ func (logger *Logger) Log(level Level, args ...interface{}) {
	}
}

func (logger *Logger) LogFn(level Level, fn LogFunction) {
	if logger.IsLevelEnabled(level) {
		entry := logger.newEntry()
		entry.Log(level, fn()...)
		logger.releaseEntry(entry)
	}
}

func (logger *Logger) Trace(args ...interface{}) {
	logger.Log(TraceLevel, args...)
}
@ -234,6 +247,45 @@ func (logger *Logger) Panic(args ...interface{}) {
	logger.Log(PanicLevel, args...)
}

func (logger *Logger) TraceFn(fn LogFunction) {
	logger.LogFn(TraceLevel, fn)
}

func (logger *Logger) DebugFn(fn LogFunction) {
	logger.LogFn(DebugLevel, fn)
}

func (logger *Logger) InfoFn(fn LogFunction) {
	logger.LogFn(InfoLevel, fn)
}

func (logger *Logger) PrintFn(fn LogFunction) {
	entry := logger.newEntry()
	entry.Print(fn()...)
	logger.releaseEntry(entry)
}

func (logger *Logger) WarnFn(fn LogFunction) {
	logger.LogFn(WarnLevel, fn)
}

func (logger *Logger) WarningFn(fn LogFunction) {
	logger.WarnFn(fn)
}

func (logger *Logger) ErrorFn(fn LogFunction) {
	logger.LogFn(ErrorLevel, fn)
}

func (logger *Logger) FatalFn(fn LogFunction) {
	logger.LogFn(FatalLevel, fn)
	logger.Exit(1)
}

func (logger *Logger) PanicFn(fn LogFunction) {
	logger.LogFn(PanicLevel, fn)
}

func (logger *Logger) Logln(level Level, args ...interface{}) {
	if logger.IsLevelEnabled(level) {
		entry := logger.newEntry()
29
vendor/github.com/sirupsen/logrus/terminal_check_windows.go
generated
vendored
29
vendor/github.com/sirupsen/logrus/terminal_check_windows.go
generated
vendored
@ -5,30 +5,23 @@ package logrus
import (
	"io"
	"os"
	"syscall"

	sequences "github.com/konsorten/go-windows-terminal-sequences"
	"golang.org/x/sys/windows"
)

func initTerminal(w io.Writer) {
	switch v := w.(type) {
	case *os.File:
		sequences.EnableVirtualTerminalProcessing(syscall.Handle(v.Fd()), true)
	}
}

func checkIfTerminal(w io.Writer) bool {
	var ret bool
	switch v := w.(type) {
	case *os.File:
		handle := windows.Handle(v.Fd())
		var mode uint32
		err := syscall.GetConsoleMode(syscall.Handle(v.Fd()), &mode)
		ret = (err == nil)
	default:
		ret = false
		if err := windows.GetConsoleMode(handle, &mode); err != nil {
			return false
		}
		mode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
		if err := windows.SetConsoleMode(handle, mode); err != nil {
			return false
		}
		return true
	}
	if ret {
		initTerminal(w)
	}
	return ret
	return false
}
1
vendor/gocv.io/x/gocv/.dockerignore
generated
vendored
1
vendor/gocv.io/x/gocv/.dockerignore
generated
vendored
@ -1 +0,0 @@
**
60
vendor/gocv.io/x/gocv/.travis.yml
generated
vendored
60
vendor/gocv.io/x/gocv/.travis.yml
generated
vendored
@ -1,60 +0,0 @@
|
||||
# Use new container infrastructure to enable caching
|
||||
sudo: required
|
||||
dist: trusty
|
||||
|
||||
# language is go
|
||||
language: go
|
||||
go:
|
||||
- "1.14"
|
||||
go_import_path: gocv.io/x/gocv
|
||||
|
||||
addons:
|
||||
apt:
|
||||
packages:
|
||||
- libgmp-dev
|
||||
- build-essential
|
||||
- cmake
|
||||
- git
|
||||
- libgtk2.0-dev
|
||||
- pkg-config
|
||||
- libavcodec-dev
|
||||
- libavformat-dev
|
||||
- libswscale-dev
|
||||
- libtbb2
|
||||
- libtbb-dev
|
||||
- libjpeg-dev
|
||||
- libpng-dev
|
||||
- libtiff-dev
|
||||
- libjasper-dev
|
||||
- libdc1394-22-dev
|
||||
- xvfb
|
||||
|
||||
before_install:
|
||||
- ./travis_build_opencv.sh
|
||||
- export PKG_CONFIG_PATH=$(pkg-config --variable pc_path pkg-config):$HOME/usr/lib/pkgconfig
|
||||
- export INCLUDE_PATH=$HOME/usr/include:${INCLUDE_PATH}
|
||||
- export LD_LIBRARY_PATH=$HOME/usr/lib:${LD_LIBRARY_PATH}
|
||||
- sudo ln /dev/null /dev/raw1394
|
||||
- export DISPLAY=:99.0
|
||||
- sh -e /etc/init.d/xvfb start
|
||||
|
||||
before_cache:
|
||||
- rm -f $HOME/fresh-cache
|
||||
|
||||
script:
|
||||
- export GOCV_CAFFE_TEST_FILES="${HOME}/testdata"
|
||||
- export GOCV_TENSORFLOW_TEST_FILES="${HOME}/testdata"
|
||||
- export OPENCV_ENABLE_NONFREE=ON
|
||||
- echo "Ensuring code is well formatted"; ! gofmt -s -d . | read
|
||||
- go test -v -coverprofile=coverage.txt -covermode=atomic -tags matprofile .
|
||||
- go test -tags matprofile ./contrib -coverprofile=contrib.txt -covermode=atomic; cat contrib.txt >> coverage.txt; rm contrib.txt;
|
||||
|
||||
after_success:
|
||||
- bash <(curl -s https://codecov.io/bash)
|
||||
|
||||
# Caching so the next build will be fast as possible.
|
||||
cache:
|
||||
timeout: 1000
|
||||
directories:
|
||||
- $HOME/usr
|
||||
- $HOME/testdata
|
82
vendor/gocv.io/x/gocv/CHANGELOG.md
generated
vendored
82
vendor/gocv.io/x/gocv/CHANGELOG.md
generated
vendored
@ -1,3 +1,85 @@
|
||||
0.26.0
|
||||
---
|
||||
* **all**
|
||||
* update to OpenCV 4.5.1
|
||||
* **core**
|
||||
* add Matrix initializers: eye, ones, zeros (#758)
|
||||
* add multidimensional mat creation
|
||||
* add ndim mat constructor
|
||||
* added accumulators
|
||||
* added norm call with two mats (#600)
|
||||
* keep a reference to a []byte that backs a Mat. (#755)
|
||||
* remove guard for DataPtrUint8 since any Mat can be treated an Uint8
|
||||
* add Mat IsContinuous() function, and ensure that any Mat data pointers used to create Go slices only apply to continuous Mats
|
||||
* fix buffer size for Go strings for 32-bit operating systems
|
||||
* **build**
|
||||
* bring back codecov.io
|
||||
* **calib3d**
|
||||
* correctly close mat after test
|
||||
* **dnn**
|
||||
* add ReadNetFromONNX and ReadNetFromONNXBytes (#760)
|
||||
* increase test coverage
|
||||
* **docker**
|
||||
* dockerfiles for opencv gpu builds
|
||||
* **docs**
|
||||
* corrected links to CUDA and OpenVINO
|
||||
* list all unimplemented functions in photo module
|
||||
* replace GoDocs with pkg docs
|
||||
* update ROADMAP from recent contributions
|
||||
* **imgproc**
|
||||
* add test coverage for GetTextSizeWithBaseline()
|
||||
* close all Mats even those based on memory slices
|
||||
* close Mat to avoid memory leak in ToImage()
|
||||
* refactoring of ToImage and ImageToMatXX functions
|
||||
* **openvino**
|
||||
* fix dldt repo in makefile for openvino
|
||||
* **os**
|
||||
* adding gcc-c++ package to rpm deps
|
||||
* **photo**
|
||||
* add SeamlessClone function
|
||||
* **profile**
|
||||
* add created mats in Split and ForwardLayers to profile (#780)
|
||||
|
||||
0.25.0
|
||||
---
|
||||
* **all**
|
||||
* update to opencv release 4.5.0
|
||||
* **build**
|
||||
* add file dependencies needed for DNN tests
|
||||
* add verbose output for tests on CircleCI
|
||||
* also run unit tests on non-free algorithms. YMMV.
|
||||
* fix build with cuda
|
||||
* remove Travis and switch to CircleCI using Docker based builds
|
||||
* update CI builds to Go 1.15
|
||||
* **core**
|
||||
* add mixChannels() method to Mat (#746)
|
||||
* Add toGoStrings helper
|
||||
* support ConvertToWithParams method
|
||||
* **dnn**
|
||||
* Add NMSBoxes function (#736)
|
||||
* Added ability to load Torch file. Tested features for extracting 128d vectors
|
||||
* fix using wrong type for unconnectedlayertype
|
||||
* use default ddepth for conversions to blob from image as recommended by @berak
|
||||
* **docker**
|
||||
* use separate dockerfile for opencv to avoid massive rebuild
|
||||
* **docs**
|
||||
* add recent contributions to ROADMAP and also add cuda functions still in need of implementation
|
||||
* display CircleCI badge in README
|
||||
* minor improvements to CUDA docs in READMEs
|
||||
* **features2d**
|
||||
* add FlannBasedMatcher
|
||||
* add drawmatches (#720)
|
||||
* fix memory leak in SIFT
|
||||
* **highgui**
|
||||
* refactored ROI methods
|
||||
* **imgproc**
|
||||
* Add option to return baseline with GetTextSizeWithBaseline
|
||||
* **objdetect**
|
||||
* Add QRCode DetectAndDecodeMulti
|
||||
* **videoio**
|
||||
* Add video capture properties and set preferred api backend (#739)
|
||||
* fix needed as discussed in golang/go issue #32479
|
||||
|
||||
0.24.0
|
||||
---
|
||||
* **all**
|
||||
|
68
vendor/gocv.io/x/gocv/Dockerfile
generated
vendored
68
vendor/gocv.io/x/gocv/Dockerfile
generated
vendored
@ -1,66 +1,12 @@
|
||||
FROM ubuntu:16.04 AS opencv
|
||||
LABEL maintainer="hybridgroup"
|
||||
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
git build-essential cmake pkg-config unzip libgtk2.0-dev \
|
||||
curl ca-certificates libcurl4-openssl-dev libssl-dev \
|
||||
libavcodec-dev libavformat-dev libswscale-dev libtbb2 libtbb-dev \
|
||||
libjpeg-dev libpng-dev libtiff-dev libdc1394-22-dev && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
ARG OPENCV_VERSION="4.4.0"
|
||||
ENV OPENCV_VERSION $OPENCV_VERSION
|
||||
|
||||
RUN curl -Lo opencv.zip https://github.com/opencv/opencv/archive/${OPENCV_VERSION}.zip && \
|
||||
unzip -q opencv.zip && \
|
||||
curl -Lo opencv_contrib.zip https://github.com/opencv/opencv_contrib/archive/${OPENCV_VERSION}.zip && \
|
||||
unzip -q opencv_contrib.zip && \
|
||||
rm opencv.zip opencv_contrib.zip && \
|
||||
cd opencv-${OPENCV_VERSION} && \
|
||||
mkdir build && cd build && \
|
||||
cmake -D CMAKE_BUILD_TYPE=RELEASE \
|
||||
-D CMAKE_INSTALL_PREFIX=/usr/local \
|
||||
-D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib-${OPENCV_VERSION}/modules \
|
||||
-D WITH_JASPER=OFF \
|
||||
-D BUILD_DOCS=OFF \
|
||||
-D BUILD_EXAMPLES=OFF \
|
||||
-D BUILD_TESTS=OFF \
|
||||
-D BUILD_PERF_TESTS=OFF \
|
||||
-D BUILD_opencv_java=NO \
|
||||
-D BUILD_opencv_python=NO \
|
||||
-D BUILD_opencv_python2=NO \
|
||||
-D BUILD_opencv_python3=NO \
|
||||
-D OPENCV_GENERATE_PKGCONFIG=ON .. && \
|
||||
make -j $(nproc --all) && \
|
||||
make preinstall && make install && ldconfig && \
|
||||
cd / && rm -rf opencv*
|
||||
|
||||
#################
|
||||
# Go + OpenCV #
|
||||
#################
|
||||
FROM opencv AS gocv
|
||||
LABEL maintainer="hybridgroup"
|
||||
|
||||
ARG GOVERSION="1.14.1"
|
||||
ENV GOVERSION $GOVERSION
|
||||
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
git software-properties-common && \
|
||||
curl -Lo go${GOVERSION}.linux-amd64.tar.gz https://dl.google.com/go/go${GOVERSION}.linux-amd64.tar.gz && \
|
||||
tar -C /usr/local -xzf go${GOVERSION}.linux-amd64.tar.gz && \
|
||||
rm go${GOVERSION}.linux-amd64.tar.gz && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
# to build this docker image:
|
||||
# docker build .
|
||||
FROM gocv/opencv:4.5.1
|
||||
|
||||
ENV GOPATH /go
|
||||
ENV PATH $GOPATH/bin:/usr/local/go/bin:$PATH
|
||||
|
||||
RUN mkdir -p "$GOPATH/src" "$GOPATH/bin" && chmod -R 777 "$GOPATH"
|
||||
WORKDIR $GOPATH
|
||||
COPY . /go/src/gocv.io/x/gocv/
|
||||
|
||||
RUN go get -u -d gocv.io/x/gocv
|
||||
WORKDIR /go/src/gocv.io/x/gocv
|
||||
RUN go build -tags example -o /build/gocv_version -i ./cmd/version/
|
||||
|
||||
WORKDIR ${GOPATH}/src/gocv.io/x/gocv/cmd/version/
|
||||
|
||||
RUN go build -o gocv_version -i main.go
|
||||
|
||||
CMD ["./gocv_version"]
|
||||
CMD ["/build/gocv_version"]
|
||||
|
12
vendor/gocv.io/x/gocv/Dockerfile.gpu
generated
vendored
Normal file
12
vendor/gocv.io/x/gocv/Dockerfile.gpu
generated
vendored
Normal file
@ -0,0 +1,12 @@
# to build this docker image:
# docker build -f Dockerfile.gpu .
FROM gocv/opencv:4.5.1-gpu AS gocv-gpu-test

ENV GOPATH /go

COPY . /go/src/gocv.io/x/gocv/

WORKDIR /go/src/gocv.io/x/gocv
RUN go build -tags example -o /build/gocv_cuda_version ./cmd/cuda/

CMD ["/build/gocv_cuda_version"]
44
vendor/gocv.io/x/gocv/Dockerfile.opencv
generated
vendored
Normal file
44
vendor/gocv.io/x/gocv/Dockerfile.opencv
generated
vendored
Normal file
@ -0,0 +1,44 @@
|
||||
# to build this docker image:
|
||||
# docker build -f Dockerfile.opencv -t gocv/opencv:4.5.1 .
|
||||
FROM golang:1.15-buster AS opencv
|
||||
LABEL maintainer="hybridgroup"
|
||||
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
git build-essential cmake pkg-config unzip libgtk2.0-dev \
|
||||
curl ca-certificates libcurl4-openssl-dev libssl-dev \
|
||||
libavcodec-dev libavformat-dev libswscale-dev libtbb2 libtbb-dev \
|
||||
libjpeg-dev libpng-dev libtiff-dev libdc1394-22-dev && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
ARG OPENCV_VERSION="4.5.1"
|
||||
ENV OPENCV_VERSION $OPENCV_VERSION
|
||||
|
||||
RUN curl -Lo opencv.zip https://github.com/opencv/opencv/archive/${OPENCV_VERSION}.zip && \
|
||||
unzip -q opencv.zip && \
|
||||
curl -Lo opencv_contrib.zip https://github.com/opencv/opencv_contrib/archive/${OPENCV_VERSION}.zip && \
|
||||
unzip -q opencv_contrib.zip && \
|
||||
rm opencv.zip opencv_contrib.zip && \
|
||||
cd opencv-${OPENCV_VERSION} && \
|
||||
mkdir build && cd build && \
|
||||
cmake -D CMAKE_BUILD_TYPE=RELEASE \
|
||||
-D WITH_IPP=OFF \
|
||||
-D WITH_OPENGL=OFF \
|
||||
-D WITH_QT=OFF \
|
||||
-D CMAKE_INSTALL_PREFIX=/usr/local \
|
||||
-D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib-${OPENCV_VERSION}/modules \
|
||||
-D OPENCV_ENABLE_NONFREE=ON \
|
||||
-D WITH_JASPER=OFF \
|
||||
-D BUILD_DOCS=OFF \
|
||||
-D BUILD_EXAMPLES=OFF \
|
||||
-D BUILD_TESTS=OFF \
|
||||
-D BUILD_PERF_TESTS=OFF \
|
||||
-D BUILD_opencv_java=NO \
|
||||
-D BUILD_opencv_python=NO \
|
||||
-D BUILD_opencv_python2=NO \
|
||||
-D BUILD_opencv_python3=NO \
|
||||
-D OPENCV_GENERATE_PKGCONFIG=ON .. && \
|
||||
make -j $(nproc --all) && \
|
||||
make preinstall && make install && ldconfig && \
|
||||
cd / && rm -rf opencv*
|
||||
|
||||
CMD ["go version"]
|
62
vendor/gocv.io/x/gocv/Dockerfile.opencv-gpu
generated
vendored
Normal file
62
vendor/gocv.io/x/gocv/Dockerfile.opencv-gpu
generated
vendored
Normal file
@ -0,0 +1,62 @@
|
||||
# to build this docker image:
|
||||
# docker build -f Dockerfile.opencv-gpu -t gocv/opencv:4.5.1-gpu .
|
||||
FROM nvidia/cuda:10.2-cudnn7-devel AS opencv-gpu-base
|
||||
LABEL maintainer="hybridgroup"
|
||||
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
git build-essential cmake pkg-config unzip libgtk2.0-dev \
|
||||
wget curl ca-certificates libcurl4-openssl-dev libssl-dev \
|
||||
libavcodec-dev libavformat-dev libswscale-dev libtbb2 libtbb-dev \
|
||||
libjpeg-dev libpng-dev libtiff-dev libdc1394-22-dev && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
ARG OPENCV_VERSION="4.5.1"
|
||||
ENV OPENCV_VERSION $OPENCV_VERSION
|
||||
|
||||
RUN curl -Lo opencv.zip https://github.com/opencv/opencv/archive/${OPENCV_VERSION}.zip && \
|
||||
unzip -q opencv.zip && \
|
||||
curl -Lo opencv_contrib.zip https://github.com/opencv/opencv_contrib/archive/${OPENCV_VERSION}.zip && \
|
||||
unzip -q opencv_contrib.zip && \
|
||||
rm opencv.zip opencv_contrib.zip && \
|
||||
cd opencv-${OPENCV_VERSION} && \
|
||||
mkdir build && cd build && \
|
||||
cmake -D CMAKE_BUILD_TYPE=RELEASE \
|
||||
-D WITH_IPP=OFF \
|
||||
-D WITH_OPENGL=OFF \
|
||||
-D WITH_QT=OFF \
|
||||
-D CMAKE_INSTALL_PREFIX=/usr/local \
|
||||
-D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib-${OPENCV_VERSION}/modules \
|
||||
-D OPENCV_ENABLE_NONFREE=ON \
|
||||
-D WITH_JASPER=OFF \
|
||||
-D BUILD_DOCS=OFF \
|
||||
-D BUILD_EXAMPLES=OFF \
|
||||
-D BUILD_TESTS=OFF \
|
||||
-D BUILD_PERF_TESTS=OFF \
|
||||
-D BUILD_opencv_java=NO \
|
||||
-D BUILD_opencv_python=NO \
|
||||
-D BUILD_opencv_python2=NO \
|
||||
-D BUILD_opencv_python3=NO \
|
||||
-D WITH_CUDA=ON \
|
||||
-D ENABLE_FAST_MATH=1 \
|
||||
-D CUDA_FAST_MATH=1 \
|
||||
-D WITH_CUBLAS=1 \
|
||||
-D CUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda/ \
|
||||
-D BUILD_opencv_cudacodec=OFF \
|
||||
-D WITH_CUDNN=ON \
|
||||
-D OPENCV_DNN_CUDA=ON \
|
||||
-D CUDA_GENERATION=Auto \
|
||||
-D OPENCV_GENERATE_PKGCONFIG=ON .. && \
|
||||
make -j $(nproc --all) && \
|
||||
make preinstall && make install && ldconfig && \
|
||||
cd / && rm -rf opencv*
|
||||
|
||||
# install golang here
|
||||
FROM opencv-gpu-base AS opencv-gpu-golang
|
||||
|
||||
ENV GO_RELEASE=1.15.5
|
||||
RUN wget https://dl.google.com/go/go${GO_RELEASE}.linux-amd64.tar.gz && \
|
||||
tar xfv go${GO_RELEASE}.linux-amd64.tar.gz -C /usr/local && \
|
||||
rm go${GO_RELEASE}.linux-amd64.tar.gz
|
||||
ENV PATH="${PATH}:/usr/local/go/bin"
|
||||
|
||||
CMD ["go version"]
|
31
vendor/gocv.io/x/gocv/Makefile
generated
vendored
31
vendor/gocv.io/x/gocv/Makefile
generated
vendored
@ -2,10 +2,10 @@
|
||||
.PHONY: test deps download build clean astyle cmds docker
|
||||
|
||||
# OpenCV version to use.
|
||||
OPENCV_VERSION?=4.4.0
|
||||
OPENCV_VERSION?=4.5.1
|
||||
|
||||
# Go version to use when building Docker image
|
||||
GOVERSION?=1.14.4
|
||||
GOVERSION?=1.15.3
|
||||
|
||||
# Temporary directory to put files into.
|
||||
TMP_DIR?=/tmp/
|
||||
@ -14,7 +14,7 @@ TMP_DIR?=/tmp/
|
||||
BUILD_SHARED_LIBS?=ON
|
||||
|
||||
# Package list for each well-known Linux distribution
|
||||
RPMS=cmake curl wget git gtk2-devel libpng-devel libjpeg-devel libtiff-devel tbb tbb-devel libdc1394-devel unzip
|
||||
RPMS=cmake curl wget git gtk2-devel libpng-devel libjpeg-devel libtiff-devel tbb tbb-devel libdc1394-devel unzip gcc-c++
|
||||
DEBS=unzip wget build-essential cmake curl git libgtk2.0-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev libdc1394-22-dev
|
||||
|
||||
explain:
|
||||
@ -60,14 +60,15 @@ download:
|
||||
rm opencv.zip opencv_contrib.zip
|
||||
cd -
|
||||
|
||||
# Download dldt source tarballs.
|
||||
download_dldt:
|
||||
# Download openvino source tarballs.
|
||||
download_openvino:
|
||||
sudo rm -rf /usr/local/dldt/
|
||||
sudo git clone https://github.com/opencv/dldt -b 2019 /usr/local/dldt/
|
||||
sudo rm -rf /usr/local/openvino/
|
||||
sudo git clone https://github.com/openvinotoolkit/openvino -b 2019_R3.1 /usr/local/openvino/
|
||||
|
||||
# Build dldt.
|
||||
build_dldt:
|
||||
cd /usr/local/dldt/inference-engine
|
||||
# Build openvino.
|
||||
build_openvino_package:
|
||||
cd /usr/local/openvino/inference-engine
|
||||
sudo git submodule init
|
||||
sudo git submodule update --recursive
|
||||
sudo ./install_dependencies.sh
|
||||
@ -184,10 +185,10 @@ install_raspi_zero: deps download build_raspi_zero sudo_install clean verify
|
||||
install_cuda: deps download sudo_pre_install_clean build_cuda sudo_install clean verify verify_cuda
|
||||
|
||||
# Do everything with openvino.
|
||||
install_openvino: deps download download_dldt sudo_pre_install_clean build_dldt sudo_install_dldt build_openvino sudo_install clean verify_openvino
|
||||
install_openvino: deps download download_openvino sudo_pre_install_clean build_openvino_package sudo_install_openvino build_openvino sudo_install clean verify_openvino
|
||||
|
||||
# Do everything with openvino and cuda.
|
||||
install_all: deps download download_dldt sudo_pre_install_clean build_dldt sudo_install_dldt build_all sudo_install clean verify_openvino verify_cuda
|
||||
install_all: deps download download_openvino sudo_pre_install_clean build_openvino_package sudo_install_openvino build_all sudo_install clean verify_openvino verify_cuda
|
||||
|
||||
# Install system wide.
|
||||
sudo_install:
|
||||
@ -197,8 +198,8 @@ sudo_install:
|
||||
cd -
|
||||
|
||||
# Install system wide.
|
||||
sudo_install_dldt:
|
||||
cd /usr/local/dldt/inference-engine/build
|
||||
sudo_install_openvino:
|
||||
cd /usr/local/openvino/inference-engine/build
|
||||
sudo $(MAKE) install
|
||||
sudo ldconfig
|
||||
cd -
|
||||
@ -219,7 +220,7 @@ verify_openvino:
|
||||
# This assumes env.sh was already sourced.
|
||||
# pvt is not tested here since it requires additional depenedences.
|
||||
test:
|
||||
go test . ./contrib
|
||||
go test -tags matprofile . ./contrib
|
||||
|
||||
docker:
|
||||
docker build --build-arg OPENCV_VERSION=$(OPENCV_VERSION) --build-arg GOVERSION=$(GOVERSION) .
|
||||
@ -227,7 +228,7 @@ docker:
|
||||
astyle:
|
||||
astyle --project=.astylerc --recursive *.cpp,*.h
|
||||
|
||||
CMDS=basic-drawing caffe-classifier captest capwindow counter faceblur facedetect find-circles hand-gestures img-similarity mjpeg-streamer motion-detect pose saveimage savevideo showimage ssd-facedetect tf-classifier tracking version
|
||||
CMDS=basic-drawing caffe-classifier captest capwindow counter faceblur facedetect find-circles hand-gestures hello-sift img-similarity mjpeg-streamer motion-detect pose saveimage savevideo showimage ssd-facedetect tf-classifier tracking version
|
||||
cmds:
|
||||
for cmd in $(CMDS) ; do \
|
||||
go build -o build/$$cmd cmd/$$cmd/main.go ;
|
||||
|
70
vendor/gocv.io/x/gocv/README.md
generated
vendored
70
vendor/gocv.io/x/gocv/README.md
generated
vendored
@ -2,8 +2,8 @@
|
||||
|
||||
[](http://gocv.io/)
|
||||
|
||||
[](https://godoc.org/github.com/hybridgroup/gocv)
|
||||
[](https://travis-ci.org/hybridgroup/gocv)
|
||||
[](https://pkg.go.dev/gocv.io/x/gocv)
|
||||
[](https://circleci.com/gh/hybridgroup/gocv/tree/dev)
|
||||
[](https://ci.appveyor.com/project/deadprogram/gocv/branch/dev)
|
||||
[](https://codecov.io/gh/hybridgroup/gocv)
|
||||
[](https://goreportcard.com/report/github.com/hybridgroup/gocv)
|
||||
@ -11,7 +11,9 @@
|
||||
|
||||
The GoCV package provides Go language bindings for the [OpenCV 4](http://opencv.org/) computer vision library.
|
||||
|
||||
The GoCV package supports the latest releases of Go and OpenCV (v4.4.0) on Linux, macOS, and Windows. We intend to make the Go language a "first-class" client compatible with the latest developments in the OpenCV ecosystem.
|
||||
The GoCV package supports the latest releases of Go and OpenCV (v4.5.1) on Linux, macOS, and Windows. We intend to make the Go language a "first-class" client compatible with the latest developments in the OpenCV ecosystem.
|
||||
|
||||
GoCV supports [CUDA](https://en.wikipedia.org/wiki/CUDA) for hardware acceleration using Nvidia GPUs. Check out the [CUDA README](./cuda/README.md) for more info on how to use GoCV with OpenCV/CUDA.
|
||||
|
||||
GoCV also supports [Intel OpenVINO](https://software.intel.com/en-us/openvino-toolkit). Check out the [OpenVINO README](./openvino/README.md) for more info on how to use GoCV with the Intel OpenVINO toolkit.
|
||||
|
||||
@ -127,17 +129,17 @@ To install GoCV, run the following command:
|
||||
go get -u -d gocv.io/x/gocv
|
||||
```
|
||||
|
||||
To run code that uses the GoCV package, you must also install OpenCV 4.4.0 on your system. Here are instructions for Ubuntu, Raspian, macOS, and Windows.
|
||||
To run code that uses the GoCV package, you must also install OpenCV 4.5.1 on your system. Here are instructions for Ubuntu, Raspian, macOS, and Windows.
|
||||
|
||||
## Ubuntu/Linux
|
||||
|
||||
### Installation
|
||||
|
||||
You can use `make` to install OpenCV 4.4.0 with the handy `Makefile` included with this repo. If you already have installed OpenCV, you do not need to do so again. The installation performed by the `Makefile` is minimal, so it may remove OpenCV options such as Python or Java wrappers if you have already installed OpenCV some other way.
|
||||
You can use `make` to install OpenCV 4.5.1 with the handy `Makefile` included with this repo. If you already have installed OpenCV, you do not need to do so again. The installation performed by the `Makefile` is minimal, so it may remove OpenCV options such as Python or Java wrappers if you have already installed OpenCV some other way.
|
||||
|
||||
#### Quick Install
|
||||
|
||||
The following commands should do everything to download and install OpenCV 4.4.0 on Linux:
|
||||
The following commands should do everything to download and install OpenCV 4.5.1 on Linux:
|
||||
|
||||
cd $GOPATH/src/gocv.io/x/gocv
|
||||
make install
|
||||
@ -148,22 +150,22 @@ If you need static opencv libraries
|
||||
|
||||
If it works correctly, at the end of the entire process, the following message should be displayed:
|
||||
|
||||
gocv version: 0.22.0
|
||||
opencv lib version: 4.4.0
|
||||
gocv version: 0.26.0
|
||||
opencv lib version: 4.5.1
|
||||
|
||||
That's it, now you are ready to use GoCV.
|
||||
|
||||
#### Install Cuda
|
||||
#### Using CUDA with GoCV
|
||||
|
||||
[cuda directory](./cuda)
|
||||
See the [cuda directory](./cuda) for information.
|
||||
|
||||
#### Install OpenVINO
|
||||
#### Using OpenVINO with GoCV
|
||||
|
||||
[openvino directory](./openvino)
|
||||
See the [openvino directory](./openvino) for information.
|
||||
|
||||
#### Install OpenVINO and Cuda
|
||||
#### Make Install for OpenVINO and Cuda
|
||||
|
||||
The following commands should do everything to download and install OpenCV 4.4.0 with Cuda and OpenVINO on Linux:
|
||||
The following commands should do everything to download and install OpenCV 4.5.1 with CUDA and OpenVINO on Linux:
|
||||
|
||||
cd $GOPATH/src/gocv.io/x/gocv
|
||||
make install_all
|
||||
@ -174,8 +176,8 @@ If you need static opencv libraries
|
||||
|
||||
If it works correctly, at the end of the entire process, the following message should be displayed:
|
||||
|
||||
gocv version: 0.22.0
|
||||
opencv lib version: 4.4.0-openvino
|
||||
gocv version: 0.26.0
|
||||
opencv lib version: 4.5.1-openvino
|
||||
cuda information:
|
||||
Device 0: "GeForce MX150" 2003Mb, sm_61, Driver/Runtime ver.10.0/10.0
|
||||
|
||||
@ -195,7 +197,7 @@ Next, you need to update the system, and install any required packages:
|
||||
|
||||
#### Download source
|
||||
|
||||
Now, download the OpenCV 4.4.0 and OpenCV Contrib source code:
|
||||
Now, download the OpenCV 4.5.1 and OpenCV Contrib source code:
|
||||
|
||||
make download
|
||||
|
||||
@ -229,8 +231,8 @@ Now you should be able to build or run any of the examples:
|
||||
|
||||
The version program should output the following:
|
||||
|
||||
gocv version: 0.22.0
|
||||
opencv lib version: 4.4.0
|
||||
gocv version: 0.26.0
|
||||
opencv lib version: 4.5.1
|
||||
|
||||
#### Cleanup extra files
|
||||
|
||||
@ -315,19 +317,19 @@ There is a Docker image with Alpine 3.7 that has been created by project contrib
|
||||
|
||||
### Installation
|
||||
|
||||
We have a special installation for the Raspberry Pi that includes some hardware optimizations. You use `make` to install OpenCV 4.4.0 with the handy `Makefile` included with this repo. If you already have installed OpenCV, you do not need to do so again. The installation performed by the `Makefile` is minimal, so it may remove OpenCV options such as Python or Java wrappers if you have already installed OpenCV some other way.
|
||||
We have a special installation for the Raspberry Pi that includes some hardware optimizations. You use `make` to install OpenCV 4.5.1 with the handy `Makefile` included with this repo. If you already have installed OpenCV, you do not need to do so again. The installation performed by the `Makefile` is minimal, so it may remove OpenCV options such as Python or Java wrappers if you have already installed OpenCV some other way.
|
||||
|
||||
#### Quick Install
|
||||
|
||||
The following commands should do everything to download and install OpenCV 4.4.0 on Raspbian:
|
||||
The following commands should do everything to download and install OpenCV 4.5.1 on Raspbian:
|
||||
|
||||
cd $GOPATH/src/gocv.io/x/gocv
|
||||
make install_raspi
|
||||
|
||||
If it works correctly, at the end of the entire process, the following message should be displayed:
|
||||
|
||||
gocv version: 0.22.0
|
||||
opencv lib version: 4.4.0
|
||||
gocv version: 0.26.0
|
||||
opencv lib version: 4.5.1
|
||||
|
||||
That's it, now you are ready to use GoCV.
|
||||
|
||||
@ -335,13 +337,13 @@ That's it, now you are ready to use GoCV.
|
||||
|
||||
### Installation
|
||||
|
||||
You can install OpenCV 4.4.0 using Homebrew.
|
||||
You can install OpenCV 4.5.1 using Homebrew.
|
||||
|
||||
If you already have an earlier version of OpenCV (3.4.x) installed, you should probably remove it before installing the new version:
|
||||
|
||||
brew uninstall opencv
|
||||
|
||||
You can then install OpenCV 4.4.0:
|
||||
You can then install OpenCV 4.5.1:
|
||||
|
||||
brew install opencv
|
||||
|
||||
@ -365,8 +367,8 @@ Now you should be able to build or run any of the examples:
|
||||
|
||||
The version program should output the following:
|
||||
|
||||
gocv version: 0.22.0
|
||||
opencv lib version: 4.4.0
|
||||
gocv version: 0.26.0
|
||||
opencv lib version: 4.5.1
|
||||
|
||||
### Cache builds
|
||||
|
||||
@ -381,8 +383,8 @@ By default, pkg-config is used to determine the correct flags for compiling and
|
||||
For example:
|
||||
|
||||
export CGO_CXXFLAGS="--std=c++11"
|
||||
export CGO_CPPFLAGS="-I/usr/local/Cellar/opencv/4.4.0/include"
|
||||
export CGO_LDFLAGS="-L/usr/local/Cellar/opencv/4.4.0/lib -lopencv_stitching -lopencv_superres -lopencv_videostab -lopencv_aruco -lopencv_bgsegm -lopencv_bioinspired -lopencv_ccalib -lopencv_dnn_objdetect -lopencv_dpm -lopencv_face -lopencv_photo -lopencv_fuzzy -lopencv_hfs -lopencv_img_hash -lopencv_line_descriptor -lopencv_optflow -lopencv_reg -lopencv_rgbd -lopencv_saliency -lopencv_stereo -lopencv_structured_light -lopencv_phase_unwrapping -lopencv_surface_matching -lopencv_tracking -lopencv_datasets -lopencv_dnn -lopencv_plot -lopencv_xfeatures2d -lopencv_shape -lopencv_video -lopencv_ml -lopencv_ximgproc -lopencv_calib3d -lopencv_features2d -lopencv_highgui -lopencv_videoio -lopencv_flann -lopencv_xobjdetect -lopencv_imgcodecs -lopencv_objdetect -lopencv_xphoto -lopencv_imgproc -lopencv_core"
|
||||
export CGO_CPPFLAGS="-I/usr/local/Cellar/opencv/4.5.1/include"
|
||||
export CGO_LDFLAGS="-L/usr/local/Cellar/opencv/4.5.1/lib -lopencv_stitching -lopencv_superres -lopencv_videostab -lopencv_aruco -lopencv_bgsegm -lopencv_bioinspired -lopencv_ccalib -lopencv_dnn_objdetect -lopencv_dpm -lopencv_face -lopencv_photo -lopencv_fuzzy -lopencv_hfs -lopencv_img_hash -lopencv_line_descriptor -lopencv_optflow -lopencv_reg -lopencv_rgbd -lopencv_saliency -lopencv_stereo -lopencv_structured_light -lopencv_phase_unwrapping -lopencv_surface_matching -lopencv_tracking -lopencv_datasets -lopencv_dnn -lopencv_plot -lopencv_xfeatures2d -lopencv_shape -lopencv_video -lopencv_ml -lopencv_ximgproc -lopencv_calib3d -lopencv_features2d -lopencv_highgui -lopencv_videoio -lopencv_flann -lopencv_xobjdetect -lopencv_imgcodecs -lopencv_objdetect -lopencv_xphoto -lopencv_imgproc -lopencv_core"
|
||||
|
||||
Please note that you will need to run these 3 lines of code one time in your current session in order to build or run the code, in order to setup the needed ENV variables. Once you have done so, you can execute code that uses GoCV with your custom environment like this:
|
||||
|
||||
@ -394,7 +396,7 @@ Please note that you will need to run these 3 lines of code one time in your cur
|
||||
|
||||
The following assumes that you are running a 64-bit version of Windows 10.
|
||||
|
||||
In order to build and install OpenCV 4.4.0 on Windows, you must first download and install MinGW-W64 and CMake, as follows.
|
||||
In order to build and install OpenCV 4.5.1 on Windows, you must first download and install MinGW-W64 and CMake, as follows.
|
||||
|
||||
#### MinGW-W64
|
||||
|
||||
@ -410,9 +412,9 @@ Add the `C:\Program Files\mingw-w64\x86_64-7.3.0-posix-seh-rt_v5-rev2\mingw64\bi
|
||||
|
||||
Download and install CMake [https://cmake.org/download/](https://cmake.org/download/) to the default location. CMake installer will add CMake to your system path.
|
||||
|
||||
#### OpenCV 4.4.0 and OpenCV Contrib Modules
|
||||
#### OpenCV 4.5.1 and OpenCV Contrib Modules
|
||||
|
||||
The following commands should do everything to download and install OpenCV 4.4.0 on Windows:
|
||||
The following commands should do everything to download and install OpenCV 4.5.1 on Windows:
|
||||
|
||||
chdir %GOPATH%\src\gocv.io\x\gocv
|
||||
win_build_opencv.cmd
|
||||
@ -433,8 +435,8 @@ Now you should be able to build or run any of the command examples:
|
||||
|
||||
The version program should output the following:
|
||||
|
||||
gocv version: 0.22.0
|
||||
opencv lib version: 4.4.0
|
||||
gocv version: 0.26.0
|
||||
opencv lib version: 4.5.1
|
||||
|
||||
That's it, now you are ready to use GoCV.
|
||||
|
||||
|
119
vendor/gocv.io/x/gocv/ROADMAP.md
generated
vendored
119
vendor/gocv.io/x/gocv/ROADMAP.md
generated
vendored
@ -16,7 +16,6 @@ Your pull requests will be greatly appreciated!
|
||||
- [ ] **Basic structures - WORK STARTED**
|
||||
- [ ] **Operations on arrays - WORK STARTED**. The following functions still need implementation:
|
||||
- [ ] [Mahalanobis](https://docs.opencv.org/master/d2/de8/group__core__array.html#ga4493aee129179459cbfc6064f051aa7d)
|
||||
- [ ] [mixChannels](https://docs.opencv.org/master/d2/de8/group__core__array.html#ga51d768c270a1cdd3497255017c4504be)
|
||||
- [ ] [mulTransposed](https://docs.opencv.org/master/d2/de8/group__core__array.html#gadc4e49f8f7a155044e3be1b9e3b270ab)
|
||||
- [ ] [PCABackProject](https://docs.opencv.org/master/d2/de8/group__core__array.html#gab26049f30ee8e94f7d69d82c124faafc)
|
||||
- [ ] [PCACompute](https://docs.opencv.org/master/d2/de8/group__core__array.html#ga4e2073c7311f292a0648f04c37b73781)
|
||||
@ -82,7 +81,10 @@ Your pull requests will be greatly appreciated!
|
||||
- [ ] [pointPolygonTest](https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#ga1a539e8db2135af2566103705d7a5722)
|
||||
- [ ] [rotatedRectangleIntersection](https://docs.opencv.org/master/d3/dc0/group__imgproc__shape.html#ga8740e7645628c59d238b0b22c2abe2d4)
|
||||
|
||||
- [ ] Motion Analysis and Object Tracking
|
||||
- [ ] **Motion Analysis and Object Tracking - WORK STARTED** The following functions still need implementation:
|
||||
- [ ] [createHanningWindow](https://docs.opencv.org/master/d7/df3/group__imgproc__motion.html#ga80e5c3de52f6bab3a7c1e60e89308e1b)
|
||||
- [ ] [phaseCorrelate](https://docs.opencv.org/master/d7/df3/group__imgproc__motion.html#ga552420a2ace9ef3fb053cd630fdb4952)
|
||||
|
||||
- [ ] **Feature Detection - WORK STARTED** The following functions still need implementation:
|
||||
- [ ] [cornerEigenValsAndVecs](https://docs.opencv.org/master/dd/d1a/group__imgproc__feature.html#ga4055896d9ef77dd3cacf2c5f60e13f1c)
|
||||
- [ ] [cornerHarris](https://docs.opencv.org/master/dd/d1a/group__imgproc__feature.html#gac1fc3598018010880e370e2f709b4345)
|
||||
@ -107,6 +109,7 @@ Your pull requests will be greatly appreciated!
|
||||
- [ ] [FarnebackOpticalFlow](https://docs.opencv.org/master/de/d9e/classcv_1_1FarnebackOpticalFlow.html)
|
||||
- [ ] [KalmanFilter](https://docs.opencv.org/master/dd/d6a/classcv_1_1KalmanFilter.html)
|
||||
- [ ] [SparsePyrLKOpticalFlow](https://docs.opencv.org/master/d7/d08/classcv_1_1SparsePyrLKOpticalFlow.html)
|
||||
- [ ] [GOTURN](https://docs.opencv.org/master/d7/d4c/classcv_1_1TrackerGOTURN.html)
|
||||
|
||||
- [ ] **calib3d. Camera Calibration and 3D Reconstruction - WORK STARTED**. The following functions still need implementation:
|
||||
- [ ] **Camera Calibration - WORK STARTED** The following functions still need implementation:
|
||||
@ -171,31 +174,102 @@ Your pull requests will be greatly appreciated!
|
||||
|
||||
- [ ] **features2d. 2D Features Framework - WORK STARTED**
|
||||
- [X] **Feature Detection and Description**
|
||||
- [ ] **Descriptor Matchers - WORK STARTED** The following functions still need implementation:
|
||||
- [ ] [FlannBasedMatcher](https://docs.opencv.org/master/dc/de2/classcv_1_1FlannBasedMatcher.html)
|
||||
- [ ] **Drawing Function of Keypoints and Matches - WORK STARTED** The following function still needs implementation:
|
||||
- [ ] [drawMatches](https://docs.opencv.org/master/d4/d5d/group__features2d__draw.html#ga7421b3941617d7267e3f2311582f49e1)
|
||||
- [X] **Descriptor Matchers**
|
||||
- [X] **Drawing Function of Keypoints and Matches**
|
||||
- [ ] Object Categorization
|
||||
- [ ] [BOWImgDescriptorExtractor](https://docs.opencv.org/master/d2/d6b/classcv_1_1BOWImgDescriptorExtractor.html)
|
||||
- [ ] [BOWKMeansTrainer](https://docs.opencv.org/master/d4/d72/classcv_1_1BOWKMeansTrainer.html)
|
||||
|
||||
- [X] **objdetect. Object Detection**
|
||||
- [ ] **dnn. Deep Neural Network module - WORK STARTED** The following functions still need implementation:
|
||||
- [ ] [NMSBoxes](https://docs.opencv.org/master/d6/d0f/group__dnn.html#ga9d118d70a1659af729d01b10233213ee)
|
||||
|
||||
- [X] **dnn. Deep Neural Network module**
|
||||
- [ ] ml. Machine Learning
|
||||
- [ ] flann. Clustering and Search in Multi-Dimensional Spaces
|
||||
- [ ] photo. Computational Photography
|
||||
- [ ] **photo. Computational Photography - WORK STARTED** The following functions still need implementation:
|
||||
- [ ] [inpaint](https://docs.opencv.org/master/d7/d8b/group__photo__inpaint.html#gaedd30dfa0214fec4c88138b51d678085)
|
||||
- [ ] [denoise_TVL1](https://docs.opencv.org/master/d1/d79/group__photo__denoise.html#ga7602ed5ae17b7de40152b922227c4e4f)
|
||||
- [ ] [fastNlMeansDenoising](https://docs.opencv.org/master/d1/d79/group__photo__denoise.html#ga4c6b0031f56ea3f98f768881279ffe93)
|
||||
- [ ] [fastNlMeansDenoisingColored](https://docs.opencv.org/master/d1/d79/group__photo__denoise.html#ga03aa4189fc3e31dafd638d90de335617)
|
||||
- [ ] [fastNlMeansDenoisingColoredMulti](https://docs.opencv.org/master/d1/d79/group__photo__denoise.html#gaa501e71f52fb2dc17ff8ca5e7d2d3619)
|
||||
- [ ] [fastNlMeansDenoisingMulti](https://docs.opencv.org/master/d1/d79/group__photo__denoise.html#gaf4421bf068c4d632ea7f0aa38e0bf172)
|
||||
- [ ] [createAlignMTB](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#ga2f1fafc885a5d79dbfb3542e08db0244)
|
||||
- [ ] [createCalibrateDebevec](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#ga7fed9707ad5f2cc0e633888867109f90)
|
||||
- [ ] [createCalibrateRobertson](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#gae77813a21cd351a596619e5ff013be5d)
|
||||
- [ ] [createMergeDebevec](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#gaa8eab36bc764abb2a225db7c945f87f9)
|
||||
- [ ] [createMergeMertens](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#ga79d59aa3cb3a7c664e59a4b5acc1ccb6)
|
||||
- [ ] [createMergeRobertson](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#ga460d4a1df1a7e8cdcf7445bb87a8fb78)
|
||||
- [ ] [createTonemap](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#gabcbd653140b93a1fa87ccce94548cd0d)
|
||||
- [ ] [createTonemapDrago](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#ga72bf92bb6b8653ee4be650ac01cf50b6)
|
||||
- [ ] [createTonemapMantiuk](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#ga3b3f3bf083b7515802f039a6a70f2d21)
|
||||
- [ ] [createTonemapReinhard](https://docs.opencv.org/master/d6/df5/group__photo__hdr.html#gadabe7f6bf1fa96ad0fd644df9182c2fb)
|
||||
- [ ] [decolor](https://docs.opencv.org/master/d4/d32/group__photo__decolor.html#ga4864d4c007bda5dacdc5e9d4ed7e222c)
|
||||
- [ ] [detailEnhance](https://docs.opencv.org/master/df/dac/group__photo__render.html#ga0de660cb6f371a464a74c7b651415975)
|
||||
- [ ] [edgePreservingFilter](https://docs.opencv.org/master/df/dac/group__photo__render.html#gafaee2977597029bc8e35da6e67bd31f7)
|
||||
- [ ] [pencilSketch](https://docs.opencv.org/master/df/dac/group__photo__render.html#gae5930dd822c713b36f8529b21ddebd0c)
|
||||
- [ ] [stylization](https://docs.opencv.org/master/df/dac/group__photo__render.html#gacb0f7324017df153d7b5d095aed53206)
|
||||
|
||||
- [ ] stitching. Images stitching
|
||||
- [ ] cudaarithm. Operations on Matrices
|
||||
- [ ] cudabgsegm. Background Segmentation
|
||||
|
||||
## CUDA
|
||||
|
||||
- [ ] **cudaarithm. Operations on Matrices - WORK STARTED** The following functions still need implementation:
|
||||
- [ ] [cv::cuda::abs](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga54a72bd772494ab34d05406fd76df2b6)
|
||||
- [ ] [cv::cuda::absdiff](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#gac062b283cf46ee90f74a773d3382ab54)
|
||||
- [ ] [cv::cuda::add](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga5d9794bde97ed23d1c1485249074a8b1)
|
||||
- [ ] [cv::cuda::addWeighted](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga2cd14a684ea70c6ab2a63ee90ffe6201)
|
||||
- [ ] [cv::cuda::bitwise_and](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga78d7c1a013877abd4237fbfc4e13bd76)
|
||||
- [ ] [cv::cuda::bitwise_not](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#gae58159a2259ae1acc76b531c171cf06a)
|
||||
- [ ] [cv::cuda::bitwise_or](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#gafd098ee3e51c68daa793999c1da3dfb7)
|
||||
- [ ] [cv::cuda::bitwise_xor](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga3d95d4faafb099aacf18e8b915a4ad8d)
|
||||
- [ ] [cv::cuda::cartToPolar](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga82210c7d1c1d42e616e554bf75a53480)
|
||||
- [ ] [cv::cuda::compare](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga4d41cd679f4a83862a3de71a6057db54)
|
||||
- [ ] [cv::cuda::divide](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga124315aa226260841e25cc0b9ea99dc3)
|
||||
- [ ] [cv::cuda::exp](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#gac6e51541d3bb0a7a396128e4d5919b61)
|
||||
- [ ] [cv::cuda::log](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#gaae9c60739e2d1a977b4d3250a0be42ca)
|
||||
- [ ] [cv::cuda::lshift](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#gafd072accecb14c9adccdad45e3bf2300)
|
||||
- [ ] [cv::cuda::magnitude](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga3d17f4fcd79d7c01fadd217969009463)
|
||||
- [ ] [cv::cuda::magnitudeSqr](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga7613e382d257e150033d0ce4d6098f6a)
|
||||
- [ ] [cv::cuda::max](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#gadb5dd3d870f10c0866035755b929b1e7)
|
||||
- [ ] [cv::cuda::min](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga74f0b05a65b3d949c237abb5e6c60867)
|
||||
- [ ] [cv::cuda::multiply](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga497cc0615bf717e1e615143b56f00591)
|
||||
- [ ] [cv::cuda::phase](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga5b75ec01be06dcd6e27ada09a0d4656a)
|
||||
- [ ] [cv::cuda::polarToCart](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga01516a286a329c303c2db746513dd9df)
|
||||
- [ ] [cv::cuda::pow](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga82d04ef4bcc4dfa9bfbe76488007c6c4)
|
||||
- [ ] [cv::cuda::rshift](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga87af0b66358cc302676f35c1fd56c2ed)
|
||||
- [ ] [cv::cuda::sqr](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga8aae233da90ce0ffe309ab8004342acb)
|
||||
- [ ] [cv::cuda::sqrt](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga09303680cb1a5521a922b6d392028d8c)
|
||||
- [ ] [cv::cuda::subtract](https://docs.opencv.org/master/d8/d34/group__cudaarithm__elem.html#ga6eab60fc250059e2fda79c5636bd067f)
|
||||
|
||||
- [X] **cudabgsegm. Background Segmentation**
|
||||
- [ ] cudacodec. Video Encoding/Decoding
|
||||
- [ ] cudafeatures2d. Feature Detection and Description
|
||||
- [ ] cudafilters. Image Filtering
|
||||
- [ ] cudaimgproc. Image Processing
|
||||
- [ ] **cudaimgproc. Image Processing - WORK STARTED** The following functions still need implementation:
|
||||
- [ ] [cv::cuda::TemplateMatching](https://docs.opencv.org/master/d2/d58/classcv_1_1cuda_1_1TemplateMatching.html)
|
||||
- [ ] [cv::cuda::alphaComp](https://docs.opencv.org/master/db/d8c/group__cudaimgproc__color.html#ga08a698700458d9311390997b57fbf8dc)
|
||||
- [ ] [cv::cuda::demosaicing](https://docs.opencv.org/master/db/d8c/group__cudaimgproc__color.html#ga7fb153572b573ebd2d7610fcbe64166e)
|
||||
- [ ] [cv::cuda::gammaCorrection](https://docs.opencv.org/master/db/d8c/group__cudaimgproc__color.html#gaf4195a8409c3b8fbfa37295c2b2c4729)
|
||||
- [ ] [cv::cuda::swapChannels](https://docs.opencv.org/master/db/d8c/group__cudaimgproc__color.html#ga75a29cc4a97cde0d43ea066b01de927e)
|
||||
- [ ] [cv::cuda::calcHist](https://docs.opencv.org/master/d8/d0e/group__cudaimgproc__hist.html#gaaf3944106890947020bb4522a7619c26)
|
||||
- [ ] [cv::cuda::CLAHE](https://docs.opencv.org/master/db/d79/classcv_1_1cuda_1_1CLAHE.html)
|
||||
- [ ] [cv::cuda::equalizeHist](https://docs.opencv.org/master/d8/d0e/group__cudaimgproc__hist.html#ga2384be74bd2feba7e6c46815513f0060)
|
||||
- [ ] [cv::cuda::evenLevels](https://docs.opencv.org/master/d8/d0e/group__cudaimgproc__hist.html#ga2f2cbd21dc6d7367a7c4ee1a826f389d)
|
||||
- [ ] [cv::cuda::histEven](https://docs.opencv.org/master/d8/d0e/group__cudaimgproc__hist.html#gacd3b14279fb77a57a510cb8c89a1856f)
|
||||
- [ ] [cv::cuda::histRange](https://docs.opencv.org/master/d8/d0e/group__cudaimgproc__hist.html#ga87819085c1059186d9cdeacd92cea783)
|
||||
- [ ] [cv::cuda::HoughCirclesDetector](https://docs.opencv.org/master/da/d80/classcv_1_1cuda_1_1HoughCirclesDetector.html)
|
||||
- [ ] [cv::cuda::HoughLinesDetector](https://docs.opencv.org/master/d2/dcd/classcv_1_1cuda_1_1HoughLinesDetector.html)
|
||||
- [ ] [cv::cuda::HoughSegmentDetector](https://docs.opencv.org/master/d6/df9/classcv_1_1cuda_1_1HoughSegmentDetector.html)
|
||||
- [ ] [cv::cuda::createGoodFeaturesToTrackDetector](https://docs.opencv.org/master/dc/d6d/group__cudaimgproc__feature.html#ga478b474a598ece101f7e706fee2c8e91)
|
||||
- [ ] [cv::cuda::createHarrisCorner](https://docs.opencv.org/master/dc/d6d/group__cudaimgproc__feature.html#ga3e5878a803e9bba51added0c10101979)
|
||||
- [ ] [cv::cuda::createMinEigenValCorner](https://docs.opencv.org/master/dc/d6d/group__cudaimgproc__feature.html#ga7457fd4b53b025f990b1c1dd1b749915)
|
||||
- [ ] [cv::cuda::bilateralFilter](https://docs.opencv.org/master/d0/d05/group__cudaimgproc.html#ga6abeaecdd4e7edc0bd1393a04f4f20bd)
|
||||
- [ ] [cv::cuda::blendLinear](https://docs.opencv.org/master/d0/d05/group__cudaimgproc.html#ga4793607e5729bcc15b27ea33d9fe335e)
|
||||
- [ ] [cv::cuda::meanShiftFiltering](https://docs.opencv.org/master/d0/d05/group__cudaimgproc.html#gae13b3035bc6df0e512d876dbb8c00555)
|
||||
- [ ] [cv::cuda::meanShiftProc](https://docs.opencv.org/master/d0/d05/group__cudaimgproc.html#ga6039dc8ecbe2f912bc83fcc9b3bcca39)
|
||||
- [ ] [cv::cuda::meanShiftSegmentation](https://docs.opencv.org/master/d0/d05/group__cudaimgproc.html#ga70ed80533a448829dc48cf22b1845c16)
|
||||
|
||||
- [ ] cudalegacy. Legacy support
|
||||
- [ ] cudaobjdetect. Object Detection
|
||||
- [ ] **cudaoptflow. Optical Flow - WORK STARTED**
|
||||
- [X] **cudaobjdetect. Object Detection**
|
||||
- [ ] **cudaoptflow. Optical Flow - WORK STARTED** The following functions still need implementation:
|
||||
- [ ] [BroxOpticalFlow](https://docs.opencv.org/master/d7/d18/classcv_1_1cuda_1_1BroxOpticalFlow.html)
|
||||
- [ ] [DenseOpticalFlow](https://docs.opencv.org/master/d6/d4a/classcv_1_1cuda_1_1DenseOpticalFlow.html)
|
||||
- [ ] [DensePyrLKOpticalFlow](https://docs.opencv.org/master/d0/da4/classcv_1_1cuda_1_1DensePyrLKOpticalFlow.html)
|
||||
@ -208,13 +282,10 @@ Your pull requests will be greatly appreciated!
|
||||
- [ ] cudastereo. Stereo Correspondence
|
||||
- [X] **cudawarping. Image Warping**
|
||||
- [ ] cudev. Device layer
|
||||
- [ ] shape. Shape Distance and Matching
|
||||
- [ ] superres. Super Resolution
|
||||
- [ ] videostab. Video Stabilization
|
||||
- [ ] viz. 3D Visualizer
|
||||
|
||||
## Contrib modules list
|
||||
|
||||
- [ ] alphamat. Alpha Matting
|
||||
- [ ] aruco. ArUco Marker Detection
|
||||
- [X] **bgsegm. Improved Background-Foreground Segmentation Methods - WORK STARTED**
|
||||
- [ ] bioinspired. Biologically inspired vision models and derived tools
|
||||
@ -223,26 +294,36 @@ Your pull requests will be greatly appreciated!
|
||||
- [ ] cvv. GUI for Interactive Visual Debugging of Computer Vision Programs
|
||||
- [ ] datasets. Framework for working with different datasets
|
||||
- [ ] dnn_modern. Deep Learning Modern Module
|
||||
- [ ] dnn_objdetect. DNN used for object detection
|
||||
- [ ] dnn_superres. DNN used for super resolution
|
||||
- [ ] dpm. Deformable Part-based Models
|
||||
- [ ] **face. Face Recognition - WORK STARTED**
|
||||
- [ ] freetype. Drawing UTF-8 strings with freetype/harfbuzz
|
||||
- [ ] fuzzy. Image processing based on fuzzy mathematics
|
||||
- [ ] hdf. Hierarchical Data Format I/O routines
|
||||
- [ ] hfs. Hierarchical Feature Selection for Efficient Image Segmentation
|
||||
- [X] **img_hash. The module brings implementations of different image hashing algorithms.**
|
||||
- [ ] intensity_transform. The module brings implementations of intensity transformation algorithms to adjust image contrast.
|
||||
- [ ] line_descriptor. Binary descriptors for lines extracted from an image
|
||||
- [ ] mcc. Macbeth Chart module
|
||||
- [ ] matlab. MATLAB Bridge
|
||||
- [ ] optflow. Optical Flow Algorithms
|
||||
- [ ] ovis. OGRE 3D Visualiser
|
||||
- [ ] phase_unwrapping. Phase Unwrapping API
|
||||
- [ ] plot. Plot function for Mat data
|
||||
- [ ] reg. Image Registration
|
||||
- [ ] rgbd. RGB-Depth Processing
|
||||
- [ ] saliency. Saliency API
|
||||
- [ ] sfm. Structure From Motion
|
||||
- [ ] shape. Shape Distance and Matching
|
||||
- [ ] stereo. Stereo Correspondence Algorithms
|
||||
- [ ] structured_light. Structured Light API
|
||||
- [ ] superres. Super Resolution
|
||||
- [ ] surface_matching. Surface Matching
|
||||
- [ ] text. Scene Text Detection and Recognition
|
||||
- [ ] **tracking. Tracking API - WORK STARTED**
|
||||
- [ ] videostab. Video Stabilization
|
||||
- [ ] viz. 3D Visualizer
|
||||
- [ ] **xfeatures2d. Extra 2D Features Framework - WORK STARTED**
|
||||
- [ ] ximgproc. Extended Image Processing
|
||||
- [ ] xobjdetect. Extended object detection
|
||||
|
3
vendor/gocv.io/x/gocv/appveyor.yml
generated
vendored
@ -8,7 +8,7 @@ platform:
environment:
GOPATH: c:\gopath
GOROOT: c:\go
GOVERSION: 1.14
GOVERSION: 1.15
TEST_EXTERNAL: 1
APPVEYOR_SAVE_CACHE_ON_ERROR: true
@ -27,6 +27,7 @@ install:
- go get -d .
- set GOCV_CAFFE_TEST_FILES=C:\opencv\testdata
- set GOCV_TENSORFLOW_TEST_FILES=C:\opencv\testdata
- set GOCV_ONNX_TEST_FILES=C:\opencv\testdata
- set OPENCV_ENABLE_NONFREE=ON
- go env
20
vendor/gocv.io/x/gocv/appveyor_build_opencv.cmd
generated
vendored
@ -1,23 +1,25 @@
|
||||
|
||||
if not exist "C:\opencv" mkdir "C:\opencv"
|
||||
if not exist "C:\opencv\build" mkdir "C:\opencv\build"
|
||||
if not exist "C:\opencv\testdata" mkdir "C:\opencv\testdata"
|
||||
|
||||
appveyor DownloadFile https://github.com/opencv/opencv/archive/4.4.0.zip -FileName c:\opencv\opencv-4.4.0.zip
|
||||
7z x c:\opencv\opencv-4.4.0.zip -oc:\opencv -y
|
||||
del c:\opencv\opencv-4.4.0.zip /q
|
||||
appveyor DownloadFile https://github.com/opencv/opencv_contrib/archive/4.4.0.zip -FileName c:\opencv\opencv_contrib-4.4.0.zip
|
||||
7z x c:\opencv\opencv_contrib-4.4.0.zip -oc:\opencv -y
|
||||
del c:\opencv\opencv_contrib-4.4.0.zip /q
|
||||
appveyor DownloadFile https://github.com/opencv/opencv/archive/4.5.1.zip -FileName c:\opencv\opencv-4.5.1.zip
|
||||
7z x c:\opencv\opencv-4.5.1.zip -oc:\opencv -y
|
||||
del c:\opencv\opencv-4.5.1.zip /q
|
||||
appveyor DownloadFile https://github.com/opencv/opencv_contrib/archive/4.5.1.zip -FileName c:\opencv\opencv_contrib-4.5.1.zip
|
||||
7z x c:\opencv\opencv_contrib-4.5.1.zip -oc:\opencv -y
|
||||
del c:\opencv\opencv_contrib-4.5.1.zip /q
|
||||
cd C:\opencv\build
|
||||
set PATH=C:\Perl\site\bin;C:\Perl\bin;C:\Windows\system32;C:\Windows;C:\Windows\System32\Wbem;C:\Windows\System32\WindowsPowerShell\v1.0\;C:\Program Files\7-Zip;C:\Program Files\Microsoft\Web Platform Installer\;C:\Tools\PsTools;C:\Program Files (x86)\CMake\bin;C:\go\bin;C:\Tools\NuGet;C:\Program Files\LLVM\bin;C:\Tools\curl\bin;C:\ProgramData\chocolatey\bin;C:\Program Files (x86)\Yarn\bin;C:\Users\appveyor\AppData\Local\Yarn\bin;C:\Program Files\AppVeyor\BuildAgent\
|
||||
set PATH=%PATH%;C:\mingw-w64\x86_64-7.3.0-posix-seh-rt_v5-rev0\mingw64\bin
|
||||
dir C:\opencv
|
||||
cmake C:\opencv\opencv-4.4.0 -G "MinGW Makefiles" -BC:\opencv\build -DENABLE_CXX11=ON -DOPENCV_EXTRA_MODULES_PATH=C:\opencv\opencv_contrib-4.4.0\modules -DBUILD_SHARED_LIBS=ON -DWITH_IPP=OFF -DWITH_MSMF=OFF -DBUILD_EXAMPLES=OFF -DBUILD_TESTS=OFF -DBUILD_PERF_TESTS=OFF -DBUILD_opencv_java=OFF -DBUILD_opencv_python=OFF -DBUILD_opencv_python2=OFF -DBUILD_opencv_python3=OFF -DBUILD_DOCS=OFF -DENABLE_PRECOMPILED_HEADERS=OFF -DBUILD_opencv_saliency=OFF -DCPU_DISPATCH= -DBUILD_opencv_gapi=OFF -DOPENCV_GENERATE_PKGCONFIG=ON -DOPENCV_ENABLE_NONFREE=ON -DWITH_OPENCL_D3D11_NV=OFF -DOPENCV_ALLOCATOR_STATS_COUNTER_TYPE=int64_t -Wno-dev
|
||||
cmake C:\opencv\opencv-4.5.1 -G "MinGW Makefiles" -BC:\opencv\build -DENABLE_CXX11=ON -DOPENCV_EXTRA_MODULES_PATH=C:\opencv\opencv_contrib-4.5.1\modules -DBUILD_SHARED_LIBS=ON -DWITH_IPP=OFF -DWITH_MSMF=OFF -DBUILD_EXAMPLES=OFF -DBUILD_TESTS=OFF -DBUILD_PERF_TESTS=OFF -DBUILD_opencv_java=OFF -DBUILD_opencv_python=OFF -DBUILD_opencv_python2=OFF -DBUILD_opencv_python3=OFF -DBUILD_DOCS=OFF -DENABLE_PRECOMPILED_HEADERS=OFF -DBUILD_opencv_saliency=OFF -DCPU_DISPATCH= -DBUILD_opencv_gapi=OFF -DOPENCV_GENERATE_PKGCONFIG=ON -DOPENCV_ENABLE_NONFREE=ON -DWITH_OPENCL_D3D11_NV=OFF -DOPENCV_ALLOCATOR_STATS_COUNTER_TYPE=int64_t -Wno-dev
|
||||
mingw32-make -j%NUMBER_OF_PROCESSORS%
|
||||
mingw32-make install
|
||||
appveyor DownloadFile https://raw.githubusercontent.com/opencv/opencv_extra/master/testdata/dnn/bvlc_googlenet.prototxt -FileName C:\opencv\testdata\bvlc_googlenet.prototxt
|
||||
appveyor DownloadFile http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel -FileName C:\opencv\testdata\bvlc_googlenet.caffemodel
|
||||
appveyor DownloadFile https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip -FileName C:\opencv\testdata\inception5h.zip
|
||||
appveyor DownloadFile https://github.com/onnx/models/raw/master/vision/classification/inception_and_googlenet/googlenet/model/googlenet-9.onnx -FileName C:\opencv\testdata\googlenet-9.onnx
|
||||
7z x C:\opencv\testdata\inception5h.zip -oC:\opencv\testdata tensorflow_inception_graph.pb -y
|
||||
rmdir c:\opencv\opencv-4.4.0 /s /q
|
||||
rmdir c:\opencv\opencv_contrib-4.4.0 /s /q
|
||||
rmdir c:\opencv\opencv-4.5.1 /s /q
|
||||
rmdir c:\opencv\opencv_contrib-4.5.1 /s /q
|
||||
|
10
vendor/gocv.io/x/gocv/calib3d.go
generated
vendored
@ -155,6 +155,11 @@ const (
CalibCBMarker
)
// FindChessboardCorners finds the positions of internal corners of the chessboard.
//
// For further details, please see:
// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga93efa9b0aa890de240ca32b11253dd4a
//
func FindChessboardCorners(image Mat, patternSize image.Point, corners *Mat, flags CalibCBFlag) bool {
sz := C.struct_Size{
width: C.int(patternSize.X),
@ -163,6 +168,11 @@ func FindChessboardCorners(image Mat, patternSize image.Point, corners *Mat, fla
return bool(C.FindChessboardCorners(image.Ptr(), sz, corners.Ptr(), C.int(flags)))
}
// DrawChessboardCorners renders the detected chessboard corners.
//
// For further details, please see:
// https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga6a10b0bb120c4907e5eabbcd22319022
//
func DrawChessboardCorners(image *Mat, patternSize image.Point, corners Mat, patternWasFound bool) {
sz := C.struct_Size{
width: C.int(patternSize.X),
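A hedged usage sketch for the two calib3d bindings added above; the image path "board.jpg" and the 9x6 pattern size are illustrative assumptions:

```go
package main

import (
	"fmt"
	"image"

	"gocv.io/x/gocv"
)

func main() {
	// "board.jpg" is a placeholder path to a photo of a calibration chessboard.
	img := gocv.IMRead("board.jpg", gocv.IMReadColor)
	defer img.Close()

	corners := gocv.NewMat()
	defer corners.Close()

	// Number of inner corners per row and per column of the board.
	pattern := image.Pt(9, 6)
	found := gocv.FindChessboardCorners(img, pattern, &corners, gocv.CalibCBAdaptiveThresh)

	// Draw the detected corners back onto the source image.
	gocv.DrawChessboardCorners(&img, pattern, corners, found)
	fmt.Println("chessboard found:", found)
}
```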
2
vendor/gocv.io/x/gocv/cgo.go
generated
vendored
@ -8,6 +8,6 @@ package gocv
#cgo !windows pkg-config: opencv4
#cgo CXXFLAGS: --std=c++11
#cgo windows CPPFLAGS: -IC:/opencv/build/install/include
#cgo windows LDFLAGS: -LC:/opencv/build/install/x64/mingw/lib -lopencv_core440 -lopencv_face440 -lopencv_videoio440 -lopencv_imgproc440 -lopencv_highgui440 -lopencv_imgcodecs440 -lopencv_objdetect440 -lopencv_features2d440 -lopencv_video440 -lopencv_dnn440 -lopencv_xfeatures2d440 -lopencv_plot440 -lopencv_tracking440 -lopencv_img_hash440 -lopencv_calib3d440 -lopencv_bgsegm440
#cgo windows LDFLAGS: -LC:/opencv/build/install/x64/mingw/lib -lopencv_core451 -lopencv_face451 -lopencv_videoio451 -lopencv_imgproc451 -lopencv_highgui451 -lopencv_imgcodecs451 -lopencv_objdetect451 -lopencv_features2d451 -lopencv_video451 -lopencv_dnn451 -lopencv_xfeatures2d451 -lopencv_plot451 -lopencv_tracking451 -lopencv_img_hash451 -lopencv_calib3d451 -lopencv_bgsegm451 -lopencv_photo451
*/
import "C"
87
vendor/gocv.io/x/gocv/core.cpp
generated
vendored
@ -11,6 +11,15 @@ Mat Mat_NewWithSize(int rows, int cols, int type) {
|
||||
return new cv::Mat(rows, cols, type, 0.0);
|
||||
}
|
||||
|
||||
// Mat_NewWithSizes creates a new Mat with specific dimension sizes and number of channels.
|
||||
Mat Mat_NewWithSizes(struct IntVector sizes, int type) {
|
||||
std::vector<int> sizess;
|
||||
for (int i = 0; i < sizes.length; ++i) {
|
||||
sizess.push_back(sizes.val[i]);
|
||||
}
|
||||
return new cv::Mat(sizess, type);
|
||||
}
|
||||
|
||||
// Mat_NewFromScalar creates a new Mat from a Scalar. Intended to be used
|
||||
// for Mat comparison operation such as InRange.
|
||||
Mat Mat_NewFromScalar(Scalar ar, int type) {
|
||||
@ -28,6 +37,42 @@ Mat Mat_NewFromBytes(int rows, int cols, int type, struct ByteArray buf) {
|
||||
return new cv::Mat(rows, cols, type, buf.data);
|
||||
}
|
||||
|
||||
// Mat_NewWithSizesFromScalar creates multidimensional Mat from a scalar
|
||||
Mat Mat_NewWithSizesFromScalar(IntVector sizes, int type, Scalar ar) {
|
||||
std::vector<int> _sizes;
|
||||
for (int i = 0, *v = sizes.val; i < sizes.length; ++v, ++i) {
|
||||
_sizes.push_back(*v);
|
||||
}
|
||||
|
||||
cv::Scalar c = cv::Scalar(ar.val1, ar.val2, ar.val3, ar.val4);
|
||||
return new cv::Mat(_sizes, type, c);
|
||||
}
|
||||
|
||||
// Mat_NewWithSizesFromBytes creates a multidimensional Mat from a byte array
|
||||
Mat Mat_NewWithSizesFromBytes(IntVector sizes, int type, struct ByteArray buf) {
|
||||
std::vector<int> _sizes;
|
||||
for (int i = 0, *v = sizes.val; i < sizes.length; ++v, ++i) {
|
||||
_sizes.push_back(*v);
|
||||
}
|
||||
|
||||
return new cv::Mat(_sizes, type, buf.data);
|
||||
}
|
||||
|
||||
Mat Eye(int rows, int cols, int type) {
|
||||
cv::Mat temp = cv::Mat::eye(rows, cols, type);
|
||||
return new cv::Mat(rows, cols, type, temp.data);
|
||||
}
|
||||
|
||||
Mat Zeros(int rows, int cols, int type) {
|
||||
cv::Mat temp = cv::Mat::zeros(rows, cols, type);
|
||||
return new cv::Mat(rows, cols, type, temp.data);
|
||||
}
|
||||
|
||||
Mat Ones(int rows, int cols, int type) {
|
||||
cv::Mat temp = cv::Mat::ones(rows, cols, type);
|
||||
return new cv::Mat(rows, cols, type, temp.data);
|
||||
}
|
||||
|
||||
Mat Mat_FromPtr(Mat m, int rows, int cols, int type, int prow, int pcol) {
|
||||
return new cv::Mat(rows, cols, type, m->ptr(prow, pcol));
|
||||
}
|
||||
@ -42,6 +87,11 @@ int Mat_Empty(Mat m) {
|
||||
return m->empty();
|
||||
}
|
||||
|
||||
// Mat_IsContinuous tests if a Mat is continuous
|
||||
bool Mat_IsContinuous(Mat m) {
|
||||
return m->isContinuous();
|
||||
}
|
||||
|
||||
// Mat_Clone returns a clone of this Mat
|
||||
Mat Mat_Clone(Mat m) {
|
||||
return new cv::Mat(m->clone());
|
||||
@ -61,6 +111,10 @@ void Mat_ConvertTo(Mat m, Mat dst, int type) {
|
||||
m->convertTo(*dst, type);
|
||||
}
|
||||
|
||||
void Mat_ConvertToWithParams(Mat m, Mat dst, int type, float alpha, float beta) {
|
||||
m->convertTo(*dst, type, alpha, beta);
|
||||
}
|
||||
|
||||
// Mat_ToBytes returns the bytes representation of the underlying data.
|
||||
struct ByteArray Mat_ToBytes(Mat m) {
|
||||
return toByteArray(reinterpret_cast<const char*>(m->data), m->total() * m->elemSize());
|
||||
@ -566,6 +620,28 @@ void Mat_MinMaxLoc(Mat m, double* minVal, double* maxVal, Point* minLoc, Point*
|
||||
maxLoc->y = cMaxLoc.y;
|
||||
}
|
||||
|
||||
void Mat_MixChannels(struct Mats src, struct Mats dst, struct IntVector fromTo) {
|
||||
std::vector<cv::Mat> srcMats;
|
||||
|
||||
for (int i = 0; i < src.length; ++i) {
|
||||
srcMats.push_back(*src.mats[i]);
|
||||
}
|
||||
|
||||
std::vector<cv::Mat> dstMats;
|
||||
|
||||
for (int i = 0; i < dst.length; ++i) {
|
||||
dstMats.push_back(*dst.mats[i]);
|
||||
}
|
||||
|
||||
std::vector<int> fromTos;
|
||||
|
||||
for (int i = 0; i < fromTo.length; ++i) {
|
||||
fromTos.push_back(fromTo.val[i]);
|
||||
}
|
||||
|
||||
cv::mixChannels(srcMats, dstMats, fromTos);
|
||||
}
|
||||
|
||||
void Mat_MulSpectrums(Mat a, Mat b, Mat c, int flags) {
|
||||
cv::mulSpectrums(*a, *b, *c, flags);
|
||||
}
|
||||
@ -586,6 +662,10 @@ double Norm(Mat src1, int normType) {
|
||||
return cv::norm(*src1, normType);
|
||||
}
|
||||
|
||||
double NormWithMats(Mat src1, Mat src2, int normType) {
|
||||
return cv::norm(*src1, *src2, normType);
|
||||
}
|
||||
|
||||
void Mat_PerspectiveTransform(Mat src, Mat dst, Mat tm) {
|
||||
cv::perspectiveTransform(*src, *dst, *tm);
|
||||
}
|
||||
@ -696,6 +776,13 @@ void Contours_Close(struct Contours cs) {
|
||||
delete[] cs.contours;
|
||||
}
|
||||
|
||||
void CStrings_Close(struct CStrings cstrs) {
|
||||
for ( int i = 0; i < cstrs.length; i++ ) {
|
||||
delete [] cstrs.strs[i];
|
||||
}
|
||||
delete [] cstrs.strs;
|
||||
}
|
||||
|
||||
void KeyPoints_Close(struct KeyPoints ks) {
|
||||
delete[] ks.keypoints;
|
||||
}
|
||||
|
307
vendor/gocv.io/x/gocv/core.go
generated
vendored
@ -178,6 +178,9 @@ var ErrEmptyByteSlice = errors.New("empty byte array")
|
||||
//
|
||||
type Mat struct {
|
||||
p C.Mat
|
||||
|
||||
// Non-nil if Mat was created with a []byte (using NewMatFromBytes()). Nil otherwise.
|
||||
d []byte
|
||||
}
|
||||
|
||||
// NewMat returns a new empty Mat.
|
||||
@ -190,6 +193,58 @@ func NewMatWithSize(rows int, cols int, mt MatType) Mat {
|
||||
return newMat(C.Mat_NewWithSize(C.int(rows), C.int(cols), C.int(mt)))
|
||||
}
|
||||
|
||||
// NewMatWithSizes returns a new multidimensional Mat with a specific size and type.
|
||||
func NewMatWithSizes(sizes []int, mt MatType) Mat {
|
||||
sizesArray := make([]C.int, len(sizes))
|
||||
for i, s := range sizes {
|
||||
sizesArray[i] = C.int(s)
|
||||
}
|
||||
|
||||
sizesIntVector := C.IntVector{
|
||||
val: (*C.int)(&sizesArray[0]),
|
||||
length: C.int(len(sizes)),
|
||||
}
|
||||
return newMat(C.Mat_NewWithSizes(sizesIntVector, C.int(mt)))
|
||||
}
|
||||
|
||||
// NewMatWithSizesWithScalar returns a new multidimensional Mat with a specific size, type and scalar value.
|
||||
func NewMatWithSizesWithScalar(sizes []int, mt MatType, s Scalar) Mat {
|
||||
csizes := []C.int{}
|
||||
for _, v := range sizes {
|
||||
csizes = append(csizes, C.int(v))
|
||||
}
|
||||
sizesVector := C.struct_IntVector{}
|
||||
sizesVector.val = (*C.int)(&csizes[0])
|
||||
sizesVector.length = (C.int)(len(csizes))
|
||||
|
||||
sVal := C.struct_Scalar{
|
||||
val1: C.double(s.Val1),
|
||||
val2: C.double(s.Val2),
|
||||
val3: C.double(s.Val3),
|
||||
val4: C.double(s.Val4),
|
||||
}
|
||||
|
||||
return newMat(C.Mat_NewWithSizesFromScalar(sizesVector, C.int(mt), sVal))
|
||||
}
|
||||
|
||||
// NewMatWithSizesFromBytes returns a new multidimensional Mat with a specific size, type and preexisting data.
|
||||
func NewMatWithSizesFromBytes(sizes []int, mt MatType, data []byte) (Mat, error) {
|
||||
cBytes, err := toByteArray(data)
|
||||
if err != nil {
|
||||
return Mat{}, err
|
||||
}
|
||||
|
||||
csizes := []C.int{}
|
||||
for _, v := range sizes {
|
||||
csizes = append(csizes, C.int(v))
|
||||
}
|
||||
sizesVector := C.struct_IntVector{}
|
||||
sizesVector.val = (*C.int)(&csizes[0])
|
||||
sizesVector.length = (C.int)(len(csizes))
|
||||
|
||||
return newMat(C.Mat_NewWithSizesFromBytes(sizesVector, C.int(mt), *cBytes)), nil
|
||||
}
|
||||
|
||||
// NewMatFromScalar returns a new Mat for a specific Scalar value
|
||||
func NewMatFromScalar(s Scalar, mt MatType) Mat {
|
||||
sVal := C.struct_Scalar{
|
||||
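A minimal sketch of the new multidimensional constructors introduced above; the 2x3x4 shape, element type and scalar value are arbitrary choices for illustration:

```go
package main

import (
	"fmt"

	"gocv.io/x/gocv"
)

func main() {
	// A 2x3x4 three-dimensional Mat of 32-bit floats.
	m := gocv.NewMatWithSizes([]int{2, 3, 4}, gocv.MatTypeCV32F)
	defer m.Close()

	// The same shape, pre-filled with a scalar value.
	filled := gocv.NewMatWithSizesWithScalar([]int{2, 3, 4}, gocv.MatTypeCV32F, gocv.NewScalar(1, 0, 0, 0))
	defer filled.Close()

	fmt.Println("total elements:", m.Total())
}
```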
@ -221,7 +276,44 @@ func NewMatFromBytes(rows int, cols int, mt MatType, data []byte) (Mat, error) {
|
||||
if err != nil {
|
||||
return Mat{}, err
|
||||
}
|
||||
return newMat(C.Mat_NewFromBytes(C.int(rows), C.int(cols), C.int(mt), *cBytes)), nil
|
||||
mat := newMat(C.Mat_NewFromBytes(C.int(rows), C.int(cols), C.int(mt), *cBytes))
|
||||
|
||||
// Store a reference to the backing data slice. This is needed because we pass the backing
|
||||
// array directly to C code and without keeping a Go reference to it, it might end up
|
||||
// garbage collected which would result in crashes.
|
||||
//
|
||||
// TODO(bga): This could live in newMat() but I wanted to reduce the change surface.
|
||||
// TODO(bga): Code that needs access to the array from Go could use this directly.
|
||||
mat.d = data
|
||||
|
||||
return mat, nil
|
||||
}
|
||||
|
||||
// Returns an identity matrix of the specified size and type.
|
||||
//
|
||||
// The method returns a Matlab-style identity matrix initializer, similarly to Mat::zeros. Similarly to Mat::ones.
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d3/d63/classcv_1_1Mat.html#a2cf9b9acde7a9852542bbc20ef851ed2
|
||||
func Eye(rows int, cols int, mt MatType) Mat {
|
||||
return newMat(C.Eye(C.int(rows), C.int(cols), C.int(mt)))
|
||||
}
|
||||
|
||||
// Returns a zero array of the specified size and type.
|
||||
//
|
||||
// The method returns a Matlab-style zero array initializer.
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d3/d63/classcv_1_1Mat.html#a0b57b6a326c8876d944d188a46e0f556
|
||||
func Zeros(rows int, cols int, mt MatType) Mat {
|
||||
return newMat(C.Zeros(C.int(rows), C.int(cols), C.int(mt)))
|
||||
}
|
||||
|
||||
// Returns an array of all 1's of the specified size and type.
|
||||
//
|
||||
// The method returns a Matlab-style 1's array initializer
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d3/d63/classcv_1_1Mat.html#a69ae0402d116fc9c71908d8508dc2f09
|
||||
func Ones(rows int, cols int, mt MatType) Mat {
|
||||
return newMat(C.Ones(C.int(rows), C.int(cols), C.int(mt)))
|
||||
}
|
||||
|
||||
// FromPtr returns a new Mat with a specific size and type, initialized from a Mat Ptr.
|
||||
@ -240,6 +332,15 @@ func (m *Mat) Empty() bool {
|
||||
return isEmpty != 0
|
||||
}
|
||||
|
||||
// IsContinuous determines if the Mat is continuous.
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d3/d63/classcv_1_1Mat.html#aa90cea495029c7d1ee0a41361ccecdf3
|
||||
//
|
||||
func (m *Mat) IsContinuous() bool {
|
||||
return bool(C.Mat_IsContinuous(m.p))
|
||||
}
|
||||
|
||||
// Clone returns a cloned full copy of the Mat.
|
||||
func (m *Mat) Clone() Mat {
|
||||
return newMat(C.Mat_Clone(m.p))
|
||||
@ -275,6 +376,11 @@ func (m *Mat) ConvertTo(dst *Mat, mt MatType) {
|
||||
return
|
||||
}
|
||||
|
||||
func (m *Mat) ConvertToWithParams(dst *Mat, mt MatType, alpha, beta float32) {
|
||||
C.Mat_ConvertToWithParams(m.p, dst.p, C.int(mt), C.float(alpha), C.float(beta))
|
||||
return
|
||||
}
|
||||
|
||||
// Total returns the total number of array elements.
|
||||
//
|
||||
// For further details, please see:
|
||||
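A short sketch of the ConvertToWithParams method added above; the 2x2 size, fill value and 1/255 scale factor are illustrative assumptions:

```go
package main

import "gocv.io/x/gocv"

func main() {
	// An 8-bit single-channel Mat filled with the value 128.
	src := gocv.NewMatWithSizeFromScalar(gocv.NewScalar(128, 0, 0, 0), 2, 2, gocv.MatTypeCV8UC1)
	defer src.Close()

	dst := gocv.NewMat()
	defer dst.Close()

	// Convert to float32 while rescaling values from [0, 255] into [0, 1].
	src.ConvertToWithParams(&dst, gocv.MatTypeCV32F, 1.0/255.0, 0)
}
```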
@ -320,28 +426,40 @@ func (m *Mat) ToBytes() []byte {
|
||||
//
|
||||
// The data is no longer valid once the Mat has been closed. Any data that
|
||||
// needs to be accessed after the Mat is closed must be copied into Go memory.
|
||||
func (m *Mat) DataPtrUint8() []uint8 {
|
||||
func (m *Mat) DataPtrUint8() ([]uint8, error) {
|
||||
if !m.IsContinuous() {
|
||||
return nil, errors.New("DataPtrUint8 requires continuous Mat")
|
||||
}
|
||||
|
||||
p := C.Mat_DataPtr(m.p)
|
||||
h := &reflect.SliceHeader{
|
||||
Data: uintptr(unsafe.Pointer(p.data)),
|
||||
Len: int(p.length),
|
||||
Cap: int(p.length),
|
||||
}
|
||||
return *(*[]uint8)(unsafe.Pointer(h))
|
||||
return *(*[]uint8)(unsafe.Pointer(h)), nil
|
||||
}
|
||||
|
||||
// DataPtrInt8 returns a slice that references the OpenCV allocated data.
|
||||
//
|
||||
// The data is no longer valid once the Mat has been closed. Any data that
|
||||
// needs to be accessed after the Mat is closed must be copied into Go memory.
|
||||
func (m *Mat) DataPtrInt8() []int8 {
|
||||
func (m *Mat) DataPtrInt8() ([]int8, error) {
|
||||
if m.Type()&MatTypeCV8S != MatTypeCV8S {
|
||||
return nil, errors.New("DataPtrInt8 only supports MatTypeCV8S")
|
||||
}
|
||||
|
||||
if !m.IsContinuous() {
|
||||
return nil, errors.New("DataPtrInt8 requires continuous Mat")
|
||||
}
|
||||
|
||||
p := C.Mat_DataPtr(m.p)
|
||||
h := &reflect.SliceHeader{
|
||||
Data: uintptr(unsafe.Pointer(p.data)),
|
||||
Len: int(p.length),
|
||||
Cap: int(p.length),
|
||||
}
|
||||
return *(*[]int8)(unsafe.Pointer(h))
|
||||
return *(*[]int8)(unsafe.Pointer(h)), nil
|
||||
}
|
||||
|
||||
// DataPtrUint16 returns a slice that references the OpenCV allocated data.
|
||||
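The DataPtr accessors now return an error instead of silently slicing a non-continuous Mat; a usage sketch with an arbitrary 4x4 Mat:

```go
package main

import (
	"fmt"
	"log"

	"gocv.io/x/gocv"
)

func main() {
	img := gocv.NewMatWithSize(4, 4, gocv.MatTypeCV8U)
	defer img.Close()

	// DataPtrUint8 now fails when the Mat is not continuous in memory.
	data, err := img.DataPtrUint8()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("backing bytes:", len(data))
}
```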
@ -353,6 +471,10 @@ func (m *Mat) DataPtrUint16() ([]uint16, error) {
|
||||
return nil, errors.New("DataPtrUint16 only supports MatTypeCV16U")
|
||||
}
|
||||
|
||||
if !m.IsContinuous() {
|
||||
return nil, errors.New("DataPtrUint16 requires continuous Mat")
|
||||
}
|
||||
|
||||
p := C.Mat_DataPtr(m.p)
|
||||
h := &reflect.SliceHeader{
|
||||
Data: uintptr(unsafe.Pointer(p.data)),
|
||||
@ -371,6 +493,10 @@ func (m *Mat) DataPtrInt16() ([]int16, error) {
|
||||
return nil, errors.New("DataPtrInt16 only supports MatTypeCV16S")
|
||||
}
|
||||
|
||||
if !m.IsContinuous() {
|
||||
return nil, errors.New("DataPtrInt16 requires continuous Mat")
|
||||
}
|
||||
|
||||
p := C.Mat_DataPtr(m.p)
|
||||
h := &reflect.SliceHeader{
|
||||
Data: uintptr(unsafe.Pointer(p.data)),
|
||||
@ -389,6 +515,10 @@ func (m *Mat) DataPtrFloat32() ([]float32, error) {
|
||||
return nil, errors.New("DataPtrFloat32 only supports MatTypeCV32F")
|
||||
}
|
||||
|
||||
if !m.IsContinuous() {
|
||||
return nil, errors.New("DataPtrFloat32 requires continuous Mat")
|
||||
}
|
||||
|
||||
p := C.Mat_DataPtr(m.p)
|
||||
h := &reflect.SliceHeader{
|
||||
Data: uintptr(unsafe.Pointer(p.data)),
|
||||
@ -407,6 +537,10 @@ func (m *Mat) DataPtrFloat64() ([]float64, error) {
|
||||
return nil, errors.New("DataPtrFloat64 only supports MatTypeCV64F")
|
||||
}
|
||||
|
||||
if !m.IsContinuous() {
|
||||
return nil, errors.New("DataPtrFloat64 requires continuous Mat")
|
||||
}
|
||||
|
||||
p := C.Mat_DataPtr(m.p)
|
||||
h := &reflect.SliceHeader{
|
||||
Data: uintptr(unsafe.Pointer(p.data)),
|
||||
@ -745,106 +879,6 @@ func (m *Mat) T() Mat {
|
||||
return newMat(C.Mat_T(m.p))
|
||||
}
|
||||
|
||||
// ToImage converts a Mat to a image.Image.
|
||||
func (m *Mat) ToImage() (image.Image, error) {
|
||||
t := m.Type()
|
||||
if t != MatTypeCV8UC1 && t != MatTypeCV8UC3 && t != MatTypeCV8UC4 {
|
||||
return nil, errors.New("ToImage supports only MatType CV8UC1, CV8UC3 and CV8UC4")
|
||||
}
|
||||
|
||||
width := m.Cols()
|
||||
height := m.Rows()
|
||||
step := m.Step()
|
||||
data := m.ToBytes()
|
||||
channels := m.Channels()
|
||||
|
||||
if t == MatTypeCV8UC1 {
|
||||
img := image.NewGray(image.Rect(0, 0, width, height))
|
||||
c := color.Gray{Y: uint8(0)}
|
||||
|
||||
for y := 0; y < height; y++ {
|
||||
for x := 0; x < width; x++ {
|
||||
c.Y = uint8(data[y*step+x])
|
||||
img.SetGray(x, y, c)
|
||||
}
|
||||
}
|
||||
|
||||
return img, nil
|
||||
}
|
||||
|
||||
img := image.NewRGBA(image.Rect(0, 0, width, height))
|
||||
c := color.RGBA{
|
||||
R: uint8(0),
|
||||
G: uint8(0),
|
||||
B: uint8(0),
|
||||
A: uint8(255),
|
||||
}
|
||||
|
||||
for y := 0; y < height; y++ {
|
||||
for x := 0; x < step; x = x + channels {
|
||||
c.B = uint8(data[y*step+x])
|
||||
c.G = uint8(data[y*step+x+1])
|
||||
c.R = uint8(data[y*step+x+2])
|
||||
if channels == 4 {
|
||||
c.A = uint8(data[y*step+x+3])
|
||||
}
|
||||
img.SetRGBA(int(x/channels), y, c)
|
||||
}
|
||||
}
|
||||
|
||||
return img, nil
|
||||
}
|
||||
|
||||
//ImageToMatRGBA converts image.Image to gocv.Mat,
|
||||
//which represents RGBA image having 8bit for each component.
|
||||
//Type of Mat is gocv.MatTypeCV8UC4.
|
||||
func ImageToMatRGBA(img image.Image) (Mat, error) {
|
||||
bounds := img.Bounds()
|
||||
x := bounds.Dx()
|
||||
y := bounds.Dy()
|
||||
data := make([]byte, 0, x*y*4)
|
||||
for j := bounds.Min.Y; j < bounds.Max.Y; j++ {
|
||||
for i := bounds.Min.X; i < bounds.Max.X; i++ {
|
||||
r, g, b, a := img.At(i, j).RGBA()
|
||||
data = append(data, byte(b>>8), byte(g>>8), byte(r>>8), byte(a>>8))
|
||||
}
|
||||
}
|
||||
return NewMatFromBytes(y, x, MatTypeCV8UC4, data)
|
||||
}
|
||||
|
||||
//ImageToMatRGB converts image.Image to gocv.Mat,
|
||||
//which represents RGB image having 8bit for each component.
|
||||
//Type of Mat is gocv.MatTypeCV8UC3.
|
||||
func ImageToMatRGB(img image.Image) (Mat, error) {
|
||||
bounds := img.Bounds()
|
||||
x := bounds.Dx()
|
||||
y := bounds.Dy()
|
||||
data := make([]byte, 0, x*y*3)
|
||||
for j := bounds.Min.Y; j < bounds.Max.Y; j++ {
|
||||
for i := bounds.Min.X; i < bounds.Max.X; i++ {
|
||||
r, g, b, _ := img.At(i, j).RGBA()
|
||||
data = append(data, byte(b>>8), byte(g>>8), byte(r>>8))
|
||||
}
|
||||
}
|
||||
return NewMatFromBytes(y, x, MatTypeCV8UC3, data)
|
||||
}
|
||||
|
||||
//ImageGrayToMatGray converts image.Gray to gocv.Mat,
|
||||
//which represents grayscale image 8bit.
|
||||
//Type of Mat is gocv.MatTypeCV8UC1.
|
||||
func ImageGrayToMatGray(img *image.Gray) (Mat, error) {
|
||||
bounds := img.Bounds()
|
||||
x := bounds.Dx()
|
||||
y := bounds.Dy()
|
||||
data := make([]byte, 0, x*y)
|
||||
for j := bounds.Min.Y; j < bounds.Max.Y; j++ {
|
||||
for i := bounds.Min.X; i < bounds.Max.X; i++ {
|
||||
data = append(data, img.GrayAt(i, j).Y)
|
||||
}
|
||||
}
|
||||
return NewMatFromBytes(y, x, MatTypeCV8UC1, data)
|
||||
}
|
||||
|
||||
// AbsDiff calculates the per-element absolute difference between two arrays
|
||||
// or between an array and a scalar.
|
||||
//
|
||||
@ -1478,6 +1512,47 @@ func MinMaxLoc(input Mat) (minVal, maxVal float32, minLoc, maxLoc image.Point) {
|
||||
return float32(cMinVal), float32(cMaxVal), minLoc, maxLoc
|
||||
}
|
||||
|
||||
// Copies specified channels from input arrays to the specified channels of output arrays.
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga51d768c270a1cdd3497255017c4504be
|
||||
//
|
||||
func MixChannels(src []Mat, dst []Mat, fromTo []int) {
|
||||
cSrcArray := make([]C.Mat, len(src))
|
||||
for i, r := range src {
|
||||
cSrcArray[i] = r.p
|
||||
}
|
||||
cSrcMats := C.struct_Mats{
|
||||
mats: (*C.Mat)(&cSrcArray[0]),
|
||||
length: C.int(len(src)),
|
||||
}
|
||||
|
||||
cDstArray := make([]C.Mat, len(dst))
|
||||
for i, r := range dst {
|
||||
cDstArray[i] = r.p
|
||||
}
|
||||
cDstMats := C.struct_Mats{
|
||||
mats: (*C.Mat)(&cDstArray[0]),
|
||||
length: C.int(len(dst)),
|
||||
}
|
||||
|
||||
cFromToArray := make([]C.int, len(fromTo))
|
||||
for i, ft := range fromTo {
|
||||
cFromToArray[i] = C.int(ft)
|
||||
}
|
||||
|
||||
cFromToIntVector := C.IntVector{
|
||||
val: (*C.int)(&cFromToArray[0]),
|
||||
length: C.int(len(fromTo)),
|
||||
}
|
||||
|
||||
C.Mat_MixChannels(cSrcMats, cDstMats, cFromToIntVector)
|
||||
|
||||
for i := C.int(0); i < cDstMats.length; i++ {
|
||||
dst[i].p = C.Mats_get(cDstMats, i)
|
||||
}
|
||||
}
|
||||
|
||||
//MulSpectrums performs the per-element multiplication of two Fourier spectrums.
|
||||
//
|
||||
// For further details, please see:
|
||||
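A sketch of the new MixChannels binding, mirroring the classic OpenCV example of splitting a BGRA image into a BGR Mat and an alpha Mat; the 100x100 size and fill values are arbitrary:

```go
package main

import "gocv.io/x/gocv"

func main() {
	// 100x100 BGRA image: blue pixels with full alpha.
	bgra := gocv.NewMatWithSizeFromScalar(gocv.NewScalar(255, 0, 0, 255), 100, 100, gocv.MatTypeCV8UC4)
	defer bgra.Close()

	// Destination Mats must be pre-allocated with matching size and depth.
	bgr := gocv.NewMatWithSize(bgra.Rows(), bgra.Cols(), gocv.MatTypeCV8UC3)
	defer bgr.Close()
	alpha := gocv.NewMatWithSize(bgra.Rows(), bgra.Cols(), gocv.MatTypeCV8UC1)
	defer alpha.Close()

	// bgra[0]->bgr[2], bgra[1]->bgr[1], bgra[2]->bgr[0], bgra[3]->alpha[0]
	fromTo := []int{0, 2, 1, 1, 2, 0, 3, 3}
	gocv.MixChannels([]gocv.Mat{bgra}, []gocv.Mat{bgr, alpha}, fromTo)
}
```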
@ -1561,6 +1636,15 @@ func Norm(src1 Mat, normType NormType) float64 {
|
||||
return float64(C.Norm(src1.p, C.int(normType)))
|
||||
}
|
||||
|
||||
// NormWithMats calculates the absolute difference/relative norm of two arrays.
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga7c331fb8dd951707e184ef4e3f21dd33
|
||||
//
|
||||
func NormWithMats(src1 Mat, src2 Mat, normType NormType) float64 {
|
||||
return float64(C.NormWithMats(src1.p, src2.p, C.int(normType)))
|
||||
}
|
||||
|
||||
// PerspectiveTransform performs the perspective matrix transformation of vectors.
|
||||
//
|
||||
// For further details, please see:
|
||||
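A small sketch of the new NormWithMats binding, using the Ones/Zeros initializers added in this release; the 3x3 size is arbitrary:

```go
package main

import (
	"fmt"

	"gocv.io/x/gocv"
)

func main() {
	a := gocv.Ones(3, 3, gocv.MatTypeCV64F)
	defer a.Close()
	b := gocv.Zeros(3, 3, gocv.MatTypeCV64F)
	defer b.Close()

	// L2 norm of the element-wise difference: sqrt(9) = 3 for a 3x3 all-ones Mat.
	dist := gocv.NormWithMats(a, b, gocv.NormL2)
	fmt.Println("L2 norm:", dist)
}
```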
@ -1727,6 +1811,7 @@ func SortIdx(src Mat, dst *Mat, flags SortFlags) {
|
||||
}
|
||||
|
||||
// Split creates an array of single channel images from a multi-channel image
|
||||
// Created images should be closed manually to avoid memory leaks.
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d2/de8/group__core__array.html#ga0547c7fed86152d7e9d0096029c8518a
|
||||
@ -1738,6 +1823,7 @@ func Split(src Mat) (mv []Mat) {
|
||||
mv = make([]Mat, cMats.length)
|
||||
for i := C.int(0); i < cMats.length; i++ {
|
||||
mv[i].p = C.Mats_get(cMats, i)
|
||||
addMatToProfile(mv[i].p)
|
||||
}
|
||||
return
|
||||
}
|
||||
@ -1957,6 +2043,17 @@ func toGoBytes(b C.struct_ByteArray) []byte {
|
||||
return C.GoBytes(unsafe.Pointer(b.data), b.length)
|
||||
}
|
||||
|
||||
// Converts CStrings to a slice of Go strings even when the C strings are not contiguous in memory
|
||||
func toGoStrings(strs C.CStrings) []string {
|
||||
length := int(strs.length)
|
||||
tmpslice := (*[1 << 20]*C.char)(unsafe.Pointer(strs.strs))[:length:length]
|
||||
gostrings := make([]string, length)
|
||||
for i, s := range tmpslice {
|
||||
gostrings[i] = C.GoString(s)
|
||||
}
|
||||
return gostrings
|
||||
}
|
||||
|
||||
func toRectangles(ret C.Rects) []image.Rectangle {
|
||||
cArray := ret.rects
|
||||
length := int(ret.length)
|
||||
|
12
vendor/gocv.io/x/gocv/core.h
generated
vendored
@ -232,18 +232,23 @@ void MultiDMatches_Close(struct MultiDMatches mds);
|
||||
|
||||
Mat Mat_New();
|
||||
Mat Mat_NewWithSize(int rows, int cols, int type);
|
||||
Mat Mat_NewWithSizes(struct IntVector sizes, int type);
|
||||
Mat Mat_NewWithSizesFromScalar(IntVector sizes, int type, Scalar ar);
|
||||
Mat Mat_NewWithSizesFromBytes(IntVector sizes, int type, struct ByteArray buf);
|
||||
Mat Mat_NewFromScalar(const Scalar ar, int type);
|
||||
Mat Mat_NewWithSizeFromScalar(const Scalar ar, int rows, int cols, int type);
|
||||
Mat Mat_NewFromBytes(int rows, int cols, int type, struct ByteArray buf);
|
||||
Mat Mat_FromPtr(Mat m, int rows, int cols, int type, int prows, int pcols);
|
||||
void Mat_Close(Mat m);
|
||||
int Mat_Empty(Mat m);
|
||||
bool Mat_IsContinuous(Mat m);
|
||||
Mat Mat_Clone(Mat m);
|
||||
void Mat_CopyTo(Mat m, Mat dst);
|
||||
int Mat_Total(Mat m);
|
||||
void Mat_Size(Mat m, IntVector* res);
|
||||
void Mat_CopyToWithMask(Mat m, Mat dst, Mat mask);
|
||||
void Mat_ConvertTo(Mat m, Mat dst, int type);
|
||||
void Mat_ConvertToWithParams(Mat m, Mat dst, int type, float alpha, float beta);
|
||||
struct ByteArray Mat_ToBytes(Mat m);
|
||||
struct ByteArray Mat_DataPtr(Mat m);
|
||||
Mat Mat_Region(Mat m, Rect r);
|
||||
@ -258,6 +263,9 @@ int Mat_Cols(Mat m);
|
||||
int Mat_Channels(Mat m);
|
||||
int Mat_Type(Mat m);
|
||||
int Mat_Step(Mat m);
|
||||
Mat Eye(int rows, int cols, int type);
|
||||
Mat Zeros(int rows, int cols, int type);
|
||||
Mat Ones(int rows, int cols, int type);
|
||||
|
||||
uint8_t Mat_GetUChar(Mat m, int row, int col);
|
||||
uint8_t Mat_GetUChar3(Mat m, int x, int y, int z);
|
||||
@ -354,12 +362,14 @@ void Mat_Merge(struct Mats mats, Mat dst);
|
||||
void Mat_Min(Mat src1, Mat src2, Mat dst);
|
||||
void Mat_MinMaxIdx(Mat m, double* minVal, double* maxVal, int* minIdx, int* maxIdx);
|
||||
void Mat_MinMaxLoc(Mat m, double* minVal, double* maxVal, Point* minLoc, Point* maxLoc);
|
||||
void Mat_MixChannels(struct Mats src, struct Mats dst, struct IntVector fromTo);
|
||||
void Mat_MulSpectrums(Mat a, Mat b, Mat c, int flags);
|
||||
void Mat_Multiply(Mat src1, Mat src2, Mat dst);
|
||||
void Mat_MultiplyWithParams(Mat src1, Mat src2, Mat dst, double scale, int dtype);
|
||||
void Mat_Subtract(Mat src1, Mat src2, Mat dst);
|
||||
void Mat_Normalize(Mat src, Mat dst, double alpha, double beta, int typ);
|
||||
double Norm(Mat src1, int normType);
|
||||
double NormWithMats(Mat src1, Mat src2, int normType);
|
||||
void Mat_PerspectiveTransform(Mat src, Mat dst, Mat tm);
|
||||
bool Mat_Solve(Mat src1, Mat src2, Mat dst, int flags);
|
||||
int Mat_SolveCubic(Mat coeffs, Mat roots);
|
||||
@ -390,6 +400,8 @@ Mat Mat_colRange(Mat m,int startrow,int endrow);
|
||||
|
||||
void IntVector_Close(struct IntVector ivec);
|
||||
|
||||
void CStrings_Close(struct CStrings cstrs);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
95
vendor/gocv.io/x/gocv/dnn.cpp
generated
vendored
@ -33,6 +33,21 @@ Net Net_ReadNetFromTensorflowBytes(struct ByteArray model) {
|
||||
return n;
|
||||
}
|
||||
|
||||
Net Net_ReadNetFromTorch(const char* model) {
|
||||
Net n = new cv::dnn::Net(cv::dnn::readNetFromTorch(model));
|
||||
return n;
|
||||
}
|
||||
|
||||
Net Net_ReadNetFromONNX(const char* model) {
|
||||
Net n = new cv::dnn::Net(cv::dnn::readNetFromONNX(model));
|
||||
return n;
|
||||
}
|
||||
|
||||
Net Net_ReadNetFromONNXBytes(struct ByteArray model) {
|
||||
Net n = new cv::dnn::Net(cv::dnn::readNetFromONNX(model.data, model.length));
|
||||
return n;
|
||||
}
|
||||
|
||||
void Net_Close(Net net) {
|
||||
delete net;
|
||||
}
|
||||
@ -110,11 +125,9 @@ void Net_GetLayerNames(Net net, CStrings* names) {
|
||||
Mat Net_BlobFromImage(Mat image, double scalefactor, Size size, Scalar mean, bool swapRB,
|
||||
bool crop) {
|
||||
cv::Size sz(size.width, size.height);
|
||||
|
||||
// set the output ddepth to the input image depth
|
||||
int ddepth = image->depth();
|
||||
cv::Scalar cm(mean.val1, mean.val2, mean.val3, mean.val4);
|
||||
return new cv::Mat(cv::dnn::blobFromImage(*image, scalefactor, sz, cm, swapRB, crop, ddepth));
|
||||
// use the default target ddepth here.
|
||||
return new cv::Mat(cv::dnn::blobFromImage(*image, scalefactor, sz, cm, swapRB, crop));
|
||||
}
|
||||
|
||||
void Net_BlobFromImages(struct Mats images, Mat blob, double scalefactor, Size size,
|
||||
@ -128,8 +141,8 @@ void Net_BlobFromImages(struct Mats images, Mat blob, double scalefactor, Size s
|
||||
cv::Size sz(size.width, size.height);
|
||||
cv::Scalar cm = cv::Scalar(mean.val1, mean.val2, mean.val3, mean.val4);
|
||||
|
||||
// TODO: handle different version signatures of this function v2 vs v3.
|
||||
cv::dnn::blobFromImages(imgs, *blob, scalefactor, sz, cm, swapRB, crop, ddepth);
|
||||
// ignore the passed in ddepth, just use default.
|
||||
cv::dnn::blobFromImages(imgs, *blob, scalefactor, sz, cm, swapRB, crop);
|
||||
}
|
||||
|
||||
void Net_ImagesFromBlob(Mat blob_, struct Mats* images_) {
|
||||
@ -181,3 +194,73 @@ const char* Layer_GetName(Layer layer) {
|
||||
const char* Layer_GetType(Layer layer) {
|
||||
return (*layer)->type.c_str();
|
||||
}
|
||||
|
||||
void NMSBoxes(struct Rects bboxes, FloatVector scores, float score_threshold, float nms_threshold, IntVector* indices) {
|
||||
std::vector<cv::Rect> _bboxes;
|
||||
|
||||
for (int i = 0; i < bboxes.length; ++i) {
|
||||
_bboxes.push_back(cv::Rect(
|
||||
bboxes.rects[i].x,
|
||||
bboxes.rects[i].y,
|
||||
bboxes.rects[i].width,
|
||||
bboxes.rects[i].height
|
||||
));
|
||||
}
|
||||
|
||||
std::vector<float> _scores;
|
||||
|
||||
float* f;
|
||||
int i;
|
||||
for (i = 0, f = scores.val; i < scores.length; ++f, ++i) {
|
||||
_scores.push_back(*f);
|
||||
}
|
||||
|
||||
std::vector<int> _indices(indices->length);
|
||||
|
||||
cv::dnn::NMSBoxes(_bboxes, _scores, score_threshold, nms_threshold, _indices, 1.f, 0);
|
||||
|
||||
int* ptr = new int[_indices.size()];
|
||||
|
||||
for (size_t i=0; i<_indices.size(); ++i) {
|
||||
ptr[i] = _indices[i];
|
||||
}
|
||||
|
||||
indices->length = _indices.size();
|
||||
indices->val = ptr;
|
||||
return;
|
||||
}
|
||||
|
||||
void NMSBoxesWithParams(struct Rects bboxes, FloatVector scores, const float score_threshold, const float nms_threshold, IntVector* indices, const float eta, const int top_k) {
|
||||
std::vector<cv::Rect> _bboxes;
|
||||
|
||||
for (int i = 0; i < bboxes.length; ++i) {
|
||||
_bboxes.push_back(cv::Rect(
|
||||
bboxes.rects[i].x,
|
||||
bboxes.rects[i].y,
|
||||
bboxes.rects[i].width,
|
||||
bboxes.rects[i].height
|
||||
));
|
||||
}
|
||||
|
||||
std::vector<float> _scores;
|
||||
|
||||
float* f;
|
||||
int i;
|
||||
for (i = 0, f = scores.val; i < scores.length; ++f, ++i) {
|
||||
_scores.push_back(*f);
|
||||
}
|
||||
|
||||
std::vector<int> _indices(indices->length);
|
||||
|
||||
cv::dnn::NMSBoxes(_bboxes, _scores, score_threshold, nms_threshold, _indices, eta, top_k);
|
||||
|
||||
int* ptr = new int[_indices.size()];
|
||||
|
||||
for (size_t i=0; i<_indices.size(); ++i) {
|
||||
ptr[i] = _indices[i];
|
||||
}
|
||||
|
||||
indices->length = _indices.size();
|
||||
indices->val = ptr;
|
||||
return;
|
||||
}
|
151
vendor/gocv.io/x/gocv/dnn.go
generated
vendored
@ -180,6 +180,7 @@ func (net *Net) ForwardLayers(outBlobNames []string) (blobs []Mat) {
|
||||
blobs = make([]Mat, cMats.length)
|
||||
for i := C.int(0); i < cMats.length; i++ {
|
||||
blobs[i].p = C.Mats_get(cMats, i)
|
||||
addMatToProfile(blobs[i].p)
|
||||
}
|
||||
return
|
||||
}
|
||||
@ -292,6 +293,43 @@ func ReadNetFromTensorflowBytes(model []byte) (Net, error) {
|
||||
return Net{p: unsafe.Pointer(C.Net_ReadNetFromTensorflowBytes(*bModel))}, nil
|
||||
}
|
||||
|
||||
// ReadNetFromTorch reads a network model stored in Torch framework's format (t7).
|
||||
// check net.Empty() for read failure
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d6/d0f/group__dnn.html#gaaaed8c8530e9e92fe6647700c13d961e
|
||||
//
|
||||
func ReadNetFromTorch(model string) Net {
|
||||
cmodel := C.CString(model)
|
||||
defer C.free(unsafe.Pointer(cmodel))
|
||||
return Net{p: unsafe.Pointer(C.Net_ReadNetFromTorch(cmodel))}
|
||||
}
|
||||
|
||||
// ReadNetFromONNX reads a network model stored in ONNX framework's format.
|
||||
// check net.Empty() for read failure
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d6/d0f/group__dnn.html#ga7faea56041d10c71dbbd6746ca854197
|
||||
//
|
||||
func ReadNetFromONNX(model string) Net {
|
||||
cmodel := C.CString(model)
|
||||
defer C.free(unsafe.Pointer(cmodel))
|
||||
return Net{p: unsafe.Pointer(C.Net_ReadNetFromONNX(cmodel))}
|
||||
}
|
||||
|
||||
// ReadNetFromONNXBytes reads a network model stored in ONNX framework's format.
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/master/d6/d0f/group__dnn.html#ga9198ecaac7c32ddf0aa7a1bcbd359567
|
||||
//
|
||||
func ReadNetFromONNXBytes(model []byte) (Net, error) {
|
||||
bModel, err := toByteArray(model)
|
||||
if err != nil {
|
||||
return Net{}, err
|
||||
}
|
||||
return Net{p: unsafe.Pointer(C.Net_ReadNetFromONNXBytes(*bModel))}, nil
|
||||
}
|
||||
|
||||
// BlobFromImage creates 4-dimensional blob from image. Optionally resizes and crops
|
||||
// image from center, subtract mean values, scales values by scalefactor,
|
||||
// swap Blue and Red channels.
|
||||
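A hedged sketch of loading one of the newly supported ONNX models and running a blob through it; "model.onnx", "input.jpg", the 224x224 input size and the 1/255 scaling are placeholders that depend on the actual model:

```go
package main

import (
	"fmt"
	"image"
	"log"

	"gocv.io/x/gocv"
)

func main() {
	net := gocv.ReadNetFromONNX("model.onnx")
	defer net.Close()
	if net.Empty() {
		log.Fatal("failed to read ONNX model")
	}

	img := gocv.IMRead("input.jpg", gocv.IMReadColor)
	defer img.Close()

	// Build a 4D blob; input size and normalization depend on the model.
	blob := gocv.BlobFromImage(img, 1.0/255.0, image.Pt(224, 224), gocv.NewScalar(0, 0, 0, 0), true, false)
	defer blob.Close()

	net.SetInput(blob, "")
	out := net.Forward("")
	defer out.Close()
	fmt.Println("output size:", out.Size())
}
```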
@ -414,13 +452,14 @@ func (net *Net) GetPerfProfile() float64 {
|
||||
func (net *Net) GetUnconnectedOutLayers() (ids []int) {
|
||||
cids := C.IntVector{}
|
||||
C.Net_GetUnconnectedOutLayers((C.Net)(net.p), &cids)
|
||||
defer C.free(unsafe.Pointer(cids.val))
|
||||
|
||||
h := &reflect.SliceHeader{
|
||||
Data: uintptr(unsafe.Pointer(cids.val)),
|
||||
Len: int(cids.length),
|
||||
Cap: int(cids.length),
|
||||
}
|
||||
pcids := *(*[]int)(unsafe.Pointer(h))
|
||||
pcids := *(*[]C.int)(unsafe.Pointer(h))
|
||||
|
||||
for i := 0; i < int(cids.length); i++ {
|
||||
ids = append(ids, int(pcids[i]))
|
||||
@ -435,19 +474,9 @@ func (net *Net) GetUnconnectedOutLayers() (ids []int) {
|
||||
//
|
||||
func (net *Net) GetLayerNames() (names []string) {
|
||||
cstrs := C.CStrings{}
|
||||
defer C.CStrings_Close(cstrs)
|
||||
C.Net_GetLayerNames((C.Net)(net.p), &cstrs)
|
||||
|
||||
h := &reflect.SliceHeader{
|
||||
Data: uintptr(unsafe.Pointer(cstrs.strs)),
|
||||
Len: int(cstrs.length),
|
||||
Cap: int(cstrs.length),
|
||||
}
|
||||
pcstrs := *(*[]string)(unsafe.Pointer(h))
|
||||
|
||||
for i := 0; i < int(cstrs.length); i++ {
|
||||
names = append(names, string(pcstrs[i]))
|
||||
}
|
||||
return
|
||||
return toGoStrings(cstrs)
|
||||
}
|
||||
|
||||
// Close Layer
|
||||
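The old slice-header conversion above treated C string pointers as Go strings; with the toGoStrings helper, listing layer names now copies them properly. A usage sketch (the Caffe file names are placeholders):

```go
package main

import (
	"fmt"

	"gocv.io/x/gocv"
)

func main() {
	net := gocv.ReadNetFromCaffe("deploy.prototxt", "model.caffemodel")
	defer net.Close()

	// Each entry is a properly converted Go string.
	for i, name := range net.GetLayerNames() {
		fmt.Println(i, name)
	}
}
```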
@ -488,3 +517,99 @@ func (l *Layer) OutputNameToIndex(name string) int {
|
||||
defer C.free(unsafe.Pointer(cName))
|
||||
return int(C.Layer_OutputNameToIndex((C.Layer)(l.p), cName))
|
||||
}
|
||||
|
||||
// NMSBoxes performs non maximum suppression given boxes and corresponding scores.
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/4.4.0/d6/d0f/group__dnn.html#ga9d118d70a1659af729d01b10233213ee
|
||||
func NMSBoxes(bboxes []image.Rectangle, scores []float32, scoreThreshold float32, nmsThreshold float32, indices []int) {
|
||||
bboxesRectArr := []C.struct_Rect{}
|
||||
for _, v := range bboxes {
|
||||
bbox := C.struct_Rect{
|
||||
x: C.int(v.Min.X),
|
||||
y: C.int(v.Min.Y),
|
||||
width: C.int(v.Size().X),
|
||||
height: C.int(v.Size().Y),
|
||||
}
|
||||
bboxesRectArr = append(bboxesRectArr, bbox)
|
||||
}
|
||||
|
||||
bboxesRects := C.Rects{
|
||||
rects: (*C.Rect)(&bboxesRectArr[0]),
|
||||
length: C.int(len(bboxes)),
|
||||
}
|
||||
|
||||
scoresFloats := []C.float{}
|
||||
for _, v := range scores {
|
||||
scoresFloats = append(scoresFloats, C.float(v))
|
||||
}
|
||||
scoresVector := C.struct_FloatVector{}
|
||||
scoresVector.val = (*C.float)(&scoresFloats[0])
|
||||
scoresVector.length = (C.int)(len(scoresFloats))
|
||||
|
||||
indicesVector := C.IntVector{}
|
||||
|
||||
C.NMSBoxes(bboxesRects, scoresVector, C.float(scoreThreshold), C.float(nmsThreshold), &indicesVector)
|
||||
defer C.free(unsafe.Pointer(indicesVector.val))
|
||||
|
||||
h := &reflect.SliceHeader{
|
||||
Data: uintptr(unsafe.Pointer(indicesVector.val)),
|
||||
Len: int(indicesVector.length),
|
||||
Cap: int(indicesVector.length),
|
||||
}
|
||||
|
||||
ptr := *(*[]C.int)(unsafe.Pointer(h))
|
||||
|
||||
for i := 0; i < int(indicesVector.length); i++ {
|
||||
indices[i] = int(ptr[i])
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// NMSBoxesWithParams performs non maximum suppression given boxes and corresponding scores.
|
||||
//
|
||||
// For further details, please see:
|
||||
// https://docs.opencv.org/4.4.0/d6/d0f/group__dnn.html#ga9d118d70a1659af729d01b10233213ee
func NMSBoxesWithParams(bboxes []image.Rectangle, scores []float32, scoreThreshold float32, nmsThreshold float32, indices []int, eta float32, topK int) {
    bboxesRectArr := []C.struct_Rect{}
    for _, v := range bboxes {
        bbox := C.struct_Rect{
            x:      C.int(v.Min.X),
            y:      C.int(v.Min.Y),
            width:  C.int(v.Size().X),
            height: C.int(v.Size().Y),
        }
        bboxesRectArr = append(bboxesRectArr, bbox)
    }

    bboxesRects := C.Rects{
        rects:  (*C.Rect)(&bboxesRectArr[0]),
        length: C.int(len(bboxes)),
    }

    scoresFloats := []C.float{}
    for _, v := range scores {
        scoresFloats = append(scoresFloats, C.float(v))
    }
    scoresVector := C.struct_FloatVector{}
    scoresVector.val = (*C.float)(&scoresFloats[0])
    scoresVector.length = (C.int)(len(scoresFloats))

    indicesVector := C.IntVector{}

    C.NMSBoxesWithParams(bboxesRects, scoresVector, C.float(scoreThreshold), C.float(nmsThreshold), &indicesVector, C.float(eta), C.int(topK))
    defer C.free(unsafe.Pointer(indicesVector.val))

    h := &reflect.SliceHeader{
        Data: uintptr(unsafe.Pointer(indicesVector.val)),
        Len:  int(indicesVector.length),
        Cap:  int(indicesVector.length),
    }

    ptr := *(*[]C.int)(unsafe.Pointer(h))

    for i := 0; i < int(indicesVector.length); i++ {
        indices[i] = int(ptr[i])
    }
    return
}
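
NMSBoxesWithParams takes the same inputs plus an eta coefficient for OpenCV's adaptive-threshold formula and a topK limit on the number of kept boxes (0 means no limit). Reusing the illustrative data from the NMSBoxes sketch above, a call matching OpenCV's defaults would be:

    gocv.NMSBoxesWithParams(bboxes, scores, 0.5, 0.4, indices, 1.0, 0)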

6
vendor/gocv.io/x/gocv/dnn.h
generated
vendored
@ -25,6 +25,9 @@ Net Net_ReadNetFromCaffe(const char* prototxt, const char* caffeModel);
Net Net_ReadNetFromCaffeBytes(struct ByteArray prototxt, struct ByteArray caffeModel);
Net Net_ReadNetFromTensorflow(const char* model);
Net Net_ReadNetFromTensorflowBytes(struct ByteArray model);
Net Net_ReadNetFromTorch(const char* model);
Net Net_ReadNetFromONNX(const char* model);
Net Net_ReadNetFromONNXBytes(struct ByteArray model);
Mat Net_BlobFromImage(Mat image, double scalefactor, Size size, Scalar mean, bool swapRB,
        bool crop);
void Net_BlobFromImages(struct Mats images, Mat blob, double scalefactor, Size size,
@ -51,6 +54,9 @@ int Layer_OutputNameToIndex(Layer layer, const char* name);
const char* Layer_GetName(Layer layer);
const char* Layer_GetType(Layer layer);

void NMSBoxes(struct Rects bboxes, FloatVector scores, float score_threshold, float nms_threshold, IntVector* indices);
void NMSBoxesWithParams(struct Rects bboxes, FloatVector scores, const float score_threshold, const float nms_threshold, IntVector* indices, const float eta, const int top_k);

#ifdef __cplusplus
}
#endif

83
vendor/gocv.io/x/gocv/features2d.cpp
generated
vendored
@ -413,6 +413,50 @@ struct MultiDMatches BFMatcher_KnnMatchWithParams(BFMatcher b, Mat query, Mat tr
    return ret;
}

FlannBasedMatcher FlannBasedMatcher_Create() {
    return new cv::Ptr<cv::FlannBasedMatcher>(cv::FlannBasedMatcher::create());
}

void FlannBasedMatcher_Close(FlannBasedMatcher f) {
    delete f;
}

struct MultiDMatches FlannBasedMatcher_KnnMatch(FlannBasedMatcher f, Mat query, Mat train, int k) {
    std::vector< std::vector<cv::DMatch> > matches;
    (*f)->knnMatch(*query, *train, matches, k);

    DMatches *dms = new DMatches[matches.size()];
    for (size_t i = 0; i < matches.size(); ++i) {
        DMatch *dmatches = new DMatch[matches[i].size()];
        for (size_t j = 0; j < matches[i].size(); ++j) {
            DMatch dmatch = {matches[i][j].queryIdx, matches[i][j].trainIdx, matches[i][j].imgIdx,
                             matches[i][j].distance};
            dmatches[j] = dmatch;
        }
        dms[i] = {dmatches, (int) matches[i].size()};
    }
    MultiDMatches ret = {dms, (int) matches.size()};
    return ret;
}

struct MultiDMatches FlannBasedMatcher_KnnMatchWithParams(FlannBasedMatcher f, Mat query, Mat train, int k, Mat mask, bool compactResult) {
    std::vector< std::vector<cv::DMatch> > matches;
    (*f)->knnMatch(*query, *train, matches, k, *mask, compactResult);

    DMatches *dms = new DMatches[matches.size()];
    for (size_t i = 0; i < matches.size(); ++i) {
        DMatch *dmatches = new DMatch[matches[i].size()];
        for (size_t j = 0; j < matches[i].size(); ++j) {
            DMatch dmatch = {matches[i][j].queryIdx, matches[i][j].trainIdx, matches[i][j].imgIdx,
                             matches[i][j].distance};
            dmatches[j] = dmatch;
        }
        dms[i] = {dmatches, (int) matches[i].size()};
    }
    MultiDMatches ret = {dms, (int) matches.size()};
    return ret;
}
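
These wrappers back a Go-side FlannBasedMatcher. A hedged sketch of typical use, assuming the Go API mirrors the existing BFMatcher one (NewFlannBasedMatcher, KnnMatch, Close) and that SIFT float descriptors are used, since FLANN's default KD-tree index expects floating-point data; the image files are placeholders:

package main

import (
    "fmt"

    "gocv.io/x/gocv"
)

func main() {
    // Placeholder images; grayscale input is fine for SIFT.
    img1 := gocv.IMRead("scene.jpg", gocv.IMReadGrayScale)
    img2 := gocv.IMRead("object.jpg", gocv.IMReadGrayScale)
    defer img1.Close()
    defer img2.Close()

    sift := gocv.NewSIFT()
    defer sift.Close()

    mask := gocv.NewMat()
    defer mask.Close()
    _, desc1 := sift.DetectAndCompute(img1, mask)
    _, desc2 := sift.DetectAndCompute(img2, mask)
    defer desc1.Close()
    defer desc2.Close()

    fb := gocv.NewFlannBasedMatcher()
    defer fb.Close()

    // k=2 nearest neighbours per query descriptor, for a Lowe ratio test.
    matches := fb.KnnMatch(desc1, desc2, 2)
    good := 0
    for _, m := range matches {
        if len(m) == 2 && m[0].Distance < 0.75*m[1].Distance {
            good++
        }
    }
    fmt.Println("good matches:", good)
}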

void DrawKeyPoints(Mat src, struct KeyPoints kp, Mat dst, Scalar s, int flags) {
    std::vector<cv::KeyPoint> keypts;
    cv::KeyPoint keypt;
@ -471,3 +515,42 @@ struct KeyPoints SIFT_DetectAndCompute(SIFT d, Mat src, Mat mask, Mat desc) {
    KeyPoints ret = {kps, (int)detected.size()};
    return ret;
}

void DrawMatches(Mat img1, struct KeyPoints kp1, Mat img2, struct KeyPoints kp2, struct DMatches matches1to2, Mat outImg, const Scalar matchesColor, const Scalar pointColor, struct ByteArray matchesMask, int flags) {
    std::vector<cv::KeyPoint> kp1vec, kp2vec;
    cv::KeyPoint keypt;

    for (int i = 0; i < kp1.length; ++i) {
        keypt = cv::KeyPoint(kp1.keypoints[i].x, kp1.keypoints[i].y,
                             kp1.keypoints[i].size, kp1.keypoints[i].angle, kp1.keypoints[i].response,
                             kp1.keypoints[i].octave, kp1.keypoints[i].classID);
        kp1vec.push_back(keypt);
    }

    for (int i = 0; i < kp2.length; ++i) {
        keypt = cv::KeyPoint(kp2.keypoints[i].x, kp2.keypoints[i].y,
                             kp2.keypoints[i].size, kp2.keypoints[i].angle, kp2.keypoints[i].response,
                             kp2.keypoints[i].octave, kp2.keypoints[i].classID);
        kp2vec.push_back(keypt);
    }

    cv::Scalar cvmatchescolor = cv::Scalar(matchesColor.val1, matchesColor.val2, matchesColor.val3, matchesColor.val4);
    cv::Scalar cvpointcolor = cv::Scalar(pointColor.val1, pointColor.val2, pointColor.val3, pointColor.val4);

    std::vector<cv::DMatch> dmatchvec;
    cv::DMatch dm;

    for (int i = 0; i < matches1to2.length; i++) {
        dm = cv::DMatch(matches1to2.dmatches[i].queryIdx, matches1to2.dmatches[i].trainIdx,
                        matches1to2.dmatches[i].imgIdx, matches1to2.dmatches[i].distance);
        dmatchvec.push_back(dm);
    }

    std::vector<char> maskvec;

    for (int i = 0; i < matchesMask.length; i++) {
        maskvec.push_back(matchesMask.data[i]);
    }

    cv::drawMatches(*img1, kp1vec, *img2, kp2vec, dmatchvec, *outImg, cvmatchescolor, cvpointcolor, maskvec, static_cast<cv::DrawMatchesFlags>(flags));
}
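
Continuing the matcher sketch above (and assuming the keypoints returned by DetectAndCompute were kept as kp1 and kp2, and the matches surviving the ratio test as goodMatches of type []gocv.DMatch), the new DrawMatches wrapper can render the result. The Go-side signature here is inferred from the C declaration above and may differ slightly, so treat it as an assumption:

    out := gocv.NewMat()
    defer out.Close()

    green := color.RGBA{G: 255} // requires "image/color"
    // An empty mask draws every match; gocv.DrawDefault mirrors cv::DrawMatchesFlags::DEFAULT.
    gocv.DrawMatches(img1, kp1, img2, kp2, goodMatches, &out,
        green, green, []byte{}, gocv.DrawDefault)
    gocv.IMWrite("matches.jpg", out)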

Some files were not shown because too many files have changed in this diff.