chore: upgrade dependencies

Author: Cyrille Nofficial, 2023-10-15 11:51:11 +02:00
parent 156cc5204d
commit 49aaa38f82
99 changed files with 2799 additions and 2511 deletions

go.mod (15 changed lines)

@@ -5,18 +5,17 @@ go 1.21
 toolchain go1.21.3

 require (
-    github.com/cyrilix/robocar-base v0.1.7
+    github.com/cyrilix/robocar-base v0.1.8
     github.com/cyrilix/robocar-protobuf/go v1.4.0
-    github.com/eclipse/paho.mqtt.golang v1.4.1
-    go.uber.org/zap v1.21.0
+    github.com/eclipse/paho.mqtt.golang v1.4.3
+    go.uber.org/zap v1.26.0
     gocv.io/x/gocv v0.31.0
     google.golang.org/protobuf v1.31.0
 )

 require (
-    github.com/gorilla/websocket v1.4.2 // indirect
-    go.uber.org/atomic v1.7.0 // indirect
-    go.uber.org/multierr v1.6.0 // indirect
-    golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 // indirect
-    golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
+    github.com/gorilla/websocket v1.5.0 // indirect
+    go.uber.org/multierr v1.10.0 // indirect
+    golang.org/x/net v0.9.0 // indirect
+    golang.org/x/sync v0.1.0 // indirect
 )
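Not part of this commit, but a quick way to confirm which module versions actually ended up in a built binary after an upgrade like this is `runtime/debug.ReadBuildInfo`. A minimal sketch; the output simply reflects whatever the binary was built with:

```go
package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	// ReadBuildInfo reports the dependency versions compiled into this binary.
	info, ok := debug.ReadBuildInfo()
	if !ok {
		fmt.Println("no build info available")
		return
	}
	for _, dep := range info.Deps {
		fmt.Println(dep.Path, dep.Version) // e.g. github.com/eclipse/paho.mqtt.golang v1.4.3
	}
}
```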

go.sum (87 changed lines)

@@ -1,83 +1,38 @@
-github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
-github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
-github.com/cyrilix/robocar-base v0.1.7 h1:EVzZ0KjigSFpke5f3A/PybEH3WFUEIrYSc3z/dhOZ48=
-github.com/cyrilix/robocar-base v0.1.7/go.mod h1:4E11HQSNy2NT8e7MW188y6ST9C0RzarKyn7sK/3V/Lk=
+github.com/cyrilix/robocar-base v0.1.8 h1:9hfH9rCcyGXR0dtESIhI9tCrK9juq+dSnJXCxCF2LVw=
+github.com/cyrilix/robocar-base v0.1.8/go.mod h1:oJnfYjoz2PX16BD8I8LJ14kRQt1zbFb7XaUHtUEZgjg=
 github.com/cyrilix/robocar-protobuf/go v1.4.0 h1:ZMN2zjn2iplsbHoBrjiI7d3HdNutWUB+NcVDh2mFcqM=
 github.com/cyrilix/robocar-protobuf/go v1.4.0/go.mod h1:69ZGmxS2JufIxGZPEKvAMZj5b1fVMVG3QTyFlCCHGtg=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/eclipse/paho.mqtt.golang v1.4.1 h1:tUSpviiL5G3P9SZZJPC4ZULZJsxQKXxfENpMvdbAXAI=
-github.com/eclipse/paho.mqtt.golang v1.4.1/go.mod h1:JGt0RsEwEX+Xa/agj90YJ9d9DH2b7upDZMK9HRbFvCA=
+github.com/eclipse/paho.mqtt.golang v1.4.3 h1:2kwcUGn8seMUfWndX0hGbvH8r7crgcJguQNCyp70xik=
+github.com/eclipse/paho.mqtt.golang v1.4.3/go.mod h1:CSYvoAlsMkhYOXh/oKyxa8EcBci6dVkLCbo5tTC1RIE=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
 github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
-github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
+github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/hybridgroup/mjpeg v0.0.0-20140228234708-4680f319790e/go.mod h1:eagM805MRKrioHYuU7iKLUyFPVKqVV6um5DAvCkUtXs=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI=
-go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
-go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8=
-go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
+github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
+go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo=
+go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ=
+go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
+go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
 gocv.io/x/gocv v0.31.0 h1:BHDtK8v+YPvoSPQTTiZB2fM/7BLg6511JqkruY2z6LQ=
 gocv.io/x/gocv v0.31.0/go.mod h1:oc6FvfYqfBp99p+yOEzs9tbYF9gOrAQSeL/dyIPefJU=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
+golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
 google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=


@@ -113,7 +113,9 @@ identifier; this is as per the [spec](https://docs.oasis-open.org/mqtt/mqtt/v3.1
   not received, disconnecting` errors).
 * When QOS1+ subscriptions have been created previously and you connect with `CleanSession` set to false it is possible
   that the broker will deliver retained messages before `Subscribe` can be called. To process these messages either
-  configure a handler with `AddRoute` or set a `DefaultPublishHandler`.
+  configure a handler with `AddRoute` or set a `DefaultPublishHandler`. If there is no handler (or `DefaultPublishHandler`)
+  then inbound messages will not be acknowledged. Adding a handler (even if it's `opts.SetDefaultPublishHandler(func(mqtt.Client, mqtt.Message) {})`)
+  is highly recommended to avoid inadvertently hitting inflight message limits.
 * Loss of network connectivity may not be detected immediately. If this is an issue then consider setting
   `ClientOptions.KeepAlive` (sends regular messages to check the link is active).
 * Reusing a `Client` is not completely safe. After calling `Disconnect` please create a new Client (`NewClient()`) rather
@@ -193,4 +195,4 @@ Discussion of the Paho clients takes place on the [Eclipse paho-dev mailing list
 General questions about the MQTT protocol are discussed in the [MQTT Google Group](https://groups.google.com/forum/?hl=en-US&fromgroups#!forum/mqtt).
 There is much more information available via the [MQTT community site](http://mqtt.org).
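To make the `AddRoute`/`DefaultPublishHandler` advice from the bullet above concrete, here is a minimal sketch of a client that registers both before connecting. The broker URL, client ID and topic are placeholders, not values taken from this repository:

```go
package main

import (
	"fmt"
	"time"

	mqtt "github.com/eclipse/paho.mqtt.golang"
)

func main() {
	opts := mqtt.NewClientOptions().
		AddBroker("tcp://broker.example.com:1883"). // placeholder broker
		SetClientID("robocar-example").
		SetCleanSession(false)

	// Messages delivered before Subscribe() completes are still handled and acknowledged.
	opts.SetDefaultPublishHandler(func(_ mqtt.Client, m mqtt.Message) {
		fmt.Printf("unexpected message on %s: %d bytes\n", m.Topic(), len(m.Payload()))
	})

	client := mqtt.NewClient(opts)

	// A route can also be added before Connect so early messages reach the right callback.
	client.AddRoute("camera/frame", func(_ mqtt.Client, m mqtt.Message) {
		fmt.Printf("frame: %d bytes\n", len(m.Payload()))
	})

	if t := client.Connect(); t.WaitTimeout(5*time.Second) && t.Error() != nil {
		panic(t.Error())
	}
}
```

Registering the handlers before `Connect` matters because, with `CleanSession` set to false, the broker may start delivering retained QoS1+ messages as soon as the connection comes up.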

vendor/github.com/eclipse/paho.mqtt.golang/backoff.go (new file, generated/vendored, 104 lines)

@@ -0,0 +1,104 @@
/*
 * Copyright (c) 2021 IBM Corp and others.
 *
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v2.0
 * and Eclipse Distribution License v1.0 which accompany this distribution.
 *
 * The Eclipse Public License is available at
 *    https://www.eclipse.org/legal/epl-2.0/
 * and the Eclipse Distribution License is available at
 *    http://www.eclipse.org/org/documents/edl-v10.php.
 *
 * Contributors:
 *    Matt Brittan
 *    Daichi Tomaru
 */

package mqtt

import (
	"sync"
	"time"
)

// Controller for sleeping with backoff when the client attempts reconnection.
// It keeps a status for each situation that causes a reconnection.
type backoffController struct {
	sync.RWMutex
	statusMap map[string]*backoffStatus
}

type backoffStatus struct {
	lastSleepPeriod time.Duration
	lastErrorTime   time.Time
}

func newBackoffController() *backoffController {
	return &backoffController{
		statusMap: map[string]*backoffStatus{},
	}
}

// Calculate the next sleep period from the specified parameters.
// Returns the next sleep period and whether the error situation is continual.
// If connection errors occur continuously, the sleep period is increased exponentially.
// If a lot of time has passed since the last error, the sleep period is reset.
func (b *backoffController) getBackoffSleepTime(
	situation string, initSleepPeriod time.Duration, maxSleepPeriod time.Duration, processTime time.Duration, skipFirst bool,
) (time.Duration, bool) {
	// Decide the first sleep time when the situation is not continual.
	var firstProcess = func(status *backoffStatus, init time.Duration, skip bool) (time.Duration, bool) {
		if skip {
			status.lastSleepPeriod = 0
			return 0, false
		}
		status.lastSleepPeriod = init
		return init, false
	}

	// Prioritize maxSleep.
	if initSleepPeriod > maxSleepPeriod {
		initSleepPeriod = maxSleepPeriod
	}
	b.Lock()
	defer b.Unlock()

	status, exist := b.statusMap[situation]
	if !exist {
		b.statusMap[situation] = &backoffStatus{initSleepPeriod, time.Now()}
		return firstProcess(b.statusMap[situation], initSleepPeriod, skipFirst)
	}

	oldTime := status.lastErrorTime
	status.lastErrorTime = time.Now()

	// If a lot of time has passed since the last error, the sleep period is reset.
	if status.lastErrorTime.Sub(oldTime) > (processTime*2 + status.lastSleepPeriod) {
		return firstProcess(status, initSleepPeriod, skipFirst)
	}

	if status.lastSleepPeriod == 0 {
		status.lastSleepPeriod = initSleepPeriod
		return initSleepPeriod, true
	}

	if nextSleepPeriod := status.lastSleepPeriod * 2; nextSleepPeriod <= maxSleepPeriod {
		status.lastSleepPeriod = nextSleepPeriod
	} else {
		status.lastSleepPeriod = maxSleepPeriod
	}

	return status.lastSleepPeriod, true
}

// Sleep for the period returned by getBackoffSleepTime.
func (b *backoffController) sleepWithBackoff(
	situation string, initSleepPeriod time.Duration, maxSleepPeriod time.Duration, processTime time.Duration, skipFirst bool,
) (time.Duration, bool) {
	sleep, isFirst := b.getBackoffSleepTime(situation, initSleepPeriod, maxSleepPeriod, processTime, skipFirst)

	if sleep != 0 {
		time.Sleep(sleep)
	}

	return sleep, isFirst
}
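`backoffController` is unexported, so applications never call it directly; they only shape its behaviour through `ClientOptions`. A sketch of the relevant knobs (the interval values below are illustrative assumptions, not taken from this commit):

```go
package main

import (
	"time"

	mqtt "github.com/eclipse/paho.mqtt.golang"
)

func newClientWithBackoffTuning() mqtt.Client {
	opts := mqtt.NewClientOptions().
		AddBroker("tcp://broker.example.com:1883"). // placeholder broker
		SetAutoReconnect(true).                     // lost connections trigger the reconnect loop
		SetConnectRetry(true).                      // the initial Connect() also retries
		SetConnectRetryInterval(2 * time.Second).   // base interval for initial connect retries
		SetMaxReconnectInterval(30 * time.Second).  // cap for the exponential backoff implemented above
		SetConnectTimeout(5 * time.Second)          // also bounds each connection attempt
	return mqtt.NewClient(opts)
}
```

With these options a connection loss is retried with exponentially growing sleeps capped by `MaxReconnectInterval`, which is exactly the pattern `getBackoffSleepTime` implements.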


@ -38,13 +38,6 @@ import (
"github.com/eclipse/paho.mqtt.golang/packets" "github.com/eclipse/paho.mqtt.golang/packets"
) )
const (
disconnected uint32 = iota
connecting
reconnecting
connected
)
// Client is the interface definition for a Client as used by this // Client is the interface definition for a Client as used by this
// library, the interface is primarily to allow mocking tests. // library, the interface is primarily to allow mocking tests.
// //
@ -52,9 +45,12 @@ const (
// with an MQTT server using non-blocking methods that allow work // with an MQTT server using non-blocking methods that allow work
// to be done in the background. // to be done in the background.
// An application may connect to an MQTT server using: // An application may connect to an MQTT server using:
// A plain TCP socket //
// A secure SSL/TLS socket // A plain TCP socket (e.g. mqtt://test.mosquitto.org:1833)
// A websocket // A secure SSL/TLS socket (e.g. tls://test.mosquitto.org:8883)
// A websocket (e.g ws://test.mosquitto.org:8080 or wss://test.mosquitto.org:8081)
// Something else (using `options.CustomOpenConnectionFn`)
//
// To enable ensured message delivery at Quality of Service (QoS) levels // To enable ensured message delivery at Quality of Service (QoS) levels
// described in the MQTT spec, a message persistence mechanism must be // described in the MQTT spec, a message persistence mechanism must be
// used. This is done by providing a type which implements the Store // used. This is done by providing a type which implements the Store
@ -128,8 +124,7 @@ type client struct {
lastReceived atomic.Value // time.Time - the last time a packet was successfully received from network lastReceived atomic.Value // time.Time - the last time a packet was successfully received from network
pingOutstanding int32 // set to 1 if a ping has been sent but response not ret received pingOutstanding int32 // set to 1 if a ping has been sent but response not ret received
status uint32 // see const definitions at top of file for possible values status connectionStatus // see constants in status.go for values
sync.RWMutex // Protects the above two variables (note: atomic writes are also used somewhat inconsistently)
messageIds // effectively a map from message id to token completor messageIds // effectively a map from message id to token completor
@ -146,6 +141,8 @@ type client struct {
stop chan struct{} // Closed to request that workers stop stop chan struct{} // Closed to request that workers stop
workers sync.WaitGroup // used to wait for workers to complete (ping, keepalive, errwatch, resume) workers sync.WaitGroup // used to wait for workers to complete (ping, keepalive, errwatch, resume)
commsStopped chan struct{} // closed when the comms routines have stopped (kept running until after workers have closed to avoid deadlocks) commsStopped chan struct{} // closed when the comms routines have stopped (kept running until after workers have closed to avoid deadlocks)
backoff *backoffController
} }
// NewClient will create an MQTT v3.1.1 client with all of the options specified // NewClient will create an MQTT v3.1.1 client with all of the options specified
@ -169,12 +166,12 @@ func NewClient(o *ClientOptions) Client {
c.options.protocolVersionExplicit = false c.options.protocolVersionExplicit = false
} }
c.persist = c.options.Store c.persist = c.options.Store
c.status = disconnected
c.messageIds = messageIds{index: make(map[uint16]tokenCompletor)} c.messageIds = messageIds{index: make(map[uint16]tokenCompletor)}
c.msgRouter = newRouter() c.msgRouter = newRouter()
c.msgRouter.setDefaultHandler(c.options.DefaultPublishHandler) c.msgRouter.setDefaultHandler(c.options.DefaultPublishHandler)
c.obound = make(chan *PacketAndToken) c.obound = make(chan *PacketAndToken)
c.oboundP = make(chan *PacketAndToken) c.oboundP = make(chan *PacketAndToken)
c.backoff = newBackoffController()
return c return c
} }
@@ -196,47 +193,27 @@ func (c *client) AddRoute(topic string, callback MessageHandler) {
 // the client is connected or not.
 // connected means that the connection is up now OR it will
 // be established/reestablished automatically when possible
+// Warning: The connection status may change at any time so use this with care!
 func (c *client) IsConnected() bool {
-	c.RLock()
-	defer c.RUnlock()
-	status := atomic.LoadUint32(&c.status)
+	// This will need to change if additional statuses are added
+	s, r := c.status.ConnectionStatusRetry()
 	switch {
-	case status == connected:
+	case s == connected:
 		return true
-	case c.options.AutoReconnect && status > connecting:
-		return true
-	case c.options.ConnectRetry && status == connecting:
+	case c.options.ConnectRetry && s == connecting:
 		return true
+	case c.options.AutoReconnect:
+		return s == reconnecting || (s == disconnecting && r) // r indicates we will reconnect
 	default:
 		return false
 	}
 }

 // IsConnectionOpen return a bool signifying whether the client has an active
-// connection to mqtt broker, i.e not in disconnected or reconnect mode
+// connection to mqtt broker, i.e. not in disconnected or reconnect mode
+// Warning: The connection status may change at any time so use this with care!
 func (c *client) IsConnectionOpen() bool {
-	c.RLock()
-	defer c.RUnlock()
-	status := atomic.LoadUint32(&c.status)
-	switch {
-	case status == connected:
-		return true
-	default:
-		return false
-	}
-}
-
-func (c *client) connectionStatus() uint32 {
-	c.RLock()
-	defer c.RUnlock()
-	status := atomic.LoadUint32(&c.status)
-	return status
-}
-
-func (c *client) setConnected(status uint32) {
-	c.Lock()
-	defer c.Unlock()
-	atomic.StoreUint32(&c.status, status)
+	return c.status.ConnectionStatus() == connected
 }
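Given the new warning that the status can change at any moment, polling `IsConnected`/`IsConnectionOpen` is rarely the right tool from application code; the option handlers cover most needs. A hedged sketch (the topic and logging are placeholders, not from this repository):

```go
package main

import (
	"log"

	mqtt "github.com/eclipse/paho.mqtt.golang"
)

func configureStatusHandlers(opts *mqtt.ClientOptions) {
	// Runs on every successful connect and reconnect, so subscriptions are restored
	// without polling IsConnected / IsConnectionOpen.
	opts.SetOnConnectHandler(func(c mqtt.Client) {
		if t := c.Subscribe("events/#", 0, nil); t.Wait() && t.Error() != nil {
			log.Printf("subscribe failed: %v", t.Error())
		}
	})
	// Runs when the connection drops; the client reconnects on its own if AutoReconnect is set.
	opts.SetConnectionLostHandler(func(_ mqtt.Client, err error) {
		log.Printf("connection lost: %v", err)
	})
}
```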
// ErrNotConnected is the error returned from function calls that are // ErrNotConnected is the error returned from function calls that are
@ -253,25 +230,31 @@ func (c *client) Connect() Token {
t := newToken(packets.Connect).(*ConnectToken) t := newToken(packets.Connect).(*ConnectToken)
DEBUG.Println(CLI, "Connect()") DEBUG.Println(CLI, "Connect()")
if c.options.ConnectRetry && atomic.LoadUint32(&c.status) != disconnected { connectionUp, err := c.status.Connecting()
// if in any state other than disconnected and ConnectRetry is if err != nil {
// enabled then the connection will come up automatically if err == errAlreadyConnectedOrReconnecting && c.options.AutoReconnect {
// client can assume connection is up // When reconnection is active we don't consider calls to Connect to be an error (mainly for compatibility)
WARN.Println(CLI, "Connect() called but not disconnected") WARN.Println(CLI, "Connect() called but not disconnected")
t.returnCode = packets.Accepted t.returnCode = packets.Accepted
t.flowComplete() t.flowComplete()
return t
}
ERROR.Println(CLI, err) // CONNECT should never be called unless we are disconnected
t.setError(err)
return t return t
} }
c.persist.Open() c.persist.Open()
if c.options.ConnectRetry { if c.options.ConnectRetry {
c.reserveStoredPublishIDs() // Reserve IDs to allow publish before connect complete c.reserveStoredPublishIDs() // Reserve IDs to allow publishing before connect complete
} }
c.setConnected(connecting)
go func() { go func() {
if len(c.options.Servers) == 0 { if len(c.options.Servers) == 0 {
t.setError(fmt.Errorf("no servers defined to connect to")) t.setError(fmt.Errorf("no servers defined to connect to"))
if err := connectionUp(false); err != nil {
ERROR.Println(CLI, err.Error())
}
return return
} }
@ -285,26 +268,28 @@ func (c *client) Connect() Token {
DEBUG.Println(CLI, "Connect failed, sleeping for", int(c.options.ConnectRetryInterval.Seconds()), "seconds and will then retry, error:", err.Error()) DEBUG.Println(CLI, "Connect failed, sleeping for", int(c.options.ConnectRetryInterval.Seconds()), "seconds and will then retry, error:", err.Error())
time.Sleep(c.options.ConnectRetryInterval) time.Sleep(c.options.ConnectRetryInterval)
if atomic.LoadUint32(&c.status) == connecting { if c.status.ConnectionStatus() == connecting { // Possible connection aborted elsewhere
goto RETRYCONN goto RETRYCONN
} }
} }
ERROR.Println(CLI, "Failed to connect to a broker") ERROR.Println(CLI, "Failed to connect to a broker")
c.setConnected(disconnected)
c.persist.Close() c.persist.Close()
t.returnCode = rc t.returnCode = rc
t.setError(err) t.setError(err)
if err := connectionUp(false); err != nil {
ERROR.Println(CLI, err.Error())
}
return return
} }
inboundFromStore := make(chan packets.ControlPacket) // there may be some inbound comms packets in the store that are awaiting processing inboundFromStore := make(chan packets.ControlPacket) // there may be some inbound comms packets in the store that are awaiting processing
if c.startCommsWorkers(conn, inboundFromStore) { if c.startCommsWorkers(conn, connectionUp, inboundFromStore) { // note that this takes care of updating the status (to connected or disconnected)
// Take care of any messages in the store // Take care of any messages in the store
if !c.options.CleanSession { if !c.options.CleanSession {
c.resume(c.options.ResumeSubs, inboundFromStore) c.resume(c.options.ResumeSubs, inboundFromStore)
} else { } else {
c.persist.Reset() c.persist.Reset()
} }
} else { } else { // Note: With the new status subsystem this should only happen if Disconnect called simultaneously with the above
WARN.Println(CLI, "Connect() called but connection established in another goroutine") WARN.Println(CLI, "Connect() called but connection established in another goroutine")
} }
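A typical call site for `Connect` against this version of the client looks like the sketch below; the option set is assumed to be built elsewhere (for example with the backoff tuning shown earlier):

```go
package main

import (
	"log"

	mqtt "github.com/eclipse/paho.mqtt.golang"
)

func connectOrDie(opts *mqtt.ClientOptions) mqtt.Client {
	client := mqtt.NewClient(opts)
	token := client.Connect()
	// With ConnectRetry enabled the token may only complete once a broker is reached,
	// so Wait() can block; WaitTimeout is an alternative if the caller needs a bound.
	if token.Wait() && token.Error() != nil {
		log.Fatalf("connect failed: %v", token.Error())
	}
	return client
}
```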
@ -316,13 +301,20 @@ func (c *client) Connect() Token {
} }
// internal function used to reconnect the client when it loses its connection // internal function used to reconnect the client when it loses its connection
func (c *client) reconnect() { // The connection status MUST be reconnecting prior to calling this function (via call to status.connectionLost)
func (c *client) reconnect(connectionUp connCompletedFn) {
DEBUG.Println(CLI, "enter reconnect") DEBUG.Println(CLI, "enter reconnect")
var ( var (
sleep = 1 * time.Second initSleep = 1 * time.Second
conn net.Conn conn net.Conn
) )
// If the reason for the connection loss is the same as the previous one, a backoff sleep is applied before the next connection attempt.
// The sleep time increases exponentially while the same situation continues.
if slp, isContinual := c.backoff.sleepWithBackoff("connectionLost", initSleep, c.options.MaxReconnectInterval, 3 * time.Second, true); isContinual {
DEBUG.Println(CLI, "Detect continual connection lost after reconnect, slept for", int(slp.Seconds()), "seconds")
}
for { for {
if nil != c.options.OnReconnecting { if nil != c.options.OnReconnecting {
c.options.OnReconnecting(c, &c.options) c.options.OnReconnecting(c, &c.options)
@ -332,32 +324,20 @@ func (c *client) reconnect() {
if err == nil { if err == nil {
break break
} }
DEBUG.Println(CLI, "Reconnect failed, sleeping for", int(sleep.Seconds()), "seconds:", err) sleep, _ := c.backoff.sleepWithBackoff("attemptReconnection", initSleep, c.options.MaxReconnectInterval, c.options.ConnectTimeout, false)
time.Sleep(sleep) DEBUG.Println(CLI, "Reconnect failed, slept for", int(sleep.Seconds()), "seconds:", err)
if sleep < c.options.MaxReconnectInterval {
sleep *= 2
}
if sleep > c.options.MaxReconnectInterval { if c.status.ConnectionStatus() != reconnecting { // Disconnect may have been called
sleep = c.options.MaxReconnectInterval if err := connectionUp(false); err != nil { // Should always return an error
} ERROR.Println(CLI, err.Error())
// Disconnect may have been called }
if atomic.LoadUint32(&c.status) == disconnected { DEBUG.Println(CLI, "Client moved to disconnected state while reconnecting, abandoning reconnect")
break return
} }
} }
// Disconnect() must have been called while we were trying to reconnect. inboundFromStore := make(chan packets.ControlPacket) // there may be some inbound comms packets in the store that are awaiting processing
if c.connectionStatus() == disconnected { if c.startCommsWorkers(conn, connectionUp, inboundFromStore) { // note that this takes care of updating the status (to connected or disconnected)
if conn != nil {
conn.Close()
}
DEBUG.Println(CLI, "Client moved to disconnected state while reconnecting, abandoning reconnect")
return
}
inboundFromStore := make(chan packets.ControlPacket) // there may be some inbound comms packets in the store that are awaiting processing
if c.startCommsWorkers(conn, inboundFromStore) {
c.resume(c.options.ResumeSubs, inboundFromStore) c.resume(c.options.ResumeSubs, inboundFromStore)
} }
close(inboundFromStore) close(inboundFromStore)
@ -392,6 +372,7 @@ func (c *client) attemptConnection() (net.Conn, byte, bool, error) {
DEBUG.Println(CLI, "using custom onConnectAttempt handler...") DEBUG.Println(CLI, "using custom onConnectAttempt handler...")
tlsCfg = c.options.OnConnectAttempt(broker, c.options.TLSConfig) tlsCfg = c.options.OnConnectAttempt(broker, c.options.TLSConfig)
} }
connDeadline := time.Now().Add(c.options.ConnectTimeout) // Time by which connection must be established
dialer := c.options.Dialer dialer := c.options.Dialer
if dialer == nil { // if dialer == nil { //
WARN.Println(CLI, "dialer was nil, using default") WARN.Println(CLI, "dialer was nil, using default")
@ -411,16 +392,23 @@ func (c *client) attemptConnection() (net.Conn, byte, bool, error) {
} }
DEBUG.Println(CLI, "socket connected to broker") DEBUG.Println(CLI, "socket connected to broker")
// Now we send the perform the MQTT connection handshake // Now we perform the MQTT connection handshake ensuring that it does not exceed the timeout
if err := conn.SetDeadline(connDeadline); err != nil {
ERROR.Println(CLI, "set deadline for handshake ", err)
}
// Now we perform the MQTT connection handshake
rc, sessionPresent, err = connectMQTT(conn, cm, protocolVersion) rc, sessionPresent, err = connectMQTT(conn, cm, protocolVersion)
if rc == packets.Accepted { if rc == packets.Accepted {
if err := conn.SetDeadline(time.Time{}); err != nil {
ERROR.Println(CLI, "reset deadline following handshake ", err)
}
break // successfully connected break // successfully connected
} }
// We may be have to attempt the connection with MQTT 3.1 // We may have to attempt the connection with MQTT 3.1
if conn != nil { _ = conn.Close()
_ = conn.Close()
}
if !c.options.protocolVersionExplicit && protocolVersion == 4 { // try falling back to 3.1? if !c.options.protocolVersionExplicit && protocolVersion == 4 { // try falling back to 3.1?
DEBUG.Println(CLI, "Trying reconnect using MQTT 3.1 protocol") DEBUG.Println(CLI, "Trying reconnect using MQTT 3.1 protocol")
protocolVersion = 3 protocolVersion = 3
@ -452,43 +440,59 @@ func (c *client) attemptConnection() (net.Conn, byte, bool, error) {
// reusing the `client` may lead to panics. If you want to reconnect when the connection drops then use // reusing the `client` may lead to panics. If you want to reconnect when the connection drops then use
// `SetAutoReconnect` and/or `SetConnectRetry`options instead of implementing this yourself. // `SetAutoReconnect` and/or `SetConnectRetry`options instead of implementing this yourself.
func (c *client) Disconnect(quiesce uint) { func (c *client) Disconnect(quiesce uint) {
defer c.disconnect() done := make(chan struct{}) // Simplest way to ensure quiesce is always honoured
go func() {
defer close(done)
disDone, err := c.status.Disconnecting()
if err != nil {
// Status has been set to disconnecting, but we had to wait for something else to complete
WARN.Println(CLI, err.Error())
return
}
defer func() {
c.disconnect() // Force disconnection
disDone() // Update status
}()
DEBUG.Println(CLI, "disconnecting")
dm := packets.NewControlPacket(packets.Disconnect).(*packets.DisconnectPacket)
dt := newToken(packets.Disconnect)
select {
case c.oboundP <- &PacketAndToken{p: dm, t: dt}:
// wait for work to finish, or quiesce time consumed
DEBUG.Println(CLI, "calling WaitTimeout")
dt.WaitTimeout(time.Duration(quiesce) * time.Millisecond)
DEBUG.Println(CLI, "WaitTimeout done")
// Below code causes a potential data race. Following status refactor it should no longer be required
// but leaving in as need to check code further.
// case <-c.commsStopped:
// WARN.Println("Disconnect packet could not be sent because comms stopped")
case <-time.After(time.Duration(quiesce) * time.Millisecond):
WARN.Println("Disconnect packet not sent due to timeout")
}
}()
status := atomic.LoadUint32(&c.status) // Return when done or after timeout expires (would like to change but this maintains compatibility)
c.setConnected(disconnected) delay := time.NewTimer(time.Duration(quiesce) * time.Millisecond)
if status != connected {
WARN.Println(CLI, "Disconnect() called but not connected (disconnected/reconnecting)")
return
}
DEBUG.Println(CLI, "disconnecting")
dm := packets.NewControlPacket(packets.Disconnect).(*packets.DisconnectPacket)
dt := newToken(packets.Disconnect)
select { select {
case c.oboundP <- &PacketAndToken{p: dm, t: dt}: case <-done:
// wait for work to finish, or quiesce time consumed if !delay.Stop() {
DEBUG.Println(CLI, "calling WaitTimeout") <-delay.C
dt.WaitTimeout(time.Duration(quiesce) * time.Millisecond) }
DEBUG.Println(CLI, "WaitTimeout done") case <-delay.C:
// Let's comment this chunk of code until we are able to safely read this variable
// without data races.
// case <-c.commsStopped:
// WARN.Println("Disconnect packet could not be sent because comms stopped")
case <-time.After(time.Duration(quiesce) * time.Millisecond):
WARN.Println("Disconnect packet not sent due to timeout")
} }
} }
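From application code the only visible change is that the quiesce period is now honoured even when sending the DISCONNECT packet stalls. A minimal usage sketch; the 250 ms value is just a common choice, not mandated by the library:

```go
package main

import mqtt "github.com/eclipse/paho.mqtt.golang"

func shutdown(client mqtt.Client) {
	// Allow up to 250 ms for queued work (including the DISCONNECT packet) to be flushed.
	// The rewritten Disconnect above returns after the quiesce period or when the work
	// completes, whichever comes first; reusing the client afterwards is not supported.
	client.Disconnect(250)
}
```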
// forceDisconnect will end the connection with the mqtt broker immediately (used for tests only) // forceDisconnect will end the connection with the mqtt broker immediately (used for tests only)
func (c *client) forceDisconnect() { func (c *client) forceDisconnect() {
if !c.IsConnected() { disDone, err := c.status.Disconnecting()
WARN.Println(CLI, "already disconnected") if err != nil {
// Possible that we are not actually connected
WARN.Println(CLI, err.Error())
return return
} }
c.setConnected(disconnected)
DEBUG.Println(CLI, "forcefully disconnecting") DEBUG.Println(CLI, "forcefully disconnecting")
c.disconnect() c.disconnect()
disDone()
} }
// disconnect cleans up after a final disconnection (user requested so no auto reconnection) // disconnect cleans up after a final disconnection (user requested so no auto reconnection)
@ -505,49 +509,79 @@ func (c *client) disconnect() {
// internalConnLost cleanup when connection is lost or an error occurs // internalConnLost cleanup when connection is lost or an error occurs
// Note: This function will not block // Note: This function will not block
func (c *client) internalConnLost(err error) { func (c *client) internalConnLost(whyConnLost error) {
// It is possible that internalConnLost will be called multiple times simultaneously // It is possible that internalConnLost will be called multiple times simultaneously
// (including after sending a DisconnectPacket) as such we only do cleanup etc if the // (including after sending a DisconnectPacket) as such we only do cleanup etc if the
// routines were actually running and are not being disconnected at users request // routines were actually running and are not being disconnected at users request
DEBUG.Println(CLI, "internalConnLost called") DEBUG.Println(CLI, "internalConnLost called")
stopDone := c.stopCommsWorkers() disDone, err := c.status.ConnectionLost(c.options.AutoReconnect && c.status.ConnectionStatus() > connecting)
if stopDone != nil { // stopDone will be nil if workers already in the process of stopping or stopped if err != nil {
go func() { if err == errConnLossWhileDisconnecting || err == errAlreadyHandlingConnectionLoss {
DEBUG.Println(CLI, "internalConnLost waiting on workers") return // Loss of connection is expected or already being handled
<-stopDone }
DEBUG.Println(CLI, "internalConnLost workers stopped") ERROR.Println(CLI, fmt.Sprintf("internalConnLost unexpected status: %s", err.Error()))
// It is possible that Disconnect was called which led to this error so reconnection depends upon status return
reconnect := c.options.AutoReconnect && c.connectionStatus() > connecting
if c.options.CleanSession && !reconnect {
c.messageIds.cleanUp() // completes PUB/SUB/UNSUB tokens
} else if !c.options.ResumeSubs {
c.messageIds.cleanUpSubscribe() // completes SUB/UNSUB tokens
}
if reconnect {
c.setConnected(reconnecting)
go c.reconnect()
} else {
c.setConnected(disconnected)
}
if c.options.OnConnectionLost != nil {
go c.options.OnConnectionLost(c, err)
}
DEBUG.Println(CLI, "internalConnLost complete")
}()
} }
// c.stopCommsWorker returns a channel that is closed when the operation completes. This was required prior
// to the implementation of proper status management but has been left in place, for now, to minimise change
stopDone := c.stopCommsWorkers()
// stopDone was required in previous versions because there was no connectionLost status (and there were
// issues with status handling). This code has been left in place for the time being just in case the new
// status handling contains bugs (refactoring required at some point).
if stopDone == nil { // stopDone will be nil if workers already in the process of stopping or stopped
ERROR.Println(CLI, "internalConnLost stopDone unexpectedly nil - BUG BUG")
// Cannot really do anything other than leave things disconnected
if _, err = disDone(false); err != nil { // Safest option - cannot leave status as connectionLost
ERROR.Println(CLI, fmt.Sprintf("internalConnLost failed to set status to disconnected (stopDone): %s", err.Error()))
}
return
}
// It may take a while for the disconnection to complete; whatever called us needs to exit cleanly, so finish in a goroutine
go func() {
DEBUG.Println(CLI, "internalConnLost waiting on workers")
<-stopDone
DEBUG.Println(CLI, "internalConnLost workers stopped")
reConnDone, err := disDone(true)
if err != nil {
ERROR.Println(CLI, "failure whilst reporting completion of disconnect", err)
} else if reConnDone == nil { // Should never happen
ERROR.Println(CLI, "BUG BUG BUG reconnection function is nil", err)
}
reconnect := err == nil && reConnDone != nil
if c.options.CleanSession && !reconnect {
c.messageIds.cleanUp() // completes PUB/SUB/UNSUB tokens
} else if !c.options.ResumeSubs {
c.messageIds.cleanUpSubscribe() // completes SUB/UNSUB tokens
}
if reconnect {
go c.reconnect(reConnDone) // Will set connection status to reconnecting
}
if c.options.OnConnectionLost != nil {
go c.options.OnConnectionLost(c, whyConnLost)
}
DEBUG.Println(CLI, "internalConnLost complete")
}()
} }
// startCommsWorkers is called when the connection is up. // startCommsWorkers is called when the connection is up.
// It starts off all of the routines needed to process incoming and outgoing messages. // It starts off the routines needed to process incoming and outgoing messages.
// Returns true if the comms workers were started (i.e. they were not already running) // Returns true if the comms workers were started (i.e. successful connection)
func (c *client) startCommsWorkers(conn net.Conn, inboundFromStore <-chan packets.ControlPacket) bool { // connectionUp(true) will be called once everything is up; connectionUp(false) will be called on failure
func (c *client) startCommsWorkers(conn net.Conn, connectionUp connCompletedFn, inboundFromStore <-chan packets.ControlPacket) bool {
DEBUG.Println(CLI, "startCommsWorkers called") DEBUG.Println(CLI, "startCommsWorkers called")
c.connMu.Lock() c.connMu.Lock()
defer c.connMu.Unlock() defer c.connMu.Unlock()
if c.conn != nil { if c.conn != nil { // Should never happen due to new status handling; leaving in for safety for the time being
WARN.Println(CLI, "startCommsWorkers called when commsworkers already running") WARN.Println(CLI, "startCommsWorkers called when commsworkers already running BUG BUG")
conn.Close() // No use for the new network connection _ = conn.Close() // No use for the new network connection
if err := connectionUp(false); err != nil {
ERROR.Println(CLI, err.Error())
}
return false return false
} }
c.conn = conn // Store the connection c.conn = conn // Store the connection
@ -567,7 +601,17 @@ func (c *client) startCommsWorkers(conn net.Conn, inboundFromStore <-chan packet
c.workers.Add(1) // Done will be called when ackOut is closed c.workers.Add(1) // Done will be called when ackOut is closed
ackOut := c.msgRouter.matchAndDispatch(incomingPubChan, c.options.Order, c) ackOut := c.msgRouter.matchAndDispatch(incomingPubChan, c.options.Order, c)
c.setConnected(connected) // The connection is now ready for use (we spin up a few go routines below). It is possible that
// Disconnect has been called in the interim...
if err := connectionUp(true); err != nil {
DEBUG.Println(CLI, err)
close(c.stop) // Tidy up anything we have already started
close(incomingPubChan)
c.workers.Wait()
c.conn.Close()
c.conn = nil
return false
}
DEBUG.Println(CLI, "client is connected/reconnected") DEBUG.Println(CLI, "client is connected/reconnected")
if c.options.OnConnect != nil { if c.options.OnConnect != nil {
go c.options.OnConnect(c) go c.options.OnConnect(c)
@ -660,8 +704,9 @@ func (c *client) startCommsWorkers(conn net.Conn, inboundFromStore <-chan packet
} }
// stopWorkersAndComms - Cleanly shuts down worker go routines (including the comms routines) and waits until everything has stopped // stopWorkersAndComms - Cleanly shuts down worker go routines (including the comms routines) and waits until everything has stopped
// Returns nil it workers did not need to be stopped; otherwise returns a channel which will be closed when the stop is complete // Returns nil if workers did not need to be stopped; otherwise returns a channel which will be closed when the stop is complete
// Note: This may block so run as a go routine if calling from any of the comms routines // Note: This may block so run as a go routine if calling from any of the comms routines
// Note2: It should be possible to simplify this now that the new status management code is in place.
func (c *client) stopCommsWorkers() chan struct{} { func (c *client) stopCommsWorkers() chan struct{} {
DEBUG.Println(CLI, "stopCommsWorkers called") DEBUG.Println(CLI, "stopCommsWorkers called")
// It is possible that this function will be called multiple times simultaneously due to the way things get shutdown // It is possible that this function will be called multiple times simultaneously due to the way things get shutdown
@ -710,7 +755,8 @@ func (c *client) Publish(topic string, qos byte, retained bool, payload interfac
case !c.IsConnected(): case !c.IsConnected():
token.setError(ErrNotConnected) token.setError(ErrNotConnected)
return token return token
case c.connectionStatus() == reconnecting && qos == 0: case c.status.ConnectionStatus() == reconnecting && qos == 0:
// message written to store and will be sent when connection comes up
token.flowComplete() token.flowComplete()
return token return token
} }
@ -740,11 +786,13 @@ func (c *client) Publish(topic string, qos byte, retained bool, payload interfac
token.messageID = mID token.messageID = mID
} }
persistOutbound(c.persist, pub) persistOutbound(c.persist, pub)
switch c.connectionStatus() { switch c.status.ConnectionStatus() {
case connecting: case connecting:
DEBUG.Println(CLI, "storing publish message (connecting), topic:", topic) DEBUG.Println(CLI, "storing publish message (connecting), topic:", topic)
case reconnecting: case reconnecting:
DEBUG.Println(CLI, "storing publish message (reconnecting), topic:", topic) DEBUG.Println(CLI, "storing publish message (reconnecting), topic:", topic)
case disconnecting:
DEBUG.Println(CLI, "storing publish message (disconnecting), topic:", topic)
default: default:
DEBUG.Println(CLI, "sending publish message, topic:", topic) DEBUG.Println(CLI, "sending publish message, topic:", topic)
publishWaitTimeout := c.options.WriteTimeout publishWaitTimeout := c.options.WriteTimeout
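Usage on the publishing side is unchanged; the extra `disconnecting` case above only affects what gets logged while a publish is being persisted. A small sketch (topic name, QoS and payload format are placeholders):

```go
package main

import (
	"fmt"
	"log"
	"time"

	mqtt "github.com/eclipse/paho.mqtt.golang"
)

func publishThrottle(client mqtt.Client, value float64) {
	payload := fmt.Sprintf("%.2f", value)
	// QoS 1 publishes issued while the client is connecting, reconnecting or disconnecting are
	// written to the store (see the switch above) and sent once the connection comes up.
	token := client.Publish("car/throttle", 1, false, payload)
	if token.WaitTimeout(2*time.Second) && token.Error() != nil {
		log.Printf("publish failed: %v", token.Error())
	}
}
```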
@ -777,11 +825,11 @@ func (c *client) Subscribe(topic string, qos byte, callback MessageHandler) Toke
if !c.IsConnectionOpen() { if !c.IsConnectionOpen() {
switch { switch {
case !c.options.ResumeSubs: case !c.options.ResumeSubs:
// if not connected and resumesubs not set this sub will be thrown away // if not connected and resumeSubs not set this sub will be thrown away
token.setError(fmt.Errorf("not currently connected and ResumeSubs not set")) token.setError(fmt.Errorf("not currently connected and ResumeSubs not set"))
return token return token
case c.options.CleanSession && c.connectionStatus() == reconnecting: case c.options.CleanSession && c.status.ConnectionStatus() == reconnecting:
// if reconnecting and cleansession is true this sub will be thrown away // if reconnecting and cleanSession is true this sub will be thrown away
token.setError(fmt.Errorf("reconnecting state and cleansession is true")) token.setError(fmt.Errorf("reconnecting state and cleansession is true"))
return token return token
} }
@ -822,11 +870,13 @@ func (c *client) Subscribe(topic string, qos byte, callback MessageHandler) Toke
if c.options.ResumeSubs { // Only persist if we need this to resume subs after a disconnection if c.options.ResumeSubs { // Only persist if we need this to resume subs after a disconnection
persistOutbound(c.persist, sub) persistOutbound(c.persist, sub)
} }
switch c.connectionStatus() { switch c.status.ConnectionStatus() {
case connecting: case connecting:
DEBUG.Println(CLI, "storing subscribe message (connecting), topic:", topic) DEBUG.Println(CLI, "storing subscribe message (connecting), topic:", topic)
case reconnecting: case reconnecting:
DEBUG.Println(CLI, "storing subscribe message (reconnecting), topic:", topic) DEBUG.Println(CLI, "storing subscribe message (reconnecting), topic:", topic)
case disconnecting:
DEBUG.Println(CLI, "storing subscribe message (disconnecting), topic:", topic)
default: default:
DEBUG.Println(CLI, "sending subscribe message, topic:", topic) DEBUG.Println(CLI, "sending subscribe message, topic:", topic)
subscribeWaitTimeout := c.options.WriteTimeout subscribeWaitTimeout := c.options.WriteTimeout
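A matching subscriber sketch; as the code above shows, a subscribe issued while reconnecting with `CleanSession` true is rejected, which is why subscribing from an `OnConnect` handler (see the earlier sketch) is usually preferable. Topic and QoS here are placeholders:

```go
package main

import (
	"log"

	mqtt "github.com/eclipse/paho.mqtt.golang"
)

func subscribeSteering(client mqtt.Client) {
	token := client.Subscribe("car/steering", 1, func(_ mqtt.Client, msg mqtt.Message) {
		log.Printf("steering update on %s: %d bytes", msg.Topic(), len(msg.Payload()))
	})
	if token.Wait() && token.Error() != nil {
		log.Printf("subscribe failed: %v", token.Error())
	}
}
```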
@ -864,8 +914,8 @@ func (c *client) SubscribeMultiple(filters map[string]byte, callback MessageHand
// if not connected and resumesubs not set this sub will be thrown away // if not connected and resumesubs not set this sub will be thrown away
token.setError(fmt.Errorf("not currently connected and ResumeSubs not set")) token.setError(fmt.Errorf("not currently connected and ResumeSubs not set"))
return token return token
case c.options.CleanSession && c.connectionStatus() == reconnecting: case c.options.CleanSession && c.status.ConnectionStatus() == reconnecting:
// if reconnecting and cleansession is true this sub will be thrown away // if reconnecting and cleanSession is true this sub will be thrown away
token.setError(fmt.Errorf("reconnecting state and cleansession is true")) token.setError(fmt.Errorf("reconnecting state and cleansession is true"))
return token return token
} }
@ -896,11 +946,13 @@ func (c *client) SubscribeMultiple(filters map[string]byte, callback MessageHand
if c.options.ResumeSubs { // Only persist if we need this to resume subs after a disconnection if c.options.ResumeSubs { // Only persist if we need this to resume subs after a disconnection
persistOutbound(c.persist, sub) persistOutbound(c.persist, sub)
} }
switch c.connectionStatus() { switch c.status.ConnectionStatus() {
case connecting: case connecting:
DEBUG.Println(CLI, "storing subscribe message (connecting), topics:", sub.Topics) DEBUG.Println(CLI, "storing subscribe message (connecting), topics:", sub.Topics)
case reconnecting: case reconnecting:
DEBUG.Println(CLI, "storing subscribe message (reconnecting), topics:", sub.Topics) DEBUG.Println(CLI, "storing subscribe message (reconnecting), topics:", sub.Topics)
case disconnecting:
DEBUG.Println(CLI, "storing subscribe message (disconnecting), topics:", sub.Topics)
default: default:
DEBUG.Println(CLI, "sending subscribe message, topics:", sub.Topics) DEBUG.Println(CLI, "sending subscribe message, topics:", sub.Topics)
subscribeWaitTimeout := c.options.WriteTimeout subscribeWaitTimeout := c.options.WriteTimeout
@ -1050,7 +1102,7 @@ func (c *client) resume(subscription bool, ibound chan packets.ControlPacket) {
} }
releaseSemaphore(token) // If limiting simultaneous messages then we need to know when message is acknowledged releaseSemaphore(token) // If limiting simultaneous messages then we need to know when message is acknowledged
default: default:
ERROR.Println(STR, "invalid message type in store (discarded)") ERROR.Println(STR, fmt.Sprintf("invalid message type (inbound - %T) in store (discarded)", packet))
c.persist.Del(key) c.persist.Del(key)
} }
} else { } else {
@ -1064,7 +1116,7 @@ func (c *client) resume(subscription bool, ibound chan packets.ControlPacket) {
return return
} }
default: default:
ERROR.Println(STR, "invalid message type in store (discarded)") ERROR.Println(STR, fmt.Sprintf("invalid message type (%T) in store (discarded)", packet))
c.persist.Del(key) c.persist.Del(key)
} }
} }
@ -1085,11 +1137,11 @@ func (c *client) Unsubscribe(topics ...string) Token {
if !c.IsConnectionOpen() { if !c.IsConnectionOpen() {
switch { switch {
case !c.options.ResumeSubs: case !c.options.ResumeSubs:
// if not connected and resumesubs not set this unsub will be thrown away // if not connected and resumeSubs not set this unsub will be thrown away
token.setError(fmt.Errorf("not currently connected and ResumeSubs not set")) token.setError(fmt.Errorf("not currently connected and ResumeSubs not set"))
return token return token
case c.options.CleanSession && c.connectionStatus() == reconnecting: case c.options.CleanSession && c.status.ConnectionStatus() == reconnecting:
// if reconnecting and cleansession is true this unsub will be thrown away // if reconnecting and cleanSession is true this unsub will be thrown away
token.setError(fmt.Errorf("reconnecting state and cleansession is true")) token.setError(fmt.Errorf("reconnecting state and cleansession is true"))
return token return token
} }
@ -1112,11 +1164,13 @@ func (c *client) Unsubscribe(topics ...string) Token {
persistOutbound(c.persist, unsub) persistOutbound(c.persist, unsub)
} }
switch c.connectionStatus() { switch c.status.ConnectionStatus() {
case connecting: case connecting:
DEBUG.Println(CLI, "storing unsubscribe message (connecting), topics:", topics) DEBUG.Println(CLI, "storing unsubscribe message (connecting), topics:", topics)
case reconnecting: case reconnecting:
DEBUG.Println(CLI, "storing unsubscribe message (reconnecting), topics:", topics) DEBUG.Println(CLI, "storing unsubscribe message (reconnecting), topics:", topics)
case disconnecting:
DEBUG.Println(CLI, "storing unsubscribe message (reconnecting), topics:", topics)
default: default:
DEBUG.Println(CLI, "sending unsubscribe message, topics:", topics) DEBUG.Println(CLI, "sending unsubscribe message, topics:", topics)
subscribeWaitTimeout := c.options.WriteTimeout subscribeWaitTimeout := c.options.WriteTimeout


@ -31,7 +31,7 @@ import (
type MId uint16 type MId uint16
type messageIds struct { type messageIds struct {
sync.RWMutex mu sync.RWMutex // Named to prevent Mu from being accessible directly via client
index map[uint16]tokenCompletor index map[uint16]tokenCompletor
lastIssuedID uint16 // The most recently issued ID. Used so we cycle through ids rather than immediately reusing them (can make debugging easier) lastIssuedID uint16 // The most recently issued ID. Used so we cycle through ids rather than immediately reusing them (can make debugging easier)
@ -44,7 +44,7 @@ const (
// cleanup clears the message ID map; completes all token types and sets error on PUB, SUB and UNSUB tokens. // cleanup clears the message ID map; completes all token types and sets error on PUB, SUB and UNSUB tokens.
func (mids *messageIds) cleanUp() { func (mids *messageIds) cleanUp() {
mids.Lock() mids.mu.Lock()
for _, token := range mids.index { for _, token := range mids.index {
switch token.(type) { switch token.(type) {
case *PublishToken: case *PublishToken:
@ -59,14 +59,14 @@ func (mids *messageIds) cleanUp() {
token.flowComplete() token.flowComplete()
} }
mids.index = make(map[uint16]tokenCompletor) mids.index = make(map[uint16]tokenCompletor)
mids.Unlock() mids.mu.Unlock()
DEBUG.Println(MID, "cleaned up") DEBUG.Println(MID, "cleaned up")
} }
// cleanUpSubscribe removes all SUBSCRIBE and UNSUBSCRIBE tokens (setting error) // cleanUpSubscribe removes all SUBSCRIBE and UNSUBSCRIBE tokens (setting error)
// This may be called when the connection is lost, and we will not be resending SUB/UNSUB packets // This may be called when the connection is lost, and we will not be resending SUB/UNSUB packets
func (mids *messageIds) cleanUpSubscribe() { func (mids *messageIds) cleanUpSubscribe() {
mids.Lock() mids.mu.Lock()
for mid, token := range mids.index { for mid, token := range mids.index {
switch token.(type) { switch token.(type) {
case *SubscribeToken: case *SubscribeToken:
@ -77,19 +77,19 @@ func (mids *messageIds) cleanUpSubscribe() {
delete(mids.index, mid) delete(mids.index, mid)
} }
} }
mids.Unlock() mids.mu.Unlock()
DEBUG.Println(MID, "cleaned up subs") DEBUG.Println(MID, "cleaned up subs")
} }
func (mids *messageIds) freeID(id uint16) { func (mids *messageIds) freeID(id uint16) {
mids.Lock() mids.mu.Lock()
delete(mids.index, id) delete(mids.index, id)
mids.Unlock() mids.mu.Unlock()
} }
func (mids *messageIds) claimID(token tokenCompletor, id uint16) { func (mids *messageIds) claimID(token tokenCompletor, id uint16) {
mids.Lock() mids.mu.Lock()
defer mids.Unlock() defer mids.mu.Unlock()
if _, ok := mids.index[id]; !ok { if _, ok := mids.index[id]; !ok {
mids.index[id] = token mids.index[id] = token
} else { } else {
@ -105,8 +105,8 @@ func (mids *messageIds) claimID(token tokenCompletor, id uint16) {
// getID will return an available id or 0 if none available // getID will return an available id or 0 if none available
// The id will generally be the previous id + 1 (because this makes tracing messages a bit simpler) // The id will generally be the previous id + 1 (because this makes tracing messages a bit simpler)
func (mids *messageIds) getID(t tokenCompletor) uint16 { func (mids *messageIds) getID(t tokenCompletor) uint16 {
mids.Lock() mids.mu.Lock()
defer mids.Unlock() defer mids.mu.Unlock()
i := mids.lastIssuedID // note: the only situation where lastIssuedID is 0 the map will be empty i := mids.lastIssuedID // note: the only situation where lastIssuedID is 0 the map will be empty
looped := false // uint16 will loop from 65535->0 looped := false // uint16 will loop from 65535->0
for { for {
@ -127,8 +127,8 @@ func (mids *messageIds) getID(t tokenCompletor) uint16 {
} }
func (mids *messageIds) getToken(id uint16) tokenCompletor { func (mids *messageIds) getToken(id uint16) tokenCompletor {
mids.RLock() mids.mu.RLock()
defer mids.RUnlock() defer mids.mu.RUnlock()
if token, ok := mids.index[id]; ok { if token, ok := mids.index[id]; ok {
return token return token
} }


@ -150,7 +150,7 @@ type incomingComms struct {
// startIncomingComms initiates incoming communications; this includes starting a goroutine to process incoming // startIncomingComms initiates incoming communications; this includes starting a goroutine to process incoming
// messages. // messages.
// Accepts a channel of inbound messages from the store (persisted messages); note this must be closed as soon as the // Accepts a channel of inbound messages from the store (persisted messages); note this must be closed as soon as
// everything in the store has been sent. // everything in the store has been sent.
// Returns a channel that will be passed any received packets; this will be closed on a network error (and inboundFromStore closed) // Returns a channel that will be passed any received packets; this will be closed on a network error (and inboundFromStore closed)
func startIncomingComms(conn io.Reader, func startIncomingComms(conn io.Reader,
@ -332,7 +332,7 @@ func startOutgoingComms(conn net.Conn,
DEBUG.Println(NET, "outbound wrote disconnect, closing connection") DEBUG.Println(NET, "outbound wrote disconnect, closing connection")
// As per the MQTT spec "After sending a DISCONNECT Packet the Client MUST close the Network Connection" // As per the MQTT spec "After sending a DISCONNECT Packet the Client MUST close the Network Connection"
// Closing the connection will cause the goroutines to end in sequence (starting with incoming comms) // Closing the connection will cause the goroutines to end in sequence (starting with incoming comms)
conn.Close() _ = conn.Close()
} }
case msg, ok := <-oboundFromIncoming: // message triggered by an inbound message (PubrecPacket or PubrelPacket) case msg, ok := <-oboundFromIncoming: // message triggered by an inbound message (PubrecPacket or PubrelPacket)
if !ok { if !ok {
@ -370,9 +370,10 @@ type commsFns interface {
// startComms initiates goroutines that handles communications over the network connection // startComms initiates goroutines that handles communications over the network connection
// Messages will be stored (via commsFns) and deleted from the store as necessary // Messages will be stored (via commsFns) and deleted from the store as necessary
// It returns two channels: // It returns two channels:
// packets.PublishPacket - Will receive publish packets received over the network. //
// Closed when incoming comms routines exit (on shutdown or if network link closed) // packets.PublishPacket - Will receive publish packets received over the network.
// error - Any errors will be sent on this channel. The channel is closed when all comms routines have shut down // Closed when incoming comms routines exit (on shutdown or if network link closed)
// error - Any errors will be sent on this channel. The channel is closed when all comms routines have shut down
// //
// Note: The comms routines monitoring oboundp and obound will not shutdown until those channels are both closed. Any messages received between the // Note: The comms routines monitoring oboundp and obound will not shutdown until those channels are both closed. Any messages received between the
// connection being closed and those channels being closed will generate errors (and nothing will be sent). That way the chance of a deadlock is // connection being closed and those channels being closed will generate errors (and nothing will be sent). That way the chance of a deadlock is

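As documented above, startComms hands back two channels, one delivering inbound PUBLISH packets and one delivering errors, and each is closed as the corresponding comms routines shut down. A rough consumer sketch that drains both until they close; the function and variable names are illustrative, not part of the library:

package main

import (
	"fmt"

	"github.com/eclipse/paho.mqtt.golang/packets"
)

// drainComms reads both channels until they are closed, so the sending
// goroutines can never block on an abandoned channel.
func drainComms(incoming <-chan *packets.PublishPacket, errs <-chan error) {
	for incoming != nil || errs != nil {
		select {
		case pub, ok := <-incoming:
			if !ok { // closed when the incoming comms routine exits
				incoming = nil
				continue
			}
			fmt.Println("publish received on topic", pub.TopicName)
		case err, ok := <-errs:
			if !ok { // closed once all comms routines have shut down
				errs = nil
				continue
			}
			fmt.Println("comms error:", err)
		}
	}
}

func main() {
	in := make(chan *packets.PublishPacket)
	errs := make(chan error)
	close(in)
	close(errs)
	drainComms(in, errs) // returns immediately: both channels are already closed
}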

@ -40,10 +40,14 @@ import (
func openConnection(uri *url.URL, tlsc *tls.Config, timeout time.Duration, headers http.Header, websocketOptions *WebsocketOptions, dialer *net.Dialer) (net.Conn, error) { func openConnection(uri *url.URL, tlsc *tls.Config, timeout time.Duration, headers http.Header, websocketOptions *WebsocketOptions, dialer *net.Dialer) (net.Conn, error) {
switch uri.Scheme { switch uri.Scheme {
case "ws": case "ws":
conn, err := NewWebsocket(uri.String(), nil, timeout, headers, websocketOptions) dialURI := *uri // #623 - Gorilla Websockets does not accept URL's where uri.User != nil
dialURI.User = nil
conn, err := NewWebsocket(dialURI.String(), nil, timeout, headers, websocketOptions)
return conn, err return conn, err
case "wss": case "wss":
conn, err := NewWebsocket(uri.String(), tlsc, timeout, headers, websocketOptions) dialURI := *uri // #623 - Gorilla Websockets does not accept URL's where uri.User != nil
dialURI.User = nil
conn, err := NewWebsocket(dialURI.String(), tlsc, timeout, headers, websocketOptions)
return conn, err return conn, err
case "mqtt", "tcp": case "mqtt", "tcp":
allProxy := os.Getenv("all_proxy") allProxy := os.Getenv("all_proxy")

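The ws/wss change above works around gorilla/websocket rejecting URLs that carry userinfo (#623) by dialing with a copy of the URL whose credentials have been stripped, leaving the original intact for the MQTT CONNECT. A small standalone sketch of the same idea; the broker address is made up:

package main

import (
	"fmt"
	"net/url"
)

// stripUserInfo returns a copy of the URL with the userinfo removed; the
// caller's URL keeps its credentials untouched.
func stripUserInfo(u *url.URL) *url.URL {
	dialURI := *u      // shallow copy of the URL value
	dialURI.User = nil // drop user:password from the copy only
	return &dialURI
}

func main() {
	u, _ := url.Parse("ws://user:secret@broker.example.com/mqtt")
	fmt.Println(stripUserInfo(u)) // ws://broker.example.com/mqtt
	fmt.Println(u)                // the original still carries the credentials
}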

@ -104,6 +104,7 @@ type ClientOptions struct {
MaxResumePubInFlight int // // 0 = no limit; otherwise this is the maximum simultaneous messages sent while resuming MaxResumePubInFlight int // // 0 = no limit; otherwise this is the maximum simultaneous messages sent while resuming
Dialer *net.Dialer Dialer *net.Dialer
CustomOpenConnectionFn OpenConnectionFunc CustomOpenConnectionFn OpenConnectionFunc
AutoAckDisabled bool
} }
// NewClientOptions will create a new ClientClientOptions type with some // NewClientOptions will create a new ClientClientOptions type with some
@ -147,6 +148,7 @@ func NewClientOptions() *ClientOptions {
WebsocketOptions: &WebsocketOptions{}, WebsocketOptions: &WebsocketOptions{},
Dialer: &net.Dialer{Timeout: 30 * time.Second}, Dialer: &net.Dialer{Timeout: 30 * time.Second},
CustomOpenConnectionFn: nil, CustomOpenConnectionFn: nil,
AutoAckDisabled: false,
} }
return o return o
} }
@ -446,3 +448,10 @@ func (o *ClientOptions) SetCustomOpenConnectionFn(customOpenConnectionFn OpenCon
} }
return o return o
} }
// SetAutoAckDisabled enables or disables the Automated Acking of Messages received by the handler.
// By default it is set to false. Setting it to true will disable the auto-ack globally.
func (o *ClientOptions) SetAutoAckDisabled(autoAckDisabled bool) *ClientOptions {
o.AutoAckDisabled = autoAckDisabled
return o
}
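
A sketch of how this new option and the router changes further below fit together: with auto-ack disabled, the message handler becomes responsible for calling Ack() once processing has actually succeeded. The broker address and topic are placeholders, and error handling is kept minimal:

package main

import (
	"fmt"

	mqtt "github.com/eclipse/paho.mqtt.golang"
)

func main() {
	opts := mqtt.NewClientOptions().
		AddBroker("tcp://broker.example.com:1883").
		SetAutoAckDisabled(true) // handlers must now acknowledge explicitly

	client := mqtt.NewClient(opts)
	if token := client.Connect(); token.Wait() && token.Error() != nil {
		panic(token.Error())
	}

	client.Subscribe("sensors/#", 1, func(_ mqtt.Client, msg mqtt.Message) {
		fmt.Printf("processing %s\n", msg.Topic())
		msg.Ack() // acknowledge only once the message has really been handled
	})

	select {} // block so the subscription stays alive
}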


@ -32,16 +32,16 @@ import (
func keepalive(c *client, conn io.Writer) { func keepalive(c *client, conn io.Writer) {
defer c.workers.Done() defer c.workers.Done()
DEBUG.Println(PNG, "keepalive starting") DEBUG.Println(PNG, "keepalive starting")
var checkInterval int64 var checkInterval time.Duration
var pingSent time.Time var pingSent time.Time
if c.options.KeepAlive > 10 { if c.options.KeepAlive > 10 {
checkInterval = 5 checkInterval = 5 * time.Second
} else { } else {
checkInterval = c.options.KeepAlive / 2 checkInterval = time.Duration(c.options.KeepAlive) * time.Second / 2
} }
intervalTicker := time.NewTicker(time.Duration(checkInterval * int64(time.Second))) intervalTicker := time.NewTicker(checkInterval)
defer intervalTicker.Stop() defer intervalTicker.Stop()
for { for {
@ -58,8 +58,8 @@ func keepalive(c *client, conn io.Writer) {
if atomic.LoadInt32(&c.pingOutstanding) == 0 { if atomic.LoadInt32(&c.pingOutstanding) == 0 {
DEBUG.Println(PNG, "keepalive sending ping") DEBUG.Println(PNG, "keepalive sending ping")
ping := packets.NewControlPacket(packets.Pingreq).(*packets.PingreqPacket) ping := packets.NewControlPacket(packets.Pingreq).(*packets.PingreqPacket)
// We don't want to wait behind large messages being sent, the Write call // We don't want to wait behind large messages being sent, the `Write` call
// will block until it it able to send the packet. // will block until it is able to send the packet.
atomic.StoreInt32(&c.pingOutstanding, 1) atomic.StoreInt32(&c.pingOutstanding, 1)
if err := ping.Write(conn); err != nil { if err := ping.Write(conn); err != nil {
ERROR.Println(PNG, err) ERROR.Println(PNG, err)

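The keepalive rewrite above performs its interval arithmetic on time.Duration instead of int64 seconds. A tiny sketch of why that matters for small keep-alive values (3 seconds is just an example):

package main

import (
	"fmt"
	"time"
)

func main() {
	keepAlive := int64(3) // seconds, as stored in ClientOptions.KeepAlive

	oldInterval := time.Duration((keepAlive / 2) * int64(time.Second)) // integer division first
	newInterval := time.Duration(keepAlive) * time.Second / 2          // divide the Duration itself

	fmt.Println(oldInterval) // 1s: the half second is lost to truncation
	fmt.Println(newInterval) // 1.5s
}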

@ -186,7 +186,9 @@ func (r *router) matchAndDispatch(messages <-chan *packets.PublishPacket, order
wg.Add(1) wg.Add(1)
go func() { go func() {
hd(client, m) hd(client, m)
m.Ack() if !client.options.AutoAckDisabled {
m.Ack()
}
wg.Done() wg.Done()
}() }()
} }
@ -201,7 +203,9 @@ func (r *router) matchAndDispatch(messages <-chan *packets.PublishPacket, order
wg.Add(1) wg.Add(1)
go func() { go func() {
r.defaultHandler(client, m) r.defaultHandler(client, m)
m.Ack() if !client.options.AutoAckDisabled {
m.Ack()
}
wg.Done() wg.Done()
}() }()
} }
@ -212,7 +216,9 @@ func (r *router) matchAndDispatch(messages <-chan *packets.PublishPacket, order
r.RUnlock() r.RUnlock()
for _, handler := range handlers { for _, handler := range handlers {
handler(client, m) handler(client, m)
m.Ack() if !client.options.AutoAckDisabled {
m.Ack()
}
} }
// DEBUG.Println(ROU, "matchAndDispatch handled message") // DEBUG.Println(ROU, "matchAndDispatch handled message")
} }

vendor/github.com/eclipse/paho.mqtt.golang/status.go generated vendored Normal file

@ -0,0 +1,296 @@
/*
* Copyright (c) 2021 IBM Corp and others.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v2.0
* and Eclipse Distribution License v1.0 which accompany this distribution.
*
* The Eclipse Public License is available at
* https://www.eclipse.org/legal/epl-2.0/
* and the Eclipse Distribution License is available at
* http://www.eclipse.org/org/documents/edl-v10.php.
*
* Contributors:
* Seth Hoenig
* Allan Stockdill-Mander
* Mike Robertson
* Matt Brittan
*/
package mqtt
import (
"errors"
"sync"
)
// Status - Manage the connection status
// Multiple go routines will want to access/set this. Previously status was implemented as a `uint32` and updated
// with a mixture of atomic functions and a mutex (leading to some deadlock type issues that were very hard to debug).
// In this new implementation `connectionStatus` takes over managing the state and provides functions that allow the
// client to request a move to a particular state (it may reject these requests!). In some cases the 'state' is
// transitory, for example `connecting`, in those cases a function will be returned that allows the client to move
// to a more static state (`disconnected` or `connected`).
// This "belts-and-braces" may be a little over the top but issues with the status have caused a number of difficult
// to trace bugs in the past and the likelihood that introducing a new system would introduce bugs seemed high!
// I have written this in a way that should make it very difficult to misuse it (but it does make things a little
// complex with functions returning functions that return functions!).
type status uint32
const (
disconnected status = iota // default (nil) status is disconnected
disconnecting // Transitioning from one of the below states back to disconnected
connecting
reconnecting
connected
)
// String simplify output of statuses
func (s status) String() string {
switch s {
case disconnected:
return "disconnected"
case disconnecting:
return "disconnecting"
case connecting:
return "connecting"
case reconnecting:
return "reconnecting"
case connected:
return "connected"
default:
return "invalid"
}
}
type connCompletedFn func(success bool) error
type disconnectCompletedFn func()
type connectionLostHandledFn func(bool) (connCompletedFn, error)
/* State transitions
static states are `disconnected` and `connected`. For all other states a process will hold a function that will move
the state to one of those. That function effectively owns the state and any other changes must not proceed until it
completes. One exception to that is that the state can always be moved to `disconnecting` which provides a signal that
transitions to `connected` will be rejected (this is required because a Disconnect can be requested while in the
Connecting state).
# Basic Operations
The standard workflows are:
disconnected -> `Connecting()` -> connecting -> `connCompletedFn(true)` -> connected
connected -> `Disconnecting()` -> disconnecting -> `disconnectCompletedFn()` -> disconnected
connected -> `ConnectionLost(false)` -> disconnecting -> `connectionLostHandledFn(true/false)` -> disconnected
connected -> `ConnectionLost(true)` -> disconnecting -> `connectionLostHandledFn(true)` -> connected
Unfortunately the above workflows are complicated by the fact that `Disconnecting()` or `ConnectionLost()` may,
potentially, be called at any time (i.e. whilst in the middle of transitioning between states). If this happens:
* The state will be set to disconnecting (which will prevent any request to move the status to connected)
* The call to `Disconnecting()`/`ConnectionLost()` will block until the previously active call completes and then
handle the disconnection.
Reading the tests (unit_status_test.go) might help understand these rules.
*/
var (
errAbortConnection = errors.New("disconnect called whist connection attempt in progress")
errAlreadyConnectedOrReconnecting = errors.New("status is already connected or reconnecting")
errStatusMustBeDisconnected = errors.New("status can only transition to connecting from disconnected")
errAlreadyDisconnected = errors.New("status is already disconnected")
errDisconnectionRequested = errors.New("disconnection was requested whilst the action was in progress")
errDisconnectionInProgress = errors.New("disconnection already in progress")
errAlreadyHandlingConnectionLoss = errors.New("status is already Connection Lost")
errConnLossWhileDisconnecting = errors.New("connection status is disconnecting so loss of connection is expected")
)
// connectionStatus encapsulates, and protects, the connection status.
type connectionStatus struct {
sync.RWMutex // Protects the variables below
status status
willReconnect bool // only used when status == disconnecting. Indicates that an attempt will be made to reconnect (allows us to abort that)
// Some statuses are transitional (e.g. connecting, connectionLost, reconnecting, disconnecting), that is, whatever
// process moves us into that status will move us out of it when an action is complete. Sometimes other users
// will need to know when the action is complete (e.g. the user calls `Disconnect()` whilst the status is
// `connecting`). `actionCompleted` will be set whenever we move into one of the above statues and the channel
// returned to anything else requesting a status change. The channel will be closed when the operation is complete.
actionCompleted chan struct{} // Only valid whilst status is Connecting or Reconnecting; will be closed when connection completed (success or failure)
}
// ConnectionStatus returns the connection status.
// WARNING: the status may change at any time so users should not assume they are the only goroutine touching this
func (c *connectionStatus) ConnectionStatus() status {
c.RLock()
defer c.RUnlock()
return c.status
}
// ConnectionStatusRetry returns the connection status and retry flag (indicates that we expect to reconnect).
// WARNING: the status may change at any time so users should not assume they are the only goroutine touching this
func (c *connectionStatus) ConnectionStatusRetry() (status, bool) {
c.RLock()
defer c.RUnlock()
return c.status, c.willReconnect
}
// Connecting - Changes the status to connecting if that is a permitted operation
// Will do nothing unless the current status is disconnected
// Returns a function that MUST be called when the operation is complete (pass in true if successful)
func (c *connectionStatus) Connecting() (connCompletedFn, error) {
c.Lock()
defer c.Unlock()
// Calling Connect when already connecting (or if reconnecting) may not always be considered an error
if c.status == connected || c.status == reconnecting {
return nil, errAlreadyConnectedOrReconnecting
}
if c.status != disconnected {
return nil, errStatusMustBeDisconnected
}
c.status = connecting
c.actionCompleted = make(chan struct{})
return c.connected, nil
}
// connected is an internal function (it is returned by functions that set the status to connecting or reconnecting,
// calling it completes the operation). `success` is used to indicate whether the operation was successfully completed.
func (c *connectionStatus) connected(success bool) error {
c.Lock()
defer func() {
close(c.actionCompleted) // Alert anything waiting on the connection process to complete
c.actionCompleted = nil // Be tidy
c.Unlock()
}()
// Status may have moved to disconnecting in the interim (i.e. at users request)
if c.status == disconnecting {
return errAbortConnection
}
if success {
c.status = connected
} else {
c.status = disconnected
}
return nil
}
// Disconnecting - should be called when beginning the disconnection process (cleanup etc.).
// Can be called from ANY status and the end result will always be a status of disconnected
// Note that if a connection/reconnection attempt is in progress this function will set the status to `disconnecting`
// then block until the connection process completes (or aborts).
// Returns a function that MUST be called when the operation is complete (assumed to always be successful!)
func (c *connectionStatus) Disconnecting() (disconnectCompletedFn, error) {
c.Lock()
if c.status == disconnected {
c.Unlock()
return nil, errAlreadyDisconnected // May not always be treated as an error
}
if c.status == disconnecting { // Need to wait for existing process to complete
c.willReconnect = false // Ensure that the existing disconnect process will not reconnect
disConnectDone := c.actionCompleted
c.Unlock()
<-disConnectDone // Wait for existing operation to complete
return nil, errAlreadyDisconnected // Well we are now!
}
prevStatus := c.status
c.status = disconnecting
// We may need to wait for connection/reconnection process to complete (they should regularly check the status)
if prevStatus == connecting || prevStatus == reconnecting {
connectDone := c.actionCompleted
c.Unlock() // Safe because the only way to leave the disconnecting status is via this function
<-connectDone
if prevStatus == reconnecting && !c.willReconnect {
return nil, errAlreadyDisconnected // Following connectionLost process we will be disconnected
}
c.Lock()
}
c.actionCompleted = make(chan struct{})
c.Unlock()
return c.disconnectionCompleted, nil
}
// disconnectionCompleted is an internal function (it is returned by functions that set the status to disconnecting)
func (c *connectionStatus) disconnectionCompleted() {
c.Lock()
defer c.Unlock()
c.status = disconnected
close(c.actionCompleted) // Alert anything waiting on the connection process to complete
c.actionCompleted = nil
}
// ConnectionLost - should be called when the connection is lost.
// This really only differs from Disconnecting in that we may transition into a reconnection (but that could be
// cancelled something else calls Disconnecting in the meantime).
// The returned function should be called when cleanup is completed. It will return a function to be called when
// reconnect completes (or nil if no reconnect requested/disconnect called in the interim).
// Note: This function may block if a connection is in progress (the move to connected will be rejected)
func (c *connectionStatus) ConnectionLost(willReconnect bool) (connectionLostHandledFn, error) {
c.Lock()
defer c.Unlock()
if c.status == disconnected {
return nil, errAlreadyDisconnected
}
if c.status == disconnecting { // its expected that connection lost will be called during the disconnection process
return nil, errDisconnectionInProgress
}
c.willReconnect = willReconnect
prevStatus := c.status
c.status = disconnecting
// There is a slight possibility that a connection attempt is in progress (connection up and goroutines started but
// status not yet changed). By changing the status we ensure that process will exit cleanly
if prevStatus == connecting || prevStatus == reconnecting {
connectDone := c.actionCompleted
c.Unlock() // Safe because the only way to leave the disconnecting status is via this function
<-connectDone
c.Lock()
if !willReconnect {
// In this case the connection will always be aborted so there is nothing more for us to do
return nil, errAlreadyDisconnected
}
}
c.actionCompleted = make(chan struct{})
return c.getConnectionLostHandler(willReconnect), nil
}
// getConnectionLostHandler is an internal function. It returns the function to be returned by ConnectionLost
func (c *connectionStatus) getConnectionLostHandler(reconnectRequested bool) connectionLostHandledFn {
return func(proceed bool) (connCompletedFn, error) {
// Note that connCompletedFn will only be provided if both reconnectRequested and proceed are true
c.Lock()
defer c.Unlock()
// `Disconnecting()` may have been called while the disconnection was being processed (this makes it permanent!)
if !c.willReconnect || !proceed {
c.status = disconnected
close(c.actionCompleted) // Alert anything waiting on the connection process to complete
c.actionCompleted = nil
if !reconnectRequested || !proceed {
return nil, nil
}
return nil, errDisconnectionRequested
}
c.status = reconnecting
return c.connected, nil // Note that c.actionCompleted is still live and will be closed in connected
}
}
// forceConnectionStatus - forces the connection status to the specified value.
// This should only be used when there is no alternative (i.e. only in tests and to recover from situations that
// are unexpected)
func (c *connectionStatus) forceConnectionStatus(s status) {
c.Lock()
defer c.Unlock()
c.status = s
}
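
The happy-path transitions documented in the comment block of this new file can be exercised roughly as follows. This is only a sketch, written as if it lived inside the mqtt package, since the type and its methods are unexported:

// exampleStatusWorkflow walks the standard workflows from the comment above:
// disconnected -> connecting -> connected, then connected -> disconnecting -> disconnected.
func exampleStatusWorkflow() error {
	var c connectionStatus // zero value: status is disconnected

	connCompleted, err := c.Connecting() // request the move to connecting
	if err != nil {
		return err
	}
	if err := connCompleted(true); err != nil { // report success: now connected
		return err
	}

	disconnectCompleted, err := c.Disconnecting() // connected -> disconnecting
	if err != nil {
		return err
	}
	disconnectCompleted() // cleanup finished: back to disconnected
	return nil
}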


@ -6,6 +6,13 @@
Gorilla WebSocket is a [Go](http://golang.org/) implementation of the Gorilla WebSocket is a [Go](http://golang.org/) implementation of the
[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol.
---
⚠️ **[The Gorilla WebSocket Package is looking for a new maintainer](https://github.com/gorilla/websocket/issues/370)**
---
### Documentation ### Documentation
* [API Reference](https://pkg.go.dev/github.com/gorilla/websocket?tab=doc) * [API Reference](https://pkg.go.dev/github.com/gorilla/websocket?tab=doc)
@ -30,35 +37,3 @@ The Gorilla WebSocket package passes the server tests in the [Autobahn Test
Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn
subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn).
### Gorilla WebSocket compared with other packages
<table>
<tr>
<th></th>
<th><a href="http://godoc.org/github.com/gorilla/websocket">github.com/gorilla</a></th>
<th><a href="http://godoc.org/golang.org/x/net/websocket">golang.org/x/net</a></th>
</tr>
<tr>
<tr><td colspan="3"><a href="http://tools.ietf.org/html/rfc6455">RFC 6455</a> Features</td></tr>
<tr><td>Passes <a href="https://github.com/crossbario/autobahn-testsuite">Autobahn Test Suite</a></td><td><a href="https://github.com/gorilla/websocket/tree/master/examples/autobahn">Yes</a></td><td>No</td></tr>
<tr><td>Receive <a href="https://tools.ietf.org/html/rfc6455#section-5.4">fragmented</a> message<td>Yes</td><td><a href="https://code.google.com/p/go/issues/detail?id=7632">No</a>, see note 1</td></tr>
<tr><td>Send <a href="https://tools.ietf.org/html/rfc6455#section-5.5.1">close</a> message</td><td><a href="http://godoc.org/github.com/gorilla/websocket#hdr-Control_Messages">Yes</a></td><td><a href="https://code.google.com/p/go/issues/detail?id=4588">No</a></td></tr>
<tr><td>Send <a href="https://tools.ietf.org/html/rfc6455#section-5.5.2">pings</a> and receive <a href="https://tools.ietf.org/html/rfc6455#section-5.5.3">pongs</a></td><td><a href="http://godoc.org/github.com/gorilla/websocket#hdr-Control_Messages">Yes</a></td><td>No</td></tr>
<tr><td>Get the <a href="https://tools.ietf.org/html/rfc6455#section-5.6">type</a> of a received data message</td><td>Yes</td><td>Yes, see note 2</td></tr>
<tr><td colspan="3">Other Features</tr></td>
<tr><td><a href="https://tools.ietf.org/html/rfc7692">Compression Extensions</a></td><td>Experimental</td><td>No</td></tr>
<tr><td>Read message using io.Reader</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextReader">Yes</a></td><td>No, see note 3</td></tr>
<tr><td>Write message using io.WriteCloser</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextWriter">Yes</a></td><td>No, see note 3</td></tr>
</table>
Notes:
1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html).
2. The application can get the type of a received data message by implementing
a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal)
function.
3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries.
Read returns when the input buffer is full or a frame boundary is
encountered. Each call to Write sends a single frame message. The Gorilla
io.Reader and io.WriteCloser operate on a single WebSocket message.


@ -48,15 +48,23 @@ func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufS
} }
// A Dialer contains options for connecting to WebSocket server. // A Dialer contains options for connecting to WebSocket server.
//
// It is safe to call Dialer's methods concurrently.
type Dialer struct { type Dialer struct {
// NetDial specifies the dial function for creating TCP connections. If // NetDial specifies the dial function for creating TCP connections. If
// NetDial is nil, net.Dial is used. // NetDial is nil, net.Dial is used.
NetDial func(network, addr string) (net.Conn, error) NetDial func(network, addr string) (net.Conn, error)
// NetDialContext specifies the dial function for creating TCP connections. If // NetDialContext specifies the dial function for creating TCP connections. If
// NetDialContext is nil, net.DialContext is used. // NetDialContext is nil, NetDial is used.
NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error) NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error)
// NetDialTLSContext specifies the dial function for creating TLS/TCP connections. If
// NetDialTLSContext is nil, NetDialContext is used.
// If NetDialTLSContext is set, Dial assumes the TLS handshake is done there and
// TLSClientConfig is ignored.
NetDialTLSContext func(ctx context.Context, network, addr string) (net.Conn, error)
// Proxy specifies a function to return a proxy for a given // Proxy specifies a function to return a proxy for a given
// Request. If the function returns a non-nil error, the // Request. If the function returns a non-nil error, the
// request is aborted with the provided error. // request is aborted with the provided error.
@ -65,6 +73,8 @@ type Dialer struct {
// TLSClientConfig specifies the TLS configuration to use with tls.Client. // TLSClientConfig specifies the TLS configuration to use with tls.Client.
// If nil, the default configuration is used. // If nil, the default configuration is used.
// If either NetDialTLS or NetDialTLSContext are set, Dial assumes the TLS handshake
// is done there and TLSClientConfig is ignored.
TLSClientConfig *tls.Config TLSClientConfig *tls.Config
// HandshakeTimeout specifies the duration for the handshake to complete. // HandshakeTimeout specifies the duration for the handshake to complete.
@ -176,7 +186,7 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h
} }
req := &http.Request{ req := &http.Request{
Method: "GET", Method: http.MethodGet,
URL: u, URL: u,
Proto: "HTTP/1.1", Proto: "HTTP/1.1",
ProtoMajor: 1, ProtoMajor: 1,
@ -237,13 +247,32 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h
// Get network dial function. // Get network dial function.
var netDial func(network, add string) (net.Conn, error) var netDial func(network, add string) (net.Conn, error)
if d.NetDialContext != nil { switch u.Scheme {
netDial = func(network, addr string) (net.Conn, error) { case "http":
return d.NetDialContext(ctx, network, addr) if d.NetDialContext != nil {
netDial = func(network, addr string) (net.Conn, error) {
return d.NetDialContext(ctx, network, addr)
}
} else if d.NetDial != nil {
netDial = d.NetDial
} }
} else if d.NetDial != nil { case "https":
netDial = d.NetDial if d.NetDialTLSContext != nil {
} else { netDial = func(network, addr string) (net.Conn, error) {
return d.NetDialTLSContext(ctx, network, addr)
}
} else if d.NetDialContext != nil {
netDial = func(network, addr string) (net.Conn, error) {
return d.NetDialContext(ctx, network, addr)
}
} else if d.NetDial != nil {
netDial = d.NetDial
}
default:
return nil, nil, errMalformedURL
}
if netDial == nil {
netDialer := &net.Dialer{} netDialer := &net.Dialer{}
netDial = func(network, addr string) (net.Conn, error) { netDial = func(network, addr string) (net.Conn, error) {
return netDialer.DialContext(ctx, network, addr) return netDialer.DialContext(ctx, network, addr)
@ -304,7 +333,9 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h
} }
}() }()
if u.Scheme == "https" { if u.Scheme == "https" && d.NetDialTLSContext == nil {
// If NetDialTLSContext is set, assume that the TLS handshake has already been done
cfg := cloneTLSConfig(d.TLSClientConfig) cfg := cloneTLSConfig(d.TLSClientConfig)
if cfg.ServerName == "" { if cfg.ServerName == "" {
cfg.ServerName = hostNoPort cfg.ServerName = hostNoPort
@ -312,11 +343,12 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h
tlsConn := tls.Client(netConn, cfg) tlsConn := tls.Client(netConn, cfg)
netConn = tlsConn netConn = tlsConn
var err error if trace != nil && trace.TLSHandshakeStart != nil {
if trace != nil { trace.TLSHandshakeStart()
err = doHandshakeWithTrace(trace, tlsConn, cfg) }
} else { err := doHandshake(ctx, tlsConn, cfg)
err = doHandshake(tlsConn, cfg) if trace != nil && trace.TLSHandshakeDone != nil {
trace.TLSHandshakeDone(tlsConn.ConnectionState(), err)
} }
if err != nil { if err != nil {
@ -348,8 +380,8 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h
} }
if resp.StatusCode != 101 || if resp.StatusCode != 101 ||
!strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") || !tokenListContainsValue(resp.Header, "Upgrade", "websocket") ||
!strings.EqualFold(resp.Header.Get("Connection"), "upgrade") || !tokenListContainsValue(resp.Header, "Connection", "upgrade") ||
resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) { resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) {
// Before closing the network connection on return from this // Before closing the network connection on return from this
// function, slurp up some of the response to aid application // function, slurp up some of the response to aid application
@ -382,14 +414,9 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h
return conn, resp, nil return conn, resp, nil
} }
func doHandshake(tlsConn *tls.Conn, cfg *tls.Config) error { func cloneTLSConfig(cfg *tls.Config) *tls.Config {
if err := tlsConn.Handshake(); err != nil { if cfg == nil {
return err return &tls.Config{}
} }
if !cfg.InsecureSkipVerify { return cfg.Clone()
if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
return err
}
}
return nil
} }
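
The new NetDialTLSContext hook shown above lets the caller own the TLS handshake: when it is set, DialContext assumes the returned connection is already TLS-wrapped, skips its own handshake and ignores TLSClientConfig. A minimal sketch of wiring it up; the endpoint is made up:

package main

import (
	"context"
	"crypto/tls"
	"net"

	"github.com/gorilla/websocket"
)

// newDialer returns a Dialer whose NetDialTLSContext performs the TLS
// handshake itself, so the websocket dialer does no TLS work of its own.
func newDialer(cfg *tls.Config) *websocket.Dialer {
	return &websocket.Dialer{
		NetDialTLSContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
			d := &tls.Dialer{Config: cfg}
			return d.DialContext(ctx, network, addr)
		},
	}
}

func main() {
	d := newDialer(&tls.Config{MinVersion: tls.VersionTLS12})
	conn, _, err := d.Dial("wss://echo.example.com/ws", nil)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}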


@ -1,16 +0,0 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.8
package websocket
import "crypto/tls"
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
if cfg == nil {
return &tls.Config{}
}
return cfg.Clone()
}


@ -1,38 +0,0 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.8
package websocket
import "crypto/tls"
// cloneTLSConfig clones all public fields except the fields
// SessionTicketsDisabled and SessionTicketKey. This avoids copying the
// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a
// config in active use.
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
if cfg == nil {
return &tls.Config{}
}
return &tls.Config{
Rand: cfg.Rand,
Time: cfg.Time,
Certificates: cfg.Certificates,
NameToCertificate: cfg.NameToCertificate,
GetCertificate: cfg.GetCertificate,
RootCAs: cfg.RootCAs,
NextProtos: cfg.NextProtos,
ServerName: cfg.ServerName,
ClientAuth: cfg.ClientAuth,
ClientCAs: cfg.ClientCAs,
InsecureSkipVerify: cfg.InsecureSkipVerify,
CipherSuites: cfg.CipherSuites,
PreferServerCipherSuites: cfg.PreferServerCipherSuites,
ClientSessionCache: cfg.ClientSessionCache,
MinVersion: cfg.MinVersion,
MaxVersion: cfg.MaxVersion,
CurvePreferences: cfg.CurvePreferences,
}
}


@ -13,6 +13,7 @@ import (
"math/rand" "math/rand"
"net" "net"
"strconv" "strconv"
"strings"
"sync" "sync"
"time" "time"
"unicode/utf8" "unicode/utf8"
@ -401,6 +402,12 @@ func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error
return nil return nil
} }
func (c *Conn) writeBufs(bufs ...[]byte) error {
b := net.Buffers(bufs)
_, err := b.WriteTo(c.conn)
return err
}
// WriteControl writes a control message with the given deadline. The allowed // WriteControl writes a control message with the given deadline. The allowed
// message types are CloseMessage, PingMessage and PongMessage. // message types are CloseMessage, PingMessage and PongMessage.
func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error { func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error {
@ -794,47 +801,69 @@ func (c *Conn) advanceFrame() (int, error) {
} }
// 2. Read and parse first two bytes of frame header. // 2. Read and parse first two bytes of frame header.
// To aid debugging, collect and report all errors in the first two bytes
// of the header.
var errors []string
p, err := c.read(2) p, err := c.read(2)
if err != nil { if err != nil {
return noFrame, err return noFrame, err
} }
final := p[0]&finalBit != 0
frameType := int(p[0] & 0xf) frameType := int(p[0] & 0xf)
final := p[0]&finalBit != 0
rsv1 := p[0]&rsv1Bit != 0
rsv2 := p[0]&rsv2Bit != 0
rsv3 := p[0]&rsv3Bit != 0
mask := p[1]&maskBit != 0 mask := p[1]&maskBit != 0
c.setReadRemaining(int64(p[1] & 0x7f)) c.setReadRemaining(int64(p[1] & 0x7f))
c.readDecompress = false c.readDecompress = false
if c.newDecompressionReader != nil && (p[0]&rsv1Bit) != 0 { if rsv1 {
c.readDecompress = true if c.newDecompressionReader != nil {
p[0] &^= rsv1Bit c.readDecompress = true
} else {
errors = append(errors, "RSV1 set")
}
} }
if rsv := p[0] & (rsv1Bit | rsv2Bit | rsv3Bit); rsv != 0 { if rsv2 {
return noFrame, c.handleProtocolError("unexpected reserved bits 0x" + strconv.FormatInt(int64(rsv), 16)) errors = append(errors, "RSV2 set")
}
if rsv3 {
errors = append(errors, "RSV3 set")
} }
switch frameType { switch frameType {
case CloseMessage, PingMessage, PongMessage: case CloseMessage, PingMessage, PongMessage:
if c.readRemaining > maxControlFramePayloadSize { if c.readRemaining > maxControlFramePayloadSize {
return noFrame, c.handleProtocolError("control frame length > 125") errors = append(errors, "len > 125 for control")
} }
if !final { if !final {
return noFrame, c.handleProtocolError("control frame not final") errors = append(errors, "FIN not set on control")
} }
case TextMessage, BinaryMessage: case TextMessage, BinaryMessage:
if !c.readFinal { if !c.readFinal {
return noFrame, c.handleProtocolError("message start before final message frame") errors = append(errors, "data before FIN")
} }
c.readFinal = final c.readFinal = final
case continuationFrame: case continuationFrame:
if c.readFinal { if c.readFinal {
return noFrame, c.handleProtocolError("continuation after final message frame") errors = append(errors, "continuation after FIN")
} }
c.readFinal = final c.readFinal = final
default: default:
return noFrame, c.handleProtocolError("unknown opcode " + strconv.Itoa(frameType)) errors = append(errors, "bad opcode "+strconv.Itoa(frameType))
}
if mask != c.isServer {
errors = append(errors, "bad MASK")
}
if len(errors) > 0 {
return noFrame, c.handleProtocolError(strings.Join(errors, ", "))
} }
// 3. Read and parse frame length as per // 3. Read and parse frame length as per
@ -872,10 +901,6 @@ func (c *Conn) advanceFrame() (int, error) {
// 4. Handle frame masking. // 4. Handle frame masking.
if mask != c.isServer {
return noFrame, c.handleProtocolError("incorrect mask flag")
}
if mask { if mask {
c.readMaskPos = 0 c.readMaskPos = 0
p, err := c.read(len(c.readMaskKey)) p, err := c.read(len(c.readMaskKey))
@ -935,7 +960,7 @@ func (c *Conn) advanceFrame() (int, error) {
if len(payload) >= 2 { if len(payload) >= 2 {
closeCode = int(binary.BigEndian.Uint16(payload)) closeCode = int(binary.BigEndian.Uint16(payload))
if !isValidReceivedCloseCode(closeCode) { if !isValidReceivedCloseCode(closeCode) {
return noFrame, c.handleProtocolError("invalid close code") return noFrame, c.handleProtocolError("bad close code " + strconv.Itoa(closeCode))
} }
closeText = string(payload[2:]) closeText = string(payload[2:])
if !utf8.ValidString(closeText) { if !utf8.ValidString(closeText) {
@ -952,7 +977,11 @@ func (c *Conn) advanceFrame() (int, error) {
} }
func (c *Conn) handleProtocolError(message string) error { func (c *Conn) handleProtocolError(message string) error {
c.WriteControl(CloseMessage, FormatCloseMessage(CloseProtocolError, message), time.Now().Add(writeWait)) data := FormatCloseMessage(CloseProtocolError, message)
if len(data) > maxControlFramePayloadSize {
data = data[:maxControlFramePayloadSize]
}
c.WriteControl(CloseMessage, data, time.Now().Add(writeWait))
return errors.New("websocket: " + message) return errors.New("websocket: " + message)
} }
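
handleProtocolError now truncates the close payload it generates: close frames are control frames, which RFC 6455 caps at 125 payload bytes, and the newly aggregated error strings could otherwise exceed that. A standalone illustration of the guard; the helper name is made up:

package main

import "fmt"

const maxControlFramePayloadSize = 125 // RFC 6455 limit for control frames, including close

// truncateClosePayload mirrors the guard added above: the generated close
// payload may never exceed the control-frame limit.
func truncateClosePayload(data []byte) []byte {
	if len(data) > maxControlFramePayloadSize {
		return data[:maxControlFramePayloadSize]
	}
	return data
}

func main() {
	long := make([]byte, 300)
	fmt.Println(len(truncateClosePayload(long))) // 125
}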


@ -1,15 +0,0 @@
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.8
package websocket
import "net"
func (c *Conn) writeBufs(bufs ...[]byte) error {
b := net.Buffers(bufs)
_, err := b.WriteTo(c.conn)
return err
}


@ -1,18 +0,0 @@
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.8
package websocket
func (c *Conn) writeBufs(bufs ...[]byte) error {
for _, buf := range bufs {
if len(buf) > 0 {
if _, err := c.conn.Write(buf); err != nil {
return err
}
}
}
return nil
}


@ -2,6 +2,7 @@
// this source code is governed by a BSD-style license that can be found in the // this source code is governed by a BSD-style license that can be found in the
// LICENSE file. // LICENSE file.
//go:build !appengine
// +build !appengine // +build !appengine
package websocket package websocket


@ -2,6 +2,7 @@
// this source code is governed by a BSD-style license that can be found in the // this source code is governed by a BSD-style license that can be found in the
// LICENSE file. // LICENSE file.
//go:build appengine
// +build appengine // +build appengine
package websocket package websocket


@ -48,7 +48,7 @@ func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error)
} }
connectReq := &http.Request{ connectReq := &http.Request{
Method: "CONNECT", Method: http.MethodConnect,
URL: &url.URL{Opaque: addr}, URL: &url.URL{Opaque: addr},
Host: addr, Host: addr,
Header: connectHeader, Header: connectHeader,


@ -23,6 +23,8 @@ func (e HandshakeError) Error() string { return e.message }
// Upgrader specifies parameters for upgrading an HTTP connection to a // Upgrader specifies parameters for upgrading an HTTP connection to a
// WebSocket connection. // WebSocket connection.
//
// It is safe to call Upgrader's methods concurrently.
type Upgrader struct { type Upgrader struct {
// HandshakeTimeout specifies the duration for the handshake to complete. // HandshakeTimeout specifies the duration for the handshake to complete.
HandshakeTimeout time.Duration HandshakeTimeout time.Duration
@ -115,8 +117,8 @@ func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header
// Upgrade upgrades the HTTP server connection to the WebSocket protocol. // Upgrade upgrades the HTTP server connection to the WebSocket protocol.
// //
// The responseHeader is included in the response to the client's upgrade // The responseHeader is included in the response to the client's upgrade
// request. Use the responseHeader to specify cookies (Set-Cookie) and the // request. Use the responseHeader to specify cookies (Set-Cookie). To specify
// application negotiated subprotocol (Sec-WebSocket-Protocol). // subprotocols supported by the server, set Upgrader.Subprotocols directly.
// //
// If the upgrade fails, then Upgrade replies to the client with an HTTP error // If the upgrade fails, then Upgrade replies to the client with an HTTP error
// response. // response.
@ -131,7 +133,7 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade
return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header") return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header")
} }
if r.Method != "GET" { if r.Method != http.MethodGet {
return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET") return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET")
} }

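The Upgrade documentation above now points servers at Upgrader.Subprotocols for subprotocol negotiation instead of the response header. A brief server-side sketch; the path and port are arbitrary:

package main

import (
	"log"
	"net/http"

	"github.com/gorilla/websocket"
)

var upgrader = websocket.Upgrader{
	Subprotocols: []string{"mqtt"}, // protocols this server is willing to speak
}

func wsHandler(w http.ResponseWriter, r *http.Request) {
	conn, err := upgrader.Upgrade(w, r, nil) // responseHeader stays useful for Set-Cookie
	if err != nil {
		log.Println("upgrade failed:", err)
		return
	}
	defer conn.Close()
	log.Println("negotiated subprotocol:", conn.Subprotocol())
}

func main() {
	http.HandleFunc("/ws", wsHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}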
vendor/github.com/gorilla/websocket/tls_handshake.go generated vendored Normal file

@ -0,0 +1,21 @@
//go:build go1.17
// +build go1.17
package websocket
import (
"context"
"crypto/tls"
)
func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error {
if err := tlsConn.HandshakeContext(ctx); err != nil {
return err
}
if !cfg.InsecureSkipVerify {
if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
return err
}
}
return nil
}


@ -0,0 +1,21 @@
//go:build !go1.17
// +build !go1.17
package websocket
import (
"context"
"crypto/tls"
)
func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error {
if err := tlsConn.Handshake(); err != nil {
return err
}
if !cfg.InsecureSkipVerify {
if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
return err
}
}
return nil
}


@ -1,19 +0,0 @@
// +build go1.8
package websocket
import (
"crypto/tls"
"net/http/httptrace"
)
func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error {
if trace.TLSHandshakeStart != nil {
trace.TLSHandshakeStart()
}
err := doHandshake(tlsConn, cfg)
if trace.TLSHandshakeDone != nil {
trace.TLSHandshakeDone(tlsConn.ConnectionState(), err)
}
return err
}


@ -1,12 +0,0 @@
// +build !go1.8
package websocket
import (
"crypto/tls"
"net/http/httptrace"
)
func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error {
return doHandshake(tlsConn, cfg)
}


@ -1,19 +0,0 @@
coverage:
range: 80..100
round: down
precision: 2
status:
project: # measuring the overall project coverage
default: # context, you can create multiple ones with custom titles
enabled: yes # must be yes|true to enable this status
target: 100 # specify the target coverage for each commit status
# option: "auto" (must increase from parent commit or pull request base)
# option: "X%" a static target percentage to hit
if_not_found: success # if parent is not found report status as success, error, or failure
if_ci_failed: error # if ci fails report status as success, error, or failure
# Also update COVER_IGNORE_PKGS in the Makefile.
ignore:
- /internal/gen-atomicint/
- /internal/gen-valuewrapper/

vendor/go.uber.org/atomic/.gitignore generated vendored

@ -1,12 +0,0 @@
/bin
.DS_Store
/vendor
cover.html
cover.out
lint.log
# Binaries
*.test
# Profiling output
*.prof


@ -1,27 +0,0 @@
sudo: false
language: go
go_import_path: go.uber.org/atomic
env:
global:
- GO111MODULE=on
matrix:
include:
- go: oldstable
- go: stable
env: LINT=1
cache:
directories:
- vendor
before_install:
- go version
script:
- test -z "$LINT" || make lint
- make cover
after_success:
- bash <(curl -s https://codecov.io/bash)


@ -1,76 +0,0 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [1.7.0] - 2020-09-14
### Added
- Support JSON serialization and deserialization of primitive atomic types.
- Support Text marshalling and unmarshalling for string atomics.
### Changed
- Disallow incorrect comparison of atomic values in a non-atomic way.
### Removed
- Remove dependency on `golang.org/x/{lint, tools}`.
## [1.6.0] - 2020-02-24
### Changed
- Drop library dependency on `golang.org/x/{lint, tools}`.
## [1.5.1] - 2019-11-19
- Fix bug where `Bool.CAS` and `Bool.Toggle` do work correctly together
causing `CAS` to fail even though the old value matches.
## [1.5.0] - 2019-10-29
### Changed
- With Go modules, only the `go.uber.org/atomic` import path is supported now.
If you need to use the old import path, please add a `replace` directive to
your `go.mod`.
## [1.4.0] - 2019-05-01
### Added
- Add `atomic.Error` type for atomic operations on `error` values.
## [1.3.2] - 2018-05-02
### Added
- Add `atomic.Duration` type for atomic operations on `time.Duration` values.
## [1.3.1] - 2017-11-14
### Fixed
- Revert optimization for `atomic.String.Store("")` which caused data races.
## [1.3.0] - 2017-11-13
### Added
- Add `atomic.Bool.CAS` for compare-and-swap semantics on bools.
### Changed
- Optimize `atomic.String.Store("")` by avoiding an allocation.
## [1.2.0] - 2017-04-12
### Added
- Shadow `atomic.Value` from `sync/atomic`.
## [1.1.0] - 2017-03-10
### Added
- Add atomic `Float64` type.
### Changed
- Support new `go.uber.org/atomic` import path.
## [1.0.0] - 2016-07-18
- Initial release.
[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0
[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0
[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1
[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0
[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0
[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2
[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1
[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0
[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0
[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0
[1.0.0]: https://github.com/uber-go/atomic/releases/tag/v1.0.0


@ -1,19 +0,0 @@
Copyright (c) 2016 Uber Technologies, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

vendor/go.uber.org/atomic/Makefile generated vendored

@ -1,78 +0,0 @@
# Directory to place `go install`ed binaries into.
export GOBIN ?= $(shell pwd)/bin
GOLINT = $(GOBIN)/golint
GEN_ATOMICINT = $(GOBIN)/gen-atomicint
GEN_ATOMICWRAPPER = $(GOBIN)/gen-atomicwrapper
STATICCHECK = $(GOBIN)/staticcheck
GO_FILES ?= $(shell find . '(' -path .git -o -path vendor ')' -prune -o -name '*.go' -print)
# Also update ignore section in .codecov.yml.
COVER_IGNORE_PKGS = \
go.uber.org/atomic/internal/gen-atomicint \
go.uber.org/atomic/internal/gen-atomicwrapper
.PHONY: build
build:
go build ./...
.PHONY: test
test:
go test -race ./...
.PHONY: gofmt
gofmt:
$(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX))
gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true
@[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" && cat $(FMT_LOG) && false)
$(GOLINT):
cd tools && go install golang.org/x/lint/golint
$(STATICCHECK):
cd tools && go install honnef.co/go/tools/cmd/staticcheck
$(GEN_ATOMICWRAPPER): $(wildcard ./internal/gen-atomicwrapper/*)
go build -o $@ ./internal/gen-atomicwrapper
$(GEN_ATOMICINT): $(wildcard ./internal/gen-atomicint/*)
go build -o $@ ./internal/gen-atomicint
.PHONY: golint
golint: $(GOLINT)
$(GOLINT) ./...
.PHONY: staticcheck
staticcheck: $(STATICCHECK)
$(STATICCHECK) ./...
.PHONY: lint
lint: gofmt golint staticcheck generatenodirty
# comma separated list of packages to consider for code coverage.
COVER_PKG = $(shell \
go list -find ./... | \
grep -v $(foreach pkg,$(COVER_IGNORE_PKGS),-e "^$(pkg)$$") | \
paste -sd, -)
.PHONY: cover
cover:
go test -coverprofile=cover.out -coverpkg $(COVER_PKG) -v ./...
go tool cover -html=cover.out -o cover.html
.PHONY: generate
generate: $(GEN_ATOMICINT) $(GEN_ATOMICWRAPPER)
go generate ./...
.PHONY: generatenodirty
generatenodirty:
@[ -z "$$(git status --porcelain)" ] || ( \
echo "Working tree is dirty. Commit your changes first."; \
exit 1 )
@make generate
@status=$$(git status --porcelain); \
[ -z "$$status" ] || ( \
echo "Working tree is dirty after `make generate`:"; \
echo "$$status"; \
echo "Please ensure that the generated code is up-to-date." )

vendor/go.uber.org/atomic/README.md generated vendored

@ -1,63 +0,0 @@
# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard]
Simple wrappers for primitive types to enforce atomic access.
## Installation
```shell
$ go get -u go.uber.org/atomic@v1
```
### Legacy Import Path
As of v1.5.0, the import path `go.uber.org/atomic` is the only supported way
of using this package. If you are using Go modules, this package will fail to
compile with the legacy import path path `github.com/uber-go/atomic`.
We recommend migrating your code to the new import path but if you're unable
to do so, or if your dependencies are still using the old import path, you
will have to add a `replace` directive to your `go.mod` file downgrading the
legacy import path to an older version.
```
replace github.com/uber-go/atomic => github.com/uber-go/atomic v1.4.0
```
You can do so automatically by running the following command.
```shell
$ go mod edit -replace github.com/uber-go/atomic=github.com/uber-go/atomic@v1.4.0
```
## Usage
The standard library's `sync/atomic` is powerful, but it's easy to forget which
variables must be accessed atomically. `go.uber.org/atomic` preserves all the
functionality of the standard library, but wraps the primitive types to
provide a safer, more convenient API.
```go
var atom atomic.Uint32
atom.Store(42)
atom.Sub(2)
atom.CAS(40, 11)
```
See the [documentation][doc] for a complete API specification.
## Development Status
Stable.
---
Released under the [MIT License](LICENSE.txt).
[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg
[doc]: https://godoc.org/go.uber.org/atomic
[ci-img]: https://travis-ci.com/uber-go/atomic.svg?branch=master
[ci]: https://travis-ci.com/uber-go/atomic
[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg
[cov]: https://codecov.io/gh/uber-go/atomic
[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic
[reportcard]: https://goreportcard.com/report/go.uber.org/atomic

vendor/go.uber.org/atomic/bool.go generated vendored

@ -1,81 +0,0 @@
// @generated Code generated by gen-atomicwrapper.
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package atomic
import (
"encoding/json"
)
// Bool is an atomic type-safe wrapper for bool values.
type Bool struct {
_ nocmp // disallow non-atomic comparison
v Uint32
}
var _zeroBool bool
// NewBool creates a new Bool.
func NewBool(v bool) *Bool {
x := &Bool{}
if v != _zeroBool {
x.Store(v)
}
return x
}
// Load atomically loads the wrapped bool.
func (x *Bool) Load() bool {
return truthy(x.v.Load())
}
// Store atomically stores the passed bool.
func (x *Bool) Store(v bool) {
x.v.Store(boolToInt(v))
}
// CAS is an atomic compare-and-swap for bool values.
func (x *Bool) CAS(o, n bool) bool {
return x.v.CAS(boolToInt(o), boolToInt(n))
}
// Swap atomically stores the given bool and returns the old
// value.
func (x *Bool) Swap(o bool) bool {
return truthy(x.v.Swap(boolToInt(o)))
}
// MarshalJSON encodes the wrapped bool into JSON.
func (x *Bool) MarshalJSON() ([]byte, error) {
return json.Marshal(x.Load())
}
// UnmarshalJSON decodes a bool from JSON.
func (x *Bool) UnmarshalJSON(b []byte) error {
var v bool
if err := json.Unmarshal(b, &v); err != nil {
return err
}
x.Store(v)
return nil
}


@ -1,82 +0,0 @@
// @generated Code generated by gen-atomicwrapper.
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package atomic
import (
"encoding/json"
"time"
)
// Duration is an atomic type-safe wrapper for time.Duration values.
type Duration struct {
_ nocmp // disallow non-atomic comparison
v Int64
}
var _zeroDuration time.Duration
// NewDuration creates a new Duration.
func NewDuration(v time.Duration) *Duration {
x := &Duration{}
if v != _zeroDuration {
x.Store(v)
}
return x
}
// Load atomically loads the wrapped time.Duration.
func (x *Duration) Load() time.Duration {
return time.Duration(x.v.Load())
}
// Store atomically stores the passed time.Duration.
func (x *Duration) Store(v time.Duration) {
x.v.Store(int64(v))
}
// CAS is an atomic compare-and-swap for time.Duration values.
func (x *Duration) CAS(o, n time.Duration) bool {
return x.v.CAS(int64(o), int64(n))
}
// Swap atomically stores the given time.Duration and returns the old
// value.
func (x *Duration) Swap(o time.Duration) time.Duration {
return time.Duration(x.v.Swap(int64(o)))
}
// MarshalJSON encodes the wrapped time.Duration into JSON.
func (x *Duration) MarshalJSON() ([]byte, error) {
return json.Marshal(x.Load())
}
// UnmarshalJSON decodes a time.Duration from JSON.
func (x *Duration) UnmarshalJSON(b []byte) error {
var v time.Duration
if err := json.Unmarshal(b, &v); err != nil {
return err
}
x.Store(v)
return nil
}

View File

@ -1,40 +0,0 @@
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package atomic
import "time"
//go:generate bin/gen-atomicwrapper -name=Duration -type=time.Duration -wrapped=Int64 -pack=int64 -unpack=time.Duration -cas -swap -json -imports time -file=duration.go
// Add atomically adds to the wrapped time.Duration and returns the new value.
func (d *Duration) Add(n time.Duration) time.Duration {
return time.Duration(d.v.Add(int64(n)))
}
// Sub atomically subtracts from the wrapped time.Duration and returns the new value.
func (d *Duration) Sub(n time.Duration) time.Duration {
return time.Duration(d.v.Sub(int64(n)))
}
// String encodes the wrapped value as a string.
func (d *Duration) String() string {
return d.Load().String()
}
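A short, hedged sketch of how the Duration wrapper and the Add/Sub/String extensions above are typically combined (the timeout values are illustrative):

```go
package main

import (
	"fmt"
	"time"

	"go.uber.org/atomic"
)

func main() {
	timeout := atomic.NewDuration(2 * time.Second)

	timeout.Add(500 * time.Millisecond) // now 2.5s
	timeout.Sub(time.Second)            // now 1.5s

	fmt.Println(timeout.Load())   // 1.5s
	fmt.Println(timeout.String()) // "1.5s"
}
```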

76
vendor/go.uber.org/atomic/float64.go generated vendored
View File

@ -1,76 +0,0 @@
// @generated Code generated by gen-atomicwrapper.
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package atomic
import (
"encoding/json"
"math"
)
// Float64 is an atomic type-safe wrapper for float64 values.
type Float64 struct {
_ nocmp // disallow non-atomic comparison
v Uint64
}
var _zeroFloat64 float64
// NewFloat64 creates a new Float64.
func NewFloat64(v float64) *Float64 {
x := &Float64{}
if v != _zeroFloat64 {
x.Store(v)
}
return x
}
// Load atomically loads the wrapped float64.
func (x *Float64) Load() float64 {
return math.Float64frombits(x.v.Load())
}
// Store atomically stores the passed float64.
func (x *Float64) Store(v float64) {
x.v.Store(math.Float64bits(v))
}
// CAS is an atomic compare-and-swap for float64 values.
func (x *Float64) CAS(o, n float64) bool {
return x.v.CAS(math.Float64bits(o), math.Float64bits(n))
}
// MarshalJSON encodes the wrapped float64 into JSON.
func (x *Float64) MarshalJSON() ([]byte, error) {
return json.Marshal(x.Load())
}
// UnmarshalJSON decodes a float64 from JSON.
func (x *Float64) UnmarshalJSON(b []byte) error {
var v float64
if err := json.Unmarshal(b, &v); err != nil {
return err
}
x.Store(v)
return nil
}

View File

@ -1,47 +0,0 @@
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package atomic
import "strconv"
//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -cas -json -imports math -file=float64.go
// Add atomically adds to the wrapped float64 and returns the new value.
func (f *Float64) Add(s float64) float64 {
for {
old := f.Load()
new := old + s
if f.CAS(old, new) {
return new
}
}
}
// Sub atomically subtracts from the wrapped float64 and returns the new value.
func (f *Float64) Sub(s float64) float64 {
return f.Add(-s)
}
// String encodes the wrapped value as a string.
func (f *Float64) String() string {
// 'g' is the behavior for floats with %v.
return strconv.FormatFloat(f.Load(), 'g', -1, 64)
}
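Float64.Add above cannot use a single atomic add instruction on the float bit pattern, so it loops on Load/CAS until the update lands. A minimal sketch of concurrent accumulation with this API (the goroutine count and increment are illustrative):

```go
package main

import (
	"fmt"
	"sync"

	"go.uber.org/atomic"
)

func main() {
	total := atomic.NewFloat64(0)

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				// Add retries internally via CAS, so concurrent updates are never lost.
				total.Add(0.25)
			}
		}()
	}
	wg.Wait()

	fmt.Println(total.Load()) // 1000
}
```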

26
vendor/go.uber.org/atomic/gen.go generated vendored
View File

@ -1,26 +0,0 @@
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package atomic
//go:generate bin/gen-atomicint -name=Int32 -wrapped=int32 -file=int32.go
//go:generate bin/gen-atomicint -name=Int64 -wrapped=int64 -file=int64.go
//go:generate bin/gen-atomicint -name=Uint32 -wrapped=uint32 -unsigned -file=uint32.go
//go:generate bin/gen-atomicint -name=Uint64 -wrapped=uint64 -unsigned -file=uint64.go

102
vendor/go.uber.org/atomic/int32.go generated vendored
View File

@ -1,102 +0,0 @@
// @generated Code generated by gen-atomicint.
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package atomic
import (
"encoding/json"
"strconv"
"sync/atomic"
)
// Int32 is an atomic wrapper around int32.
type Int32 struct {
_ nocmp // disallow non-atomic comparison
v int32
}
// NewInt32 creates a new Int32.
func NewInt32(i int32) *Int32 {
return &Int32{v: i}
}
// Load atomically loads the wrapped value.
func (i *Int32) Load() int32 {
return atomic.LoadInt32(&i.v)
}
// Add atomically adds to the wrapped int32 and returns the new value.
func (i *Int32) Add(n int32) int32 {
return atomic.AddInt32(&i.v, n)
}
// Sub atomically subtracts from the wrapped int32 and returns the new value.
func (i *Int32) Sub(n int32) int32 {
return atomic.AddInt32(&i.v, -n)
}
// Inc atomically increments the wrapped int32 and returns the new value.
func (i *Int32) Inc() int32 {
return i.Add(1)
}
// Dec atomically decrements the wrapped int32 and returns the new value.
func (i *Int32) Dec() int32 {
return i.Sub(1)
}
// CAS is an atomic compare-and-swap.
func (i *Int32) CAS(old, new int32) bool {
return atomic.CompareAndSwapInt32(&i.v, old, new)
}
// Store atomically stores the passed value.
func (i *Int32) Store(n int32) {
atomic.StoreInt32(&i.v, n)
}
// Swap atomically swaps the wrapped int32 and returns the old value.
func (i *Int32) Swap(n int32) int32 {
return atomic.SwapInt32(&i.v, n)
}
// MarshalJSON encodes the wrapped int32 into JSON.
func (i *Int32) MarshalJSON() ([]byte, error) {
return json.Marshal(i.Load())
}
// UnmarshalJSON decodes JSON into the wrapped int32.
func (i *Int32) UnmarshalJSON(b []byte) error {
var v int32
if err := json.Unmarshal(b, &v); err != nil {
return err
}
i.Store(v)
return nil
}
// String encodes the wrapped value as a string.
func (i *Int32) String() string {
v := i.Load()
return strconv.FormatInt(int64(v), 10)
}

102
vendor/go.uber.org/atomic/int64.go generated vendored
View File

@ -1,102 +0,0 @@
// @generated Code generated by gen-atomicint.
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package atomic
import (
"encoding/json"
"strconv"
"sync/atomic"
)
// Int64 is an atomic wrapper around int64.
type Int64 struct {
_ nocmp // disallow non-atomic comparison
v int64
}
// NewInt64 creates a new Int64.
func NewInt64(i int64) *Int64 {
return &Int64{v: i}
}
// Load atomically loads the wrapped value.
func (i *Int64) Load() int64 {
return atomic.LoadInt64(&i.v)
}
// Add atomically adds to the wrapped int64 and returns the new value.
func (i *Int64) Add(n int64) int64 {
return atomic.AddInt64(&i.v, n)
}
// Sub atomically subtracts from the wrapped int64 and returns the new value.
func (i *Int64) Sub(n int64) int64 {
return atomic.AddInt64(&i.v, -n)
}
// Inc atomically increments the wrapped int64 and returns the new value.
func (i *Int64) Inc() int64 {
return i.Add(1)
}
// Dec atomically decrements the wrapped int64 and returns the new value.
func (i *Int64) Dec() int64 {
return i.Sub(1)
}
// CAS is an atomic compare-and-swap.
func (i *Int64) CAS(old, new int64) bool {
return atomic.CompareAndSwapInt64(&i.v, old, new)
}
// Store atomically stores the passed value.
func (i *Int64) Store(n int64) {
atomic.StoreInt64(&i.v, n)
}
// Swap atomically swaps the wrapped int64 and returns the old value.
func (i *Int64) Swap(n int64) int64 {
return atomic.SwapInt64(&i.v, n)
}
// MarshalJSON encodes the wrapped int64 into JSON.
func (i *Int64) MarshalJSON() ([]byte, error) {
return json.Marshal(i.Load())
}
// UnmarshalJSON decodes JSON into the wrapped int64.
func (i *Int64) UnmarshalJSON(b []byte) error {
var v int64
if err := json.Unmarshal(b, &v); err != nil {
return err
}
i.Store(v)
return nil
}
// String encodes the wrapped value as a string.
func (i *Int64) String() string {
v := i.Load()
return strconv.FormatInt(int64(v), 10)
}
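A minimal sketch of the usual counter pattern with Int64 (names and goroutine count are illustrative):

```go
package main

import (
	"fmt"
	"sync"

	"go.uber.org/atomic"
)

func main() {
	requests := atomic.NewInt64(0)

	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			requests.Inc() // atomic.AddInt64 under the hood
		}()
	}
	wg.Wait()

	fmt.Println(requests.Load()) // 8
}
```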

35
vendor/go.uber.org/atomic/nocmp.go generated vendored
View File

@ -1,35 +0,0 @@
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package atomic
// nocmp is an uncomparable struct. Embed this inside another struct to make
// it uncomparable.
//
// type Foo struct {
// nocmp
// // ...
// }
//
// This DOES NOT:
//
// - Disallow shallow copies of structs
// - Disallow comparison of pointers to uncomparable structs
type nocmp [0]func()
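A standalone sketch of the trick documented above (the nocmp and Settings names here are local to the example, not exported by the package): a zero-length array of func values adds no size, but func types are not comparable, so any struct embedding it loses the == operator at compile time.

```go
package main

import "fmt"

type nocmp [0]func() // same shape as the vendored type above

type Settings struct {
	nocmp // disallow a == b on Settings values
	Name  string
}

func main() {
	a := Settings{Name: "x"}
	b := Settings{Name: "x"}

	// _ = a == b // compile error: invalid operation, Settings cannot be compared

	fmt.Println(a.Name == b.Name) // comparing individual fields still works
}
```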

54
vendor/go.uber.org/atomic/string.go generated vendored
View File

@ -1,54 +0,0 @@
// @generated Code generated by gen-atomicwrapper.
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package atomic
// String is an atomic type-safe wrapper for string values.
type String struct {
_ nocmp // disallow non-atomic comparison
v Value
}
var _zeroString string
// NewString creates a new String.
func NewString(v string) *String {
x := &String{}
if v != _zeroString {
x.Store(v)
}
return x
}
// Load atomically loads the wrapped string.
func (x *String) Load() string {
if v := x.v.Load(); v != nil {
return v.(string)
}
return _zeroString
}
// Store atomically stores the passed string.
func (x *String) Store(v string) {
x.v.Store(v)
}

View File

@ -1,43 +0,0 @@
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package atomic
//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -file=string.go
// String returns the wrapped value.
func (s *String) String() string {
return s.Load()
}
// MarshalText encodes the wrapped string into a textual form.
//
// This makes it encodable as JSON, YAML, XML, and more.
func (s *String) MarshalText() ([]byte, error) {
return []byte(s.Load()), nil
}
// UnmarshalText decodes text and replaces the wrapped string with it.
//
// This makes it decodable from JSON, YAML, XML, and more.
func (s *String) UnmarshalText(b []byte) error {
s.Store(string(b))
return nil
}
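Because the extension above implements MarshalText/UnmarshalText, a *String value plugs directly into encoding/json and other text-based encoders. A hedged sketch with illustrative values:

```go
package main

import (
	"encoding/json"
	"fmt"

	"go.uber.org/atomic"
)

func main() {
	state := atomic.NewString("starting")

	// json.Marshal picks up MarshalText, so the value encodes as a JSON string.
	out, err := json.Marshal(state)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // "starting"

	// UnmarshalText atomically replaces the wrapped string.
	if err := state.UnmarshalText([]byte("running")); err != nil {
		panic(err)
	}
	fmt.Println(state.String()) // running
}
```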

102
vendor/go.uber.org/atomic/uint32.go generated vendored
View File

@ -1,102 +0,0 @@
// @generated Code generated by gen-atomicint.
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package atomic
import (
"encoding/json"
"strconv"
"sync/atomic"
)
// Uint32 is an atomic wrapper around uint32.
type Uint32 struct {
_ nocmp // disallow non-atomic comparison
v uint32
}
// NewUint32 creates a new Uint32.
func NewUint32(i uint32) *Uint32 {
return &Uint32{v: i}
}
// Load atomically loads the wrapped value.
func (i *Uint32) Load() uint32 {
return atomic.LoadUint32(&i.v)
}
// Add atomically adds to the wrapped uint32 and returns the new value.
func (i *Uint32) Add(n uint32) uint32 {
return atomic.AddUint32(&i.v, n)
}
// Sub atomically subtracts from the wrapped uint32 and returns the new value.
func (i *Uint32) Sub(n uint32) uint32 {
return atomic.AddUint32(&i.v, ^(n - 1))
}
// Inc atomically increments the wrapped uint32 and returns the new value.
func (i *Uint32) Inc() uint32 {
return i.Add(1)
}
// Dec atomically decrements the wrapped uint32 and returns the new value.
func (i *Uint32) Dec() uint32 {
return i.Sub(1)
}
// CAS is an atomic compare-and-swap.
func (i *Uint32) CAS(old, new uint32) bool {
return atomic.CompareAndSwapUint32(&i.v, old, new)
}
// Store atomically stores the passed value.
func (i *Uint32) Store(n uint32) {
atomic.StoreUint32(&i.v, n)
}
// Swap atomically swaps the wrapped uint32 and returns the old value.
func (i *Uint32) Swap(n uint32) uint32 {
return atomic.SwapUint32(&i.v, n)
}
// MarshalJSON encodes the wrapped uint32 into JSON.
func (i *Uint32) MarshalJSON() ([]byte, error) {
return json.Marshal(i.Load())
}
// UnmarshalJSON decodes JSON into the wrapped uint32.
func (i *Uint32) UnmarshalJSON(b []byte) error {
var v uint32
if err := json.Unmarshal(b, &v); err != nil {
return err
}
i.Store(v)
return nil
}
// String encodes the wrapped value as a string.
func (i *Uint32) String() string {
v := i.Load()
return strconv.FormatUint(uint64(v), 10)
}
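Sub above is implemented as AddUint32(&v, ^(n-1)) because sync/atomic has no subtract for unsigned integers: ^(n-1) is the two's complement of n, so adding it wraps around to the same result as subtracting n. A tiny standard-library-only sketch (the values are illustrative):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var v uint32 = 10
	var n uint32 = 3

	// ^(n - 1) == -n in two's complement, so this is equivalent to v -= n.
	atomic.AddUint32(&v, ^(n - 1))

	fmt.Println(v) // 7
}
```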

102
vendor/go.uber.org/atomic/uint64.go generated vendored
View File

@ -1,102 +0,0 @@
// @generated Code generated by gen-atomicint.
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package atomic
import (
"encoding/json"
"strconv"
"sync/atomic"
)
// Uint64 is an atomic wrapper around uint64.
type Uint64 struct {
_ nocmp // disallow non-atomic comparison
v uint64
}
// NewUint64 creates a new Uint64.
func NewUint64(i uint64) *Uint64 {
return &Uint64{v: i}
}
// Load atomically loads the wrapped value.
func (i *Uint64) Load() uint64 {
return atomic.LoadUint64(&i.v)
}
// Add atomically adds to the wrapped uint64 and returns the new value.
func (i *Uint64) Add(n uint64) uint64 {
return atomic.AddUint64(&i.v, n)
}
// Sub atomically subtracts from the wrapped uint64 and returns the new value.
func (i *Uint64) Sub(n uint64) uint64 {
return atomic.AddUint64(&i.v, ^(n - 1))
}
// Inc atomically increments the wrapped uint64 and returns the new value.
func (i *Uint64) Inc() uint64 {
return i.Add(1)
}
// Dec atomically decrements the wrapped uint64 and returns the new value.
func (i *Uint64) Dec() uint64 {
return i.Sub(1)
}
// CAS is an atomic compare-and-swap.
func (i *Uint64) CAS(old, new uint64) bool {
return atomic.CompareAndSwapUint64(&i.v, old, new)
}
// Store atomically stores the passed value.
func (i *Uint64) Store(n uint64) {
atomic.StoreUint64(&i.v, n)
}
// Swap atomically swaps the wrapped uint64 and returns the old value.
func (i *Uint64) Swap(n uint64) uint64 {
return atomic.SwapUint64(&i.v, n)
}
// MarshalJSON encodes the wrapped uint64 into JSON.
func (i *Uint64) MarshalJSON() ([]byte, error) {
return json.Marshal(i.Load())
}
// UnmarshalJSON decodes JSON into the wrapped uint64.
func (i *Uint64) UnmarshalJSON(b []byte) error {
var v uint64
if err := json.Unmarshal(b, &v); err != nil {
return err
}
i.Store(v)
return nil
}
// String encodes the wrapped value as a string.
func (i *Uint64) String() string {
v := i.Load()
return strconv.FormatUint(uint64(v), 10)
}

31
vendor/go.uber.org/atomic/value.go generated vendored
View File

@ -1,31 +0,0 @@
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package atomic
import "sync/atomic"
// Value shadows the type of the same name from sync/atomic
// https://godoc.org/sync/atomic#Value
type Value struct {
atomic.Value
_ nocmp // disallow non-atomic comparison
}

View File

@ -1,23 +0,0 @@
sudo: false
language: go
go_import_path: go.uber.org/multierr
env:
global:
- GO111MODULE=on
go:
- oldstable
- stable
before_install:
- go version
script:
- |
set -e
make lint
make cover
after_success:
- bash <(curl -s https://codecov.io/bash)

View File

@ -1,6 +1,34 @@
Releases Releases
======== ========
v1.10.0 (2023-03-08)
====================
- Comply with Go 1.20's multiple-error interface.
- Drop Go 1.18 support.
Per the support policy, only Go 1.19 and 1.20 are supported now.
- Drop all non-test external dependencies.
v1.9.0 (2022-12-12)
===================
- Add `AppendFunc` that allows passing functions similar to
`AppendInvoke`.
- Bump up yaml.v3 dependency to 3.0.1.
v1.8.0 (2022-02-28)
===================
- `Combine`: perform zero allocations when there are no errors.
v1.7.0 (2021-05-06)
===================
- Add `AppendInvoke` to append into errors from `defer` blocks.
v1.6.0 (2020-09-14) v1.6.0 (2020-09-14)
=================== ===================

View File

@ -1,4 +1,4 @@
Copyright (c) 2017 Uber Technologies, Inc. Copyright (c) 2017-2021 Uber Technologies, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal

View File

@ -34,9 +34,5 @@ lint: gofmt golint staticcheck
.PHONY: cover .PHONY: cover
cover: cover:
go test -coverprofile=cover.out -coverpkg=./... -v ./... go test -race -coverprofile=cover.out -coverpkg=./... -v ./...
go tool cover -html=cover.out -o cover.html go tool cover -html=cover.out -o cover.html
update-license:
@cd tools && go install go.uber.org/tools/update-license
@$(GOBIN)/update-license $(GO_FILES)

View File

@ -2,9 +2,29 @@
`multierr` allows combining one or more Go `error`s together. `multierr` allows combining one or more Go `error`s together.
## Features
- **Idiomatic**:
multierr follows best practices in Go, and keeps your code idiomatic.
- It keeps the underlying error type hidden,
allowing you to deal in `error` values exclusively.
- It provides APIs to safely append into an error from a `defer` statement.
- **Performant**:
multierr is optimized for performance:
- It avoids allocations where possible.
- It utilizes slice resizing semantics to optimize common cases
like appending into the same error object from a loop.
- **Interoperable**:
multierr interoperates with the Go standard library's error APIs seamlessly:
- The `errors.Is` and `errors.As` functions *just work*.
- **Lightweight**:
multierr comes with virtually no dependencies.
## Installation ## Installation
go get -u go.uber.org/multierr ```bash
go get -u go.uber.org/multierr@latest
```
## Status ## Status
@ -15,9 +35,9 @@ Stable: No breaking changes will be made before 2.0.
Released under the [MIT License]. Released under the [MIT License].
[MIT License]: LICENSE.txt [MIT License]: LICENSE.txt
[doc-img]: https://godoc.org/go.uber.org/multierr?status.svg [doc-img]: https://pkg.go.dev/badge/go.uber.org/multierr
[doc]: https://godoc.org/go.uber.org/multierr [doc]: https://pkg.go.dev/go.uber.org/multierr
[ci-img]: https://travis-ci.com/uber-go/multierr.svg?branch=master [ci-img]: https://github.com/uber-go/multierr/actions/workflows/go.yml/badge.svg
[cov-img]: https://codecov.io/gh/uber-go/multierr/branch/master/graph/badge.svg [cov-img]: https://codecov.io/gh/uber-go/multierr/branch/master/graph/badge.svg
[ci]: https://travis-ci.com/uber-go/multierr [ci]: https://github.com/uber-go/multierr/actions/workflows/go.yml
[cov]: https://codecov.io/gh/uber-go/multierr [cov]: https://codecov.io/gh/uber-go/multierr

381
vendor/go.uber.org/multierr/error.go generated vendored
View File

@ -1,4 +1,4 @@
// Copyright (c) 2019 Uber Technologies, Inc. // Copyright (c) 2017-2023 Uber Technologies, Inc.
// //
// Permission is hereby granted, free of charge, to any person obtaining a copy // Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal // of this software and associated documentation files (the "Software"), to deal
@ -20,54 +20,109 @@
// Package multierr allows combining one or more errors together. // Package multierr allows combining one or more errors together.
// //
// Overview // # Overview
// //
// Errors can be combined with the use of the Combine function. // Errors can be combined with the use of the Combine function.
// //
// multierr.Combine( // multierr.Combine(
// reader.Close(), // reader.Close(),
// writer.Close(), // writer.Close(),
// conn.Close(), // conn.Close(),
// ) // )
// //
// If only two errors are being combined, the Append function may be used // If only two errors are being combined, the Append function may be used
// instead. // instead.
// //
// err = multierr.Append(reader.Close(), writer.Close()) // err = multierr.Append(reader.Close(), writer.Close())
//
// This makes it possible to record resource cleanup failures from deferred
// blocks with the help of named return values.
//
// func sendRequest(req Request) (err error) {
// conn, err := openConnection()
// if err != nil {
// return err
// }
// defer func() {
// err = multierr.Append(err, conn.Close())
// }()
// // ...
// }
// //
// The underlying list of errors for a returned error object may be retrieved // The underlying list of errors for a returned error object may be retrieved
// with the Errors function. // with the Errors function.
// //
// errors := multierr.Errors(err) // errors := multierr.Errors(err)
// if len(errors) > 0 { // if len(errors) > 0 {
// fmt.Println("The following errors occurred:", errors) // fmt.Println("The following errors occurred:", errors)
// } // }
// //
// Advanced Usage // # Appending from a loop
//
// You sometimes need to append into an error from a loop.
//
// var err error
// for _, item := range items {
// err = multierr.Append(err, process(item))
// }
//
// Cases like this may require knowledge of whether an individual instance
// failed. This usually requires introduction of a new variable.
//
// var err error
// for _, item := range items {
// if perr := process(item); perr != nil {
// log.Warn("skipping item", item)
// err = multierr.Append(err, perr)
// }
// }
//
// multierr includes AppendInto to simplify cases like this.
//
// var err error
// for _, item := range items {
// if multierr.AppendInto(&err, process(item)) {
// log.Warn("skipping item", item)
// }
// }
//
// This will append the error into the err variable, and return true if that
// individual error was non-nil.
//
// See [AppendInto] for more information.
//
// # Deferred Functions
//
// Go makes it possible to modify the return value of a function in a defer
// block if the function was using named returns. This makes it possible to
// record resource cleanup failures from deferred blocks.
//
// func sendRequest(req Request) (err error) {
// conn, err := openConnection()
// if err != nil {
// return err
// }
// defer func() {
// err = multierr.Append(err, conn.Close())
// }()
// // ...
// }
//
// multierr provides the Invoker type and AppendInvoke function to make cases
// like the above simpler and obviate the need for a closure. The following is
// roughly equivalent to the example above.
//
// func sendRequest(req Request) (err error) {
// conn, err := openConnection()
// if err != nil {
// return err
// }
// defer multierr.AppendInvoke(&err, multierr.Close(conn))
// // ...
// }
//
// See [AppendInvoke] and [Invoker] for more information.
//
// NOTE: If you're modifying an error from inside a defer, you MUST use a named
// return value for that function.
//
// # Advanced Usage
// //
// Errors returned by Combine and Append MAY implement the following // Errors returned by Combine and Append MAY implement the following
// interface. // interface.
// //
// type errorGroup interface { // type errorGroup interface {
// // Returns a slice containing the underlying list of errors. // // Returns a slice containing the underlying list of errors.
// // // //
// // This slice MUST NOT be modified by the caller. // // This slice MUST NOT be modified by the caller.
// Errors() []error // Errors() []error
// } // }
// //
// Note that if you need access to list of errors behind a multierr error, you // Note that if you need access to list of errors behind a multierr error, you
// should prefer using the Errors function. That said, if you need cheap // should prefer using the Errors function. That said, if you need cheap
@ -76,13 +131,13 @@
// because errors returned by Combine and Append are not guaranteed to // because errors returned by Combine and Append are not guaranteed to
// implement this interface. // implement this interface.
// //
// var errors []error // var errors []error
// group, ok := err.(errorGroup) // group, ok := err.(errorGroup)
// if ok { // if ok {
// errors = group.Errors() // errors = group.Errors()
// } else { // } else {
// errors = []error{err} // errors = []error{err}
// } // }
package multierr // import "go.uber.org/multierr" package multierr // import "go.uber.org/multierr"
import ( import (
@ -91,8 +146,7 @@ import (
"io" "io"
"strings" "strings"
"sync" "sync"
"sync/atomic"
"go.uber.org/atomic"
) )
var ( var (
@ -132,8 +186,8 @@ type errorGroup interface {
// Errors returns a slice containing zero or more errors that the supplied // Errors returns a slice containing zero or more errors that the supplied
// error is composed of. If the error is nil, a nil slice is returned. // error is composed of. If the error is nil, a nil slice is returned.
// //
// err := multierr.Append(r.Close(), w.Close()) // err := multierr.Append(r.Close(), w.Close())
// errors := multierr.Errors(err) // errors := multierr.Errors(err)
// //
// If the error is not composed of other errors, the returned slice contains // If the error is not composed of other errors, the returned slice contains
// just the error that was passed in. // just the error that was passed in.
@ -156,10 +210,7 @@ func Errors(err error) []error {
return []error{err} return []error{err}
} }
errors := eg.Errors() return append(([]error)(nil), eg.Errors()...)
result := make([]error, len(errors))
copy(result, errors)
return result
} }
// multiError is an error that holds one or more errors. // multiError is an error that holds one or more errors.
@ -292,6 +343,14 @@ func inspect(errors []error) (res inspectResult) {
// fromSlice converts the given list of errors into a single error. // fromSlice converts the given list of errors into a single error.
func fromSlice(errors []error) error { func fromSlice(errors []error) error {
// Don't pay to inspect small slices.
switch len(errors) {
case 0:
return nil
case 1:
return errors[0]
}
res := inspect(errors) res := inspect(errors)
switch res.Count { switch res.Count {
case 0: case 0:
@ -301,8 +360,12 @@ func fromSlice(errors []error) error {
return errors[res.FirstErrorIdx] return errors[res.FirstErrorIdx]
case len(errors): case len(errors):
if !res.ContainsMultiError { if !res.ContainsMultiError {
// already flat // Error list is flat. Make a copy of it.
return &multiError{errors: errors} // Otherwise "errors" escapes to the heap
// unconditionally for all other cases.
// This lets us optimize for the "no errors" case.
out := append(([]error)(nil), errors...)
return &multiError{errors: out}
} }
} }
@ -327,32 +390,32 @@ func fromSlice(errors []error) error {
// If zero arguments were passed or if all items are nil, a nil error is // If zero arguments were passed or if all items are nil, a nil error is
// returned. // returned.
// //
// Combine(nil, nil) // == nil // Combine(nil, nil) // == nil
// //
// If only a single error was passed, it is returned as-is. // If only a single error was passed, it is returned as-is.
// //
// Combine(err) // == err // Combine(err) // == err
// //
// Combine skips over nil arguments so this function may be used to combine // Combine skips over nil arguments so this function may be used to combine
// together errors from operations that fail independently of each other. // together errors from operations that fail independently of each other.
// //
// multierr.Combine( // multierr.Combine(
// reader.Close(), // reader.Close(),
// writer.Close(), // writer.Close(),
// pipe.Close(), // pipe.Close(),
// ) // )
// //
// If any of the passed errors is a multierr error, it will be flattened along // If any of the passed errors is a multierr error, it will be flattened along
// with the other errors. // with the other errors.
// //
// multierr.Combine(multierr.Combine(err1, err2), err3) // multierr.Combine(multierr.Combine(err1, err2), err3)
// // is the same as // // is the same as
// multierr.Combine(err1, err2, err3) // multierr.Combine(err1, err2, err3)
// //
// The returned error formats into a readable multi-line error message if // The returned error formats into a readable multi-line error message if
// formatted with %+v. // formatted with %+v.
// //
// fmt.Sprintf("%+v", multierr.Combine(err1, err2)) // fmt.Sprintf("%+v", multierr.Combine(err1, err2))
func Combine(errors ...error) error { func Combine(errors ...error) error {
return fromSlice(errors) return fromSlice(errors)
} }
@ -362,16 +425,19 @@ func Combine(errors ...error) error {
// This function is a specialization of Combine for the common case where // This function is a specialization of Combine for the common case where
// there are only two errors. // there are only two errors.
// //
// err = multierr.Append(reader.Close(), writer.Close()) // err = multierr.Append(reader.Close(), writer.Close())
// //
// The following pattern may also be used to record failure of deferred // The following pattern may also be used to record failure of deferred
// operations without losing information about the original error. // operations without losing information about the original error.
// //
// func doSomething(..) (err error) { // func doSomething(..) (err error) {
// f := acquireResource() // f := acquireResource()
// defer func() { // defer func() {
// err = multierr.Append(err, f.Close()) // err = multierr.Append(err, f.Close())
// }() // }()
//
// Note that the variable MUST be a named return to append an error to it from
// the defer statement. See also [AppendInvoke].
func Append(left error, right error) error { func Append(left error, right error) error {
switch { switch {
case left == nil: case left == nil:
@ -401,37 +467,37 @@ func Append(left error, right error) error {
// AppendInto appends an error into the destination of an error pointer and // AppendInto appends an error into the destination of an error pointer and
// returns whether the error being appended was non-nil. // returns whether the error being appended was non-nil.
// //
// var err error // var err error
// multierr.AppendInto(&err, r.Close()) // multierr.AppendInto(&err, r.Close())
// multierr.AppendInto(&err, w.Close()) // multierr.AppendInto(&err, w.Close())
// //
// The above is equivalent to, // The above is equivalent to,
// //
// err := multierr.Append(r.Close(), w.Close()) // err := multierr.Append(r.Close(), w.Close())
// //
// As AppendInto reports whether the provided error was non-nil, it may be // As AppendInto reports whether the provided error was non-nil, it may be
// used to build a multierr error in a loop more ergonomically. For example: // used to build a multierr error in a loop more ergonomically. For example:
// //
// var err error // var err error
// for line := range lines { // for line := range lines {
// var item Item // var item Item
// if multierr.AppendInto(&err, parse(line, &item)) { // if multierr.AppendInto(&err, parse(line, &item)) {
// continue // continue
// } // }
// items = append(items, item) // items = append(items, item)
// } // }
// //
// Compare this with a verison that relies solely on Append: // Compare this with a version that relies solely on Append:
// //
// var err error // var err error
// for line := range lines { // for line := range lines {
// var item Item // var item Item
// if parseErr := parse(line, &item); parseErr != nil { // if parseErr := parse(line, &item); parseErr != nil {
// err = multierr.Append(err, parseErr) // err = multierr.Append(err, parseErr)
// continue // continue
// } // }
// items = append(items, item) // items = append(items, item)
// } // }
func AppendInto(into *error, err error) (errored bool) { func AppendInto(into *error, err error) (errored bool) {
if into == nil { if into == nil {
// We panic if 'into' is nil. This is not documented above // We panic if 'into' is nil. This is not documented above
@ -447,3 +513,140 @@ func AppendInto(into *error, err error) (errored bool) {
*into = Append(*into, err) *into = Append(*into, err)
return true return true
} }
// Invoker is an operation that may fail with an error. Use it with
// AppendInvoke to append the result of calling the function into an error.
// This allows you to conveniently defer capture of failing operations.
//
// See also, [Close] and [Invoke].
type Invoker interface {
Invoke() error
}
// Invoke wraps a function which may fail with an error to match the Invoker
// interface. Use it to supply functions matching this signature to
// AppendInvoke.
//
// For example,
//
// func processReader(r io.Reader) (err error) {
// scanner := bufio.NewScanner(r)
// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
// for scanner.Scan() {
// // ...
// }
// // ...
// }
//
// In this example, the following line will construct the Invoker right away,
// but defer the invocation of scanner.Err() until the function returns.
//
// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
//
// Note that the error you're appending to from the defer statement MUST be a
// named return.
type Invoke func() error
// Invoke calls the supplied function and returns its result.
func (i Invoke) Invoke() error { return i() }
// Close builds an Invoker that closes the provided io.Closer. Use it with
// AppendInvoke to close io.Closers and append their results into an error.
//
// For example,
//
// func processFile(path string) (err error) {
// f, err := os.Open(path)
// if err != nil {
// return err
// }
// defer multierr.AppendInvoke(&err, multierr.Close(f))
// return processReader(f)
// }
//
// In this example, multierr.Close will construct the Invoker right away, but
// defer the invocation of f.Close until the function returns.
//
// defer multierr.AppendInvoke(&err, multierr.Close(f))
//
// Note that the error you're appending to from the defer statement MUST be a
// named return.
func Close(closer io.Closer) Invoker {
return Invoke(closer.Close)
}
// AppendInvoke appends the result of calling the given Invoker into the
// provided error pointer. Use it with named returns to safely defer
// invocation of fallible operations until a function returns, and capture the
// resulting errors.
//
// func doSomething(...) (err error) {
// // ...
// f, err := openFile(..)
// if err != nil {
// return err
// }
//
// // multierr will call f.Close() when this function returns and
// // if the operation fails, it appends its error into the
// // returned error.
// defer multierr.AppendInvoke(&err, multierr.Close(f))
//
// scanner := bufio.NewScanner(f)
// // Similarly, this schedules scanner.Err to be called and
// // inspected when the function returns and appends its error
// // into the returned error.
// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
//
// // ...
// }
//
// NOTE: If used with a defer, the error variable MUST be a named return.
//
// Without defer, AppendInvoke behaves exactly like AppendInto.
//
// err := // ...
// multierr.AppendInvoke(&err, multierr.Invoke(foo))
//
// // ...is roughly equivalent to...
//
// err := // ...
// multierr.AppendInto(&err, foo())
//
// The advantage of the indirection introduced by Invoker is to make it easy
// to defer the invocation of a function. Without this indirection, the
// invoked function will be evaluated at the time of the defer block rather
// than when the function returns.
//
// // BAD: This is likely not what the caller intended. This will evaluate
// // foo() right away and append its result into the error when the
// // function returns.
// defer multierr.AppendInto(&err, foo())
//
// // GOOD: This will defer invocation of foo until the function returns.
// defer multierr.AppendInvoke(&err, multierr.Invoke(foo))
//
// multierr provides a few Invoker implementations out of the box for
// convenience. See [Invoker] for more information.
func AppendInvoke(into *error, invoker Invoker) {
AppendInto(into, invoker.Invoke())
}
// AppendFunc is a shorthand for [AppendInvoke].
// It allows using a function or method value directly
// without having to wrap it in an [Invoker] interface.
//
// func doSomething(...) (err error) {
// w, err := startWorker(...)
// if err != nil {
// return err
// }
//
// // multierr will call w.Stop() when this function returns and
// // if the operation fails, it appends its error into the
// // returned error.
// defer multierr.AppendFunc(&err, w.Stop)
// }
func AppendFunc(into *error, fn func() error) {
AppendInvoke(into, Invoke(fn))
}
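Pulling the documented patterns together, a compact sketch of the loop, deferred, and Combine usages (the function names, temporary file path, and error messages are illustrative, not part of the package):

```go
package main

import (
	"fmt"
	"io"
	"os"
	"strings"

	"go.uber.org/multierr"
)

// closeAll shows the loop pattern: Append skips nil errors, so only real
// failures end up in the combined error.
func closeAll(closers ...io.Closer) error {
	var err error
	for _, c := range closers {
		err = multierr.Append(err, c.Close())
	}
	return err
}

// copyToFile shows the deferred pattern: err MUST be a named return for
// AppendInvoke to be able to record the Close failure.
func copyToFile(path string, r io.Reader) (err error) {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer multierr.AppendInvoke(&err, multierr.Close(f))

	_, err = io.Copy(f, r)
	return err
}

func main() {
	if err := copyToFile(os.TempDir()+"/multierr-demo.txt", strings.NewReader("hello\n")); err != nil {
		fmt.Println("copy failed:", err)
	}

	err := multierr.Combine(nil, fmt.Errorf("first"), fmt.Errorf("second"))
	fmt.Println(multierr.Errors(err)) // [first second]
}
```

closeAll is defined but not exercised in main; it only mirrors the loop example from the package documentation above.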

View File

@ -1,4 +1,4 @@
// Copyright (c) 2020 Uber Technologies, Inc. // Copyright (c) 2017-2023 Uber Technologies, Inc.
// //
// Permission is hereby granted, free of charge, to any person obtaining a copy // Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal // of this software and associated documentation files (the "Software"), to deal
@ -18,6 +18,12 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE. // THE SOFTWARE.
// Package atomic provides simple wrappers around numerics to enforce atomic //go:build go1.20
// access. // +build go1.20
package atomic
package multierr
// Unwrap returns a list of errors wrapped by this multierr.
func (merr *multiError) Unwrap() []error {
return merr.Errors()
}
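With the Unwrap() []error method above, errors combined by multierr participate in the standard errors helpers on Go 1.20+; on older toolchains the Is/As implementations in the pre-1.20 file (below) give the same behavior. A small sketch with an illustrative sentinel error:

```go
package main

import (
	"errors"
	"fmt"

	"go.uber.org/multierr"
)

var errNotFound = errors.New("not found")

func main() {
	err := multierr.Combine(
		fmt.Errorf("loading config: %w", errNotFound),
		errors.New("flushing cache"),
	)

	// errors.Is walks Unwrap() []error on Go 1.20+, so the sentinel is found
	// even though it sits behind both wrapping and combining.
	fmt.Println(errors.Is(err, errNotFound)) // true
}
```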

View File

@ -1,4 +1,4 @@
// Copyright (c) 2019 Uber Technologies, Inc. // Copyright (c) 2017-2023 Uber Technologies, Inc.
// //
// Permission is hereby granted, free of charge, to any person obtaining a copy // Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal // of this software and associated documentation files (the "Software"), to deal
@ -18,12 +18,19 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE. // THE SOFTWARE.
// +build go1.13 //go:build !go1.20
// +build !go1.20
package multierr package multierr
import "errors" import "errors"
// Versions of Go before 1.20 did not support the Unwrap() []error method.
// This provides a similar behavior by implementing the Is(..) and As(..)
// methods.
// See the errors.Join proposal for details:
// https://github.com/golang/go/issues/53435
// As attempts to find the first error in the error list that matches the type // As attempts to find the first error in the error list that matches the type
// of the value that target points to. // of the value that target points to.
// //

View File

@ -1,8 +0,0 @@
package: go.uber.org/multierr
import:
- package: go.uber.org/atomic
version: ^1
testImport:
- package: github.com/stretchr/testify
subpackages:
- assert

77
vendor/go.uber.org/zap/.golangci.yml generated vendored Normal file
View File

@ -0,0 +1,77 @@
output:
# Make output more digestible with quickfix in vim/emacs/etc.
sort-results: true
print-issued-lines: false
linters:
# We'll track the golangci-lint default linters manually
# instead of letting them change without our control.
disable-all: true
enable:
# golangci-lint defaults:
- errcheck
- gosimple
- govet
- ineffassign
- staticcheck
- unused
# Our own extras:
- gofmt
- nolintlint # lints nolint directives
- revive
linters-settings:
govet:
# These govet checks are disabled by default, but they're useful.
enable:
- nilness
- reflectvaluecompare
- sortslice
- unusedwrite
errcheck:
exclude-functions:
# These methods can not fail.
# They operate on an in-memory buffer.
- (*go.uber.org/zap/buffer.Buffer).Write
- (*go.uber.org/zap/buffer.Buffer).WriteByte
- (*go.uber.org/zap/buffer.Buffer).WriteString
- (*go.uber.org/zap/zapio.Writer).Close
- (*go.uber.org/zap/zapio.Writer).Sync
- (*go.uber.org/zap/zapio.Writer).Write
# Write to zapio.Writer cannot fail,
# so io.WriteString on it cannot fail.
- io.WriteString(*go.uber.org/zap/zapio.Writer)
# Writing a plain string to a fmt.State cannot fail.
- io.WriteString(fmt.State)
issues:
# Print all issues reported by all linters.
max-issues-per-linter: 0
max-same-issues: 0
# Don't ignore some of the issues that golangci-lint considers okay.
# This includes documenting all exported entities.
exclude-use-default: false
exclude-rules:
# Don't warn on unused parameters.
# Parameter names are useful; replacing them with '_' is undesirable.
- linters: [revive]
text: 'unused-parameter: parameter \S+ seems to be unused, consider removing or renaming it as _'
# staticcheck already has smarter checks for empty blocks.
# revive's empty-block linter has false positives.
# For example, as of writing this, the following is not allowed.
# for foo() { }
- linters: [revive]
text: 'empty-block: this block is empty, you can remove it'
# Ignore logger.Sync() errcheck failures in example_test.go
# since those are intended to be uncomplicated examples.
- linters: [errcheck]
path: example_test.go
text: 'Error return value of `logger.Sync` is not checked'

293
vendor/go.uber.org/zap/CHANGELOG.md generated vendored
View File

@ -1,7 +1,91 @@
# Changelog # Changelog
All notable changes to this project will be documented in this file. All notable changes to this project will be documented in this file.
This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## 1.26.0 (14 Sep 2023)
Enhancements:
* [#1319][]: Add `WithLazy` method to `Logger` which lazily evaluates the structured
context.
* [#1350][]: String encoding is much (~50%) faster now.
Thanks to @jquirke, @cdvr1993 for their contributions to this release.
[#1319]: https://github.com/uber-go/zap/pull/1319
[#1350]: https://github.com/uber-go/zap/pull/1350
## 1.25.0 (1 Aug 2023)
This release contains several improvements including performance, API additions,
and two new experimental packages whose APIs are unstable and may change in the
future.
Enhancements:
* [#1246][]: Add `zap/exp/zapslog` package for integration with slog.
* [#1273][]: Add `Name` to `Logger` which returns the Logger's name if one is set.
* [#1281][]: Add `zap/exp/expfield` package which contains helper methods
`Str` and `Strs` for constructing String-like zap.Fields.
* [#1310][]: Reduce stack size on `Any`.
Thanks to @knight42, @dzakaammar, @bcspragu, and @rexywork for their contributions
to this release.
[#1246]: https://github.com/uber-go/zap/pull/1246
[#1273]: https://github.com/uber-go/zap/pull/1273
[#1281]: https://github.com/uber-go/zap/pull/1281
[#1310]: https://github.com/uber-go/zap/pull/1310
## 1.24.0 (30 Nov 2022)
Enhancements:
* [#1148][]: Add `Level` to both `Logger` and `SugaredLogger` that reports the
current minimum enabled log level.
* [#1185][]: `SugaredLogger` turns errors to zap.Error automatically.
Thanks to @Abirdcfly, @craigpastro, @nnnkkk7, and @sashamelentyev for their
contributions to this release.
[#1148]: https://github.com/uber-go/zap/pull/1148
[#1185]: https://github.com/uber-go/zap/pull/1185
## 1.23.0 (24 Aug 2022)
Enhancements:
* [#1147][]: Add a `zapcore.LevelOf` function to determine the level of a
`LevelEnabler` or `Core`.
* [#1155][]: Add `zap.Stringers` field constructor to log arrays of objects
that implement `String() string`.
[#1147]: https://github.com/uber-go/zap/pull/1147
[#1155]: https://github.com/uber-go/zap/pull/1155
## 1.22.0 (8 Aug 2022)
Enhancements:
* [#1071][]: Add `zap.Objects` and `zap.ObjectValues` field constructors to log
arrays of objects. With these two constructors, you don't need to implement
`zapcore.ArrayMarshaler` for use with `zap.Array` if those objects implement
`zapcore.ObjectMarshaler`.
* [#1079][]: Add `SugaredLogger.WithOptions` to build a copy of an existing
`SugaredLogger` with the provided options applied.
* [#1080][]: Add `*ln` variants to `SugaredLogger` for each log level.
These functions provide a string joining behavior similar to `fmt.Println`.
* [#1088][]: Add `zap.WithFatalHook` option to control the behavior of the
logger for `Fatal`-level log entries. This defaults to exiting the program.
* [#1108][]: Add a `zap.Must` function that you can use with `NewProduction` or
`NewDevelopment` to panic if the system was unable to build the logger.
* [#1118][]: Add a `Logger.Log` method that allows specifying the log level for
a statement dynamically.
Thanks to @cardil, @craigpastro, @sashamelentyev, @shota3506, and @zhupeijun
for their contributions to this release.
[#1071]: https://github.com/uber-go/zap/pull/1071
[#1079]: https://github.com/uber-go/zap/pull/1079
[#1080]: https://github.com/uber-go/zap/pull/1080
[#1088]: https://github.com/uber-go/zap/pull/1088
[#1108]: https://github.com/uber-go/zap/pull/1108
[#1118]: https://github.com/uber-go/zap/pull/1118
## 1.21.0 (7 Feb 2022) ## 1.21.0 (7 Feb 2022)
@ -123,6 +207,16 @@ Enhancements:
Thanks to @ash2k, @FMLS, @jimmystewpot, @Oncilla, @tsoslow, @tylitianrui, @withshubh, and @wziww for their contributions to this release. Thanks to @ash2k, @FMLS, @jimmystewpot, @Oncilla, @tsoslow, @tylitianrui, @withshubh, and @wziww for their contributions to this release.
[#865]: https://github.com/uber-go/zap/pull/865
[#867]: https://github.com/uber-go/zap/pull/867
[#881]: https://github.com/uber-go/zap/pull/881
[#903]: https://github.com/uber-go/zap/pull/903
[#912]: https://github.com/uber-go/zap/pull/912
[#913]: https://github.com/uber-go/zap/pull/913
[#928]: https://github.com/uber-go/zap/pull/928
[#931]: https://github.com/uber-go/zap/pull/931
[#936]: https://github.com/uber-go/zap/pull/936
## 1.16.0 (1 Sep 2020) ## 1.16.0 (1 Sep 2020)
Bugfixes: Bugfixes:
@ -144,6 +238,17 @@ Enhancements:
Thanks to @SteelPhase, @tmshn, @lixingwang, @wyxloading, @moul, @segevfiner, @andy-retailnext and @jcorbin for their contributions to this release. Thanks to @SteelPhase, @tmshn, @lixingwang, @wyxloading, @moul, @segevfiner, @andy-retailnext and @jcorbin for their contributions to this release.
[#629]: https://github.com/uber-go/zap/pull/629
[#697]: https://github.com/uber-go/zap/pull/697
[#828]: https://github.com/uber-go/zap/pull/828
[#835]: https://github.com/uber-go/zap/pull/835
[#843]: https://github.com/uber-go/zap/pull/843
[#844]: https://github.com/uber-go/zap/pull/844
[#852]: https://github.com/uber-go/zap/pull/852
[#854]: https://github.com/uber-go/zap/pull/854
[#861]: https://github.com/uber-go/zap/pull/861
[#862]: https://github.com/uber-go/zap/pull/862
## 1.15.0 (23 Apr 2020) ## 1.15.0 (23 Apr 2020)
Bugfixes: Bugfixes:
@ -160,6 +265,11 @@ Enhancements:
Thanks to @danielbprice for their contributions to this release. Thanks to @danielbprice for their contributions to this release.
[#804]: https://github.com/uber-go/zap/pull/804
[#812]: https://github.com/uber-go/zap/pull/812
[#806]: https://github.com/uber-go/zap/pull/806
[#813]: https://github.com/uber-go/zap/pull/813
## 1.14.1 (14 Mar 2020) ## 1.14.1 (14 Mar 2020)
Bugfixes: Bugfixes:
@ -172,6 +282,10 @@ Bugfixes:
Thanks to @YashishDua for their contributions to this release. Thanks to @YashishDua for their contributions to this release.
[#791]: https://github.com/uber-go/zap/pull/791
[#795]: https://github.com/uber-go/zap/pull/795
[#799]: https://github.com/uber-go/zap/pull/799
## 1.14.0 (20 Feb 2020) ## 1.14.0 (20 Feb 2020)
Enhancements: Enhancements:
@ -182,6 +296,11 @@ Enhancements:
Thanks to @caibirdme for their contributions to this release. Thanks to @caibirdme for their contributions to this release.
[#771]: https://github.com/uber-go/zap/pull/771
[#773]: https://github.com/uber-go/zap/pull/773
[#775]: https://github.com/uber-go/zap/pull/775
[#786]: https://github.com/uber-go/zap/pull/786
## 1.13.0 (13 Nov 2019) ## 1.13.0 (13 Nov 2019)
Enhancements: Enhancements:
@ -190,11 +309,15 @@ Enhancements:
Thanks to @jbizzle for their contributions to this release. Thanks to @jbizzle for their contributions to this release.
[#758]: https://github.com/uber-go/zap/pull/758
## 1.12.0 (29 Oct 2019) ## 1.12.0 (29 Oct 2019)
Enhancements: Enhancements:
* [#751][]: Migrate to Go modules. * [#751][]: Migrate to Go modules.
[#751]: https://github.com/uber-go/zap/pull/751
## 1.11.0 (21 Oct 2019) ## 1.11.0 (21 Oct 2019)
Enhancements: Enhancements:
@ -203,6 +326,9 @@ Enhancements:
Thanks to @juicemia, @uhthomas for their contributions to this release. Thanks to @juicemia, @uhthomas for their contributions to this release.
[#725]: https://github.com/uber-go/zap/pull/725
[#736]: https://github.com/uber-go/zap/pull/736
## 1.10.0 (29 Apr 2019) ## 1.10.0 (29 Apr 2019)
Bugfixes: Bugfixes:
@ -220,12 +346,20 @@ Enhancements:
Thanks to @iaroslav-ciupin, @lelenanam, @joa, @NWilson for their contributions Thanks to @iaroslav-ciupin, @lelenanam, @joa, @NWilson for their contributions
to this release. to this release.
[#657]: https://github.com/uber-go/zap/pull/657
[#706]: https://github.com/uber-go/zap/pull/706
[#610]: https://github.com/uber-go/zap/pull/610
[#675]: https://github.com/uber-go/zap/pull/675
[#704]: https://github.com/uber-go/zap/pull/704
## v1.9.1 (06 Aug 2018) ## v1.9.1 (06 Aug 2018)
Bugfixes: Bugfixes:
* [#614][]: MapObjectEncoder should not ignore empty slices. * [#614][]: MapObjectEncoder should not ignore empty slices.
[#614]: https://github.com/uber-go/zap/pull/614
## v1.9.0 (19 Jul 2018) ## v1.9.0 (19 Jul 2018)
Enhancements: Enhancements:
@ -235,6 +369,10 @@ Enhancements:
Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and
@dimroc for their contributions to this release. @dimroc for their contributions to this release.
[#602]: https://github.com/uber-go/zap/pull/602
[#572]: https://github.com/uber-go/zap/pull/572
[#606]: https://github.com/uber-go/zap/pull/606
## v1.8.0 (13 Apr 2018) ## v1.8.0 (13 Apr 2018)
Enhancements: Enhancements:
@ -248,11 +386,18 @@ Bugfixes:
Thanks to @DiSiqueira and @djui for their contributions to this release. Thanks to @DiSiqueira and @djui for their contributions to this release.
[#508]: https://github.com/uber-go/zap/pull/508
[#518]: https://github.com/uber-go/zap/pull/518
[#577]: https://github.com/uber-go/zap/pull/577
[#574]: https://github.com/uber-go/zap/pull/574
## v1.7.1 (25 Sep 2017) ## v1.7.1 (25 Sep 2017)
Bugfixes: Bugfixes:
* [#504][]: Store strings when using AddByteString with the map encoder. * [#504][]: Store strings when using AddByteString with the map encoder.
[#504]: https://github.com/uber-go/zap/pull/504
## v1.7.0 (21 Sep 2017) ## v1.7.0 (21 Sep 2017)
Enhancements: Enhancements:
@ -260,6 +405,8 @@ Enhancements:
* [#487][]: Add `NewStdLogAt`, which extends `NewStdLog` by allowing the user * [#487][]: Add `NewStdLogAt`, which extends `NewStdLog` by allowing the user
to specify the level of the logged messages. to specify the level of the logged messages.
[#487]: https://github.com/uber-go/zap/pull/487
## v1.6.0 (30 Aug 2017) ## v1.6.0 (30 Aug 2017)
Enhancements: Enhancements:
@ -268,6 +415,9 @@ Enhancements:
* [#490][]: Add a `ContextMap` method to observer logs for simpler * [#490][]: Add a `ContextMap` method to observer logs for simpler
field validation in tests. field validation in tests.
[#490]: https://github.com/uber-go/zap/pull/490
[#491]: https://github.com/uber-go/zap/pull/491
## v1.5.0 (22 Jul 2017) ## v1.5.0 (22 Jul 2017)
Enhancements: Enhancements:
@ -281,6 +431,11 @@ Bugfixes:
Thanks to @richard-tunein and @pavius for their contributions to this release. Thanks to @richard-tunein and @pavius for their contributions to this release.
[#477]: https://github.com/uber-go/zap/pull/477
[#465]: https://github.com/uber-go/zap/pull/465
[#460]: https://github.com/uber-go/zap/pull/460
[#470]: https://github.com/uber-go/zap/pull/470
## v1.4.1 (08 Jun 2017) ## v1.4.1 (08 Jun 2017)
This release fixes two bugs. This release fixes two bugs.
@ -290,6 +445,9 @@ Bugfixes:
* [#435][]: Support a variety of case conventions when unmarshaling levels. * [#435][]: Support a variety of case conventions when unmarshaling levels.
* [#444][]: Fix a panic in the observer. * [#444][]: Fix a panic in the observer.
[#435]: https://github.com/uber-go/zap/pull/435
[#444]: https://github.com/uber-go/zap/pull/444
## v1.4.0 (12 May 2017) ## v1.4.0 (12 May 2017)
This release adds a few small features and is fully backward-compatible. This release adds a few small features and is fully backward-compatible.
@ -302,6 +460,10 @@ Enhancements:
* [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a * [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a
variety of operations a bit simpler. variety of operations a bit simpler.
[#424]: https://github.com/uber-go/zap/pull/424
[#425]: https://github.com/uber-go/zap/pull/425
[#431]: https://github.com/uber-go/zap/pull/431
## v1.3.0 (25 Apr 2017) ## v1.3.0 (25 Apr 2017)
This release adds an enhancement to zap's testing helpers as well as the This release adds an enhancement to zap's testing helpers as well as the
@ -313,6 +475,9 @@ Enhancements:
particularly useful when testing the `SugaredLogger`. particularly useful when testing the `SugaredLogger`.
* [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`. * [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`.
[#415]: https://github.com/uber-go/zap/pull/415
[#416]: https://github.com/uber-go/zap/pull/416
## v1.2.0 (13 Apr 2017) ## v1.2.0 (13 Apr 2017)
This release adds a gRPC compatibility wrapper. It is fully backward-compatible. This release adds a gRPC compatibility wrapper. It is fully backward-compatible.
@ -322,6 +487,8 @@ Enhancements:
* [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements * [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements
`grpclog.Logger`. `grpclog.Logger`.
[#402]: https://github.com/uber-go/zap/pull/402
## v1.1.0 (31 Mar 2017) ## v1.1.0 (31 Mar 2017)
This release fixes two bugs and adds some enhancements to zap's testing helpers. This release fixes two bugs and adds some enhancements to zap's testing helpers.
@ -339,6 +506,10 @@ Enhancements:
Thanks to @moitias for contributing to this release. Thanks to @moitias for contributing to this release.
[#385]: https://github.com/uber-go/zap/pull/385
[#396]: https://github.com/uber-go/zap/pull/396
[#386]: https://github.com/uber-go/zap/pull/386
## v1.0.0 (14 Mar 2017) ## v1.0.0 (14 Mar 2017)
This is zap's first stable release. All exported APIs are now final, and no This is zap's first stable release. All exported APIs are now final, and no
@ -384,6 +555,20 @@ Enhancements:
Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their
contributions to this release. contributions to this release.
[#366]: https://github.com/uber-go/zap/pull/366
[#364]: https://github.com/uber-go/zap/pull/364
[#371]: https://github.com/uber-go/zap/pull/371
[#362]: https://github.com/uber-go/zap/pull/362
[#369]: https://github.com/uber-go/zap/pull/369
[#347]: https://github.com/uber-go/zap/pull/347
[#373]: https://github.com/uber-go/zap/pull/373
[#348]: https://github.com/uber-go/zap/pull/348
[#327]: https://github.com/uber-go/zap/pull/327
[#376]: https://github.com/uber-go/zap/pull/376
[#346]: https://github.com/uber-go/zap/pull/346
[#365]: https://github.com/uber-go/zap/pull/365
[#372]: https://github.com/uber-go/zap/pull/372
## v1.0.0-rc.3 (7 Mar 2017) ## v1.0.0-rc.3 (7 Mar 2017)
This is the third release candidate for zap's stable release. There are no This is the third release candidate for zap's stable release. There are no
@ -405,6 +590,11 @@ Enhancements:
Thanks to @ansel1 and @suyash for their contributions to this release. Thanks to @ansel1 and @suyash for their contributions to this release.
[#339]: https://github.com/uber-go/zap/pull/339
[#307]: https://github.com/uber-go/zap/pull/307
[#353]: https://github.com/uber-go/zap/pull/353
[#311]: https://github.com/uber-go/zap/pull/311
## v1.0.0-rc.2 (21 Feb 2017) ## v1.0.0-rc.2 (21 Feb 2017)
This is the second release candidate for zap's stable release. It includes two This is the second release candidate for zap's stable release. It includes two
@ -442,6 +632,15 @@ Enhancements:
Thanks to @skipor and @chapsuk for their contributions to this release. Thanks to @skipor and @chapsuk for their contributions to this release.
[#316]: https://github.com/uber-go/zap/pull/316
[#309]: https://github.com/uber-go/zap/pull/309
[#317]: https://github.com/uber-go/zap/pull/317
[#321]: https://github.com/uber-go/zap/pull/321
[#325]: https://github.com/uber-go/zap/pull/325
[#333]: https://github.com/uber-go/zap/pull/333
[#326]: https://github.com/uber-go/zap/pull/326
[#300]: https://github.com/uber-go/zap/pull/300
## v1.0.0-rc.1 (14 Feb 2017) ## v1.0.0-rc.1 (14 Feb 2017)
This is the first release candidate for zap's stable release. There are multiple This is the first release candidate for zap's stable release. There are multiple
@ -470,95 +669,3 @@ backward compatibility concerns and all functionality is new.
Early zap adopters should pin to the 0.1.x minor version until they're ready to Early zap adopters should pin to the 0.1.x minor version until they're ready to
upgrade to the upcoming stable release. upgrade to the upcoming stable release.
[#316]: https://github.com/uber-go/zap/pull/316
[#309]: https://github.com/uber-go/zap/pull/309
[#317]: https://github.com/uber-go/zap/pull/317
[#321]: https://github.com/uber-go/zap/pull/321
[#325]: https://github.com/uber-go/zap/pull/325
[#333]: https://github.com/uber-go/zap/pull/333
[#326]: https://github.com/uber-go/zap/pull/326
[#300]: https://github.com/uber-go/zap/pull/300
[#339]: https://github.com/uber-go/zap/pull/339
[#307]: https://github.com/uber-go/zap/pull/307
[#353]: https://github.com/uber-go/zap/pull/353
[#311]: https://github.com/uber-go/zap/pull/311
[#366]: https://github.com/uber-go/zap/pull/366
[#364]: https://github.com/uber-go/zap/pull/364
[#371]: https://github.com/uber-go/zap/pull/371
[#362]: https://github.com/uber-go/zap/pull/362
[#369]: https://github.com/uber-go/zap/pull/369
[#347]: https://github.com/uber-go/zap/pull/347
[#373]: https://github.com/uber-go/zap/pull/373
[#348]: https://github.com/uber-go/zap/pull/348
[#327]: https://github.com/uber-go/zap/pull/327
[#376]: https://github.com/uber-go/zap/pull/376
[#346]: https://github.com/uber-go/zap/pull/346
[#365]: https://github.com/uber-go/zap/pull/365
[#372]: https://github.com/uber-go/zap/pull/372
[#385]: https://github.com/uber-go/zap/pull/385
[#396]: https://github.com/uber-go/zap/pull/396
[#386]: https://github.com/uber-go/zap/pull/386
[#402]: https://github.com/uber-go/zap/pull/402
[#415]: https://github.com/uber-go/zap/pull/415
[#416]: https://github.com/uber-go/zap/pull/416
[#424]: https://github.com/uber-go/zap/pull/424
[#425]: https://github.com/uber-go/zap/pull/425
[#431]: https://github.com/uber-go/zap/pull/431
[#435]: https://github.com/uber-go/zap/pull/435
[#444]: https://github.com/uber-go/zap/pull/444
[#477]: https://github.com/uber-go/zap/pull/477
[#465]: https://github.com/uber-go/zap/pull/465
[#460]: https://github.com/uber-go/zap/pull/460
[#470]: https://github.com/uber-go/zap/pull/470
[#487]: https://github.com/uber-go/zap/pull/487
[#490]: https://github.com/uber-go/zap/pull/490
[#491]: https://github.com/uber-go/zap/pull/491
[#504]: https://github.com/uber-go/zap/pull/504
[#508]: https://github.com/uber-go/zap/pull/508
[#518]: https://github.com/uber-go/zap/pull/518
[#577]: https://github.com/uber-go/zap/pull/577
[#574]: https://github.com/uber-go/zap/pull/574
[#602]: https://github.com/uber-go/zap/pull/602
[#572]: https://github.com/uber-go/zap/pull/572
[#606]: https://github.com/uber-go/zap/pull/606
[#614]: https://github.com/uber-go/zap/pull/614
[#657]: https://github.com/uber-go/zap/pull/657
[#706]: https://github.com/uber-go/zap/pull/706
[#610]: https://github.com/uber-go/zap/pull/610
[#675]: https://github.com/uber-go/zap/pull/675
[#704]: https://github.com/uber-go/zap/pull/704
[#725]: https://github.com/uber-go/zap/pull/725
[#736]: https://github.com/uber-go/zap/pull/736
[#751]: https://github.com/uber-go/zap/pull/751
[#758]: https://github.com/uber-go/zap/pull/758
[#771]: https://github.com/uber-go/zap/pull/771
[#773]: https://github.com/uber-go/zap/pull/773
[#775]: https://github.com/uber-go/zap/pull/775
[#786]: https://github.com/uber-go/zap/pull/786
[#791]: https://github.com/uber-go/zap/pull/791
[#795]: https://github.com/uber-go/zap/pull/795
[#799]: https://github.com/uber-go/zap/pull/799
[#804]: https://github.com/uber-go/zap/pull/804
[#812]: https://github.com/uber-go/zap/pull/812
[#806]: https://github.com/uber-go/zap/pull/806
[#813]: https://github.com/uber-go/zap/pull/813
[#629]: https://github.com/uber-go/zap/pull/629
[#697]: https://github.com/uber-go/zap/pull/697
[#828]: https://github.com/uber-go/zap/pull/828
[#835]: https://github.com/uber-go/zap/pull/835
[#843]: https://github.com/uber-go/zap/pull/843
[#844]: https://github.com/uber-go/zap/pull/844
[#852]: https://github.com/uber-go/zap/pull/852
[#854]: https://github.com/uber-go/zap/pull/854
[#861]: https://github.com/uber-go/zap/pull/861
[#862]: https://github.com/uber-go/zap/pull/862
[#865]: https://github.com/uber-go/zap/pull/865
[#867]: https://github.com/uber-go/zap/pull/867
[#881]: https://github.com/uber-go/zap/pull/881
[#903]: https://github.com/uber-go/zap/pull/903
[#912]: https://github.com/uber-go/zap/pull/912
[#913]: https://github.com/uber-go/zap/pull/913
[#928]: https://github.com/uber-go/zap/pull/928
[#931]: https://github.com/uber-go/zap/pull/931
[#936]: https://github.com/uber-go/zap/pull/936

View File

@ -16,7 +16,7 @@ you to accept the CLA when you open your pull request.
[Fork][fork], then clone the repository: [Fork][fork], then clone the repository:
``` ```bash
mkdir -p $GOPATH/src/go.uber.org mkdir -p $GOPATH/src/go.uber.org
cd $GOPATH/src/go.uber.org cd $GOPATH/src/go.uber.org
git clone git@github.com:your_github_username/zap.git git clone git@github.com:your_github_username/zap.git
@ -27,21 +27,16 @@ git fetch upstream
Make sure that the tests and the linters pass: Make sure that the tests and the linters pass:
``` ```bash
make test make test
make lint make lint
``` ```
If you're not using the minor version of Go specified in the Makefile's
`LINTABLE_MINOR_VERSIONS` variable, `make lint` doesn't do anything. This is
fine, but it means that you'll only discover lint failures after you open your
pull request.
## Making Changes ## Making Changes
Start by creating a new branch for your changes: Start by creating a new branch for your changes:
``` ```bash
cd $GOPATH/src/go.uber.org/zap cd $GOPATH/src/go.uber.org/zap
git checkout master git checkout master
git fetch upstream git fetch upstream
@ -52,22 +47,22 @@ git checkout -b cool_new_feature
Make your changes, then ensure that `make lint` and `make test` still pass. If Make your changes, then ensure that `make lint` and `make test` still pass. If
you're satisfied with your changes, push them to your fork. you're satisfied with your changes, push them to your fork.
``` ```bash
git push origin cool_new_feature git push origin cool_new_feature
``` ```
Then use the GitHub UI to open a pull request. Then use the GitHub UI to open a pull request.
At this point, you're waiting on us to review your changes. We *try* to respond At this point, you're waiting on us to review your changes. We _try_ to respond
to issues and pull requests within a few business days, and we may suggest some to issues and pull requests within a few business days, and we may suggest some
improvements or alternatives. Once your changes are approved, one of the improvements or alternatives. Once your changes are approved, one of the
project maintainers will merge them. project maintainers will merge them.
We're much more likely to approve your changes if you: We're much more likely to approve your changes if you:
* Add tests for new functionality. - Add tests for new functionality.
* Write a [good commit message][commit-message]. - Write a [good commit message][commit-message].
* Maintain backward compatibility. - Maintain backward compatibility.
[fork]: https://github.com/uber-go/zap/fork [fork]: https://github.com/uber-go/zap/fork
[open-issue]: https://github.com/uber-go/zap/issues/new [open-issue]: https://github.com/uber-go/zap/issues/new

83
vendor/go.uber.org/zap/Makefile generated vendored
View File

@ -1,50 +1,51 @@
export GOBIN ?= $(shell pwd)/bin # Directory containing the Makefile.
PROJECT_ROOT = $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
GOLINT = $(GOBIN)/golint export GOBIN ?= $(PROJECT_ROOT)/bin
STATICCHECK = $(GOBIN)/staticcheck export PATH := $(GOBIN):$(PATH)
GOVULNCHECK = $(GOBIN)/govulncheck
BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem
# Directories containing independent Go modules. # Directories containing independent Go modules.
# MODULE_DIRS = . ./exp ./benchmarks ./zapgrpc/internal/test
# We track coverage only for the main module.
MODULE_DIRS = . ./benchmarks ./zapgrpc/internal/test
# Many Go tools take file globs or directories as arguments instead of packages. # Directories that we want to track coverage for.
GO_FILES := $(shell \ COVER_DIRS = . ./exp
find . '(' -path '*/.*' -o -path './vendor' ')' -prune \
-o -name '*.go' -print | cut -b3-)
.PHONY: all .PHONY: all
all: lint test all: lint test
.PHONY: lint .PHONY: lint
lint: $(GOLINT) $(STATICCHECK) lint: golangci-lint tidy-lint license-lint
@rm -rf lint.log
@echo "Checking formatting..."
@gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log
@echo "Checking vet..."
@$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go vet ./... 2>&1) &&) true | tee -a lint.log
@echo "Checking lint..."
@$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(GOLINT) ./... 2>&1) &&) true | tee -a lint.log
@echo "Checking staticcheck..."
@$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(STATICCHECK) ./... 2>&1) &&) true | tee -a lint.log
@echo "Checking for unresolved FIXMEs..."
@git grep -i fixme | grep -v -e Makefile | tee -a lint.log
@echo "Checking for license headers..."
@./checklicense.sh | tee -a lint.log
@[ ! -s lint.log ]
@echo "Checking 'go mod tidy'..."
@make tidy
@if ! git diff --quiet; then \
echo "'go mod tidy' resulted in changes or working tree is dirty:"; \
git --no-pager diff; \
fi
$(GOLINT): .PHONY: golangci-lint
cd tools && go install golang.org/x/lint/golint golangci-lint:
@$(foreach mod,$(MODULE_DIRS), \
(cd $(mod) && \
echo "[lint] golangci-lint: $(mod)" && \
golangci-lint run --path-prefix $(mod)) &&) true
$(STATICCHECK): .PHONY: tidy
cd tools && go install honnef.co/go/tools/cmd/staticcheck tidy:
@$(foreach dir,$(MODULE_DIRS), \
(cd $(dir) && go mod tidy) &&) true
.PHONY: tidy-lint
tidy-lint:
@$(foreach mod,$(MODULE_DIRS), \
(cd $(mod) && \
echo "[lint] tidy: $(mod)" && \
go mod tidy && \
git diff --exit-code -- go.mod go.sum) &&) true
.PHONY: license-lint
license-lint:
./checklicense.sh
$(GOVULNCHECK):
cd tools && go install golang.org/x/vuln/cmd/govulncheck
.PHONY: test .PHONY: test
test: test:
@ -52,8 +53,10 @@ test:
.PHONY: cover .PHONY: cover
cover: cover:
go test -race -coverprofile=cover.out -coverpkg=./... ./... @$(foreach dir,$(COVER_DIRS), ( \
go tool cover -html=cover.out -o cover.html cd $(dir) && \
go test -race -coverprofile=cover.out -coverpkg=./... ./... \
&& go tool cover -html=cover.out -o cover.html) &&) true
.PHONY: bench .PHONY: bench
BENCH ?= . BENCH ?= .
@ -68,6 +71,6 @@ updatereadme:
rm -f README.md rm -f README.md
cat .readme.tmpl | go run internal/readme/readme.go > README.md cat .readme.tmpl | go run internal/readme/readme.go > README.md
.PHONY: tidy .PHONY: vulncheck
tidy: vulncheck: $(GOVULNCHECK)
@$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go mod tidy) &&) true $(GOVULNCHECK) ./...

47
vendor/go.uber.org/zap/README.md generated vendored
View File

@ -66,38 +66,41 @@ Log a message and 10 fields:
| Package | Time | Time % to zap | Objects Allocated | | Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: | | :------ | :--: | :-----------: | :---------------: |
| :zap: zap | 2900 ns/op | +0% | 5 allocs/op | :zap: zap | 1744 ns/op | +0% | 5 allocs/op
| :zap: zap (sugared) | 3475 ns/op | +20% | 10 allocs/op | :zap: zap (sugared) | 2483 ns/op | +42% | 10 allocs/op
| zerolog | 10639 ns/op | +267% | 32 allocs/op | zerolog | 918 ns/op | -47% | 1 allocs/op
| go-kit | 14434 ns/op | +398% | 59 allocs/op | go-kit | 5590 ns/op | +221% | 57 allocs/op
| logrus | 17104 ns/op | +490% | 81 allocs/op | slog | 5640 ns/op | +223% | 40 allocs/op
| apex/log | 32424 ns/op | +1018% | 66 allocs/op | apex/log | 21184 ns/op | +1115% | 63 allocs/op
| log15 | 33579 ns/op | +1058% | 76 allocs/op | logrus | 24338 ns/op | +1296% | 79 allocs/op
| log15 | 26054 ns/op | +1394% | 74 allocs/op
Log a message with a logger that already has 10 fields of context: Log a message with a logger that already has 10 fields of context:
| Package | Time | Time % to zap | Objects Allocated | | Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: | | :------ | :--: | :-----------: | :---------------: |
| :zap: zap | 373 ns/op | +0% | 0 allocs/op | :zap: zap | 193 ns/op | +0% | 0 allocs/op
| :zap: zap (sugared) | 452 ns/op | +21% | 1 allocs/op | :zap: zap (sugared) | 227 ns/op | +18% | 1 allocs/op
| zerolog | 288 ns/op | -23% | 0 allocs/op | zerolog | 81 ns/op | -58% | 0 allocs/op
| go-kit | 11785 ns/op | +3060% | 58 allocs/op | slog | 322 ns/op | +67% | 0 allocs/op
| logrus | 19629 ns/op | +5162% | 70 allocs/op | go-kit | 5377 ns/op | +2686% | 56 allocs/op
| log15 | 21866 ns/op | +5762% | 72 allocs/op | apex/log | 19518 ns/op | +10013% | 53 allocs/op
| apex/log | 30890 ns/op | +8182% | 55 allocs/op | log15 | 19812 ns/op | +10165% | 70 allocs/op
| logrus | 21997 ns/op | +11297% | 68 allocs/op
Log a static string, without any context or `printf`-style templating: Log a static string, without any context or `printf`-style templating:
| Package | Time | Time % to zap | Objects Allocated | | Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: | | :------ | :--: | :-----------: | :---------------: |
| :zap: zap | 381 ns/op | +0% | 0 allocs/op | :zap: zap | 165 ns/op | +0% | 0 allocs/op
| :zap: zap (sugared) | 410 ns/op | +8% | 1 allocs/op | :zap: zap (sugared) | 212 ns/op | +28% | 1 allocs/op
| zerolog | 369 ns/op | -3% | 0 allocs/op | zerolog | 95 ns/op | -42% | 0 allocs/op
| standard library | 385 ns/op | +1% | 2 allocs/op | slog | 296 ns/op | +79% | 0 allocs/op
| go-kit | 606 ns/op | +59% | 11 allocs/op | go-kit | 415 ns/op | +152% | 9 allocs/op
| logrus | 1730 ns/op | +354% | 25 allocs/op | standard library | 422 ns/op | +156% | 2 allocs/op
| apex/log | 1998 ns/op | +424% | 7 allocs/op | apex/log | 1601 ns/op | +870% | 5 allocs/op
| log15 | 4546 ns/op | +1093% | 22 allocs/op | logrus | 3017 ns/op | +1728% | 23 allocs/op
| log15 | 3469 ns/op | +2002% | 20 allocs/op
## Development Status: Stable ## Development Status: Stable

127
vendor/go.uber.org/zap/array.go generated vendored
View File

@ -21,6 +21,7 @@
package zap package zap
import ( import (
"fmt"
"time" "time"
"go.uber.org/zap/zapcore" "go.uber.org/zap/zapcore"
@ -94,11 +95,137 @@ func Int8s(key string, nums []int8) Field {
return Array(key, int8s(nums)) return Array(key, int8s(nums))
} }
// Objects constructs a field with the given key, holding a list of the
// provided objects that can be marshaled by Zap.
//
// Note that these objects must implement zapcore.ObjectMarshaler directly.
// That is, if you're trying to marshal a []Request, the MarshalLogObject
// method must be declared on the Request type, not its pointer (*Request).
// If it's on the pointer, use ObjectValues.
//
// Given an object that implements MarshalLogObject on the value receiver, you
// can log a slice of those objects with Objects like so:
//
// type Author struct{ ... }
// func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error
//
// var authors []Author = ...
// logger.Info("loading article", zap.Objects("authors", authors))
//
// Similarly, given a type that implements MarshalLogObject on its pointer
// receiver, you can log a slice of pointers to that object with Objects like
// so:
//
// type Request struct{ ... }
// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
//
// var requests []*Request = ...
// logger.Info("sending requests", zap.Objects("requests", requests))
//
// If instead, you have a slice of values of such an object, use the
// ObjectValues constructor.
//
// var requests []Request = ...
// logger.Info("sending requests", zap.ObjectValues("requests", requests))
func Objects[T zapcore.ObjectMarshaler](key string, values []T) Field {
return Array(key, objects[T](values))
}
type objects[T zapcore.ObjectMarshaler] []T
func (os objects[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
for _, o := range os {
if err := arr.AppendObject(o); err != nil {
return err
}
}
return nil
}
// ObjectMarshalerPtr is a constraint that specifies that the given type
// implements zapcore.ObjectMarshaler on a pointer receiver.
type ObjectMarshalerPtr[T any] interface {
*T
zapcore.ObjectMarshaler
}
// ObjectValues constructs a field with the given key, holding a list of the
// provided objects, where pointers to these objects can be marshaled by Zap.
//
// Note that pointers to these objects must implement zapcore.ObjectMarshaler.
// That is, if you're trying to marshal a []Request, the MarshalLogObject
// method must be declared on the *Request type, not the value (Request).
// If it's on the value, use Objects.
//
// Given an object that implements MarshalLogObject on the pointer receiver,
// you can log a slice of those objects with ObjectValues like so:
//
// type Request struct{ ... }
// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
//
// var requests []Request = ...
// logger.Info("sending requests", zap.ObjectValues("requests", requests))
//
// If instead, you have a slice of pointers of such an object, use the Objects
// field constructor.
//
// var requests []*Request = ...
// logger.Info("sending requests", zap.Objects("requests", requests))
func ObjectValues[T any, P ObjectMarshalerPtr[T]](key string, values []T) Field {
return Array(key, objectValues[T, P](values))
}
type objectValues[T any, P ObjectMarshalerPtr[T]] []T
func (os objectValues[T, P]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
for i := range os {
// It is necessary for us to explicitly reference the "P" type.
// We cannot simply pass "&os[i]" to AppendObject because its type
// is "*T", which the type system does not consider as
// implementing ObjectMarshaler.
// Only the type "P" satisfies ObjectMarshaler, which we have
// to convert "*T" to explicitly.
var p P = &os[i]
if err := arr.AppendObject(p); err != nil {
return err
}
}
return nil
}
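Editor's usage sketch for the `Objects` and `ObjectValues` constructors above; the `Author` and `Request` types are illustrative stand-ins, not part of this file:

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// Author marshals itself on the value receiver, so it works with zap.Objects.
type Author struct{ Name string }

func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("name", a.Name)
	return nil
}

// Request marshals itself on the pointer receiver, so a []Request needs
// zap.ObjectValues (or a []*Request with zap.Objects).
type Request struct{ URL string }

func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("url", r.URL)
	return nil
}

func main() {
	logger := zap.NewExample()
	logger.Info("loading article",
		zap.Objects("authors", []Author{{Name: "ada"}}),
		zap.ObjectValues("requests", []Request{{URL: "http://example.com"}}),
	)
}
```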
// Strings constructs a field that carries a slice of strings. // Strings constructs a field that carries a slice of strings.
func Strings(key string, ss []string) Field { func Strings(key string, ss []string) Field {
return Array(key, stringArray(ss)) return Array(key, stringArray(ss))
} }
// Stringers constructs a field with the given key, holding a list of the
// output provided by the value's String method.
//
// Given an object that implements String on the value receiver, you
// can log a slice of those objects with Stringers like so:
//
// type Request struct{ ... }
// func (a Request) String() string
//
// var requests []Request = ...
// logger.Info("sending requests", zap.Stringers("requests", requests))
//
// Note that these objects must implement fmt.Stringer directly.
// That is, if you're trying to marshal a []Request, the String method
// must be declared on the Request type, not its pointer (*Request).
func Stringers[T fmt.Stringer](key string, values []T) Field {
return Array(key, stringers[T](values))
}
type stringers[T fmt.Stringer] []T
func (os stringers[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
for _, o := range os {
arr.AppendString(o.String())
}
return nil
}
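Editor's usage sketch for `Stringers`, relying on `net.IP`'s value-receiver `String` method:

```go
package main

import (
	"net"

	"go.uber.org/zap"
)

func main() {
	// net.IP implements fmt.Stringer on the value receiver, so a []net.IP
	// can be logged directly with Stringers.
	peers := []net.IP{net.IPv4(10, 0, 0, 1), net.IPv4(10, 0, 0, 2)}
	zap.NewExample().Info("connected", zap.Stringers("peers", peers))
}
```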
// Times constructs a field that carries a slice of time.Times. // Times constructs a field that carries a slice of time.Times.
func Times(key string, ts []time.Time) Field { func Times(key string, ts []time.Time) Field {
return Array(key, times(ts)) return Array(key, times(ts))

View File

@ -42,6 +42,11 @@ func (b *Buffer) AppendByte(v byte) {
b.bs = append(b.bs, v) b.bs = append(b.bs, v)
} }
// AppendBytes writes a slice of bytes to the Buffer.
func (b *Buffer) AppendBytes(v []byte) {
b.bs = append(b.bs, v...)
}
// AppendString writes a string to the Buffer. // AppendString writes a string to the Buffer.
func (b *Buffer) AppendString(s string) { func (b *Buffer) AppendString(s string) {
b.bs = append(b.bs, s...) b.bs = append(b.bs, s...)

View File

@ -20,25 +20,29 @@
package buffer package buffer
import "sync" import (
"go.uber.org/zap/internal/pool"
)
// A Pool is a type-safe wrapper around a sync.Pool. // A Pool is a type-safe wrapper around a sync.Pool.
type Pool struct { type Pool struct {
p *sync.Pool p *pool.Pool[*Buffer]
} }
// NewPool constructs a new Pool. // NewPool constructs a new Pool.
func NewPool() Pool { func NewPool() Pool {
return Pool{p: &sync.Pool{ return Pool{
New: func() interface{} { p: pool.New(func() *Buffer {
return &Buffer{bs: make([]byte, 0, _size)} return &Buffer{
}, bs: make([]byte, 0, _size),
}} }
}),
}
} }
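The `internal/pool` package used here cannot be imported by outside code, so the following self-contained sketch (editor's names, not zap's) shows the same type-safe generic wrapper pattern around `sync.Pool`:

```go
package main

import (
	"bytes"
	"fmt"
	"sync"
)

// Pool is a minimal generic wrapper that hides sync.Pool's interface{} round trip.
type Pool[T any] struct{ p sync.Pool }

// New builds a Pool whose Get falls back to fn when the pool is empty.
func New[T any](fn func() T) *Pool[T] {
	return &Pool[T]{p: sync.Pool{New: func() any { return fn() }}}
}

func (p *Pool[T]) Get() T  { return p.p.Get().(T) }
func (p *Pool[T]) Put(x T) { p.p.Put(x) }

func main() {
	bufs := New(func() *bytes.Buffer { return &bytes.Buffer{} })
	b := bufs.Get()
	b.Reset()
	b.WriteString("hello")
	fmt.Println(b.String())
	bufs.Put(b)
}
```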
// Get retrieves a Buffer from the pool, creating one if necessary. // Get retrieves a Buffer from the pool, creating one if necessary.
func (p Pool) Get() *Buffer { func (p Pool) Get() *Buffer {
buf := p.p.Get().(*Buffer) buf := p.p.Get()
buf.Reset() buf.Reset()
buf.pool = p buf.pool = p
return buf return buf

88
vendor/go.uber.org/zap/config.go generated vendored
View File

@ -21,7 +21,7 @@
package zap package zap
import ( import (
"fmt" "errors"
"sort" "sort"
"time" "time"
@ -95,6 +95,32 @@ type Config struct {
// NewProductionEncoderConfig returns an opinionated EncoderConfig for // NewProductionEncoderConfig returns an opinionated EncoderConfig for
// production environments. // production environments.
//
// Messages encoded with this configuration will be JSON-formatted
// and will have the following keys by default:
//
// - "level": The logging level (e.g. "info", "error").
// - "ts": The current time in number of seconds since the Unix epoch.
// - "msg": The message passed to the log statement.
// - "caller": If available, a short path to the file and line number
// where the log statement was issued.
// The logger configuration determines whether this field is captured.
// - "stacktrace": If available, a stack trace from the line
// where the log statement was issued.
// The logger configuration determines whether this field is captured.
//
// By default, the following formats are used for different types:
//
// - Time is formatted as floating-point number of seconds since the Unix
// epoch.
// - Duration is formatted as floating-point number of seconds.
//
// You may change these by setting the appropriate fields in the returned
// object.
// For example, use the following to change the time encoding format:
//
// cfg := zap.NewProductionEncoderConfig()
// cfg.EncodeTime = zapcore.ISO8601TimeEncoder
func NewProductionEncoderConfig() zapcore.EncoderConfig { func NewProductionEncoderConfig() zapcore.EncoderConfig {
return zapcore.EncoderConfig{ return zapcore.EncoderConfig{
TimeKey: "ts", TimeKey: "ts",
@ -112,11 +138,22 @@ func NewProductionEncoderConfig() zapcore.EncoderConfig {
} }
} }
// NewProductionConfig is a reasonable production logging configuration. // NewProductionConfig builds a reasonable default production logging
// Logging is enabled at InfoLevel and above. // configuration.
// Logging is enabled at InfoLevel and above, and uses a JSON encoder.
// Logs are written to standard error.
// Stacktraces are included on logs of ErrorLevel and above.
// DPanicLevel logs will not panic, but will write a stacktrace.
// //
// It uses a JSON encoder, writes to standard error, and enables sampling. // Sampling is enabled at 100:100 by default,
// Stacktraces are automatically included on logs of ErrorLevel and above. // meaning that after the first 100 log entries
// with the same level and message in the same second,
// it will log every 100th entry
// with the same level and message in the same second.
// You may disable this behavior by setting Sampling to nil.
//
// See [NewProductionEncoderConfig] for information
// on the default encoder configuration.
func NewProductionConfig() Config { func NewProductionConfig() Config {
return Config{ return Config{
Level: NewAtomicLevelAt(InfoLevel), Level: NewAtomicLevelAt(InfoLevel),
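Editor's sketch of customizing these presets as the new doc comments suggest; the ISO8601 switch and disabled sampling are illustrative choices, not defaults:

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	cfg := zap.NewProductionConfig()

	// Swap the epoch-seconds timestamps for ISO8601, as the encoder config
	// docs above describe, and turn sampling off entirely.
	cfg.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
	cfg.Sampling = nil

	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	logger.Info("configured", zap.String("encoding", cfg.Encoding))
}
```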
@ -134,6 +171,32 @@ func NewProductionConfig() Config {
// NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for // NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for
// development environments. // development environments.
//
// Messages encoded with this configuration will use Zap's console encoder
// intended to print human-readable output.
// It will print log messages with the following information:
//
// - The log level (e.g. "INFO", "ERROR").
// - The time in ISO8601 format (e.g. "2017-01-01T12:00:00Z").
// - The message passed to the log statement.
// - If available, a short path to the file and line number
// where the log statement was issued.
// The logger configuration determines whether this field is captured.
// - If available, a stacktrace from the line
// where the log statement was issued.
// The logger configuration determines whether this field is captured.
//
// By default, the following formats are used for different types:
//
// - Time is formatted in ISO8601 format (e.g. "2017-01-01T12:00:00Z").
// - Duration is formatted as a string (e.g. "1.234s").
//
// You may change these by setting the appropriate fields in the returned
// object.
// For example, use the following to change the time encoding format:
//
// cfg := zap.NewDevelopmentEncoderConfig()
// cfg.EncodeTime = zapcore.ISO8601TimeEncoder
func NewDevelopmentEncoderConfig() zapcore.EncoderConfig { func NewDevelopmentEncoderConfig() zapcore.EncoderConfig {
return zapcore.EncoderConfig{ return zapcore.EncoderConfig{
// Keys can be anything except the empty string. // Keys can be anything except the empty string.
@ -152,12 +215,15 @@ func NewDevelopmentEncoderConfig() zapcore.EncoderConfig {
} }
} }
// NewDevelopmentConfig is a reasonable development logging configuration. // NewDevelopmentConfig builds a reasonable default development logging
// Logging is enabled at DebugLevel and above. // configuration.
// Logging is enabled at DebugLevel and above, and uses a console encoder.
// Logs are written to standard error.
// Stacktraces are included on logs of WarnLevel and above.
// DPanicLevel logs will panic.
// //
// It enables development mode (which makes DPanicLevel logs panic), uses a // See [NewDevelopmentEncoderConfig] for information
// console encoder, writes to standard error, and disables sampling. // on the default encoder configuration.
// Stacktraces are automatically included on logs of WarnLevel and above.
func NewDevelopmentConfig() Config { func NewDevelopmentConfig() Config {
return Config{ return Config{
Level: NewAtomicLevelAt(DebugLevel), Level: NewAtomicLevelAt(DebugLevel),
@ -182,7 +248,7 @@ func (cfg Config) Build(opts ...Option) (*Logger, error) {
} }
if cfg.Level == (AtomicLevel{}) { if cfg.Level == (AtomicLevel{}) {
return nil, fmt.Errorf("missing Level") return nil, errors.New("missing Level")
} }
log := New( log := New(

60
vendor/go.uber.org/zap/doc.go generated vendored
View File

@ -32,7 +32,7 @@
// they need to count every allocation and when they'd prefer a more familiar, // they need to count every allocation and when they'd prefer a more familiar,
// loosely typed API. // loosely typed API.
// //
// Choosing a Logger // # Choosing a Logger
// //
// In contexts where performance is nice, but not critical, use the // In contexts where performance is nice, but not critical, use the
// SugaredLogger. It's 4-10x faster than other structured logging packages and // SugaredLogger. It's 4-10x faster than other structured logging packages and
@ -41,14 +41,15 @@
// variadic number of key-value pairs. (For more advanced use cases, they also // variadic number of key-value pairs. (For more advanced use cases, they also
// accept strongly typed fields - see the SugaredLogger.With documentation for // accept strongly typed fields - see the SugaredLogger.With documentation for
// details.) // details.)
// sugar := zap.NewExample().Sugar() //
// defer sugar.Sync() // sugar := zap.NewExample().Sugar()
// sugar.Infow("failed to fetch URL", // defer sugar.Sync()
// "url", "http://example.com", // sugar.Infow("failed to fetch URL",
// "attempt", 3, // "url", "http://example.com",
// "backoff", time.Second, // "attempt", 3,
// ) // "backoff", time.Second,
// sugar.Infof("failed to fetch URL: %s", "http://example.com") // )
// sugar.Infof("failed to fetch URL: %s", "http://example.com")
// //
// By default, loggers are unbuffered. However, since zap's low-level APIs // By default, loggers are unbuffered. However, since zap's low-level APIs
// allow buffering, calling Sync before letting your process exit is a good // allow buffering, calling Sync before letting your process exit is a good
@ -57,32 +58,35 @@
// In the rare contexts where every microsecond and every allocation matter, // In the rare contexts where every microsecond and every allocation matter,
// use the Logger. It's even faster than the SugaredLogger and allocates far // use the Logger. It's even faster than the SugaredLogger and allocates far
// less, but it only supports strongly-typed, structured logging. // less, but it only supports strongly-typed, structured logging.
// logger := zap.NewExample() //
// defer logger.Sync() // logger := zap.NewExample()
// logger.Info("failed to fetch URL", // defer logger.Sync()
// zap.String("url", "http://example.com"), // logger.Info("failed to fetch URL",
// zap.Int("attempt", 3), // zap.String("url", "http://example.com"),
// zap.Duration("backoff", time.Second), // zap.Int("attempt", 3),
// ) // zap.Duration("backoff", time.Second),
// )
// //
// Choosing between the Logger and SugaredLogger doesn't need to be an // Choosing between the Logger and SugaredLogger doesn't need to be an
// application-wide decision: converting between the two is simple and // application-wide decision: converting between the two is simple and
// inexpensive. // inexpensive.
// logger := zap.NewExample()
// defer logger.Sync()
// sugar := logger.Sugar()
// plain := sugar.Desugar()
// //
// Configuring Zap // logger := zap.NewExample()
// defer logger.Sync()
// sugar := logger.Sugar()
// plain := sugar.Desugar()
//
// # Configuring Zap
// //
// The simplest way to build a Logger is to use zap's opinionated presets: // The simplest way to build a Logger is to use zap's opinionated presets:
// NewExample, NewProduction, and NewDevelopment. These presets build a logger // NewExample, NewProduction, and NewDevelopment. These presets build a logger
// with a single function call: // with a single function call:
// logger, err := zap.NewProduction() //
// if err != nil { // logger, err := zap.NewProduction()
// log.Fatalf("can't initialize zap logger: %v", err) // if err != nil {
// } // log.Fatalf("can't initialize zap logger: %v", err)
// defer logger.Sync() // }
// defer logger.Sync()
// //
// Presets are fine for small projects, but larger projects and organizations // Presets are fine for small projects, but larger projects and organizations
// naturally require a bit more customization. For most users, zap's Config // naturally require a bit more customization. For most users, zap's Config
@ -94,7 +98,7 @@
// go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration // go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration
// example for sample code. // example for sample code.
// //
// Extending Zap // # Extending Zap
// //
// The zap package itself is a relatively thin wrapper around the interfaces // The zap package itself is a relatively thin wrapper around the interfaces
// in go.uber.org/zap/zapcore. Extending zap to support a new encoding (e.g., // in go.uber.org/zap/zapcore. Extending zap to support a new encoding (e.g.,
@ -106,7 +110,7 @@
// Similarly, package authors can use the high-performance Encoder and Core // Similarly, package authors can use the high-performance Encoder and Core
// implementations in the zapcore package to build their own loggers. // implementations in the zapcore package to build their own loggers.
// //
// Frequently Asked Questions // # Frequently Asked Questions
// //
// An FAQ covering everything from installation errors to design decisions is // An FAQ covering everything from installation errors to design decisions is
// available at https://github.com/uber-go/zap/blob/master/FAQ.md. // available at https://github.com/uber-go/zap/blob/master/FAQ.md.

2
vendor/go.uber.org/zap/encoder.go generated vendored
View File

@ -63,7 +63,7 @@ func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapco
func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
if encoderConfig.TimeKey != "" && encoderConfig.EncodeTime == nil { if encoderConfig.TimeKey != "" && encoderConfig.EncodeTime == nil {
return nil, fmt.Errorf("missing EncodeTime in EncoderConfig") return nil, errors.New("missing EncodeTime in EncoderConfig")
} }
_encoderMutex.RLock() _encoderMutex.RLock()

14
vendor/go.uber.org/zap/error.go generated vendored
View File

@ -21,14 +21,13 @@
package zap package zap
import ( import (
"sync" "go.uber.org/zap/internal/pool"
"go.uber.org/zap/zapcore" "go.uber.org/zap/zapcore"
) )
var _errArrayElemPool = sync.Pool{New: func() interface{} { var _errArrayElemPool = pool.New(func() *errArrayElem {
return &errArrayElem{} return &errArrayElem{}
}} })
// Error is shorthand for the common idiom NamedError("error", err). // Error is shorthand for the common idiom NamedError("error", err).
func Error(err error) Field { func Error(err error) Field {
@ -60,11 +59,14 @@ func (errs errArray) MarshalLogArray(arr zapcore.ArrayEncoder) error {
// potentially an "errorVerbose" attribute, we need to wrap it in a // potentially an "errorVerbose" attribute, we need to wrap it in a
// type that implements LogObjectMarshaler. To prevent this from // type that implements LogObjectMarshaler. To prevent this from
// allocating, pool the wrapper type. // allocating, pool the wrapper type.
elem := _errArrayElemPool.Get().(*errArrayElem) elem := _errArrayElemPool.Get()
elem.error = errs[i] elem.error = errs[i]
arr.AppendObject(elem) err := arr.AppendObject(elem)
elem.error = nil elem.error = nil
_errArrayElemPool.Put(elem) _errArrayElemPool.Put(elem)
if err != nil {
return err
}
} }
return nil return nil
} }

194
vendor/go.uber.org/zap/field.go generated vendored
View File

@ -25,6 +25,7 @@ import (
"math" "math"
"time" "time"
"go.uber.org/zap/internal/stacktrace"
"go.uber.org/zap/zapcore" "go.uber.org/zap/zapcore"
) )
@ -374,7 +375,7 @@ func StackSkip(key string, skip int) Field {
// from expanding the zapcore.Field union struct to include a byte slice. Since // from expanding the zapcore.Field union struct to include a byte slice. Since
// taking a stacktrace is already so expensive (~10us), the extra allocation // taking a stacktrace is already so expensive (~10us), the extra allocation
// is okay. // is okay.
return String(key, takeStacktrace(skip+1)) // skip StackSkip return String(key, stacktrace.Take(skip+1)) // skip StackSkip
} }
// Duration constructs a field with the given key and value. The encoder // Duration constructs a field with the given key and value. The encoder
@ -410,6 +411,63 @@ func Inline(val zapcore.ObjectMarshaler) Field {
} }
} }
// Dict constructs a field containing the provided key-value pairs.
// It acts similar to [Object], but with the fields specified as arguments.
func Dict(key string, val ...Field) Field {
return dictField(key, val)
}
// We need a function with the signature (string, T) for zap.Any.
func dictField(key string, val []Field) Field {
return Object(key, dictObject(val))
}
type dictObject []Field
func (d dictObject) MarshalLogObject(enc zapcore.ObjectEncoder) error {
for _, f := range d {
f.AddTo(enc)
}
return nil
}
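Editor's usage sketch for `Dict`; the key and field names are hypothetical:

```go
package main

import "go.uber.org/zap"

func main() {
	logger := zap.NewExample()

	// Dict groups fields under one key without defining an ObjectMarshaler.
	logger.Info("request handled", zap.Dict("http",
		zap.String("method", "GET"),
		zap.Int("status", 200),
	))
}
```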
// We discovered an issue where zap.Any can cause a performance degradation
// when used in new goroutines.
//
// This happens because the compiler assigns 4.8kb (one zap.Field per arm of
// switch statement) of stack space for zap.Any when it takes the form:
//
// switch v := v.(type) {
// case string:
// return String(key, v)
// case int:
// return Int(key, v)
// // ...
// default:
// return Reflect(key, v)
// }
//
// To avoid this, we use the type switch to assign a value to a single local variable
// and then call a function on it.
// The local variable is just a function reference so it doesn't allocate
// when converted to an interface{}.
//
// A fair bit of experimentation went into this.
// See also:
//
// - https://github.com/uber-go/zap/pull/1301
// - https://github.com/uber-go/zap/pull/1303
// - https://github.com/uber-go/zap/pull/1304
// - https://github.com/uber-go/zap/pull/1305
// - https://github.com/uber-go/zap/pull/1308
type anyFieldC[T any] func(string, T) Field
func (f anyFieldC[T]) Any(key string, val any) Field {
v, _ := val.(T)
// val is guaranteed to be a T, except when it's nil.
return f(key, v)
}
// Any takes a key and an arbitrary value and chooses the best way to represent // Any takes a key and an arbitrary value and chooses the best way to represent
// them as a field, falling back to a reflection-based approach only if // them as a field, falling back to a reflection-based approach only if
// necessary. // necessary.
@ -418,132 +476,138 @@ func Inline(val zapcore.ObjectMarshaler) Field {
// them. To minimize surprises, []byte values are treated as binary blobs, byte // them. To minimize surprises, []byte values are treated as binary blobs, byte
// values are treated as uint8, and runes are always treated as integers. // values are treated as uint8, and runes are always treated as integers.
func Any(key string, value interface{}) Field { func Any(key string, value interface{}) Field {
switch val := value.(type) { var c interface{ Any(string, any) Field }
switch value.(type) {
case zapcore.ObjectMarshaler: case zapcore.ObjectMarshaler:
return Object(key, val) c = anyFieldC[zapcore.ObjectMarshaler](Object)
case zapcore.ArrayMarshaler: case zapcore.ArrayMarshaler:
return Array(key, val) c = anyFieldC[zapcore.ArrayMarshaler](Array)
case []Field:
c = anyFieldC[[]Field](dictField)
case bool: case bool:
return Bool(key, val) c = anyFieldC[bool](Bool)
case *bool: case *bool:
return Boolp(key, val) c = anyFieldC[*bool](Boolp)
case []bool: case []bool:
return Bools(key, val) c = anyFieldC[[]bool](Bools)
case complex128: case complex128:
return Complex128(key, val) c = anyFieldC[complex128](Complex128)
case *complex128: case *complex128:
return Complex128p(key, val) c = anyFieldC[*complex128](Complex128p)
case []complex128: case []complex128:
return Complex128s(key, val) c = anyFieldC[[]complex128](Complex128s)
case complex64: case complex64:
return Complex64(key, val) c = anyFieldC[complex64](Complex64)
case *complex64: case *complex64:
return Complex64p(key, val) c = anyFieldC[*complex64](Complex64p)
case []complex64: case []complex64:
return Complex64s(key, val) c = anyFieldC[[]complex64](Complex64s)
case float64: case float64:
return Float64(key, val) c = anyFieldC[float64](Float64)
case *float64: case *float64:
return Float64p(key, val) c = anyFieldC[*float64](Float64p)
case []float64: case []float64:
return Float64s(key, val) c = anyFieldC[[]float64](Float64s)
case float32: case float32:
return Float32(key, val) c = anyFieldC[float32](Float32)
case *float32: case *float32:
return Float32p(key, val) c = anyFieldC[*float32](Float32p)
case []float32: case []float32:
return Float32s(key, val) c = anyFieldC[[]float32](Float32s)
case int: case int:
return Int(key, val) c = anyFieldC[int](Int)
case *int: case *int:
return Intp(key, val) c = anyFieldC[*int](Intp)
case []int: case []int:
return Ints(key, val) c = anyFieldC[[]int](Ints)
case int64: case int64:
return Int64(key, val) c = anyFieldC[int64](Int64)
case *int64: case *int64:
return Int64p(key, val) c = anyFieldC[*int64](Int64p)
case []int64: case []int64:
return Int64s(key, val) c = anyFieldC[[]int64](Int64s)
case int32: case int32:
return Int32(key, val) c = anyFieldC[int32](Int32)
case *int32: case *int32:
return Int32p(key, val) c = anyFieldC[*int32](Int32p)
case []int32: case []int32:
return Int32s(key, val) c = anyFieldC[[]int32](Int32s)
case int16: case int16:
return Int16(key, val) c = anyFieldC[int16](Int16)
case *int16: case *int16:
return Int16p(key, val) c = anyFieldC[*int16](Int16p)
case []int16: case []int16:
return Int16s(key, val) c = anyFieldC[[]int16](Int16s)
case int8: case int8:
return Int8(key, val) c = anyFieldC[int8](Int8)
case *int8: case *int8:
return Int8p(key, val) c = anyFieldC[*int8](Int8p)
case []int8: case []int8:
return Int8s(key, val) c = anyFieldC[[]int8](Int8s)
case string: case string:
return String(key, val) c = anyFieldC[string](String)
case *string: case *string:
return Stringp(key, val) c = anyFieldC[*string](Stringp)
case []string: case []string:
return Strings(key, val) c = anyFieldC[[]string](Strings)
case uint: case uint:
return Uint(key, val) c = anyFieldC[uint](Uint)
case *uint: case *uint:
return Uintp(key, val) c = anyFieldC[*uint](Uintp)
case []uint: case []uint:
return Uints(key, val) c = anyFieldC[[]uint](Uints)
case uint64: case uint64:
return Uint64(key, val) c = anyFieldC[uint64](Uint64)
case *uint64: case *uint64:
return Uint64p(key, val) c = anyFieldC[*uint64](Uint64p)
case []uint64: case []uint64:
return Uint64s(key, val) c = anyFieldC[[]uint64](Uint64s)
case uint32: case uint32:
return Uint32(key, val) c = anyFieldC[uint32](Uint32)
case *uint32: case *uint32:
return Uint32p(key, val) c = anyFieldC[*uint32](Uint32p)
case []uint32: case []uint32:
return Uint32s(key, val) c = anyFieldC[[]uint32](Uint32s)
case uint16: case uint16:
return Uint16(key, val) c = anyFieldC[uint16](Uint16)
case *uint16: case *uint16:
return Uint16p(key, val) c = anyFieldC[*uint16](Uint16p)
case []uint16: case []uint16:
return Uint16s(key, val) c = anyFieldC[[]uint16](Uint16s)
case uint8: case uint8:
return Uint8(key, val) c = anyFieldC[uint8](Uint8)
case *uint8: case *uint8:
return Uint8p(key, val) c = anyFieldC[*uint8](Uint8p)
case []byte: case []byte:
return Binary(key, val) c = anyFieldC[[]byte](Binary)
case uintptr: case uintptr:
return Uintptr(key, val) c = anyFieldC[uintptr](Uintptr)
case *uintptr: case *uintptr:
return Uintptrp(key, val) c = anyFieldC[*uintptr](Uintptrp)
case []uintptr: case []uintptr:
return Uintptrs(key, val) c = anyFieldC[[]uintptr](Uintptrs)
case time.Time: case time.Time:
return Time(key, val) c = anyFieldC[time.Time](Time)
case *time.Time: case *time.Time:
return Timep(key, val) c = anyFieldC[*time.Time](Timep)
case []time.Time: case []time.Time:
return Times(key, val) c = anyFieldC[[]time.Time](Times)
case time.Duration: case time.Duration:
return Duration(key, val) c = anyFieldC[time.Duration](Duration)
case *time.Duration: case *time.Duration:
return Durationp(key, val) c = anyFieldC[*time.Duration](Durationp)
case []time.Duration: case []time.Duration:
return Durations(key, val) c = anyFieldC[[]time.Duration](Durations)
case error: case error:
return NamedError(key, val) c = anyFieldC[error](NamedError)
case []error: case []error:
return Errors(key, val) c = anyFieldC[[]error](Errors)
case fmt.Stringer: case fmt.Stringer:
return Stringer(key, val) c = anyFieldC[fmt.Stringer](Stringer)
default: default:
return Reflect(key, val) c = anyFieldC[any](Reflect)
} }
return c.Any(key, value)
} }
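Editor's sketch of `Any` after this change, including the new `[]Field` arm that renders like `Dict`; the keys are hypothetical:

```go
package main

import "go.uber.org/zap"

func main() {
	logger := zap.NewExample()

	// Any picks the strongly typed constructor for the dynamic type it sees;
	// a []zap.Field is rendered like Dict, and unknown types fall back to
	// reflection.
	logger.Info("mixed bag",
		zap.Any("retries", 3),
		zap.Any("tags", []string{"a", "b"}),
		zap.Any("meta", []zap.Field{zap.String("region", "eu")}),
	)
}
```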

View File

@ -22,6 +22,7 @@ package zap
import ( import (
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"io" "io"
"net/http" "net/http"
@ -32,22 +33,23 @@ import (
// ServeHTTP is a simple JSON endpoint that can report on or change the current // ServeHTTP is a simple JSON endpoint that can report on or change the current
// logging level. // logging level.
// //
// GET // # GET
// //
// The GET request returns a JSON description of the current logging level like: // The GET request returns a JSON description of the current logging level like:
// {"level":"info"}
// //
// PUT // {"level":"info"}
//
// # PUT
// //
// The PUT request changes the logging level. It is perfectly safe to change the // The PUT request changes the logging level. It is perfectly safe to change the
// logging level while a program is running. Two content types are supported: // logging level while a program is running. Two content types are supported:
// //
// Content-Type: application/x-www-form-urlencoded // Content-Type: application/x-www-form-urlencoded
// //
// With this content type, the level can be provided through the request body or // With this content type, the level can be provided through the request body or
// a query parameter. The log level is URL encoded like: // a query parameter. The log level is URL encoded like:
// //
// level=debug // level=debug
// //
// The request body takes precedence over the query parameter, if both are // The request body takes precedence over the query parameter, if both are
// specified. // specified.
@ -55,19 +57,25 @@ import (
// This content type is the default for a curl PUT request. Following are two // This content type is the default for a curl PUT request. Following are two
// example curl requests that both set the logging level to debug. // example curl requests that both set the logging level to debug.
// //
// curl -X PUT localhost:8080/log/level?level=debug // curl -X PUT localhost:8080/log/level?level=debug
// curl -X PUT localhost:8080/log/level -d level=debug // curl -X PUT localhost:8080/log/level -d level=debug
// //
// For any other content type, the payload is expected to be JSON encoded and // For any other content type, the payload is expected to be JSON encoded and
// look like: // look like:
// //
// {"level":"info"} // {"level":"info"}
// //
// An example curl request could look like this: // An example curl request could look like this:
// //
// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}' // curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}'
//
func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) { func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if err := lvl.serveHTTP(w, r); err != nil {
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "internal error: %v", err)
}
}
func (lvl AtomicLevel) serveHTTP(w http.ResponseWriter, r *http.Request) error {
type errorResponse struct { type errorResponse struct {
Error string `json:"error"` Error string `json:"error"`
} }
@ -79,19 +87,20 @@ func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) {
switch r.Method { switch r.Method {
case http.MethodGet: case http.MethodGet:
enc.Encode(payload{Level: lvl.Level()}) return enc.Encode(payload{Level: lvl.Level()})
case http.MethodPut: case http.MethodPut:
requestedLvl, err := decodePutRequest(r.Header.Get("Content-Type"), r) requestedLvl, err := decodePutRequest(r.Header.Get("Content-Type"), r)
if err != nil { if err != nil {
w.WriteHeader(http.StatusBadRequest) w.WriteHeader(http.StatusBadRequest)
enc.Encode(errorResponse{Error: err.Error()}) return enc.Encode(errorResponse{Error: err.Error()})
return
} }
lvl.SetLevel(requestedLvl) lvl.SetLevel(requestedLvl)
enc.Encode(payload{Level: lvl.Level()}) return enc.Encode(payload{Level: lvl.Level()})
default: default:
w.WriteHeader(http.StatusMethodNotAllowed) w.WriteHeader(http.StatusMethodNotAllowed)
enc.Encode(errorResponse{ return enc.Encode(errorResponse{
Error: "Only GET and PUT are supported.", Error: "Only GET and PUT are supported.",
}) })
} }
@ -108,7 +117,7 @@ func decodePutRequest(contentType string, r *http.Request) (zapcore.Level, error
func decodePutURL(r *http.Request) (zapcore.Level, error) { func decodePutURL(r *http.Request) (zapcore.Level, error) {
lvl := r.FormValue("level") lvl := r.FormValue("level")
if lvl == "" { if lvl == "" {
return 0, fmt.Errorf("must specify logging level") return 0, errors.New("must specify logging level")
} }
var l zapcore.Level var l zapcore.Level
if err := l.UnmarshalText([]byte(lvl)); err != nil { if err := l.UnmarshalText([]byte(lvl)); err != nil {
@ -125,8 +134,7 @@ func decodePutJSON(body io.Reader) (zapcore.Level, error) {
return 0, fmt.Errorf("malformed request body: %v", err) return 0, fmt.Errorf("malformed request body: %v", err)
} }
if pld.Level == nil { if pld.Level == nil {
return 0, fmt.Errorf("must specify logging level") return 0, errors.New("must specify logging level")
} }
return *pld.Level, nil return *pld.Level, nil
} }
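Because AtomicLevel implements http.Handler through this ServeHTTP method, exposing the endpoint is a one-liner. A minimal sketch (port and path are arbitrary choices, not part of this commit):

    package main

    import (
        "log"
        "net/http"

        "go.uber.org/zap"
    )

    func main() {
        // Share lvl with the zapcore.Core so PUTs to the endpoint take
        // effect on the running logger immediately.
        lvl := zap.NewAtomicLevelAt(zap.InfoLevel)

        mux := http.NewServeMux()
        mux.Handle("/log/level", lvl) // GET reads the level, PUT changes it
        log.Fatal(http.ListenAndServe("localhost:8080", mux))
    }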


@ -24,24 +24,25 @@ package exit
import "os" import "os"
var real = func() { os.Exit(1) } var _exit = os.Exit
// Exit normally terminates the process by calling os.Exit(1). If the package // With terminates the process by calling os.Exit(code). If the package is
// is stubbed, it instead records a call in the testing spy. // stubbed, it instead records a call in the testing spy.
func Exit() { func With(code int) {
real() _exit(code)
} }
// A StubbedExit is a testing fake for os.Exit. // A StubbedExit is a testing fake for os.Exit.
type StubbedExit struct { type StubbedExit struct {
Exited bool Exited bool
prev func() Code int
prev func(code int)
} }
// Stub substitutes a fake for the call to os.Exit(1). // Stub substitutes a fake for the call to os.Exit(1).
func Stub() *StubbedExit { func Stub() *StubbedExit {
s := &StubbedExit{prev: real} s := &StubbedExit{prev: _exit}
real = s.exit _exit = s.exit
return s return s
} }
@ -56,9 +57,10 @@ func WithStub(f func()) *StubbedExit {
// Unstub restores the previous exit function. // Unstub restores the previous exit function.
func (se *StubbedExit) Unstub() { func (se *StubbedExit) Unstub() {
real = se.prev _exit = se.prev
} }
func (se *StubbedExit) exit() { func (se *StubbedExit) exit(code int) {
se.Exited = true se.Exited = true
se.Code = code
} }
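internal/exit now records the exit code as well as the fact that exit was requested, so tests can assert on it. Since the package is internal to the zap module, the sketch below (imports "testing" and "go.uber.org/zap/internal/exit") only compiles inside that module and is shown purely to illustrate the stub pattern:

    func TestFatalUsesExitCode(t *testing.T) {
        stub := exit.Stub() // replaces _exit with a recorder
        defer stub.Unstub()

        exit.With(3)

        if !stub.Exited || stub.Code != 3 {
            t.Fatalf("want recorded exit code 3, got %+v", *stub)
        }
    }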


@ -1,4 +1,4 @@
// Copyright (c) 2020 Uber Technologies, Inc. // Copyright (c) 2022 Uber Technologies, Inc.
// //
// Permission is hereby granted, free of charge, to any person obtaining a copy // Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal // of this software and associated documentation files (the "Software"), to deal
@ -18,22 +18,20 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE. // THE SOFTWARE.
package atomic // Package internal and its subpackages hold types and functionality
// that are not part of Zap's public API.
package internal
// atomic.Value panics on nil inputs, or if the underlying type changes. import "go.uber.org/zap/zapcore"
// Stabilize by always storing a custom struct that we control.
//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -file=error.go // LeveledEnabler is an interface satisfied by LevelEnablers that are able to
// report their own level.
//
// This interface is defined to use more conveniently in tests and non-zapcore
// packages.
// This cannot be imported from zapcore because of the cyclic dependency.
type LeveledEnabler interface {
zapcore.LevelEnabler
type packedError struct{ Value error } Level() zapcore.Level
func packError(v error) interface{} {
return packedError{v}
}
func unpackError(v interface{}) error {
if err, ok := v.(packedError); ok {
return err.Value
}
return nil
} }


@ -1,4 +1,4 @@
// Copyright (c) 2020 Uber Technologies, Inc. // Copyright (c) 2023 Uber Technologies, Inc.
// //
// Permission is hereby granted, free of charge, to any person obtaining a copy // Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal // of this software and associated documentation files (the "Software"), to deal
@ -18,36 +18,41 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE. // THE SOFTWARE.
package atomic // Package pool provides internal pool utilities.
package pool
import ( import (
"strconv" "sync"
) )
//go:generate bin/gen-atomicwrapper -name=Bool -type=bool -wrapped=Uint32 -pack=boolToInt -unpack=truthy -cas -swap -json -file=bool.go // A Pool is a generic wrapper around [sync.Pool] to provide strongly-typed
// object pooling.
func truthy(n uint32) bool { //
return n == 1 // Note that SA6002 (ref: https://staticcheck.io/docs/checks/#SA6002) will
// not be detected, so all internal pool use must take care to only store
// pointer types.
type Pool[T any] struct {
pool sync.Pool
} }
func boolToInt(b bool) uint32 { // New returns a new [Pool] for T, and will use fn to construct new Ts when
if b { // the pool is empty.
return 1 func New[T any](fn func() T) *Pool[T] {
} return &Pool[T]{
return 0 pool: sync.Pool{
} New: func() any {
return fn()
// Toggle atomically negates the Boolean and returns the previous value. },
func (b *Bool) Toggle() bool { },
for {
old := b.Load()
if b.CAS(old, !old) {
return old
}
} }
} }
// String encodes the wrapped value as a string. // Get gets a T from the pool, or creates a new one if the pool is empty.
func (b *Bool) String() string { func (p *Pool[T]) Get() T {
return strconv.FormatBool(b.Load()) return p.pool.Get().(T)
}
// Put returns x into the pool.
func (p *Pool[T]) Put(x T) {
p.pool.Put(x)
} }
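A usage sketch for the typed pool, mirroring how zap's own internals wrap it around pointer types to stay clear of SA6002. The pool package is internal, so this is illustrative rather than importable; it assumes the "bytes" and "go.uber.org/zap/internal/pool" imports:

    // bufPool hands out *bytes.Buffer values; storing pointers keeps the
    // sync.Pool interface conversion allocation-free (SA6002).
    var bufPool = pool.New(func() *bytes.Buffer {
        return bytes.NewBuffer(make([]byte, 0, 1024))
    })

    func render(msg string) string {
        buf := bufPool.Get()
        defer func() {
            buf.Reset()
            bufPool.Put(buf)
        }()
        buf.WriteString(msg)
        return buf.String()
    }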


@ -1,4 +1,4 @@
// Copyright (c) 2016 Uber Technologies, Inc. // Copyright (c) 2023 Uber Technologies, Inc.
// //
// Permission is hereby granted, free of charge, to any person obtaining a copy // Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal // of this software and associated documentation files (the "Software"), to deal
@ -18,25 +18,26 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE. // THE SOFTWARE.
package zap // Package stacktrace provides support for gathering stack traces
// efficiently.
package stacktrace
import ( import (
"runtime" "runtime"
"sync"
"go.uber.org/zap/buffer" "go.uber.org/zap/buffer"
"go.uber.org/zap/internal/bufferpool" "go.uber.org/zap/internal/bufferpool"
"go.uber.org/zap/internal/pool"
) )
var _stacktracePool = sync.Pool{ var _stackPool = pool.New(func() *Stack {
New: func() interface{} { return &Stack{
return &stacktrace{ storage: make([]uintptr, 64),
storage: make([]uintptr, 64), }
} })
},
}
type stacktrace struct { // Stack is a captured stack trace.
type Stack struct {
pcs []uintptr // program counters; always a subslice of storage pcs []uintptr // program counters; always a subslice of storage
frames *runtime.Frames frames *runtime.Frames
@ -50,30 +51,30 @@ type stacktrace struct {
storage []uintptr storage []uintptr
} }
// stacktraceDepth specifies how deep of a stack trace should be captured. // Depth specifies how deep of a stack trace should be captured.
type stacktraceDepth int type Depth int
const ( const (
// stacktraceFirst captures only the first frame. // First captures only the first frame.
stacktraceFirst stacktraceDepth = iota First Depth = iota
// stacktraceFull captures the entire call stack, allocating more // Full captures the entire call stack, allocating more
// storage for it if needed. // storage for it if needed.
stacktraceFull Full
) )
// captureStacktrace captures a stack trace of the specified depth, skipping // Capture captures a stack trace of the specified depth, skipping
// the provided number of frames. skip=0 identifies the caller of // the provided number of frames. skip=0 identifies the caller of
// captureStacktrace. // Capture.
// //
// The caller must call Free on the returned stacktrace after using it. // The caller must call Free on the returned stacktrace after using it.
func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace { func Capture(skip int, depth Depth) *Stack {
stack := _stacktracePool.Get().(*stacktrace) stack := _stackPool.Get()
switch depth { switch depth {
case stacktraceFirst: case First:
stack.pcs = stack.storage[:1] stack.pcs = stack.storage[:1]
case stacktraceFull: case Full:
stack.pcs = stack.storage stack.pcs = stack.storage
} }
@ -87,7 +88,7 @@ func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace {
// runtime.Callers truncates the recorded stacktrace if there is no // runtime.Callers truncates the recorded stacktrace if there is no
// room in the provided slice. For the full stack trace, keep expanding // room in the provided slice. For the full stack trace, keep expanding
// storage until there are fewer frames than there is room. // storage until there are fewer frames than there is room.
if depth == stacktraceFull { if depth == Full {
pcs := stack.pcs pcs := stack.pcs
for numFrames == len(pcs) { for numFrames == len(pcs) {
pcs = make([]uintptr, len(pcs)*2) pcs = make([]uintptr, len(pcs)*2)
@ -109,52 +110,56 @@ func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace {
// Free releases resources associated with this stacktrace // Free releases resources associated with this stacktrace
// and returns it back to the pool. // and returns it back to the pool.
func (st *stacktrace) Free() { func (st *Stack) Free() {
st.frames = nil st.frames = nil
st.pcs = nil st.pcs = nil
_stacktracePool.Put(st) _stackPool.Put(st)
} }
// Count reports the total number of frames in this stacktrace. // Count reports the total number of frames in this stacktrace.
// Count DOES NOT change as Next is called. // Count DOES NOT change as Next is called.
func (st *stacktrace) Count() int { func (st *Stack) Count() int {
return len(st.pcs) return len(st.pcs)
} }
// Next returns the next frame in the stack trace, // Next returns the next frame in the stack trace,
// and a boolean indicating whether there are more after it. // and a boolean indicating whether there are more after it.
func (st *stacktrace) Next() (_ runtime.Frame, more bool) { func (st *Stack) Next() (_ runtime.Frame, more bool) {
return st.frames.Next() return st.frames.Next()
} }
func takeStacktrace(skip int) string { // Take returns a string representation of the current stacktrace.
stack := captureStacktrace(skip+1, stacktraceFull) //
// skip is the number of frames to skip before recording the stack trace.
// skip=0 identifies the caller of Take.
func Take(skip int) string {
stack := Capture(skip+1, Full)
defer stack.Free() defer stack.Free()
buffer := bufferpool.Get() buffer := bufferpool.Get()
defer buffer.Free() defer buffer.Free()
stackfmt := newStackFormatter(buffer) stackfmt := NewFormatter(buffer)
stackfmt.FormatStack(stack) stackfmt.FormatStack(stack)
return buffer.String() return buffer.String()
} }
// stackFormatter formats a stack trace into a readable string representation. // Formatter formats a stack trace into a readable string representation.
type stackFormatter struct { type Formatter struct {
b *buffer.Buffer b *buffer.Buffer
nonEmpty bool // whether we've written at least one frame already nonEmpty bool // whether we've written at least one frame already
} }
// newStackFormatter builds a new stackFormatter. // NewFormatter builds a new Formatter.
func newStackFormatter(b *buffer.Buffer) stackFormatter { func NewFormatter(b *buffer.Buffer) Formatter {
return stackFormatter{b: b} return Formatter{b: b}
} }
// FormatStack formats all remaining frames in the provided stacktrace -- minus // FormatStack formats all remaining frames in the provided stacktrace -- minus
// the final runtime.main/runtime.goexit frame. // the final runtime.main/runtime.goexit frame.
func (sf *stackFormatter) FormatStack(stack *stacktrace) { func (sf *Formatter) FormatStack(stack *Stack) {
// Note: On the last iteration, frames.Next() returns false, with a valid // Note: On the last iteration, frames.Next() returns false, with a valid
// frame, but we ignore this frame. The last frame is a a runtime frame which // frame, but we ignore this frame. The last frame is a runtime frame which
// adds noise, since it's only either runtime.main or runtime.goexit. // adds noise, since it's only either runtime.main or runtime.goexit.
for frame, more := stack.Next(); more; frame, more = stack.Next() { for frame, more := stack.Next(); more; frame, more = stack.Next() {
sf.FormatFrame(frame) sf.FormatFrame(frame)
@ -162,7 +167,7 @@ func (sf *stackFormatter) FormatStack(stack *stacktrace) {
} }
// FormatFrame formats the given frame. // FormatFrame formats the given frame.
func (sf *stackFormatter) FormatFrame(frame runtime.Frame) { func (sf *Formatter) FormatFrame(frame runtime.Frame) {
if sf.nonEmpty { if sf.nonEmpty {
sf.b.AppendByte('\n') sf.b.AppendByte('\n')
} }
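A short sketch of capturing just the caller's frame with the renamed API (internal package, illustrative only; assumes "fmt" plus the internal stacktrace import):

    stack := stacktrace.Capture(0, stacktrace.First) // skip=0: the caller of Capture
    defer stack.Free()                               // always return it to the pool

    if frame, _ := stack.Next(); frame.PC != 0 {
        fmt.Printf("%s\n\t%s:%d\n", frame.Function, frame.File, frame.Line)
    }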

vendor/go.uber.org/zap/level.go

@ -21,7 +21,9 @@
package zap package zap
import ( import (
"go.uber.org/atomic" "sync/atomic"
"go.uber.org/zap/internal"
"go.uber.org/zap/zapcore" "go.uber.org/zap/zapcore"
) )
@ -70,12 +72,14 @@ type AtomicLevel struct {
l *atomic.Int32 l *atomic.Int32
} }
var _ internal.LeveledEnabler = AtomicLevel{}
// NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging // NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging
// enabled. // enabled.
func NewAtomicLevel() AtomicLevel { func NewAtomicLevel() AtomicLevel {
return AtomicLevel{ lvl := AtomicLevel{l: new(atomic.Int32)}
l: atomic.NewInt32(int32(InfoLevel)), lvl.l.Store(int32(InfoLevel))
} return lvl
} }
// NewAtomicLevelAt is a convenience function that creates an AtomicLevel // NewAtomicLevelAt is a convenience function that creates an AtomicLevel
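AtomicLevel's internals move from go.uber.org/atomic to the standard library's typed atomics (Go 1.19+), which have no constructor, hence the store-after-new pattern above. The equivalent pattern in isolation, assuming only "sync/atomic" and "go.uber.org/zap/zapcore":

    package main

    import (
        "fmt"
        "sync/atomic"

        "go.uber.org/zap/zapcore"
    )

    func main() {
        var lvl atomic.Int32                // zero value is usable; no constructor
        lvl.Store(int32(zapcore.InfoLevel)) // replaces atomic.NewInt32(int32(InfoLevel))
        fmt.Println(zapcore.Level(lvl.Load()))
    }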

vendor/go.uber.org/zap/logger.go

@ -22,11 +22,12 @@ package zap
import ( import (
"fmt" "fmt"
"io/ioutil" "io"
"os" "os"
"strings" "strings"
"go.uber.org/zap/internal/bufferpool" "go.uber.org/zap/internal/bufferpool"
"go.uber.org/zap/internal/stacktrace"
"go.uber.org/zap/zapcore" "go.uber.org/zap/zapcore"
) )
@ -42,7 +43,7 @@ type Logger struct {
development bool development bool
addCaller bool addCaller bool
onFatal zapcore.CheckWriteAction // default is WriteThenFatal onFatal zapcore.CheckWriteHook // default is WriteThenFatal
name string name string
errorOutput zapcore.WriteSyncer errorOutput zapcore.WriteSyncer
@ -85,7 +86,7 @@ func New(core zapcore.Core, options ...Option) *Logger {
func NewNop() *Logger { func NewNop() *Logger {
return &Logger{ return &Logger{
core: zapcore.NewNopCore(), core: zapcore.NewNopCore(),
errorOutput: zapcore.AddSync(ioutil.Discard), errorOutput: zapcore.AddSync(io.Discard),
addStack: zapcore.FatalLevel + 1, addStack: zapcore.FatalLevel + 1,
clock: zapcore.DefaultClock, clock: zapcore.DefaultClock,
} }
@ -107,6 +108,19 @@ func NewDevelopment(options ...Option) (*Logger, error) {
return NewDevelopmentConfig().Build(options...) return NewDevelopmentConfig().Build(options...)
} }
// Must is a helper that wraps a call to a function returning (*Logger, error)
// and panics if the error is non-nil. It is intended for use in variable
// initialization such as:
//
// var logger = zap.Must(zap.NewProduction())
func Must(logger *Logger, err error) *Logger {
if err != nil {
panic(err)
}
return logger
}
// NewExample builds a Logger that's designed for use in zap's testable // NewExample builds a Logger that's designed for use in zap's testable
// examples. It writes DebugLevel and above logs to standard out as JSON, but // examples. It writes DebugLevel and above logs to standard out as JSON, but
// omits the timestamp and calling function to keep example output // omits the timestamp and calling function to keep example output
@ -160,7 +174,8 @@ func (log *Logger) WithOptions(opts ...Option) *Logger {
} }
// With creates a child logger and adds structured context to it. Fields added // With creates a child logger and adds structured context to it. Fields added
// to the child don't affect the parent, and vice versa. // to the child don't affect the parent, and vice versa. Any fields that
// require evaluation (such as Objects) are evaluated upon invocation of With.
func (log *Logger) With(fields ...Field) *Logger { func (log *Logger) With(fields ...Field) *Logger {
if len(fields) == 0 { if len(fields) == 0 {
return log return log
@ -170,6 +185,35 @@ func (log *Logger) With(fields ...Field) *Logger {
return l return l
} }
// WithLazy creates a child logger and adds structured context to it lazily.
//
// The fields are evaluated only if the logger is further chained with [With]
// or is written to with any of the log level methods.
// Until that occurs, the logger may retain references to objects inside the fields,
// and logging will reflect the state of an object at the time of logging,
// not the time of WithLazy().
//
// WithLazy provides a worthwhile performance optimization for contextual loggers
// when the likelihood of using the child logger is low,
// such as error paths and rarely taken branches.
//
// Similar to [With], fields added to the child don't affect the parent, and vice versa.
func (log *Logger) WithLazy(fields ...Field) *Logger {
if len(fields) == 0 {
return log
}
return log.WithOptions(WrapCore(func(core zapcore.Core) zapcore.Core {
return zapcore.NewLazyWith(core, fields)
}))
}
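Taken together, Must and WithLazy shorten the common "build a logger, attach context you may never log" pattern. A sketch; the request payload here is a made-up placeholder:

    package main

    import "go.uber.org/zap"

    func main() {
        logger := zap.Must(zap.NewProduction()) // panics instead of returning err
        defer func() { _ = logger.Sync() }()

        req := map[string]string{"path": "/health"} // hypothetical payload

        // The field is only rendered if errLogger actually logs (or is chained
        // with With); on the happy path the zap.Any work is skipped entirely.
        errLogger := logger.WithLazy(zap.Any("request", req))
        errLogger.Error("request failed")
    }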
// Level reports the minimum enabled level for this logger.
//
// For NopLoggers, this is [zapcore.InvalidLevel].
func (log *Logger) Level() zapcore.Level {
return zapcore.LevelOf(log.core)
}
// Check returns a CheckedEntry if logging a message at the specified level // Check returns a CheckedEntry if logging a message at the specified level
// is enabled. It's a completely optional optimization; in high-performance // is enabled. It's a completely optional optimization; in high-performance
// applications, Check can help avoid allocating a slice to hold fields. // applications, Check can help avoid allocating a slice to hold fields.
@ -177,6 +221,16 @@ func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
return log.check(lvl, msg) return log.check(lvl, msg)
} }
// Log logs a message at the specified level. The message includes any fields
// passed at the log site, as well as any fields accumulated on the logger.
// Any Fields that require evaluation (such as Objects) are evaluated upon
// invocation of Log.
func (log *Logger) Log(lvl zapcore.Level, msg string, fields ...Field) {
if ce := log.check(lvl, msg); ce != nil {
ce.Write(fields...)
}
}
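Log is useful when the level itself is data rather than a fixed call site. A small sketch, assuming the "go.uber.org/zap" import and a caller-supplied verbose flag:

    func logBatch(logger *zap.Logger, verbose bool, n int) {
        lvl := zap.InfoLevel
        if verbose {
            lvl = zap.DebugLevel
        }
        logger.Log(lvl, "processed batch", zap.Int("count", n))
    }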
// Debug logs a message at DebugLevel. The message includes any fields passed // Debug logs a message at DebugLevel. The message includes any fields passed
// at the log site, as well as any fields accumulated on the logger. // at the log site, as well as any fields accumulated on the logger.
func (log *Logger) Debug(msg string, fields ...Field) { func (log *Logger) Debug(msg string, fields ...Field) {
@ -253,9 +307,15 @@ func (log *Logger) Core() zapcore.Core {
return log.core return log.core
} }
// Name returns the Logger's underlying name,
// or an empty string if the logger is unnamed.
func (log *Logger) Name() string {
return log.name
}
func (log *Logger) clone() *Logger { func (log *Logger) clone() *Logger {
copy := *log clone := *log
return &copy return &clone
} }
func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
@ -285,18 +345,27 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
// Set up any required terminal behavior. // Set up any required terminal behavior.
switch ent.Level { switch ent.Level {
case zapcore.PanicLevel: case zapcore.PanicLevel:
ce = ce.Should(ent, zapcore.WriteThenPanic) ce = ce.After(ent, zapcore.WriteThenPanic)
case zapcore.FatalLevel: case zapcore.FatalLevel:
onFatal := log.onFatal onFatal := log.onFatal
// Noop is the default value for CheckWriteAction, and it leads to // nil or WriteThenNoop will lead to continued execution after
// continued execution after a Fatal which is unexpected. // a Fatal log entry, which is unexpected. For example,
if onFatal == zapcore.WriteThenNoop { //
// f, err := os.Open(..)
// if err != nil {
// log.Fatal("cannot open", zap.Error(err))
// }
// fmt.Println(f.Name())
//
// The f.Name() will panic if we continue execution after the
// log.Fatal.
if onFatal == nil || onFatal == zapcore.WriteThenNoop {
onFatal = zapcore.WriteThenFatal onFatal = zapcore.WriteThenFatal
} }
ce = ce.Should(ent, onFatal) ce = ce.After(ent, onFatal)
case zapcore.DPanicLevel: case zapcore.DPanicLevel:
if log.development { if log.development {
ce = ce.Should(ent, zapcore.WriteThenPanic) ce = ce.After(ent, zapcore.WriteThenPanic)
} }
} }
@ -317,17 +386,17 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
// Adding the caller or stack trace requires capturing the callers of // Adding the caller or stack trace requires capturing the callers of
// this function. We'll share information between these two. // this function. We'll share information between these two.
stackDepth := stacktraceFirst stackDepth := stacktrace.First
if addStack { if addStack {
stackDepth = stacktraceFull stackDepth = stacktrace.Full
} }
stack := captureStacktrace(log.callerSkip+callerSkipOffset, stackDepth) stack := stacktrace.Capture(log.callerSkip+callerSkipOffset, stackDepth)
defer stack.Free() defer stack.Free()
if stack.Count() == 0 { if stack.Count() == 0 {
if log.addCaller { if log.addCaller {
fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC()) fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC())
log.errorOutput.Sync() _ = log.errorOutput.Sync()
} }
return ce return ce
} }
@ -348,7 +417,7 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
buffer := bufferpool.Get() buffer := bufferpool.Get()
defer buffer.Free() defer buffer.Free()
stackfmt := newStackFormatter(buffer) stackfmt := stacktrace.NewFormatter(buffer)
// We've already extracted the first frame, so format that // We've already extracted the first frame, so format that
// separately and defer to stackfmt for the rest. // separately and defer to stackfmt for the rest.

vendor/go.uber.org/zap/options.go

@ -133,9 +133,28 @@ func IncreaseLevel(lvl zapcore.LevelEnabler) Option {
} }
// OnFatal sets the action to take on fatal logs. // OnFatal sets the action to take on fatal logs.
//
// Deprecated: Use [WithFatalHook] instead.
func OnFatal(action zapcore.CheckWriteAction) Option { func OnFatal(action zapcore.CheckWriteAction) Option {
return WithFatalHook(action)
}
// WithFatalHook sets a CheckWriteHook to run on fatal logs.
// Zap will call this hook after writing a log statement with a Fatal level.
//
// For example, the following builds a logger that will exit the current
// goroutine after writing a fatal log message, but it will not exit the
// program.
//
// zap.New(core, zap.WithFatalHook(zapcore.WriteThenGoexit))
//
// It is important that the provided CheckWriteHook stops the control flow at
// the current statement to meet expectations of callers of the logger.
// We recommend calling os.Exit or runtime.Goexit inside custom hooks at
// minimum.
func WithFatalHook(hook zapcore.CheckWriteHook) Option {
return optionFunc(func(log *Logger) { return optionFunc(func(log *Logger) {
log.onFatal = action log.onFatal = hook
}) })
} }
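A sketch of the recommended replacement for OnFatal, using the built-in WriteThenGoexit hook so a fatal log ends the calling goroutine rather than the whole process (the encoder and sink choices are arbitrary):

    package main

    import (
        "os"

        "go.uber.org/zap"
        "go.uber.org/zap/zapcore"
    )

    func main() {
        core := zapcore.NewCore(
            zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()),
            zapcore.Lock(os.Stderr),
            zapcore.DebugLevel,
        )
        logger := zap.New(core, zap.WithFatalHook(zapcore.WriteThenGoexit))
        _ = logger // logger.Fatal now ends the calling goroutine, not the process
    }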

vendor/go.uber.org/zap/sink.go

@ -1,4 +1,4 @@
// Copyright (c) 2016 Uber Technologies, Inc. // Copyright (c) 2016-2022 Uber Technologies, Inc.
// //
// Permission is hereby granted, free of charge, to any person obtaining a copy // Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal // of this software and associated documentation files (the "Software"), to deal
@ -26,6 +26,7 @@ import (
"io" "io"
"net/url" "net/url"
"os" "os"
"path/filepath"
"strings" "strings"
"sync" "sync"
@ -34,23 +35,7 @@ import (
const schemeFile = "file" const schemeFile = "file"
var ( var _sinkRegistry = newSinkRegistry()
_sinkMutex sync.RWMutex
_sinkFactories map[string]func(*url.URL) (Sink, error) // keyed by scheme
)
func init() {
resetSinkRegistry()
}
func resetSinkRegistry() {
_sinkMutex.Lock()
defer _sinkMutex.Unlock()
_sinkFactories = map[string]func(*url.URL) (Sink, error){
schemeFile: newFileSink,
}
}
// Sink defines the interface to write to and close logger destinations. // Sink defines the interface to write to and close logger destinations.
type Sink interface { type Sink interface {
@ -58,10 +43,6 @@ type Sink interface {
io.Closer io.Closer
} }
type nopCloserSink struct{ zapcore.WriteSyncer }
func (nopCloserSink) Close() error { return nil }
type errSinkNotFound struct { type errSinkNotFound struct {
scheme string scheme string
} }
@ -70,16 +51,30 @@ func (e *errSinkNotFound) Error() string {
return fmt.Sprintf("no sink found for scheme %q", e.scheme) return fmt.Sprintf("no sink found for scheme %q", e.scheme)
} }
// RegisterSink registers a user-supplied factory for all sinks with a type nopCloserSink struct{ zapcore.WriteSyncer }
// particular scheme.
// func (nopCloserSink) Close() error { return nil }
// All schemes must be ASCII, valid under section 3.1 of RFC 3986
// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already type sinkRegistry struct {
// have a factory registered. Zap automatically registers a factory for the mu sync.Mutex
// "file" scheme. factories map[string]func(*url.URL) (Sink, error) // keyed by scheme
func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error { openFile func(string, int, os.FileMode) (*os.File, error) // type matches os.OpenFile
_sinkMutex.Lock() }
defer _sinkMutex.Unlock()
func newSinkRegistry() *sinkRegistry {
sr := &sinkRegistry{
factories: make(map[string]func(*url.URL) (Sink, error)),
openFile: os.OpenFile,
}
// Infallible operation: the registry is empty, so we can't have a conflict.
_ = sr.RegisterSink(schemeFile, sr.newFileSinkFromURL)
return sr
}
// RegisterScheme registers the given factory for the specific scheme.
func (sr *sinkRegistry) RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
sr.mu.Lock()
defer sr.mu.Unlock()
if scheme == "" { if scheme == "" {
return errors.New("can't register a sink factory for empty string") return errors.New("can't register a sink factory for empty string")
@ -88,14 +83,22 @@ func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
if err != nil { if err != nil {
return fmt.Errorf("%q is not a valid scheme: %v", scheme, err) return fmt.Errorf("%q is not a valid scheme: %v", scheme, err)
} }
if _, ok := _sinkFactories[normalized]; ok { if _, ok := sr.factories[normalized]; ok {
return fmt.Errorf("sink factory already registered for scheme %q", normalized) return fmt.Errorf("sink factory already registered for scheme %q", normalized)
} }
_sinkFactories[normalized] = factory sr.factories[normalized] = factory
return nil return nil
} }
func newSink(rawURL string) (Sink, error) { func (sr *sinkRegistry) newSink(rawURL string) (Sink, error) {
// URL parsing doesn't work well for Windows paths such as `c:\log.txt`, as scheme is set to
// the drive, and path is unset unless `c:/log.txt` is used.
// To avoid Windows-specific URL handling, we instead check IsAbs to open as a file.
// filepath.IsAbs is OS-specific, so IsAbs('c:/log.txt') is false outside of Windows.
if filepath.IsAbs(rawURL) {
return sr.newFileSinkFromPath(rawURL)
}
u, err := url.Parse(rawURL) u, err := url.Parse(rawURL)
if err != nil { if err != nil {
return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err) return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err)
@ -104,16 +107,27 @@ func newSink(rawURL string) (Sink, error) {
u.Scheme = schemeFile u.Scheme = schemeFile
} }
_sinkMutex.RLock() sr.mu.Lock()
factory, ok := _sinkFactories[u.Scheme] factory, ok := sr.factories[u.Scheme]
_sinkMutex.RUnlock() sr.mu.Unlock()
if !ok { if !ok {
return nil, &errSinkNotFound{u.Scheme} return nil, &errSinkNotFound{u.Scheme}
} }
return factory(u) return factory(u)
} }
func newFileSink(u *url.URL) (Sink, error) { // RegisterSink registers a user-supplied factory for all sinks with a
// particular scheme.
//
// All schemes must be ASCII, valid under section 3.1 of RFC 3986
// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already
// have a factory registered. Zap automatically registers a factory for the
// "file" scheme.
func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
return _sinkRegistry.RegisterSink(scheme, factory)
}
func (sr *sinkRegistry) newFileSinkFromURL(u *url.URL) (Sink, error) {
if u.User != nil { if u.User != nil {
return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u) return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u)
} }
@ -130,13 +144,18 @@ func newFileSink(u *url.URL) (Sink, error) {
if hn := u.Hostname(); hn != "" && hn != "localhost" { if hn := u.Hostname(); hn != "" && hn != "localhost" {
return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u) return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u)
} }
switch u.Path {
return sr.newFileSinkFromPath(u.Path)
}
func (sr *sinkRegistry) newFileSinkFromPath(path string) (Sink, error) {
switch path {
case "stdout": case "stdout":
return nopCloserSink{os.Stdout}, nil return nopCloserSink{os.Stdout}, nil
case "stderr": case "stderr":
return nopCloserSink{os.Stderr}, nil return nopCloserSink{os.Stderr}, nil
} }
return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666) return sr.openFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0o666)
} }
func normalizeScheme(s string) (string, error) { func normalizeScheme(s string) (string, error) {
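The public entry point is unchanged: RegisterSink still takes a scheme and a factory, it simply delegates to the registry now. A sketch of registering a discard sink under a made-up "null" scheme and routing config output to it:

    package main

    import (
        "net/url"

        "go.uber.org/zap"
    )

    // nullSink satisfies zap.Sink (WriteSyncer + io.Closer) and drops everything.
    type nullSink struct{}

    func (nullSink) Write(p []byte) (int, error) { return len(p), nil }
    func (nullSink) Sync() error                 { return nil }
    func (nullSink) Close() error                { return nil }

    func main() {
        if err := zap.RegisterSink("null", func(*url.URL) (zap.Sink, error) {
            return nullSink{}, nil
        }); err != nil {
            panic(err)
        }

        cfg := zap.NewProductionConfig()
        cfg.OutputPaths = []string{"stderr", "null://"}
        logger := zap.Must(cfg.Build())
        logger.Info("written to stderr, discarded by null://")
    }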

vendor/go.uber.org/zap/sugar.go

@ -31,6 +31,7 @@ import (
const ( const (
_oddNumberErrMsg = "Ignored key without a value." _oddNumberErrMsg = "Ignored key without a value."
_nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys." _nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys."
_multipleErrMsg = "Multiple errors without a key."
) )
// A SugaredLogger wraps the base Logger functionality in a slower, but less // A SugaredLogger wraps the base Logger functionality in a slower, but less
@ -38,10 +39,19 @@ const (
// method. // method.
// //
// Unlike the Logger, the SugaredLogger doesn't insist on structured logging. // Unlike the Logger, the SugaredLogger doesn't insist on structured logging.
// For each log level, it exposes three methods: one for loosely-typed // For each log level, it exposes four methods:
// structured logging, one for println-style formatting, and one for //
// printf-style formatting. For example, SugaredLoggers can produce InfoLevel // - methods named after the log level for log.Print-style logging
// output with Infow ("info with" structured context), Info, or Infof. // - methods ending in "w" for loosely-typed structured logging
// - methods ending in "f" for log.Printf-style logging
// - methods ending in "ln" for log.Println-style logging
//
// For example, the methods for InfoLevel are:
//
// Info(...any) Print-style logging
// Infow(...any) Structured logging (read as "info with")
// Infof(string, ...any) Printf-style logging
// Infoln(...any) Println-style logging
type SugaredLogger struct { type SugaredLogger struct {
base *Logger base *Logger
} }
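The four method families side by side, as a sketch:

    package main

    import "go.uber.org/zap"

    func main() {
        sugar := zap.Must(zap.NewDevelopment()).Sugar()
        defer func() { _ = sugar.Sync() }()

        sugar.Info("listening on ", 8080)                    // Print-style
        sugar.Infof("listening on :%d", 8080)                // Printf-style
        sugar.Infoln("listening on", 8080)                   // Println-style (spaces always added)
        sugar.Infow("listening", "port", 8080, "tls", false) // structured ("info with")
    }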
@ -61,27 +71,40 @@ func (s *SugaredLogger) Named(name string) *SugaredLogger {
return &SugaredLogger{base: s.base.Named(name)} return &SugaredLogger{base: s.base.Named(name)}
} }
// WithOptions clones the current SugaredLogger, applies the supplied Options,
// and returns the result. It's safe to use concurrently.
func (s *SugaredLogger) WithOptions(opts ...Option) *SugaredLogger {
base := s.base.clone()
for _, opt := range opts {
opt.apply(base)
}
return &SugaredLogger{base: base}
}
// With adds a variadic number of fields to the logging context. It accepts a // With adds a variadic number of fields to the logging context. It accepts a
// mix of strongly-typed Field objects and loosely-typed key-value pairs. When // mix of strongly-typed Field objects and loosely-typed key-value pairs. When
// processing pairs, the first element of the pair is used as the field key // processing pairs, the first element of the pair is used as the field key
// and the second as the field value. // and the second as the field value.
// //
// For example, // For example,
// sugaredLogger.With( //
// "hello", "world", // sugaredLogger.With(
// "failure", errors.New("oh no"), // "hello", "world",
// Stack(), // "failure", errors.New("oh no"),
// "count", 42, // Stack(),
// "user", User{Name: "alice"}, // "count", 42,
// ) // "user", User{Name: "alice"},
// )
//
// is the equivalent of // is the equivalent of
// unsugared.With( //
// String("hello", "world"), // unsugared.With(
// String("failure", "oh no"), // String("hello", "world"),
// Stack(), // String("failure", "oh no"),
// Int("count", 42), // Stack(),
// Object("user", User{Name: "alice"}), // Int("count", 42),
// ) // Object("user", User{Name: "alice"}),
// )
// //
// Note that the keys in key-value pairs should be strings. In development, // Note that the keys in key-value pairs should be strings. In development,
// passing a non-string key panics. In production, the logger is more // passing a non-string key panics. In production, the logger is more
@ -92,74 +115,95 @@ func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger {
return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)} return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)}
} }
// Debug uses fmt.Sprint to construct and log a message. // Level reports the minimum enabled level for this logger.
//
// For NopLoggers, this is [zapcore.InvalidLevel].
func (s *SugaredLogger) Level() zapcore.Level {
return zapcore.LevelOf(s.base.core)
}
// Debug logs the provided arguments at [DebugLevel].
// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Debug(args ...interface{}) { func (s *SugaredLogger) Debug(args ...interface{}) {
s.log(DebugLevel, "", args, nil) s.log(DebugLevel, "", args, nil)
} }
// Info uses fmt.Sprint to construct and log a message. // Info logs the provided arguments at [InfoLevel].
// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Info(args ...interface{}) { func (s *SugaredLogger) Info(args ...interface{}) {
s.log(InfoLevel, "", args, nil) s.log(InfoLevel, "", args, nil)
} }
// Warn uses fmt.Sprint to construct and log a message. // Warn logs the provided arguments at [WarnLevel].
// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Warn(args ...interface{}) { func (s *SugaredLogger) Warn(args ...interface{}) {
s.log(WarnLevel, "", args, nil) s.log(WarnLevel, "", args, nil)
} }
// Error uses fmt.Sprint to construct and log a message. // Error logs the provided arguments at [ErrorLevel].
// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Error(args ...interface{}) { func (s *SugaredLogger) Error(args ...interface{}) {
s.log(ErrorLevel, "", args, nil) s.log(ErrorLevel, "", args, nil)
} }
// DPanic uses fmt.Sprint to construct and log a message. In development, the // DPanic logs the provided arguments at [DPanicLevel].
// logger then panics. (See DPanicLevel for details.) // In development, the logger then panics. (See [DPanicLevel] for details.)
// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) DPanic(args ...interface{}) { func (s *SugaredLogger) DPanic(args ...interface{}) {
s.log(DPanicLevel, "", args, nil) s.log(DPanicLevel, "", args, nil)
} }
// Panic uses fmt.Sprint to construct and log a message, then panics. // Panic constructs a message with the provided arguments and panics.
// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Panic(args ...interface{}) { func (s *SugaredLogger) Panic(args ...interface{}) {
s.log(PanicLevel, "", args, nil) s.log(PanicLevel, "", args, nil)
} }
// Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit. // Fatal constructs a message with the provided arguments and calls os.Exit.
// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Fatal(args ...interface{}) { func (s *SugaredLogger) Fatal(args ...interface{}) {
s.log(FatalLevel, "", args, nil) s.log(FatalLevel, "", args, nil)
} }
// Debugf uses fmt.Sprintf to log a templated message. // Debugf formats the message according to the format specifier
// and logs it at [DebugLevel].
func (s *SugaredLogger) Debugf(template string, args ...interface{}) { func (s *SugaredLogger) Debugf(template string, args ...interface{}) {
s.log(DebugLevel, template, args, nil) s.log(DebugLevel, template, args, nil)
} }
// Infof uses fmt.Sprintf to log a templated message. // Infof formats the message according to the format specifier
// and logs it at [InfoLevel].
func (s *SugaredLogger) Infof(template string, args ...interface{}) { func (s *SugaredLogger) Infof(template string, args ...interface{}) {
s.log(InfoLevel, template, args, nil) s.log(InfoLevel, template, args, nil)
} }
// Warnf uses fmt.Sprintf to log a templated message. // Warnf formats the message according to the format specifier
// and logs it at [WarnLevel].
func (s *SugaredLogger) Warnf(template string, args ...interface{}) { func (s *SugaredLogger) Warnf(template string, args ...interface{}) {
s.log(WarnLevel, template, args, nil) s.log(WarnLevel, template, args, nil)
} }
// Errorf uses fmt.Sprintf to log a templated message. // Errorf formats the message according to the format specifier
// and logs it at [ErrorLevel].
func (s *SugaredLogger) Errorf(template string, args ...interface{}) { func (s *SugaredLogger) Errorf(template string, args ...interface{}) {
s.log(ErrorLevel, template, args, nil) s.log(ErrorLevel, template, args, nil)
} }
// DPanicf uses fmt.Sprintf to log a templated message. In development, the // DPanicf formats the message according to the format specifier
// logger then panics. (See DPanicLevel for details.) // and logs it at [DPanicLevel].
// In development, the logger then panics. (See [DPanicLevel] for details.)
func (s *SugaredLogger) DPanicf(template string, args ...interface{}) { func (s *SugaredLogger) DPanicf(template string, args ...interface{}) {
s.log(DPanicLevel, template, args, nil) s.log(DPanicLevel, template, args, nil)
} }
// Panicf uses fmt.Sprintf to log a templated message, then panics. // Panicf formats the message according to the format specifier
// and panics.
func (s *SugaredLogger) Panicf(template string, args ...interface{}) { func (s *SugaredLogger) Panicf(template string, args ...interface{}) {
s.log(PanicLevel, template, args, nil) s.log(PanicLevel, template, args, nil)
} }
// Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit. // Fatalf formats the message according to the format specifier
// and calls os.Exit.
func (s *SugaredLogger) Fatalf(template string, args ...interface{}) { func (s *SugaredLogger) Fatalf(template string, args ...interface{}) {
s.log(FatalLevel, template, args, nil) s.log(FatalLevel, template, args, nil)
} }
@ -168,7 +212,8 @@ func (s *SugaredLogger) Fatalf(template string, args ...interface{}) {
// pairs are treated as they are in With. // pairs are treated as they are in With.
// //
// When debug-level logging is disabled, this is much faster than // When debug-level logging is disabled, this is much faster than
// s.With(keysAndValues).Debug(msg) //
// s.With(keysAndValues).Debug(msg)
func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) { func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) {
s.log(DebugLevel, msg, nil, keysAndValues) s.log(DebugLevel, msg, nil, keysAndValues)
} }
@ -210,11 +255,55 @@ func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) {
s.log(FatalLevel, msg, nil, keysAndValues) s.log(FatalLevel, msg, nil, keysAndValues)
} }
// Debugln logs a message at [DebugLevel].
// Spaces are always added between arguments.
func (s *SugaredLogger) Debugln(args ...interface{}) {
s.logln(DebugLevel, args, nil)
}
// Infoln logs a message at [InfoLevel].
// Spaces are always added between arguments.
func (s *SugaredLogger) Infoln(args ...interface{}) {
s.logln(InfoLevel, args, nil)
}
// Warnln logs a message at [WarnLevel].
// Spaces are always added between arguments.
func (s *SugaredLogger) Warnln(args ...interface{}) {
s.logln(WarnLevel, args, nil)
}
// Errorln logs a message at [ErrorLevel].
// Spaces are always added between arguments.
func (s *SugaredLogger) Errorln(args ...interface{}) {
s.logln(ErrorLevel, args, nil)
}
// DPanicln logs a message at [DPanicLevel].
// In development, the logger then panics. (See [DPanicLevel] for details.)
// Spaces are always added between arguments.
func (s *SugaredLogger) DPanicln(args ...interface{}) {
s.logln(DPanicLevel, args, nil)
}
// Panicln logs a message at [PanicLevel] and panics.
// Spaces are always added between arguments.
func (s *SugaredLogger) Panicln(args ...interface{}) {
s.logln(PanicLevel, args, nil)
}
// Fatalln logs a message at [FatalLevel] and calls os.Exit.
// Spaces are always added between arguments.
func (s *SugaredLogger) Fatalln(args ...interface{}) {
s.logln(FatalLevel, args, nil)
}
// Sync flushes any buffered log entries. // Sync flushes any buffered log entries.
func (s *SugaredLogger) Sync() error { func (s *SugaredLogger) Sync() error {
return s.base.Sync() return s.base.Sync()
} }
// log message with Sprint, Sprintf, or neither.
func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) { func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) {
// If logging at this level is completely disabled, skip the overhead of // If logging at this level is completely disabled, skip the overhead of
// string formatting. // string formatting.
@ -228,6 +317,18 @@ func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interf
} }
} }
// logln message with Sprintln
func (s *SugaredLogger) logln(lvl zapcore.Level, fmtArgs []interface{}, context []interface{}) {
if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) {
return
}
msg := getMessageln(fmtArgs)
if ce := s.base.Check(lvl, msg); ce != nil {
ce.Write(s.sweetenFields(context)...)
}
}
// getMessage format with Sprint, Sprintf, or neither. // getMessage format with Sprint, Sprintf, or neither.
func getMessage(template string, fmtArgs []interface{}) string { func getMessage(template string, fmtArgs []interface{}) string {
if len(fmtArgs) == 0 { if len(fmtArgs) == 0 {
@ -246,15 +347,24 @@ func getMessage(template string, fmtArgs []interface{}) string {
return fmt.Sprint(fmtArgs...) return fmt.Sprint(fmtArgs...)
} }
// getMessageln format with Sprintln.
func getMessageln(fmtArgs []interface{}) string {
msg := fmt.Sprintln(fmtArgs...)
return msg[:len(msg)-1]
}
func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { func (s *SugaredLogger) sweetenFields(args []interface{}) []Field {
if len(args) == 0 { if len(args) == 0 {
return nil return nil
} }
// Allocate enough space for the worst case; if users pass only structured var (
// fields, we shouldn't penalize them with extra allocations. // Allocate enough space for the worst case; if users pass only structured
fields := make([]Field, 0, len(args)) // fields, we shouldn't penalize them with extra allocations.
var invalid invalidPairs fields = make([]Field, 0, len(args))
invalid invalidPairs
seenError bool
)
for i := 0; i < len(args); { for i := 0; i < len(args); {
// This is a strongly-typed field. Consume it and move on. // This is a strongly-typed field. Consume it and move on.
@ -264,6 +374,18 @@ func (s *SugaredLogger) sweetenFields(args []interface{}) []Field {
continue continue
} }
// If it is an error, consume it and move on.
if err, ok := args[i].(error); ok {
if !seenError {
seenError = true
fields = append(fields, Error(err))
} else {
s.base.Error(_multipleErrMsg, Error(err))
}
i++
continue
}
// Make sure this element isn't a dangling key. // Make sure this element isn't a dangling key.
if i == len(args)-1 { if i == len(args)-1 {
s.base.Error(_oddNumberErrMsg, Any("ignored", args[i])) s.base.Error(_oddNumberErrMsg, Any("ignored", args[i]))
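With the new sweetenFields behavior, a bare error in the key-value list is promoted to the standard "error" field instead of tripping the non-string-key warning; a second bare error is reported via _multipleErrMsg. A sketch:

    package main

    import (
        "errors"

        "go.uber.org/zap"
    )

    func main() {
        sugar := zap.Must(zap.NewDevelopment()).Sugar()
        defer func() { _ = sugar.Sync() }()

        err := errors.New("disk full")
        // Logged roughly as if zap.Error(err) and zap.Int("bytes", 512)
        // had been passed to the unsugared logger.
        sugar.Infow("copy failed", err, "bytes", 512)
    }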

vendor/go.uber.org/zap/writer.go

@ -1,4 +1,4 @@
// Copyright (c) 2016 Uber Technologies, Inc. // Copyright (c) 2016-2022 Uber Technologies, Inc.
// //
// Permission is hereby granted, free of charge, to any person obtaining a copy // Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal // of this software and associated documentation files (the "Software"), to deal
@ -23,7 +23,6 @@ package zap
import ( import (
"fmt" "fmt"
"io" "io"
"io/ioutil"
"go.uber.org/zap/zapcore" "go.uber.org/zap/zapcore"
@ -49,40 +48,40 @@ import (
// os.Stdout and os.Stderr. When specified without a scheme, relative file // os.Stdout and os.Stderr. When specified without a scheme, relative file
// paths also work. // paths also work.
func Open(paths ...string) (zapcore.WriteSyncer, func(), error) { func Open(paths ...string) (zapcore.WriteSyncer, func(), error) {
writers, close, err := open(paths) writers, closeAll, err := open(paths)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
writer := CombineWriteSyncers(writers...) writer := CombineWriteSyncers(writers...)
return writer, close, nil return writer, closeAll, nil
} }
func open(paths []string) ([]zapcore.WriteSyncer, func(), error) { func open(paths []string) ([]zapcore.WriteSyncer, func(), error) {
writers := make([]zapcore.WriteSyncer, 0, len(paths)) writers := make([]zapcore.WriteSyncer, 0, len(paths))
closers := make([]io.Closer, 0, len(paths)) closers := make([]io.Closer, 0, len(paths))
close := func() { closeAll := func() {
for _, c := range closers { for _, c := range closers {
c.Close() _ = c.Close()
} }
} }
var openErr error var openErr error
for _, path := range paths { for _, path := range paths {
sink, err := newSink(path) sink, err := _sinkRegistry.newSink(path)
if err != nil { if err != nil {
openErr = multierr.Append(openErr, fmt.Errorf("couldn't open sink %q: %v", path, err)) openErr = multierr.Append(openErr, fmt.Errorf("open sink %q: %w", path, err))
continue continue
} }
writers = append(writers, sink) writers = append(writers, sink)
closers = append(closers, sink) closers = append(closers, sink)
} }
if openErr != nil { if openErr != nil {
close() closeAll()
return writers, nil, openErr return nil, nil, openErr
} }
return writers, close, nil return writers, closeAll, nil
} }
// CombineWriteSyncers is a utility that combines multiple WriteSyncers into a // CombineWriteSyncers is a utility that combines multiple WriteSyncers into a
@ -93,7 +92,7 @@ func open(paths []string) ([]zapcore.WriteSyncer, func(), error) {
// using zapcore.NewMultiWriteSyncer and zapcore.Lock individually. // using zapcore.NewMultiWriteSyncer and zapcore.Lock individually.
func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer { func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer {
if len(writers) == 0 { if len(writers) == 0 {
return zapcore.AddSync(ioutil.Discard) return zapcore.AddSync(io.Discard)
} }
return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...)) return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...))
} }
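Open's error path now returns nil writers and closes anything it had already opened. A usage sketch (the file path is arbitrary):

    package main

    import (
        "log"

        "go.uber.org/zap"
        "go.uber.org/zap/zapcore"
    )

    func main() {
        ws, closeAll, err := zap.Open("stderr", "/tmp/app.log")
        if err != nil {
            log.Fatal(err) // on error, the writers are nil and already closed
        }
        defer closeAll()

        core := zapcore.NewCore(
            zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
            ws,
            zap.InfoLevel,
        )
        zap.New(core).Info("hello")
    }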


@ -43,6 +43,37 @@ const (
// //
// BufferedWriteSyncer is safe for concurrent use. You don't need to use // BufferedWriteSyncer is safe for concurrent use. You don't need to use
// zapcore.Lock for WriteSyncers with BufferedWriteSyncer. // zapcore.Lock for WriteSyncers with BufferedWriteSyncer.
//
// To set up a BufferedWriteSyncer, construct a WriteSyncer for your log
// destination (*os.File is a valid WriteSyncer), wrap it with
// BufferedWriteSyncer, and defer a Stop() call for when you no longer need the
// object.
//
// func main() {
// ws := ... // your log destination
// bws := &zapcore.BufferedWriteSyncer{WS: ws}
// defer bws.Stop()
//
// // ...
// core := zapcore.NewCore(enc, bws, lvl)
// logger := zap.New(core)
//
// // ...
// }
//
// By default, a BufferedWriteSyncer will buffer up to 256 kilobytes of logs,
// waiting at most 30 seconds between flushes.
// You can customize these parameters by setting the Size or FlushInterval
// fields.
// For example, the following buffers up to 512 kB of logs before flushing them
// to Stderr, with a maximum of one minute between each flush.
//
// ws := &BufferedWriteSyncer{
// WS: os.Stderr,
// Size: 512 * 1024, // 512 kB
// FlushInterval: time.Minute,
// }
// defer ws.Stop()
type BufferedWriteSyncer struct { type BufferedWriteSyncer struct {
// WS is the WriteSyncer around which BufferedWriteSyncer will buffer // WS is the WriteSyncer around which BufferedWriteSyncer will buffer
// writes. // writes.


@ -22,20 +22,20 @@ package zapcore
import ( import (
"fmt" "fmt"
"sync"
"go.uber.org/zap/buffer" "go.uber.org/zap/buffer"
"go.uber.org/zap/internal/bufferpool" "go.uber.org/zap/internal/bufferpool"
"go.uber.org/zap/internal/pool"
) )
var _sliceEncoderPool = sync.Pool{ var _sliceEncoderPool = pool.New(func() *sliceArrayEncoder {
New: func() interface{} { return &sliceArrayEncoder{
return &sliceArrayEncoder{elems: make([]interface{}, 0, 2)} elems: make([]interface{}, 0, 2),
}, }
} })
func getSliceEncoder() *sliceArrayEncoder { func getSliceEncoder() *sliceArrayEncoder {
return _sliceEncoderPool.Get().(*sliceArrayEncoder) return _sliceEncoderPool.Get()
} }
func putSliceEncoder(e *sliceArrayEncoder) { func putSliceEncoder(e *sliceArrayEncoder) {


@ -69,6 +69,15 @@ type ioCore struct {
out WriteSyncer out WriteSyncer
} }
var (
_ Core = (*ioCore)(nil)
_ leveledEnabler = (*ioCore)(nil)
)
func (c *ioCore) Level() Level {
return LevelOf(c.LevelEnabler)
}
func (c *ioCore) With(fields []Field) Core { func (c *ioCore) With(fields []Field) Core {
clone := c.clone() clone := c.clone()
addFields(clone.enc, fields) addFields(clone.enc, fields)
@ -93,9 +102,9 @@ func (c *ioCore) Write(ent Entry, fields []Field) error {
return err return err
} }
if ent.Level > ErrorLevel { if ent.Level > ErrorLevel {
// Since we may be crashing the program, sync the output. Ignore Sync // Since we may be crashing the program, sync the output.
// errors, pending a clean solution to issue #370. // Ignore Sync errors, pending a clean solution to issue #370.
c.Sync() _ = c.Sync()
} }
return nil return nil
} }


@ -188,10 +188,13 @@ func (e *TimeEncoder) UnmarshalText(text []byte) error {
// UnmarshalYAML unmarshals YAML to a TimeEncoder. // UnmarshalYAML unmarshals YAML to a TimeEncoder.
// If value is an object with a "layout" field, it will be unmarshaled to TimeEncoder with given layout. // If value is an object with a "layout" field, it will be unmarshaled to TimeEncoder with given layout.
// timeEncoder: //
// layout: 06/01/02 03:04pm // timeEncoder:
// layout: 06/01/02 03:04pm
//
// If value is string, it uses UnmarshalText. // If value is string, it uses UnmarshalText.
// timeEncoder: iso8601 //
// timeEncoder: iso8601
func (e *TimeEncoder) UnmarshalYAML(unmarshal func(interface{}) error) error { func (e *TimeEncoder) UnmarshalYAML(unmarshal func(interface{}) error) error {
var o struct { var o struct {
Layout string `json:"layout" yaml:"layout"` Layout string `json:"layout" yaml:"layout"`


@ -24,26 +24,23 @@ import (
"fmt" "fmt"
"runtime" "runtime"
"strings" "strings"
"sync"
"time" "time"
"go.uber.org/multierr"
"go.uber.org/zap/internal/bufferpool" "go.uber.org/zap/internal/bufferpool"
"go.uber.org/zap/internal/exit" "go.uber.org/zap/internal/exit"
"go.uber.org/zap/internal/pool"
"go.uber.org/multierr"
) )
var ( var _cePool = pool.New(func() *CheckedEntry {
_cePool = sync.Pool{New: func() interface{} { // Pre-allocate some space for cores.
// Pre-allocate some space for cores. return &CheckedEntry{
return &CheckedEntry{ cores: make([]Core, 4),
cores: make([]Core, 4), }
} })
}}
)
func getCheckedEntry() *CheckedEntry { func getCheckedEntry() *CheckedEntry {
ce := _cePool.Get().(*CheckedEntry) ce := _cePool.Get()
ce.reset() ce.reset()
return ce return ce
} }
@ -152,6 +149,27 @@ type Entry struct {
Stack string Stack string
} }
// CheckWriteHook is a custom action that may be executed after an entry is
// written.
//
// Register one on a CheckedEntry with the After method.
//
// if ce := logger.Check(...); ce != nil {
// ce = ce.After(hook)
// ce.Write(...)
// }
//
// You can configure the hook for Fatal log statements at the logger level with
// the zap.WithFatalHook option.
type CheckWriteHook interface {
// OnWrite is invoked with the CheckedEntry that was written and a list
// of fields added with that entry.
//
// The list of fields DOES NOT include fields that were already added
// to the logger with the With method.
OnWrite(*CheckedEntry, []Field)
}
// CheckWriteAction indicates what action to take after a log entry is // CheckWriteAction indicates what action to take after a log entry is
// processed. Actions are ordered in increasing severity. // processed. Actions are ordered in increasing severity.
type CheckWriteAction uint8 type CheckWriteAction uint8
@ -164,21 +182,36 @@ const (
WriteThenGoexit WriteThenGoexit
// WriteThenPanic causes a panic after Write. // WriteThenPanic causes a panic after Write.
WriteThenPanic WriteThenPanic
// WriteThenFatal causes a fatal os.Exit after Write. // WriteThenFatal causes an os.Exit(1) after Write.
WriteThenFatal WriteThenFatal
) )
// OnWrite implements the OnWrite method to keep CheckWriteAction compatible
// with the new CheckWriteHook interface, which deprecates CheckWriteAction.
func (a CheckWriteAction) OnWrite(ce *CheckedEntry, _ []Field) {
switch a {
case WriteThenGoexit:
runtime.Goexit()
case WriteThenPanic:
panic(ce.Message)
case WriteThenFatal:
exit.With(1)
}
}
var _ CheckWriteHook = CheckWriteAction(0)
// CheckedEntry is an Entry together with a collection of Cores that have // CheckedEntry is an Entry together with a collection of Cores that have
// already agreed to log it. // already agreed to log it.
// //
// CheckedEntry references should be created by calling AddCore or Should on a // CheckedEntry references should be created by calling AddCore or After on a
// nil *CheckedEntry. References are returned to a pool after Write, and MUST // nil *CheckedEntry. References are returned to a pool after Write, and MUST
// NOT be retained after calling their Write method. // NOT be retained after calling their Write method.
type CheckedEntry struct { type CheckedEntry struct {
Entry Entry
ErrorOutput WriteSyncer ErrorOutput WriteSyncer
dirty bool // best-effort detection of pool misuse dirty bool // best-effort detection of pool misuse
should CheckWriteAction after CheckWriteHook
cores []Core cores []Core
} }
@ -186,7 +219,7 @@ func (ce *CheckedEntry) reset() {
ce.Entry = Entry{} ce.Entry = Entry{}
ce.ErrorOutput = nil ce.ErrorOutput = nil
ce.dirty = false ce.dirty = false
ce.should = WriteThenNoop ce.after = nil
for i := range ce.cores { for i := range ce.cores {
// don't keep references to cores // don't keep references to cores
ce.cores[i] = nil ce.cores[i] = nil
@ -209,7 +242,7 @@ func (ce *CheckedEntry) Write(fields ...Field) {
// CheckedEntry is being used after it was returned to the pool, // CheckedEntry is being used after it was returned to the pool,
// the message may be an amalgamation from multiple call sites. // the message may be an amalgamation from multiple call sites.
fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry) fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry)
ce.ErrorOutput.Sync() _ = ce.ErrorOutput.Sync() // ignore error
} }
return return
} }
@ -221,20 +254,14 @@ func (ce *CheckedEntry) Write(fields ...Field) {
} }
if err != nil && ce.ErrorOutput != nil { if err != nil && ce.ErrorOutput != nil {
fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err) fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err)
ce.ErrorOutput.Sync() _ = ce.ErrorOutput.Sync() // ignore error
} }
should, msg := ce.should, ce.Message hook := ce.after
if hook != nil {
hook.OnWrite(ce, fields)
}
putCheckedEntry(ce) putCheckedEntry(ce)
switch should {
case WriteThenPanic:
panic(msg)
case WriteThenFatal:
exit.Exit()
case WriteThenGoexit:
runtime.Goexit()
}
} }
// AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be // AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be
@ -252,11 +279,20 @@ func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry {
// Should sets this CheckedEntry's CheckWriteAction, which controls whether a // Should sets this CheckedEntry's CheckWriteAction, which controls whether a
// Core will panic or fatal after writing this log entry. Like AddCore, it's // Core will panic or fatal after writing this log entry. Like AddCore, it's
// safe to call on nil CheckedEntry references. // safe to call on nil CheckedEntry references.
//
// Deprecated: Use [CheckedEntry.After] instead.
func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry { func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry {
return ce.After(ent, should)
}
// After sets this CheckedEntry's CheckWriteHook, which will be called after this
// log entry has been written. It's safe to call this on nil CheckedEntry
// references.
func (ce *CheckedEntry) After(ent Entry, hook CheckWriteHook) *CheckedEntry {
if ce == nil { if ce == nil {
ce = getCheckedEntry() ce = getCheckedEntry()
ce.Entry = ent ce.Entry = ent
} }
ce.should = should ce.after = hook
return ce return ce
} }


@ -23,7 +23,8 @@ package zapcore
import ( import (
"fmt" "fmt"
"reflect" "reflect"
"sync"
"go.uber.org/zap/internal/pool"
) )
// Encodes the given error into fields of an object. A field with the given // Encodes the given error into fields of an object. A field with the given
@ -36,13 +37,13 @@ import (
// causer (from github.com/pkg/errors), a ${key}Causes field is added with an // causer (from github.com/pkg/errors), a ${key}Causes field is added with an
// array of objects containing the errors this error was comprised of. // array of objects containing the errors this error was comprised of.
// //
// { // {
// "error": err.Error(), // "error": err.Error(),
// "errorVerbose": fmt.Sprintf("%+v", err), // "errorVerbose": fmt.Sprintf("%+v", err),
// "errorCauses": [ // "errorCauses": [
// ... // ...
// ], // ],
// } // }
func encodeError(key string, err error, enc ObjectEncoder) (retErr error) { func encodeError(key string, err error, enc ObjectEncoder) (retErr error) {
// Try to capture panics (from nil references or otherwise) when calling // Try to capture panics (from nil references or otherwise) when calling
// the Error() method // the Error() method
@ -97,15 +98,18 @@ func (errs errArray) MarshalLogArray(arr ArrayEncoder) error {
} }
el := newErrArrayElem(errs[i]) el := newErrArrayElem(errs[i])
arr.AppendObject(el) err := arr.AppendObject(el)
el.Free() el.Free()
if err != nil {
return err
}
} }
return nil return nil
} }
var _errArrayElemPool = sync.Pool{New: func() interface{} { var _errArrayElemPool = pool.New(func() *errArrayElem {
return &errArrayElem{} return &errArrayElem{}
}} })
// Encodes any error into a {"error": ...} re-using the same errors logic. // Encodes any error into a {"error": ...} re-using the same errors logic.
// //
@ -113,7 +117,7 @@ var _errArrayElemPool = sync.Pool{New: func() interface{} {
type errArrayElem struct{ err error } type errArrayElem struct{ err error }
func newErrArrayElem(err error) *errArrayElem { func newErrArrayElem(err error) *errArrayElem {
e := _errArrayElemPool.Get().(*errArrayElem) e := _errArrayElemPool.Get()
e.err = err e.err = err
return e return e
} }
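
For reference, a small sketch (not from this diff) of how these keys surface through the public API; errorVerbose only appears for errors that implement fmt.Formatter, and errorCauses for multi-cause errors:

package main

import (
	"errors"
	"fmt"

	"go.uber.org/zap"
)

func main() {
	logger := zap.NewExample()
	defer func() { _ = logger.Sync() }()

	err := fmt.Errorf("reading config: %w", errors.New("file not found"))
	// Prints a line like:
	// {"level":"error","msg":"startup failed","error":"reading config: file not found"}
	logger.Error("startup failed", zap.Error(err))
}
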


@ -27,6 +27,11 @@ type hooked struct {
funcs []func(Entry) error funcs []func(Entry) error
} }
var (
_ Core = (*hooked)(nil)
_ leveledEnabler = (*hooked)(nil)
)
// RegisterHooks wraps a Core and runs a collection of user-defined callback // RegisterHooks wraps a Core and runs a collection of user-defined callback
// hooks each time a message is logged. Execution of the callbacks is blocking. // hooks each time a message is logged. Execution of the callbacks is blocking.
// //
@ -40,6 +45,10 @@ func RegisterHooks(core Core, hooks ...func(Entry) error) Core {
} }
} }
func (h *hooked) Level() Level {
return LevelOf(h.Core)
}
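
A short sketch (not part of the vendored code) of RegisterHooks in application code, counting warnings with a blocking hook:

package main

import (
	"fmt"
	"os"
	"sync/atomic"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	var warnings atomic.Int64

	base := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stdout),
		zapcore.InfoLevel,
	)
	// Hooks run synchronously for every entry the wrapped core accepts.
	hooked := zapcore.RegisterHooks(base, func(e zapcore.Entry) error {
		if e.Level >= zapcore.WarnLevel {
			warnings.Add(1)
		}
		return nil
	})

	logger := zap.New(hooked)
	logger.Warn("low battery")
	fmt.Println("warnings:", warnings.Load())
}
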
func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
// Let the wrapped Core decide whether to log this message or not. This // Let the wrapped Core decide whether to log this message or not. This
// also gives the downstream a chance to register itself directly with the // also gives the downstream a chance to register itself directly with the


@ -27,6 +27,11 @@ type levelFilterCore struct {
level LevelEnabler level LevelEnabler
} }
var (
_ Core = (*levelFilterCore)(nil)
_ leveledEnabler = (*levelFilterCore)(nil)
)
// NewIncreaseLevelCore creates a core that can be used to increase the level of // NewIncreaseLevelCore creates a core that can be used to increase the level of
// an existing Core. It cannot be used to decrease the logging level, as it acts // an existing Core. It cannot be used to decrease the logging level, as it acts
// as a filter before calling the underlying core. If level decreases the log level, // as a filter before calling the underlying core. If level decreases the log level,
@ -45,6 +50,10 @@ func (c *levelFilterCore) Enabled(lvl Level) bool {
return c.level.Enabled(lvl) return c.level.Enabled(lvl)
} }
func (c *levelFilterCore) Level() Level {
return LevelOf(c.level)
}
func (c *levelFilterCore) With(fields []Field) Core { func (c *levelFilterCore) With(fields []Field) Core {
return &levelFilterCore{c.core.With(fields), c.level} return &levelFilterCore{c.core.With(fields), c.level}
} }
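
A brief sketch (outside this diff) of NewIncreaseLevelCore raising the effective level of a debug core:

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	base := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stdout),
		zapcore.DebugLevel,
	)
	// Raise the effective level to Warn; asking for a level below the wrapped
	// core's level returns an error instead of silently lowering it.
	raised, err := zapcore.NewIncreaseLevelCore(base, zapcore.WarnLevel)
	if err != nil {
		panic(err)
	}
	logger := zap.New(raised)
	logger.Info("filtered out")
	logger.Warn("still logged")
}
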


@ -23,24 +23,20 @@ package zapcore
import ( import (
"encoding/base64" "encoding/base64"
"math" "math"
"sync"
"time" "time"
"unicode/utf8" "unicode/utf8"
"go.uber.org/zap/buffer" "go.uber.org/zap/buffer"
"go.uber.org/zap/internal/bufferpool" "go.uber.org/zap/internal/bufferpool"
"go.uber.org/zap/internal/pool"
) )
// For JSON-escaping; see jsonEncoder.safeAddString below. // For JSON-escaping; see jsonEncoder.safeAddString below.
const _hex = "0123456789abcdef" const _hex = "0123456789abcdef"
var _jsonPool = sync.Pool{New: func() interface{} { var _jsonPool = pool.New(func() *jsonEncoder {
return &jsonEncoder{} return &jsonEncoder{}
}} })
func getJSONEncoder() *jsonEncoder {
return _jsonPool.Get().(*jsonEncoder)
}
func putJSONEncoder(enc *jsonEncoder) { func putJSONEncoder(enc *jsonEncoder) {
if enc.reflectBuf != nil { if enc.reflectBuf != nil {
@ -71,7 +67,9 @@ type jsonEncoder struct {
// //
// Note that the encoder doesn't deduplicate keys, so it's possible to produce // Note that the encoder doesn't deduplicate keys, so it's possible to produce
// a message like // a message like
// {"foo":"bar","foo":"baz"} //
// {"foo":"bar","foo":"baz"}
//
// This is permitted by the JSON specification, but not encouraged. Many // This is permitted by the JSON specification, but not encouraged. Many
// libraries will ignore duplicate key-value pairs (typically keeping the last // libraries will ignore duplicate key-value pairs (typically keeping the last
// pair) when unmarshaling, but users should attempt to avoid adding duplicate // pair) when unmarshaling, but users should attempt to avoid adding duplicate
@ -352,7 +350,7 @@ func (enc *jsonEncoder) Clone() Encoder {
} }
func (enc *jsonEncoder) clone() *jsonEncoder { func (enc *jsonEncoder) clone() *jsonEncoder {
clone := getJSONEncoder() clone := _jsonPool.Get()
clone.EncoderConfig = enc.EncoderConfig clone.EncoderConfig = enc.EncoderConfig
clone.spaced = enc.spaced clone.spaced = enc.spaced
clone.openNamespaces = enc.openNamespaces clone.openNamespaces = enc.openNamespaces
@ -488,73 +486,98 @@ func (enc *jsonEncoder) appendFloat(val float64, bitSize int) {
// Unlike the standard library's encoder, it doesn't attempt to protect the // Unlike the standard library's encoder, it doesn't attempt to protect the
// user from browser vulnerabilities or JSONP-related problems. // user from browser vulnerabilities or JSONP-related problems.
func (enc *jsonEncoder) safeAddString(s string) { func (enc *jsonEncoder) safeAddString(s string) {
for i := 0; i < len(s); { safeAppendStringLike(
if enc.tryAddRuneSelf(s[i]) { (*buffer.Buffer).AppendString,
i++ utf8.DecodeRuneInString,
continue enc.buf,
} s,
r, size := utf8.DecodeRuneInString(s[i:]) )
if enc.tryAddRuneError(r, size) {
i++
continue
}
enc.buf.AppendString(s[i : i+size])
i += size
}
} }
// safeAddByteString is a no-alloc equivalent of safeAddString(string(s)) for s []byte. // safeAddByteString is a no-alloc equivalent of safeAddString(string(s)) for s []byte.
func (enc *jsonEncoder) safeAddByteString(s []byte) { func (enc *jsonEncoder) safeAddByteString(s []byte) {
safeAppendStringLike(
(*buffer.Buffer).AppendBytes,
utf8.DecodeRune,
enc.buf,
s,
)
}
// safeAppendStringLike is a generic implementation of safeAddString and safeAddByteString.
// It appends a string or byte slice to the buffer, escaping all special characters.
func safeAppendStringLike[S []byte | string](
// appendTo appends this string-like object to the buffer.
appendTo func(*buffer.Buffer, S),
// decodeRune decodes the next rune from the string-like object
// and returns its value and width in bytes.
decodeRune func(S) (rune, int),
buf *buffer.Buffer,
s S,
) {
// The encoding logic below works by skipping over characters
// that can be safely copied as-is,
// until a character is found that needs special handling.
// At that point, we copy everything we've seen so far,
// and then handle that special character.
//
// last is the index of the last byte that was copied to the buffer.
last := 0
for i := 0; i < len(s); { for i := 0; i < len(s); {
if enc.tryAddRuneSelf(s[i]) { if s[i] >= utf8.RuneSelf {
i++ // Character >= RuneSelf may be part of a multi-byte rune.
continue // They need to be decoded before we can decide how to handle them.
} r, size := decodeRune(s[i:])
r, size := utf8.DecodeRune(s[i:]) if r != utf8.RuneError || size != 1 {
if enc.tryAddRuneError(r, size) { // No special handling required.
i++ // Skip over this rune and continue.
continue i += size
} continue
enc.buf.Write(s[i : i+size]) }
i += size
}
}
// tryAddRuneSelf appends b if it is valid UTF-8 character represented in a single byte. // Invalid UTF-8 sequence.
func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool { // Replace it with the Unicode replacement character.
if b >= utf8.RuneSelf { appendTo(buf, s[last:i])
return false buf.AppendString(`\ufffd`)
}
if 0x20 <= b && b != '\\' && b != '"' {
enc.buf.AppendByte(b)
return true
}
switch b {
case '\\', '"':
enc.buf.AppendByte('\\')
enc.buf.AppendByte(b)
case '\n':
enc.buf.AppendByte('\\')
enc.buf.AppendByte('n')
case '\r':
enc.buf.AppendByte('\\')
enc.buf.AppendByte('r')
case '\t':
enc.buf.AppendByte('\\')
enc.buf.AppendByte('t')
default:
// Encode bytes < 0x20, except for the escape sequences above.
enc.buf.AppendString(`\u00`)
enc.buf.AppendByte(_hex[b>>4])
enc.buf.AppendByte(_hex[b&0xF])
}
return true
}
func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool { i++
if r == utf8.RuneError && size == 1 { last = i
enc.buf.AppendString(`\ufffd`) } else {
return true // Character < RuneSelf is a single-byte UTF-8 rune.
if s[i] >= 0x20 && s[i] != '\\' && s[i] != '"' {
// No escaping necessary.
// Skip over this character and continue.
i++
continue
}
// This character needs to be escaped.
appendTo(buf, s[last:i])
switch s[i] {
case '\\', '"':
buf.AppendByte('\\')
buf.AppendByte(s[i])
case '\n':
buf.AppendByte('\\')
buf.AppendByte('n')
case '\r':
buf.AppendByte('\\')
buf.AppendByte('r')
case '\t':
buf.AppendByte('\\')
buf.AppendByte('t')
default:
// Encode bytes < 0x20, except for the escape sequences above.
buf.AppendString(`\u00`)
buf.AppendByte(_hex[s[i]>>4])
buf.AppendByte(_hex[s[i]&0xF])
}
i++
last = i
}
} }
return false
// add remaining
appendTo(buf, s[last:])
} }
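
The escaping behaviour above can be observed from the public encoder API; a small sketch (not part of the vendored code):

package main

import (
	"fmt"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
	buf, err := enc.EncodeEntry(zapcore.Entry{
		Level:   zapcore.InfoLevel,
		Time:    time.Now(),
		Message: "quote \" newline \n and invalid \xff byte",
	}, []zapcore.Field{zap.String("k", "tab\there")})
	if err != nil {
		panic(err)
	}
	// Control characters come out as \", \n, \t and the invalid byte as \ufffd.
	fmt.Print(buf.String())
}
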


@ -1,6 +1,4 @@
// @generated Code generated by gen-atomicwrapper. // Copyright (c) 2023 Uber Technologies, Inc.
// Copyright (c) 2020 Uber Technologies, Inc.
// //
// Permission is hereby granted, free of charge, to any person obtaining a copy // Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal // of this software and associated documentation files (the "Software"), to deal
@ -20,32 +18,37 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE. // THE SOFTWARE.
package atomic package zapcore
// Error is an atomic type-safe wrapper for error values. import "sync"
type Error struct {
_ nocmp // disallow non-atomic comparison
v Value type lazyWithCore struct {
Core
sync.Once
fields []Field
} }
var _zeroError error // NewLazyWith wraps a Core with a "lazy" Core that will only encode fields if
// the logger is written to (or is further chained in a non-lazy manner).
// NewError creates a new Error. func NewLazyWith(core Core, fields []Field) Core {
func NewError(v error) *Error { return &lazyWithCore{
x := &Error{} Core: core,
if v != _zeroError { fields: fields,
x.Store(v)
} }
return x
} }
// Load atomically loads the wrapped error. func (d *lazyWithCore) initOnce() {
func (x *Error) Load() error { d.Once.Do(func() {
return unpackError(x.v.Load()) d.Core = d.Core.With(d.fields)
})
} }
// Store atomically stores the passed error. func (d *lazyWithCore) With(fields []Field) Core {
func (x *Error) Store(v error) { d.initOnce()
x.v.Store(packError(v)) return d.Core.With(fields)
}
func (d *lazyWithCore) Check(e Entry, ce *CheckedEntry) *CheckedEntry {
d.initOnce()
return d.Core.Check(e, ce)
} }
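
A minimal sketch (not in the vendored code, assuming the zap v1.26 API) of wrapping a core with NewLazyWith so fields are encoded only when an entry is actually written; the field names are illustrative:

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	base := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stdout),
		zapcore.InfoLevel,
	)
	// The fields below are encoded only once an entry actually reaches the core.
	lazy := zapcore.NewLazyWith(base, []zapcore.Field{zap.String("component", "camera")})
	logger := zap.New(lazy)
	logger.Info("started")
}
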


@ -53,6 +53,11 @@ const (
_minLevel = DebugLevel _minLevel = DebugLevel
_maxLevel = FatalLevel _maxLevel = FatalLevel
// InvalidLevel is an invalid value for Level.
//
// Core implementations may panic if they see messages of this level.
InvalidLevel = _maxLevel + 1
) )
// ParseLevel parses a level based on the lower-case or all-caps ASCII // ParseLevel parses a level based on the lower-case or all-caps ASCII
@ -67,6 +72,43 @@ func ParseLevel(text string) (Level, error) {
return level, err return level, err
} }
type leveledEnabler interface {
LevelEnabler
Level() Level
}
// LevelOf reports the minimum enabled log level for the given LevelEnabler
// from Zap's supported log levels, or [InvalidLevel] if none of them are
// enabled.
//
// A LevelEnabler may implement a 'Level() Level' method to override the
// behavior of this function.
//
// func (c *core) Level() Level {
// return c.currentLevel
// }
//
// It is recommended that [Core] implementations that wrap other cores use
// LevelOf to retrieve the level of the wrapped core. For example,
//
// func (c *coreWrapper) Level() Level {
// return zapcore.LevelOf(c.wrappedCore)
// }
func LevelOf(enab LevelEnabler) Level {
if lvler, ok := enab.(leveledEnabler); ok {
return lvler.Level()
}
for lvl := _minLevel; lvl <= _maxLevel; lvl++ {
if enab.Enabled(lvl) {
return lvl
}
}
return InvalidLevel
}
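
A short sketch (outside this diff) showing LevelOf on a wrapped core; with the Level methods added in this upgrade, a tee reports the minimum enabled level of its children:

package main

import (
	"fmt"
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
	debugCore := zapcore.NewCore(enc, zapcore.Lock(os.Stdout), zapcore.DebugLevel)
	warnCore := zapcore.NewCore(enc, zapcore.Lock(os.Stderr), zapcore.WarnLevel)

	tee := zapcore.NewTee(debugCore, warnCore)
	fmt.Println(zapcore.LevelOf(tee)) // "debug": the minimum enabled level across the tee
}
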
// String returns a lower-case ASCII representation of the log level. // String returns a lower-case ASCII representation of the log level.
func (l Level) String() string { func (l Level) String() string {
switch l { switch l {


@ -1,4 +1,4 @@
// Copyright (c) 2016 Uber Technologies, Inc. // Copyright (c) 2016-2022 Uber Technologies, Inc.
// //
// Permission is hereby granted, free of charge, to any person obtaining a copy // Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal // of this software and associated documentation files (the "Software"), to deal
@ -21,9 +21,8 @@
package zapcore package zapcore
import ( import (
"sync/atomic"
"time" "time"
"go.uber.org/atomic"
) )
const ( const (
@ -66,16 +65,16 @@ func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 {
tn := t.UnixNano() tn := t.UnixNano()
resetAfter := c.resetAt.Load() resetAfter := c.resetAt.Load()
if resetAfter > tn { if resetAfter > tn {
return c.counter.Inc() return c.counter.Add(1)
} }
c.counter.Store(1) c.counter.Store(1)
newResetAfter := tn + tick.Nanoseconds() newResetAfter := tn + tick.Nanoseconds()
if !c.resetAt.CAS(resetAfter, newResetAfter) { if !c.resetAt.CompareAndSwap(resetAfter, newResetAfter) {
// We raced with another goroutine trying to reset, and it also reset // We raced with another goroutine trying to reset, and it also reset
// the counter to 1, so we need to reincrement the counter. // the counter to 1, so we need to reincrement the counter.
return c.counter.Inc() return c.counter.Add(1)
} }
return 1 return 1
@ -113,12 +112,12 @@ func nopSamplingHook(Entry, SamplingDecision) {}
// This hook may be used to get visibility into the performance of the sampler. // This hook may be used to get visibility into the performance of the sampler.
// For example, use it to track metrics of dropped versus sampled logs. // For example, use it to track metrics of dropped versus sampled logs.
// //
// var dropped atomic.Int64 // var dropped atomic.Int64
// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) { // zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) {
// if dec&zapcore.LogDropped > 0 { // if dec&zapcore.LogDropped > 0 {
// dropped.Inc() // dropped.Inc()
// } // }
// }) // })
func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption { func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption {
return optionFunc(func(s *sampler) { return optionFunc(func(s *sampler) {
s.hook = hook s.hook = hook
@ -135,7 +134,7 @@ func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption {
// //
// For example, // For example,
// //
// core = NewSamplerWithOptions(core, time.Second, 10, 5) // core = NewSamplerWithOptions(core, time.Second, 10, 5)
// //
// This will log the first 10 log entries with the same level and message // This will log the first 10 log entries with the same level and message
// in a one second interval as-is. Following that, it will allow through // in a one second interval as-is. Following that, it will allow through
@ -175,6 +174,11 @@ type sampler struct {
hook func(Entry, SamplingDecision) hook func(Entry, SamplingDecision)
} }
var (
_ Core = (*sampler)(nil)
_ leveledEnabler = (*sampler)(nil)
)
// NewSampler creates a Core that samples incoming entries, which // NewSampler creates a Core that samples incoming entries, which
// caps the CPU and I/O load of logging while attempting to preserve a // caps the CPU and I/O load of logging while attempting to preserve a
// representative subset of your logs. // representative subset of your logs.
@ -192,6 +196,10 @@ func NewSampler(core Core, tick time.Duration, first, thereafter int) Core {
return NewSamplerWithOptions(core, tick, first, thereafter) return NewSamplerWithOptions(core, tick, first, thereafter)
} }
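
A sketch (not part of the vendored code) combining NewSamplerWithOptions with a SamplerHook that counts dropped entries, as described above:

package main

import (
	"fmt"
	"os"
	"sync/atomic"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	var dropped atomic.Int64

	base := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stdout),
		zapcore.InfoLevel,
	)
	sampled := zapcore.NewSamplerWithOptions(base, time.Second, 10, 100,
		zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) {
			if dec&zapcore.LogDropped > 0 {
				dropped.Add(1)
			}
		}),
	)

	logger := zap.New(sampled)
	for i := 0; i < 1000; i++ {
		logger.Info("tick") // first 10 per second pass, then every 100th
	}
	_ = logger.Sync()
	fmt.Println("dropped entries:", dropped.Load())
}
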
func (s *sampler) Level() Level {
return LevelOf(s.Core)
}
func (s *sampler) With(fields []Field) Core { func (s *sampler) With(fields []Field) Core {
return &sampler{ return &sampler{
Core: s.Core.With(fields), Core: s.Core.With(fields),


@ -1,4 +1,4 @@
// Copyright (c) 2016 Uber Technologies, Inc. // Copyright (c) 2016-2022 Uber Technologies, Inc.
// //
// Permission is hereby granted, free of charge, to any person obtaining a copy // Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal // of this software and associated documentation files (the "Software"), to deal
@ -24,6 +24,11 @@ import "go.uber.org/multierr"
type multiCore []Core type multiCore []Core
var (
_ leveledEnabler = multiCore(nil)
_ Core = multiCore(nil)
)
// NewTee creates a Core that duplicates log entries into two or more // NewTee creates a Core that duplicates log entries into two or more
// underlying Cores. // underlying Cores.
// //
@ -48,6 +53,16 @@ func (mc multiCore) With(fields []Field) Core {
return clone return clone
} }
func (mc multiCore) Level() Level {
minLvl := _maxLevel // mc is never empty
for i := range mc {
if lvl := LevelOf(mc[i]); lvl < minLvl {
minLvl = lvl
}
}
return minLvl
}
func (mc multiCore) Enabled(lvl Level) bool { func (mc multiCore) Enabled(lvl Level) bool {
for i := range mc { for i := range mc {
if mc[i].Enabled(lvl) { if mc[i].Enabled(lvl) {

3
vendor/golang.org/x/net/AUTHORS generated vendored

@ -1,3 +0,0 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.


@ -1,3 +0,0 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

3
vendor/golang.org/x/sync/AUTHORS generated vendored

@ -1,3 +0,0 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.


@ -1,3 +0,0 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

30
vendor/modules.txt vendored

@ -1,40 +1,40 @@
# github.com/cyrilix/robocar-base v0.1.7 # github.com/cyrilix/robocar-base v0.1.8
## explicit; go 1.18 ## explicit; go 1.21
github.com/cyrilix/robocar-base/cli github.com/cyrilix/robocar-base/cli
github.com/cyrilix/robocar-base/service github.com/cyrilix/robocar-base/service
github.com/cyrilix/robocar-base/testtools github.com/cyrilix/robocar-base/testtools
# github.com/cyrilix/robocar-protobuf/go v1.4.0 # github.com/cyrilix/robocar-protobuf/go v1.4.0
## explicit; go 1.21 ## explicit; go 1.21
github.com/cyrilix/robocar-protobuf/go/events github.com/cyrilix/robocar-protobuf/go/events
# github.com/eclipse/paho.mqtt.golang v1.4.1 # github.com/eclipse/paho.mqtt.golang v1.4.3
## explicit; go 1.14 ## explicit; go 1.18
github.com/eclipse/paho.mqtt.golang github.com/eclipse/paho.mqtt.golang
github.com/eclipse/paho.mqtt.golang/packets github.com/eclipse/paho.mqtt.golang/packets
# github.com/gorilla/websocket v1.4.2 # github.com/gorilla/websocket v1.5.0
## explicit; go 1.12 ## explicit; go 1.12
github.com/gorilla/websocket github.com/gorilla/websocket
# go.uber.org/atomic v1.7.0 # go.uber.org/multierr v1.10.0
## explicit; go 1.13 ## explicit; go 1.19
go.uber.org/atomic
# go.uber.org/multierr v1.6.0
## explicit; go 1.12
go.uber.org/multierr go.uber.org/multierr
# go.uber.org/zap v1.21.0 # go.uber.org/zap v1.26.0
## explicit; go 1.13 ## explicit; go 1.19
go.uber.org/zap go.uber.org/zap
go.uber.org/zap/buffer go.uber.org/zap/buffer
go.uber.org/zap/internal
go.uber.org/zap/internal/bufferpool go.uber.org/zap/internal/bufferpool
go.uber.org/zap/internal/color go.uber.org/zap/internal/color
go.uber.org/zap/internal/exit go.uber.org/zap/internal/exit
go.uber.org/zap/internal/pool
go.uber.org/zap/internal/stacktrace
go.uber.org/zap/zapcore go.uber.org/zap/zapcore
# gocv.io/x/gocv v0.31.0 # gocv.io/x/gocv v0.31.0
## explicit; go 1.13 ## explicit; go 1.13
gocv.io/x/gocv gocv.io/x/gocv
# golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 # golang.org/x/net v0.9.0
## explicit; go 1.11 ## explicit; go 1.17
golang.org/x/net/internal/socks golang.org/x/net/internal/socks
golang.org/x/net/proxy golang.org/x/net/proxy
# golang.org/x/sync v0.0.0-20210220032951-036812b2e83c # golang.org/x/sync v0.1.0
## explicit ## explicit
golang.org/x/sync/semaphore golang.org/x/sync/semaphore
# google.golang.org/protobuf v1.31.0 # google.golang.org/protobuf v1.31.0