build: upgrade to go 1.18 and dependencies

refs robocars/robocar-setup#3
Cyrille Nofficial 2022-04-12 17:56:13 +02:00
parent 25035710b5
commit ddc5ee91e5
18 changed files with 395 additions and 194 deletions

4
go.mod

@ -1,11 +1,11 @@
module github.com/cyrilix/robocar-base
go 1.17
go 1.18
require (
github.com/eclipse/paho.mqtt.golang v1.3.5
github.com/testcontainers/testcontainers-go v0.9.0
go.uber.org/zap v1.19.1
go.uber.org/zap v1.21.0
google.golang.org/protobuf v1.27.1
)

8
go.sum

@ -108,12 +108,12 @@ github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLY
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 h1:sHOAIxRGBp443oHZIPB+HsUGaksVCXVQENPxwTfQdH4=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI=
go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI=
go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8=
go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=

12
vendor/go.uber.org/zap/.readme.tmpl generated vendored

@ -96,14 +96,14 @@ Released under the [MIT License](LICENSE.txt).
<sup id="footnote-versions">1</sup> In particular, keep in mind that we may be
benchmarking against slightly older versions of other packages. Versions are
pinned in zap's [glide.lock][] file. [↩](#anchor-versions)
pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions)
[doc-img]: https://godoc.org/go.uber.org/zap?status.svg
[doc]: https://godoc.org/go.uber.org/zap
[ci-img]: https://travis-ci.com/uber-go/zap.svg?branch=master
[ci]: https://travis-ci.com/uber-go/zap
[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap
[doc]: https://pkg.go.dev/go.uber.org/zap
[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg
[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml
[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg
[cov]: https://codecov.io/gh/uber-go/zap
[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks
[glide.lock]: https://github.com/uber-go/zap/blob/master/glide.lock
[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod

50
vendor/go.uber.org/zap/CHANGELOG.md generated vendored

@ -3,9 +3,57 @@ All notable changes to this project will be documented in this file.
This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
## 1.21.0 (7 Feb 2022)
Enhancements:
* [#1047][]: Add `zapcore.ParseLevel` to parse a `Level` from a string.
* [#1048][]: Add `zap.ParseAtomicLevel` to parse an `AtomicLevel` from a
string.
Bugfixes:
* [#1058][]: Fix panic in JSON encoder when `EncodeLevel` is unset.
Other changes:
* [#1052][]: Improve encoding performance when the `AddCaller` and
`AddStacktrace` options are used together.
[#1047]: https://github.com/uber-go/zap/pull/1047
[#1048]: https://github.com/uber-go/zap/pull/1048
[#1052]: https://github.com/uber-go/zap/pull/1052
[#1058]: https://github.com/uber-go/zap/pull/1058
Thanks to @aerosol and @Techassi for their contributions to this release.
## 1.20.0 (4 Jan 2022)
Enhancements:
* [#989][]: Add `EncoderConfig.SkipLineEnding` flag to disable adding newline
characters between log statements.
* [#1039][]: Add `EncoderConfig.NewReflectedEncoder` field to customize JSON
encoding of reflected log fields.
Bugfixes:
* [#1011][]: Fix inaccurate precision when encoding complex64 as JSON.
* [#554][], [#1017][]: Close JSON namespaces opened in `MarshalLogObject`
methods when the methods return.
* [#1033][]: Avoid panicking in Sampler core if `thereafter` is zero.
Other changes:
* [#1028][]: Drop support for Go < 1.15.
[#554]: https://github.com/uber-go/zap/pull/554
[#989]: https://github.com/uber-go/zap/pull/989
[#1011]: https://github.com/uber-go/zap/pull/1011
[#1017]: https://github.com/uber-go/zap/pull/1017
[#1028]: https://github.com/uber-go/zap/pull/1028
[#1033]: https://github.com/uber-go/zap/pull/1033
[#1039]: https://github.com/uber-go/zap/pull/1039
Thanks to @psrajat, @lruggieri, @sammyrnycreal for their contributions to this release.
## 1.19.1 (8 Sep 2021)
### Fixed
Bugfixes:
* [#1001][]: JSON: Fix complex number encoding with negative imaginary part. Thanks to @hemantjadon.
* [#1003][]: JSON: Fix inaccurate precision when encoding float32.

44
vendor/go.uber.org/zap/README.md generated vendored

@ -66,38 +66,38 @@ Log a message and 10 fields:
| Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: |
| :zap: zap | 862 ns/op | +0% | 5 allocs/op
| :zap: zap (sugared) | 1250 ns/op | +45% | 11 allocs/op
| zerolog | 4021 ns/op | +366% | 76 allocs/op
| go-kit | 4542 ns/op | +427% | 105 allocs/op
| apex/log | 26785 ns/op | +3007% | 115 allocs/op
| logrus | 29501 ns/op | +3322% | 125 allocs/op
| log15 | 29906 ns/op | +3369% | 122 allocs/op
| :zap: zap | 2900 ns/op | +0% | 5 allocs/op
| :zap: zap (sugared) | 3475 ns/op | +20% | 10 allocs/op
| zerolog | 10639 ns/op | +267% | 32 allocs/op
| go-kit | 14434 ns/op | +398% | 59 allocs/op
| logrus | 17104 ns/op | +490% | 81 allocs/op
| apex/log | 32424 ns/op | +1018% | 66 allocs/op
| log15 | 33579 ns/op | +1058% | 76 allocs/op
Log a message with a logger that already has 10 fields of context:
| Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: |
| :zap: zap | 126 ns/op | +0% | 0 allocs/op
| :zap: zap (sugared) | 187 ns/op | +48% | 2 allocs/op
| zerolog | 88 ns/op | -30% | 0 allocs/op
| go-kit | 5087 ns/op | +3937% | 103 allocs/op
| log15 | 18548 ns/op | +14621% | 73 allocs/op
| apex/log | 26012 ns/op | +20544% | 104 allocs/op
| logrus | 27236 ns/op | +21516% | 113 allocs/op
| :zap: zap | 373 ns/op | +0% | 0 allocs/op
| :zap: zap (sugared) | 452 ns/op | +21% | 1 allocs/op
| zerolog | 288 ns/op | -23% | 0 allocs/op
| go-kit | 11785 ns/op | +3060% | 58 allocs/op
| logrus | 19629 ns/op | +5162% | 70 allocs/op
| log15 | 21866 ns/op | +5762% | 72 allocs/op
| apex/log | 30890 ns/op | +8182% | 55 allocs/op
Log a static string, without any context or `printf`-style templating:
| Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: |
| :zap: zap | 118 ns/op | +0% | 0 allocs/op
| :zap: zap (sugared) | 191 ns/op | +62% | 2 allocs/op
| zerolog | 93 ns/op | -21% | 0 allocs/op
| go-kit | 280 ns/op | +137% | 11 allocs/op
| standard library | 499 ns/op | +323% | 2 allocs/op
| apex/log | 1990 ns/op | +1586% | 10 allocs/op
| logrus | 3129 ns/op | +2552% | 24 allocs/op
| log15 | 3887 ns/op | +3194% | 23 allocs/op
| :zap: zap | 381 ns/op | +0% | 0 allocs/op
| :zap: zap (sugared) | 410 ns/op | +8% | 1 allocs/op
| zerolog | 369 ns/op | -3% | 0 allocs/op
| standard library | 385 ns/op | +1% | 2 allocs/op
| go-kit | 606 ns/op | +59% | 11 allocs/op
| logrus | 1730 ns/op | +354% | 25 allocs/op
| apex/log | 1998 ns/op | +424% | 7 allocs/op
| log15 | 4546 ns/op | +1093% | 22 allocs/op
## Development Status: Stable

1
vendor/go.uber.org/zap/global.go generated vendored

@ -31,6 +31,7 @@ import (
)
const (
_stdLogDefaultDepth = 1
_loggerWriterDepth = 2
_programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " +
"https://github.com/uber-go/zap/issues/new and reference this error: %v"

View File

@ -1,26 +0,0 @@
// Copyright (c) 2019 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// See #682 for more information.
// +build !go1.12
package zap
const _stdLogDefaultDepth = 2

17
vendor/go.uber.org/zap/level.go generated vendored

@ -86,6 +86,23 @@ func NewAtomicLevelAt(l zapcore.Level) AtomicLevel {
return a
}
// ParseAtomicLevel parses an AtomicLevel based on a lowercase or all-caps ASCII
// representation of the log level. If the provided ASCII representation is
// invalid an error is returned.
//
// This is particularly useful when dealing with text input to configure log
// levels.
func ParseAtomicLevel(text string) (AtomicLevel, error) {
a := NewAtomicLevel()
l, err := zapcore.ParseLevel(text)
if err != nil {
return a, err
}
a.SetLevel(l)
return a, nil
}
// Enabled implements the zapcore.LevelEnabler interface, which allows the
// AtomicLevel to be used in place of traditional static levels.
func (lvl AtomicLevel) Enabled(l zapcore.Level) bool {
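The new zap.ParseAtomicLevel pairs naturally with zap.Config, whose Level field is an AtomicLevel. A minimal sketch (not part of this commit, only illustrating the 1.21.0 API):

package main

import (
    "go.uber.org/zap"
)

func main() {
    // Parse a level name coming from configuration, e.g. an environment variable.
    lvl, err := zap.ParseAtomicLevel("warn")
    if err != nil {
        panic(err)
    }

    cfg := zap.NewProductionConfig()
    cfg.Level = lvl // zap.Config.Level is an AtomicLevel

    logger, err := cfg.Build()
    if err != nil {
        panic(err)
    }
    defer logger.Sync()

    logger.Info("dropped: below the configured level")
    logger.Warn("logged: at or above the configured level")
}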

71
vendor/go.uber.org/zap/logger.go generated vendored

@ -24,9 +24,9 @@ import (
"fmt"
"io/ioutil"
"os"
"runtime"
"strings"
"go.uber.org/zap/internal/bufferpool"
"go.uber.org/zap/zapcore"
)
@ -259,8 +259,10 @@ func (log *Logger) clone() *Logger {
}
func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
// check must always be called directly by a method in the Logger interface
// (e.g., Check, Info, Fatal).
// Logger.check must always be called directly by a method in the
// Logger interface (e.g., Check, Info, Fatal).
// This skips Logger.check and the Info/Fatal/Check/etc. method that
// called it.
const callerSkipOffset = 2
// Check the level first to reduce the cost of disabled log calls.
@ -307,42 +309,55 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
// Thread the error output through to the CheckedEntry.
ce.ErrorOutput = log.errorOutput
if log.addCaller {
frame, defined := getCallerFrame(log.callerSkip + callerSkipOffset)
if !defined {
addStack := log.addStack.Enabled(ce.Level)
if !log.addCaller && !addStack {
return ce
}
// Adding the caller or stack trace requires capturing the callers of
// this function. We'll share information between these two.
stackDepth := stacktraceFirst
if addStack {
stackDepth = stacktraceFull
}
stack := captureStacktrace(log.callerSkip+callerSkipOffset, stackDepth)
defer stack.Free()
if stack.Count() == 0 {
if log.addCaller {
fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC())
log.errorOutput.Sync()
}
return ce
}
ce.Entry.Caller = zapcore.EntryCaller{
Defined: defined,
frame, more := stack.Next()
if log.addCaller {
ce.Caller = zapcore.EntryCaller{
Defined: frame.PC != 0,
PC: frame.PC,
File: frame.File,
Line: frame.Line,
Function: frame.Function,
}
}
if log.addStack.Enabled(ce.Entry.Level) {
ce.Entry.Stack = StackSkip("", log.callerSkip+callerSkipOffset).String
if addStack {
buffer := bufferpool.Get()
defer buffer.Free()
stackfmt := newStackFormatter(buffer)
// We've already extracted the first frame, so format that
// separately and defer to stackfmt for the rest.
stackfmt.FormatFrame(frame)
if more {
stackfmt.FormatStack(stack)
}
ce.Stack = buffer.String()
}
return ce
}
// getCallerFrame gets caller frame. The argument skip is the number of stack
// frames to ascend, with 0 identifying the caller of getCallerFrame. The
// boolean ok is false if it was not possible to recover the information.
//
// Note: This implementation is similar to runtime.Caller, but it returns the whole frame.
func getCallerFrame(skip int) (frame runtime.Frame, ok bool) {
const skipOffset = 2 // skip getCallerFrame and Callers
pc := make([]uintptr, 1)
numFrames := runtime.Callers(skip+skipOffset, pc)
if numFrames < 1 {
return
}
frame, _ = runtime.CallersFrames(pc).Next()
return frame, frame.PC != 0
}
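The removed getCallerFrame helper is replaced by the shared captureStacktrace path in stacktrace.go (next file), so a logger built with both AddCaller and AddStacktrace now walks the stack only once. A hedged usage sketch of those two public options (the option names themselves are unchanged by this commit):

package main

import (
    "os"

    "go.uber.org/zap"
    "go.uber.org/zap/zapcore"
)

func main() {
    core := zapcore.NewCore(
        zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
        zapcore.Lock(os.Stderr),
        zapcore.InfoLevel,
    )

    // AddCaller annotates every entry with the calling frame; AddStacktrace
    // attaches a full stack trace at ErrorLevel and above. With this change
    // the callers are captured once and shared between the two.
    logger := zap.New(core,
        zap.AddCaller(),
        zap.AddStacktrace(zapcore.ErrorLevel),
    )
    defer logger.Sync()

    logger.Error("something failed") // includes caller and stacktrace fields
}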

179
vendor/go.uber.org/zap/stacktrace.go generated vendored

@ -24,62 +24,153 @@ import (
"runtime"
"sync"
"go.uber.org/zap/buffer"
"go.uber.org/zap/internal/bufferpool"
)
var (
_stacktracePool = sync.Pool{
New: func() interface{} {
return newProgramCounters(64)
},
}
var _stacktracePool = sync.Pool{
New: func() interface{} {
return &stacktrace{
storage: make([]uintptr, 64),
}
},
}
type stacktrace struct {
pcs []uintptr // program counters; always a subslice of storage
frames *runtime.Frames
// The size of pcs varies depending on requirements:
// it will be one if only the first frame was requested,
// and otherwise it will reflect the depth of the call stack.
//
// storage decouples the slice we need (pcs) from the slice we pool.
// We will always allocate a reasonably large storage, but we'll use
// only as much of it as we need.
storage []uintptr
}
// stacktraceDepth specifies how deep of a stack trace should be captured.
type stacktraceDepth int
const (
// stacktraceFirst captures only the first frame.
stacktraceFirst stacktraceDepth = iota
// stacktraceFull captures the entire call stack, allocating more
// storage for it if needed.
stacktraceFull
)
// captureStacktrace captures a stack trace of the specified depth, skipping
// the provided number of frames. skip=0 identifies the caller of
// captureStacktrace.
//
// The caller must call Free on the returned stacktrace after using it.
func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace {
stack := _stacktracePool.Get().(*stacktrace)
switch depth {
case stacktraceFirst:
stack.pcs = stack.storage[:1]
case stacktraceFull:
stack.pcs = stack.storage
}
// Unlike other "skip"-based APIs, skip=0 identifies runtime.Callers
// itself. +2 to skip captureStacktrace and runtime.Callers.
numFrames := runtime.Callers(
skip+2,
stack.pcs,
)
// runtime.Callers truncates the recorded stacktrace if there is no
// room in the provided slice. For the full stack trace, keep expanding
// storage until there are fewer frames than there is room.
if depth == stacktraceFull {
pcs := stack.pcs
for numFrames == len(pcs) {
pcs = make([]uintptr, len(pcs)*2)
numFrames = runtime.Callers(skip+2, pcs)
}
// Discard old storage instead of returning it to the pool.
// This will adjust the pool size over time if stack traces are
// consistently very deep.
stack.storage = pcs
stack.pcs = pcs[:numFrames]
} else {
stack.pcs = stack.pcs[:numFrames]
}
stack.frames = runtime.CallersFrames(stack.pcs)
return stack
}
// Free releases resources associated with this stacktrace
// and returns it to the pool.
func (st *stacktrace) Free() {
st.frames = nil
st.pcs = nil
_stacktracePool.Put(st)
}
// Count reports the total number of frames in this stacktrace.
// Count DOES NOT change as Next is called.
func (st *stacktrace) Count() int {
return len(st.pcs)
}
// Next returns the next frame in the stack trace,
// and a boolean indicating whether there are more after it.
func (st *stacktrace) Next() (_ runtime.Frame, more bool) {
return st.frames.Next()
}
func takeStacktrace(skip int) string {
stack := captureStacktrace(skip+1, stacktraceFull)
defer stack.Free()
buffer := bufferpool.Get()
defer buffer.Free()
programCounters := _stacktracePool.Get().(*programCounters)
defer _stacktracePool.Put(programCounters)
var numFrames int
for {
// Skip the call to runtime.Callers and takeStacktrace so that the
// program counters start at the caller of takeStacktrace.
numFrames = runtime.Callers(skip+2, programCounters.pcs)
if numFrames < len(programCounters.pcs) {
break
}
// Don't put the too-short counter slice back into the pool; this lets
// the pool adjust if we consistently take deep stacktraces.
programCounters = newProgramCounters(len(programCounters.pcs) * 2)
}
i := 0
frames := runtime.CallersFrames(programCounters.pcs[:numFrames])
// Note: On the last iteration, frames.Next() returns false, with a valid
// frame, but we ignore this frame. The last frame is a runtime frame which
// adds noise, since it's only either runtime.main or runtime.goexit.
for frame, more := frames.Next(); more; frame, more = frames.Next() {
if i != 0 {
buffer.AppendByte('\n')
}
i++
buffer.AppendString(frame.Function)
buffer.AppendByte('\n')
buffer.AppendByte('\t')
buffer.AppendString(frame.File)
buffer.AppendByte(':')
buffer.AppendInt(int64(frame.Line))
}
stackfmt := newStackFormatter(buffer)
stackfmt.FormatStack(stack)
return buffer.String()
}
type programCounters struct {
pcs []uintptr
// stackFormatter formats a stack trace into a readable string representation.
type stackFormatter struct {
b *buffer.Buffer
nonEmpty bool // whether we've written at least one frame already
}
func newProgramCounters(size int) *programCounters {
return &programCounters{make([]uintptr, size)}
// newStackFormatter builds a new stackFormatter.
func newStackFormatter(b *buffer.Buffer) stackFormatter {
return stackFormatter{b: b}
}
// FormatStack formats all remaining frames in the provided stacktrace -- minus
// the final runtime.main/runtime.goexit frame.
func (sf *stackFormatter) FormatStack(stack *stacktrace) {
// Note: On the last iteration, frames.Next() returns false, with a valid
// frame, but we ignore this frame. The last frame is a runtime frame which
// adds noise, since it's only either runtime.main or runtime.goexit.
for frame, more := stack.Next(); more; frame, more = stack.Next() {
sf.FormatFrame(frame)
}
}
// FormatFrame formats the given frame.
func (sf *stackFormatter) FormatFrame(frame runtime.Frame) {
if sf.nonEmpty {
sf.b.AppendByte('\n')
}
sf.nonEmpty = true
sf.b.AppendString(frame.Function)
sf.b.AppendByte('\n')
sf.b.AppendByte('\t')
sf.b.AppendString(frame.File)
sf.b.AppendByte(':')
sf.b.AppendInt(int64(frame.Line))
}

View File

@ -20,9 +20,7 @@
package zapcore
import (
"time"
)
import "time"
// DefaultClock is the default clock used by Zap in operations that require
// time. This clock uses the system clock for all operations.

View File

@ -125,11 +125,7 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer,
line.AppendString(ent.Stack)
}
if c.LineEnding != "" {
line.AppendString(c.LineEnding)
} else {
line.AppendString(DefaultLineEnding)
}
line.AppendString(c.LineEnding)
return line, nil
}

View File

@ -22,6 +22,7 @@ package zapcore
import (
"encoding/json"
"io"
"time"
"go.uber.org/zap/buffer"
@ -312,14 +313,15 @@ func (e *NameEncoder) UnmarshalText(text []byte) error {
type EncoderConfig struct {
// Set the keys used for each log entry. If any key is empty, that portion
// of the entry is omitted.
MessageKey string `json:"messageKey" yaml:"messageKey"`
LevelKey string `json:"levelKey" yaml:"levelKey"`
TimeKey string `json:"timeKey" yaml:"timeKey"`
NameKey string `json:"nameKey" yaml:"nameKey"`
CallerKey string `json:"callerKey" yaml:"callerKey"`
FunctionKey string `json:"functionKey" yaml:"functionKey"`
StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"`
LineEnding string `json:"lineEnding" yaml:"lineEnding"`
MessageKey string `json:"messageKey" yaml:"messageKey"`
LevelKey string `json:"levelKey" yaml:"levelKey"`
TimeKey string `json:"timeKey" yaml:"timeKey"`
NameKey string `json:"nameKey" yaml:"nameKey"`
CallerKey string `json:"callerKey" yaml:"callerKey"`
FunctionKey string `json:"functionKey" yaml:"functionKey"`
StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"`
SkipLineEnding bool `json:"skipLineEnding" yaml:"skipLineEnding"`
LineEnding string `json:"lineEnding" yaml:"lineEnding"`
// Configure the primitive representations of common complex types. For
// example, some users may want all time.Times serialized as floating-point
// seconds since epoch, while others may prefer ISO8601 strings.
@ -330,6 +332,9 @@ type EncoderConfig struct {
// Unlike the other primitive type encoders, EncodeName is optional. The
// zero value falls back to FullNameEncoder.
EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"`
// Configure the encoder for interface{} type objects.
// If not provided, objects are encoded using a json.Encoder.
NewReflectedEncoder func(io.Writer) ReflectedEncoder `json:"-" yaml:"-"`
// Configures the field separator used by the console encoder. Defaults
// to tab.
ConsoleSeparator string `json:"consoleSeparator" yaml:"consoleSeparator"`
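SkipLineEnding is the 1.20.0 addition referenced in the CHANGELOG above. A small sketch of the intended effect (assuming the stock production encoder config; not part of this diff):

package main

import (
    "fmt"
    "time"

    "go.uber.org/zap"
    "go.uber.org/zap/zapcore"
)

func main() {
    cfg := zap.NewProductionEncoderConfig()
    // Suppress the trailing newline entirely instead of falling back to
    // DefaultLineEnding when LineEnding is empty.
    cfg.SkipLineEnding = true

    enc := zapcore.NewJSONEncoder(cfg)
    buf, err := enc.EncodeEntry(zapcore.Entry{
        Level:   zapcore.InfoLevel,
        Time:    time.Now(),
        Message: "hello",
    }, nil)
    if err != nil {
        panic(err)
    }
    fmt.Printf("%q\n", buf.String()) // no trailing "\n"
}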

View File

@ -22,7 +22,6 @@ package zapcore
import (
"encoding/base64"
"encoding/json"
"math"
"sync"
"time"
@ -64,7 +63,7 @@ type jsonEncoder struct {
// for encoding generic values by reflection
reflectBuf *buffer.Buffer
reflectEnc *json.Encoder
reflectEnc ReflectedEncoder
}
// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder
@ -82,6 +81,17 @@ func NewJSONEncoder(cfg EncoderConfig) Encoder {
}
func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder {
if cfg.SkipLineEnding {
cfg.LineEnding = ""
} else if cfg.LineEnding == "" {
cfg.LineEnding = DefaultLineEnding
}
// If no EncoderConfig.NewReflectedEncoder is provided by the user, then use the default.
if cfg.NewReflectedEncoder == nil {
cfg.NewReflectedEncoder = defaultReflectedEncoder
}
return &jsonEncoder{
EncoderConfig: &cfg,
buf: bufferpool.Get(),
@ -118,6 +128,11 @@ func (enc *jsonEncoder) AddComplex128(key string, val complex128) {
enc.AppendComplex128(val)
}
func (enc *jsonEncoder) AddComplex64(key string, val complex64) {
enc.addKey(key)
enc.AppendComplex64(val)
}
func (enc *jsonEncoder) AddDuration(key string, val time.Duration) {
enc.addKey(key)
enc.AppendDuration(val)
@ -141,10 +156,7 @@ func (enc *jsonEncoder) AddInt64(key string, val int64) {
func (enc *jsonEncoder) resetReflectBuf() {
if enc.reflectBuf == nil {
enc.reflectBuf = bufferpool.Get()
enc.reflectEnc = json.NewEncoder(enc.reflectBuf)
// For consistency with our custom JSON encoder.
enc.reflectEnc.SetEscapeHTML(false)
enc.reflectEnc = enc.NewReflectedEncoder(enc.reflectBuf)
} else {
enc.reflectBuf.Reset()
}
@ -206,10 +218,16 @@ func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error {
}
func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error {
// Close ONLY new openNamespaces that are created during
// AppendObject().
old := enc.openNamespaces
enc.openNamespaces = 0
enc.addElementSeparator()
enc.buf.AppendByte('{')
err := obj.MarshalLogObject(enc)
enc.buf.AppendByte('}')
enc.closeOpenNamespaces()
enc.openNamespaces = old
return err
}
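This is the #554/#1017 fix from the CHANGELOG: namespaces opened inside a MarshalLogObject call are now closed again when the call returns. A hedged sketch of the user-visible behaviour (the request type and field names are made up for illustration):

package main

import (
    "go.uber.org/zap"
    "go.uber.org/zap/zapcore"
)

// request opens a namespace inside MarshalLogObject. With this fix the
// namespace is closed when the method returns, so sibling fields of the
// object are no longer nested by accident.
type request struct {
    method string
    path   string
}

func (r request) MarshalLogObject(enc zapcore.ObjectEncoder) error {
    enc.OpenNamespace("http")
    enc.AddString("method", r.method)
    enc.AddString("path", r.path)
    return nil
}

func main() {
    logger, _ := zap.NewProduction()
    defer logger.Sync()

    logger.Info("handled",
        zap.Object("req", request{method: "GET", path: "/health"}),
        zap.Int("status", 200), // stays at the top level of the entry
    )
}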
@ -225,20 +243,23 @@ func (enc *jsonEncoder) AppendByteString(val []byte) {
enc.buf.AppendByte('"')
}
func (enc *jsonEncoder) AppendComplex128(val complex128) {
// appendComplex appends the encoded form of the provided complex128 value.
// precision specifies the encoding precision for the real and imaginary
// components of the complex number.
func (enc *jsonEncoder) appendComplex(val complex128, precision int) {
enc.addElementSeparator()
// Cast to a platform-independent, fixed-size type.
r, i := float64(real(val)), float64(imag(val))
enc.buf.AppendByte('"')
// Because we're always in a quoted string, we can use strconv without
// special-casing NaN and +/-Inf.
enc.buf.AppendFloat(r, 64)
enc.buf.AppendFloat(r, precision)
// If the imaginary part is less than 0, a minus (-) sign is added by default
// by AppendFloat.
if i >= 0 {
enc.buf.AppendByte('+')
}
enc.buf.AppendFloat(i, 64)
enc.buf.AppendFloat(i, precision)
enc.buf.AppendByte('i')
enc.buf.AppendByte('"')
}
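With appendComplex taking a precision argument, complex64 values are encoded at 32-bit float precision and complex128 at 64-bit, which is the #1011 fix from the CHANGELOG. A brief, hedged sketch using the public field constructors:

package main

import "go.uber.org/zap"

func main() {
    logger, _ := zap.NewProduction()
    defer logger.Sync()

    // complex64 components are now formatted at 32-bit precision, so
    // float32-based values no longer pick up spurious digits.
    logger.Info("impedance",
        zap.Complex64("z64", complex(float32(1.1), float32(-2.2))),
        zap.Complex128("z128", complex(1.1, -2.2)),
    )
}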
@ -301,28 +322,28 @@ func (enc *jsonEncoder) AppendUint64(val uint64) {
enc.buf.AppendUint(val)
}
func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) }
func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.AppendComplex128(complex128(v)) }
func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) }
func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) }
func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.appendComplex(complex128(v), 32) }
func (enc *jsonEncoder) AppendComplex128(v complex128) { enc.appendComplex(complex128(v), 64) }
func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) }
func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) }
func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) Clone() Encoder {
clone := enc.clone()
@ -343,7 +364,7 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer,
final := enc.clone()
final.buf.AppendByte('{')
if final.LevelKey != "" {
if final.LevelKey != "" && final.EncodeLevel != nil {
final.addKey(final.LevelKey)
cur := final.buf.Len()
final.EncodeLevel(ent.Level, final)
@ -404,11 +425,7 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer,
final.AddString(final.StacktraceKey, ent.Stack)
}
final.buf.AppendByte('}')
if final.LineEnding != "" {
final.buf.AppendString(final.LineEnding)
} else {
final.buf.AppendString(DefaultLineEnding)
}
final.buf.AppendString(final.LineEnding)
ret := final.buf
putJSONEncoder(final)
@ -423,6 +440,7 @@ func (enc *jsonEncoder) closeOpenNamespaces() {
for i := 0; i < enc.openNamespaces; i++ {
enc.buf.AppendByte('}')
}
enc.openNamespaces = 0
}
func (enc *jsonEncoder) addKey(key string) {

View File

@ -55,6 +55,18 @@ const (
_maxLevel = FatalLevel
)
// ParseLevel parses a level based on the lower-case or all-caps ASCII
// representation of the log level. If the provided ASCII representation is
// invalid an error is returned.
//
// This is particularly useful when dealing with text input to configure log
// levels.
func ParseLevel(text string) (Level, error) {
var level Level
err := level.UnmarshalText([]byte(text))
return level, err
}
// String returns a lower-case ASCII representation of the log level.
func (l Level) String() string {
switch l {
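zapcore.ParseLevel is the lower-level counterpart of zap.ParseAtomicLevel shown earlier. A minimal sketch (not part of this diff):

package main

import (
    "fmt"

    "go.uber.org/zap/zapcore"
)

func main() {
    for _, s := range []string{"debug", "INFO", "warn", "bogus"} {
        lvl, err := zapcore.ParseLevel(s)
        if err != nil {
            fmt.Printf("%q: %v\n", s, err) // "bogus" is rejected
            continue
        }
        fmt.Printf("%q -> %s\n", s, lvl)
    }
}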

View File

@ -1,4 +1,4 @@
// Copyright (c) 2019 Uber Technologies, Inc.
// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@ -18,9 +18,24 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// See #682 for more information.
// +build go1.12
package zapcore
package zap
import (
"encoding/json"
"io"
)
const _stdLogDefaultDepth = 1
// ReflectedEncoder serializes log fields that can't be serialized with Zap's
// JSON encoder. These have the ReflectType field type.
// Use EncoderConfig.NewReflectedEncoder to set this.
type ReflectedEncoder interface {
// Encode encodes and writes to the underlying data stream.
Encode(interface{}) error
}
func defaultReflectedEncoder(w io.Writer) ReflectedEncoder {
enc := json.NewEncoder(w)
// For consistency with our custom JSON encoder.
enc.SetEscapeHTML(false)
return enc
}
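NewReflectedEncoder lets callers replace the encoder used for ReflectType fields (zap.Reflect, and zap.Any on otherwise unknown types). A hedged sketch that keeps encoding/json but changes one setting, just to show the hook (not part of this commit):

package main

import (
    "encoding/json"
    "io"
    "os"

    "go.uber.org/zap"
    "go.uber.org/zap/zapcore"
)

func main() {
    cfg := zap.NewProductionEncoderConfig()
    // Any value with an Encode(interface{}) error method satisfies
    // zapcore.ReflectedEncoder; here we simply re-enable HTML escaping.
    cfg.NewReflectedEncoder = func(w io.Writer) zapcore.ReflectedEncoder {
        enc := json.NewEncoder(w)
        enc.SetEscapeHTML(true)
        return enc
    }

    core := zapcore.NewCore(zapcore.NewJSONEncoder(cfg), zapcore.AddSync(os.Stdout), zapcore.InfoLevel)
    logger := zap.New(core)
    defer logger.Sync()

    logger.Info("reflected field", zap.Reflect("payload", map[string]string{"q": "a&b"}))
}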

View File

@ -133,10 +133,21 @@ func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption {
// each tick. If more Entries with the same level and message are seen during
// the same interval, every Mth message is logged and the rest are dropped.
//
// For example,
//
// core = NewSamplerWithOptions(core, time.Second, 10, 5)
//
// This will log the first 10 log entries with the same level and message
// in a one second interval as-is. Following that, it will allow through
// every 5th log entry with the same level and message in that interval.
//
// If thereafter is zero, the Core will drop all log entries after the first N
// in that interval.
//
// Sampler can be configured to report sampling decisions with the SamplerHook
// option.
//
// Keep in mind that zap's sampling implementation is optimized for speed over
// Keep in mind that Zap's sampling implementation is optimized for speed over
// absolute precision; under load, each tick may be slightly over- or
// under-sampled.
func NewSamplerWithOptions(core Core, tick time.Duration, first, thereafter int, opts ...SamplerOption) Core {
@ -200,7 +211,7 @@ func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
if ent.Level >= _minLevel && ent.Level <= _maxLevel {
counter := s.counts.get(ent.Level, ent.Message)
n := counter.IncCheckReset(ent.Time, s.tick)
if n > s.first && (n-s.first)%s.thereafter != 0 {
if n > s.first && (s.thereafter == 0 || (n-s.first)%s.thereafter != 0) {
s.hook(ent, LogDropped)
return ce
}
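The sampler change guards the modulo so a thereafter of zero now drops everything after the first N entries in a tick instead of panicking (#1033). A short, hedged usage sketch of NewSamplerWithOptions:

package main

import (
    "os"
    "time"

    "go.uber.org/zap"
    "go.uber.org/zap/zapcore"
)

func main() {
    base := zapcore.NewCore(
        zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
        zapcore.AddSync(os.Stdout),
        zapcore.InfoLevel,
    )

    // Per one-second tick: keep the first 10 identical entries, then every
    // 5th. Passing thereafter == 0 would keep only the first 10.
    sampled := zapcore.NewSamplerWithOptions(base, time.Second, 10, 5)
    logger := zap.New(sampled)
    defer logger.Sync()

    for i := 0; i < 100; i++ {
        logger.Info("burst") // only a fraction of these reach the output
    }
}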

2
vendor/modules.txt vendored

@ -104,7 +104,7 @@ go.uber.org/atomic
# go.uber.org/multierr v1.6.0
## explicit; go 1.12
go.uber.org/multierr
# go.uber.org/zap v1.19.1
# go.uber.org/zap v1.21.0
## explicit; go 1.13
go.uber.org/zap
go.uber.org/zap/buffer