build: update dependencies

Committed by Cyrille Nofficial on 2022-09-05 16:44:02 +02:00
parent d35b8db825
commit 517e558081
86 changed files with 2289 additions and 1746 deletions

View File

@ -43,6 +43,37 @@ const (
//
// BufferedWriteSyncer is safe for concurrent use. You don't need to use
// zapcore.Lock for WriteSyncers with BufferedWriteSyncer.
//
// To set up a BufferedWriteSyncer, construct a WriteSyncer for your log
// destination (*os.File is a valid WriteSyncer), wrap it with
// BufferedWriteSyncer, and defer a Stop() call for when you no longer need the
// object.
//
// func main() {
// ws := ... // your log destination
// bws := &zapcore.BufferedWriteSyncer{WS: ws}
// defer bws.Stop()
//
// // ...
// core := zapcore.NewCore(enc, bws, lvl)
// logger := zap.New(core)
//
// // ...
// }
//
// By default, a BufferedWriteSyncer will buffer up to 256 kilobytes of logs,
// waiting at most 30 seconds between flushes.
// You can customize these parameters by setting the Size or FlushInterval
// fields.
// For example, the following buffers up to 512 kB of logs before flushing them
// to Stderr, with a maximum of one minute between each flush.
//
// ws := &BufferedWriteSyncer{
// WS: os.Stderr,
// Size: 512 * 1024, // 512 kB
// FlushInterval: time.Minute,
// }
// defer ws.Stop()
type BufferedWriteSyncer struct {
// WS is the WriteSyncer around which BufferedWriteSyncer will buffer
// writes.
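
For reference, a minimal runnable sketch of the setup described in the new doc comment above; the destination (os.Stderr), the JSON encoder, and the Info level are illustrative choices rather than part of this change.

package main

import (
	"os"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Wrap the destination in a BufferedWriteSyncer and make sure buffered
	// entries are flushed before the program exits.
	bws := &zapcore.BufferedWriteSyncer{
		WS:            os.Stderr,
		Size:          512 * 1024, // 512 kB buffer
		FlushInterval: time.Minute,
	}
	defer bws.Stop()

	enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
	logger := zap.New(zapcore.NewCore(enc, bws, zapcore.InfoLevel))
	defer logger.Sync()

	logger.Info("buffered logging is set up")
}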

View File

@ -69,6 +69,15 @@ type ioCore struct {
out WriteSyncer
}
var (
_ Core = (*ioCore)(nil)
_ leveledEnabler = (*ioCore)(nil)
)
func (c *ioCore) Level() Level {
return LevelOf(c.LevelEnabler)
}
func (c *ioCore) With(fields []Field) Core {
clone := c.clone()
addFields(clone.enc, fields)

View File

@ -188,10 +188,13 @@ func (e *TimeEncoder) UnmarshalText(text []byte) error {
// UnmarshalYAML unmarshals YAML to a TimeEncoder.
// If value is an object with a "layout" field, it will be unmarshaled to TimeEncoder with given layout.
// timeEncoder:
// layout: 06/01/02 03:04pm
//
// timeEncoder:
// layout: 06/01/02 03:04pm
//
// If value is string, it uses UnmarshalText.
// timeEncoder: iso8601
//
// timeEncoder: iso8601
func (e *TimeEncoder) UnmarshalYAML(unmarshal func(interface{}) error) error {
var o struct {
Layout string `json:"layout" yaml:"layout"`
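
A rough sketch of how the YAML form above is consumed; it assumes gopkg.in/yaml.v2, whose Unmarshaler interface matches the unmarshal-func signature used here, and the layout value is only illustrative.

package main

import (
	"fmt"

	"go.uber.org/zap/zapcore"
	yaml "gopkg.in/yaml.v2"
)

func main() {
	// EncoderConfig's EncodeTime field carries the `yaml:"timeEncoder"` tag,
	// so unmarshaling this snippet goes through TimeEncoder.UnmarshalYAML.
	raw := []byte("timeEncoder:\n  layout: 06/01/02 03:04pm\n")

	var cfg zapcore.EncoderConfig
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println("time encoder configured:", cfg.EncodeTime != nil) // true
}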

View File

@ -27,10 +27,9 @@ import (
"sync"
"time"
"go.uber.org/multierr"
"go.uber.org/zap/internal/bufferpool"
"go.uber.org/zap/internal/exit"
"go.uber.org/multierr"
)
var (
@ -152,6 +151,27 @@ type Entry struct {
Stack string
}
// CheckWriteHook is a custom action that may be executed after an entry is
// written.
//
// Register one on a CheckedEntry with the After method.
//
// if ce := logger.Check(...); ce != nil {
// ce = ce.After(hook)
// ce.Write(...)
// }
//
// You can configure the hook for Fatal log statements at the logger level with
// the zap.WithFatalHook option.
type CheckWriteHook interface {
// OnWrite is invoked with the CheckedEntry that was written and a list
// of fields added with that entry.
//
// The list of fields DOES NOT include fields that were already added
// to the logger with the With method.
OnWrite(*CheckedEntry, []Field)
}
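
A hedged sketch of a custom hook (the printingHook type below is hypothetical, not part of zap): implement OnWrite and register the hook with After before calling Write.

package main

import (
	"fmt"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// printingHook is a hypothetical CheckWriteHook that inspects an entry and the
// per-call fields after they have been written.
type printingHook struct{}

func (printingHook) OnWrite(ce *zapcore.CheckedEntry, fields []zapcore.Field) {
	// fields holds only the fields passed to this Write call, not fields
	// attached earlier with Logger.With.
	fmt.Printf("wrote %q with %d field(s)\n", ce.Message, len(fields))
}

func main() {
	logger := zap.NewExample()
	if ce := logger.Check(zap.WarnLevel, "disk almost full"); ce != nil {
		ce = ce.After(ce.Entry, printingHook{})
		ce.Write(zap.String("mount", "/var"))
	}
}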
// CheckWriteAction indicates what action to take after a log entry is
// processed. Actions are ordered in increasing severity.
type CheckWriteAction uint8
@ -164,21 +184,36 @@ const (
WriteThenGoexit
// WriteThenPanic causes a panic after Write.
WriteThenPanic
// WriteThenFatal causes a fatal os.Exit after Write.
// WriteThenFatal causes an os.Exit(1) after Write.
WriteThenFatal
)
// OnWrite implements the OnWrite method to keep CheckWriteAction compatible
// with the new CheckWriteHook interface which deprecates CheckWriteAction.
func (a CheckWriteAction) OnWrite(ce *CheckedEntry, _ []Field) {
switch a {
case WriteThenGoexit:
runtime.Goexit()
case WriteThenPanic:
panic(ce.Message)
case WriteThenFatal:
exit.With(1)
}
}
var _ CheckWriteHook = CheckWriteAction(0)
// CheckedEntry is an Entry together with a collection of Cores that have
// already agreed to log it.
//
// CheckedEntry references should be created by calling AddCore or Should on a
// CheckedEntry references should be created by calling AddCore or After on a
// nil *CheckedEntry. References are returned to a pool after Write, and MUST
// NOT be retained after calling their Write method.
type CheckedEntry struct {
Entry
ErrorOutput WriteSyncer
dirty bool // best-effort detection of pool misuse
should CheckWriteAction
after CheckWriteHook
cores []Core
}
@ -186,7 +221,7 @@ func (ce *CheckedEntry) reset() {
ce.Entry = Entry{}
ce.ErrorOutput = nil
ce.dirty = false
ce.should = WriteThenNoop
ce.after = nil
for i := range ce.cores {
// don't keep references to cores
ce.cores[i] = nil
@ -224,17 +259,11 @@ func (ce *CheckedEntry) Write(fields ...Field) {
ce.ErrorOutput.Sync()
}
should, msg := ce.should, ce.Message
putCheckedEntry(ce)
switch should {
case WriteThenPanic:
panic(msg)
case WriteThenFatal:
exit.Exit()
case WriteThenGoexit:
runtime.Goexit()
hook := ce.after
if hook != nil {
hook.OnWrite(ce, fields)
}
putCheckedEntry(ce)
}
// AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be
@ -252,11 +281,19 @@ func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry {
// Should sets this CheckedEntry's CheckWriteAction, which controls whether a
// Core will panic or fatal after writing this log entry. Like AddCore, it's
// safe to call on nil CheckedEntry references.
// Deprecated: Use After(ent Entry, after CheckWriteHook) instead.
func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry {
return ce.After(ent, should)
}
// After sets this CheckedEntry's CheckWriteHook, which will be called after this
// log entry has been written. It's safe to call this on nil CheckedEntry
// references.
func (ce *CheckedEntry) After(ent Entry, hook CheckWriteHook) *CheckedEntry {
if ce == nil {
ce = getCheckedEntry()
ce.Entry = ent
}
ce.should = should
ce.after = hook
return ce
}
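
Because CheckWriteAction values satisfy CheckWriteHook, they can be passed anywhere a hook is expected. One use sketched here is zap.WithFatalHook with WriteThenGoexit, which lets tests observe Fatal logs without the process exiting; the logger configuration is only illustrative.

package main

import (
	"fmt"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// WriteThenGoexit replaces the default os.Exit(1) behavior of Fatal with
	// runtime.Goexit, which unwinds only the calling goroutine.
	logger := zap.NewExample(zap.WithFatalHook(zapcore.WriteThenGoexit))

	done := make(chan struct{})
	go func() {
		defer close(done)
		logger.Fatal("fatal in a worker goroutine")
	}()
	<-done
	fmt.Println("process still alive after Fatal")
}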

View File

@ -36,13 +36,13 @@ import (
// causer (from github.com/pkg/errors), a ${key}Causes field is added with an
// array of objects containing the errors this error was comprised of.
//
// {
// "error": err.Error(),
// "errorVerbose": fmt.Sprintf("%+v", err),
// "errorCauses": [
// ...
// ],
// }
// {
// "error": err.Error(),
// "errorVerbose": fmt.Sprintf("%+v", err),
// "errorCauses": [
// ...
// ],
// }
func encodeError(key string, err error, enc ObjectEncoder) (retErr error) {
// Try to capture panics (from nil references or otherwise) when calling
// the Error() method

View File

@ -27,6 +27,11 @@ type hooked struct {
funcs []func(Entry) error
}
var (
_ Core = (*hooked)(nil)
_ leveledEnabler = (*hooked)(nil)
)
// RegisterHooks wraps a Core and runs a collection of user-defined callback
// hooks each time a message is logged. Execution of the callbacks is blocking.
//
@ -40,6 +45,10 @@ func RegisterHooks(core Core, hooks ...func(Entry) error) Core {
}
}
func (h *hooked) Level() Level {
return LevelOf(h.Core)
}
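
A sketch of RegisterHooks in use (the counter below is hypothetical): the callback runs synchronously for every entry the wrapped Core agrees to log.

package main

import (
	"fmt"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	var errorCount int64 // hypothetical counter; single-goroutine use only

	base := zap.NewExample().Core()
	hooked := zapcore.RegisterHooks(base, func(e zapcore.Entry) error {
		// Called once per logged entry, blocking the caller.
		if e.Level >= zapcore.ErrorLevel {
			errorCount++
		}
		return nil
	})

	logger := zap.New(hooked)
	logger.Error("boom")
	fmt.Println("errors seen:", errorCount) // 1
}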
func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
// Let the wrapped Core decide whether to log this message or not. This
// also gives the downstream a chance to register itself directly with the

View File

@ -27,6 +27,11 @@ type levelFilterCore struct {
level LevelEnabler
}
var (
_ Core = (*levelFilterCore)(nil)
_ leveledEnabler = (*levelFilterCore)(nil)
)
// NewIncreaseLevelCore creates a core that can be used to increase the level of
// an existing Core. It cannot be used to decrease the logging level, as it acts
// as a filter before calling the underlying core. If level decreases the log level,
@ -45,6 +50,10 @@ func (c *levelFilterCore) Enabled(lvl Level) bool {
return c.level.Enabled(lvl)
}
func (c *levelFilterCore) Level() Level {
return LevelOf(c.level)
}
func (c *levelFilterCore) With(fields []Field) Core {
return &levelFilterCore{c.core.With(fields), c.level}
}
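
A sketch of the wrapper in use via the zap.IncreaseLevel option (the levels are illustrative): it can only restrict output, never enable levels the wrapped core has disabled.

package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	base := zap.NewExample() // the example logger is enabled at Debug level

	// Raise the effective level to Warn; Debug and Info entries are filtered
	// before they reach the wrapped core.
	quiet := base.WithOptions(zap.IncreaseLevel(zapcore.WarnLevel))

	quiet.Info("dropped by the level filter")
	quiet.Warn("still logged")
}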

View File

@ -71,7 +71,9 @@ type jsonEncoder struct {
//
// Note that the encoder doesn't deduplicate keys, so it's possible to produce
// a message like
// {"foo":"bar","foo":"baz"}
//
// {"foo":"bar","foo":"baz"}
//
// This is permitted by the JSON specification, but not encouraged. Many
// libraries will ignore duplicate key-value pairs (typically keeping the last
// pair) when unmarshaling, but users should attempt to avoid adding duplicate

View File

@ -53,6 +53,11 @@ const (
_minLevel = DebugLevel
_maxLevel = FatalLevel
// InvalidLevel is an invalid value for Level.
//
// Core implementations may panic if they see messages of this level.
InvalidLevel = _maxLevel + 1
)
// ParseLevel parses a level based on the lower-case or all-caps ASCII
@ -67,6 +72,43 @@ func ParseLevel(text string) (Level, error) {
return level, err
}
type leveledEnabler interface {
LevelEnabler
Level() Level
}
// LevelOf reports the minimum enabled log level for the given LevelEnabler
// from Zap's supported log levels, or [InvalidLevel] if none of them are
// enabled.
//
// A LevelEnabler may implement a 'Level() Level' method to override the
// behavior of this function.
//
// func (c *core) Level() Level {
// return c.currentLevel
// }
//
// It is recommended that [Core] implementations that wrap other cores use
// LevelOf to retrieve the level of the wrapped core. For example,
//
// func (c *coreWrapper) Level() Level {
// return zapcore.LevelOf(c.wrappedCore)
// }
func LevelOf(enab LevelEnabler) Level {
if lvler, ok := enab.(leveledEnabler); ok {
return lvler.Level()
}
for lvl := _minLevel; lvl <= _maxLevel; lvl++ {
if enab.Enabled(lvl) {
return lvl
}
}
return InvalidLevel
}
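
A brief sketch of LevelOf in practice (the core construction is illustrative): cores that implement Level, like the updated ioCore, answer directly, while a bare LevelEnabler is probed with Enabled from Debug upward.

package main

import (
	"fmt"
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
	core := zapcore.NewCore(enc, zapcore.AddSync(os.Stderr), zapcore.WarnLevel)

	// ioCore now implements Level(), so LevelOf returns its level directly.
	fmt.Println(zapcore.LevelOf(core)) // warn

	// A plain Level only implements Enabled, so LevelOf falls back to probing.
	fmt.Println(zapcore.LevelOf(zapcore.ErrorLevel)) // error
}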
// String returns a lower-case ASCII representation of the log level.
func (l Level) String() string {
switch l {

View File

@ -1,4 +1,4 @@
// Copyright (c) 2016 Uber Technologies, Inc.
// Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@ -113,12 +113,12 @@ func nopSamplingHook(Entry, SamplingDecision) {}
// This hook may be used to get visibility into the performance of the sampler.
// For example, use it to track metrics of dropped versus sampled logs.
//
// var dropped atomic.Int64
// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) {
// if dec&zapcore.LogDropped > 0 {
// dropped.Inc()
// }
// })
// var dropped atomic.Int64
// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) {
// if dec&zapcore.LogDropped > 0 {
// dropped.Inc()
// }
// })
func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption {
return optionFunc(func(s *sampler) {
s.hook = hook
@ -135,7 +135,7 @@ func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption {
//
// For example,
//
// core = NewSamplerWithOptions(core, time.Second, 10, 5)
// core = NewSamplerWithOptions(core, time.Second, 10, 5)
//
// This will log the first 10 log entries with the same level and message
// in a one second interval as-is. Following that, it will allow through
@ -175,6 +175,11 @@ type sampler struct {
hook func(Entry, SamplingDecision)
}
var (
_ Core = (*sampler)(nil)
_ leveledEnabler = (*sampler)(nil)
)
// NewSampler creates a Core that samples incoming entries, which
// caps the CPU and I/O load of logging while attempting to preserve a
// representative subset of your logs.
@ -192,6 +197,10 @@ func NewSampler(core Core, tick time.Duration, first, thereafter int) Core {
return NewSamplerWithOptions(core, tick, first, thereafter)
}
func (s *sampler) Level() Level {
return LevelOf(s.Core)
}
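
The doc fragments above can be combined into one hedged sketch (the counter, destination, and rates are assumptions): sample repeated entries and count what the sampler drops.

package main

import (
	"fmt"
	"os"
	"time"

	"go.uber.org/atomic"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	var dropped atomic.Int64

	enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
	base := zapcore.NewCore(enc, zapcore.AddSync(os.Stderr), zapcore.InfoLevel)

	// Per second, the first 10 identical entries pass through, then every
	// 5th; the hook observes every sampling decision.
	sampled := zapcore.NewSamplerWithOptions(base, time.Second, 10, 5,
		zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) {
			if dec&zapcore.LogDropped > 0 {
				dropped.Inc()
			}
		}),
	)

	logger := zap.New(sampled)
	for i := 0; i < 100; i++ {
		logger.Info("hot loop message")
	}
	fmt.Println("dropped:", dropped.Load())
}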
func (s *sampler) With(fields []Field) Core {
return &sampler{
Core: s.Core.With(fields),

View File

@ -1,4 +1,4 @@
// Copyright (c) 2016 Uber Technologies, Inc.
// Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@ -24,6 +24,11 @@ import "go.uber.org/multierr"
type multiCore []Core
var (
_ leveledEnabler = multiCore(nil)
_ Core = multiCore(nil)
)
// NewTee creates a Core that duplicates log entries into two or more
// underlying Cores.
//
@ -48,6 +53,16 @@ func (mc multiCore) With(fields []Field) Core {
return clone
}
func (mc multiCore) Level() Level {
minLvl := _maxLevel // mc is never empty
for i := range mc {
if lvl := LevelOf(mc[i]); lvl < minLvl {
minLvl = lvl
}
}
return minLvl
}
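
A short sketch of the Level computation above (the two cores are illustrative): a Tee reports the most verbose, i.e. lowest, level among its children.

package main

import (
	"fmt"
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
	debugCore := zapcore.NewCore(enc, zapcore.AddSync(os.Stdout), zapcore.DebugLevel)
	errorCore := zapcore.NewCore(enc, zapcore.AddSync(os.Stderr), zapcore.ErrorLevel)

	tee := zapcore.NewTee(debugCore, errorCore)

	// The tee enables everything its most verbose child enables.
	fmt.Println(zapcore.LevelOf(tee)) // debug

	logger := zap.New(tee)
	logger.Debug("goes to stdout only")
	logger.Error("goes to both cores")
}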
func (mc multiCore) Enabled(lvl Level) bool {
for i := range mc {
if mc[i].Enabled(lvl) {