chore: upgrade dependencies

2023-10-15 11:51:11 +02:00
parent 156cc5204d
commit 49aaa38f82
99 changed files with 2799 additions and 2511 deletions

View File

@ -43,6 +43,37 @@ const (
//
// BufferedWriteSyncer is safe for concurrent use. You don't need to use
// zapcore.Lock for WriteSyncers with BufferedWriteSyncer.
//
// To set up a BufferedWriteSyncer, construct a WriteSyncer for your log
// destination (*os.File is a valid WriteSyncer), wrap it with
// BufferedWriteSyncer, and defer a Stop() call for when you no longer need the
// object.
//
// func main() {
// ws := ... // your log destination
// bws := &zapcore.BufferedWriteSyncer{WS: ws}
// defer bws.Stop()
//
// // ...
// core := zapcore.NewCore(enc, bws, lvl)
// logger := zap.New(core)
//
// // ...
// }
//
// By default, a BufferedWriteSyncer will buffer up to 256 kilobytes of logs,
// waiting at most 30 seconds between flushes.
// You can customize these parameters by setting the Size or FlushInterval
// fields.
// For example, the following buffers up to 512 kB of logs before flushing them
// to Stderr, with a maximum of one minute between each flush.
//
// ws := &BufferedWriteSyncer{
// WS: os.Stderr,
// Size: 512 * 1024, // 512 kB
// FlushInterval: time.Minute,
// }
// defer ws.Stop()
type BufferedWriteSyncer struct {
// WS is the WriteSyncer around which BufferedWriteSyncer will buffer
// writes.

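For reference, a complete, runnable version of the setup described in the comment above; the file name and encoder configuration are illustrative choices, not part of the vendored code.

package main

import (
	"os"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	f, err := os.Create("app.log") // hypothetical log destination
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Buffer up to 512 kB and flush at least once a minute, as in the
	// documentation example above.
	bws := &zapcore.BufferedWriteSyncer{
		WS:            f,
		Size:          512 * 1024,
		FlushInterval: time.Minute,
	}
	defer bws.Stop()

	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		bws,
		zapcore.InfoLevel,
	)
	logger := zap.New(core)
	logger.Info("buffered logging is set up")
}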
View File

@ -22,20 +22,20 @@ package zapcore
import (
"fmt"
"sync"
"go.uber.org/zap/buffer"
"go.uber.org/zap/internal/bufferpool"
"go.uber.org/zap/internal/pool"
)
var _sliceEncoderPool = sync.Pool{
New: func() interface{} {
return &sliceArrayEncoder{elems: make([]interface{}, 0, 2)}
},
}
var _sliceEncoderPool = pool.New(func() *sliceArrayEncoder {
return &sliceArrayEncoder{
elems: make([]interface{}, 0, 2),
}
})
func getSliceEncoder() *sliceArrayEncoder {
return _sliceEncoderPool.Get().(*sliceArrayEncoder)
return _sliceEncoderPool.Get()
}
func putSliceEncoder(e *sliceArrayEncoder) {

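go.uber.org/zap/internal/pool itself is not part of this hunk. Judging from how it is used here (pool.New with a typed constructor, Get with no type assertion), it is a thin generic wrapper around sync.Pool; the sketch below shows that shape as an assumption, not the actual implementation.

package pool

import "sync"

// Pool is a typed object pool: Get returns T directly, with no
// interface{} type assertion at the call site.
type Pool[T any] struct {
	p sync.Pool
}

// New constructs a Pool that calls fn to allocate a value whenever the
// underlying sync.Pool is empty.
func New[T any](fn func() T) *Pool[T] {
	return &Pool[T]{
		p: sync.Pool{
			New: func() any { return fn() },
		},
	}
}

func (p *Pool[T]) Get() T  { return p.p.Get().(T) }
func (p *Pool[T]) Put(x T) { p.p.Put(x) }

This is why the diff can drop the .(*sliceArrayEncoder) assertion from getSliceEncoder.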
View File

@ -69,6 +69,15 @@ type ioCore struct {
out WriteSyncer
}
var (
_ Core = (*ioCore)(nil)
_ leveledEnabler = (*ioCore)(nil)
)
func (c *ioCore) Level() Level {
return LevelOf(c.LevelEnabler)
}
func (c *ioCore) With(fields []Field) Core {
clone := c.clone()
addFields(clone.enc, fields)
@ -93,9 +102,9 @@ func (c *ioCore) Write(ent Entry, fields []Field) error {
return err
}
if ent.Level > ErrorLevel {
// Since we may be crashing the program, sync the output. Ignore Sync
// errors, pending a clean solution to issue #370.
c.Sync()
// Since we may be crashing the program, sync the output.
// Ignore Sync errors, pending a clean solution to issue #370.
_ = c.Sync()
}
return nil
}
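The var block added at the top of this hunk is a compile-time assertion: the build fails if ioCore ever stops satisfying Core or the new leveledEnabler interface, at no runtime cost. The same idiom works for user-defined cores; the wrapper type below is hypothetical and only illustrates the pattern.

package mylog

import "go.uber.org/zap/zapcore"

// droppingCore is a hypothetical wrapper core used only to show the
// blank-identifier assertion idiom adopted throughout this upgrade.
type droppingCore struct {
	zapcore.Core
}

// Fails to compile if droppingCore no longer implements zapcore.Core.
var _ zapcore.Core = (*droppingCore)(nil)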

View File

@ -188,10 +188,13 @@ func (e *TimeEncoder) UnmarshalText(text []byte) error {
// UnmarshalYAML unmarshals YAML to a TimeEncoder.
// If the value is an object with a "layout" field, it is unmarshaled to a TimeEncoder with the given layout.
// timeEncoder:
// layout: 06/01/02 03:04pm
//
// timeEncoder:
// layout: 06/01/02 03:04pm
//
// If the value is a string, it uses UnmarshalText.
// timeEncoder: iso8601
//
// timeEncoder: iso8601
func (e *TimeEncoder) UnmarshalYAML(unmarshal func(interface{}) error) error {
var o struct {
Layout string `json:"layout" yaml:"layout"`

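When the YAML value is a plain string, UnmarshalYAML defers to UnmarshalText as noted above; a small, self-contained sketch of decoding a named layout that way and using it in an encoder config (the keys and timestamp are illustrative):

package main

import (
	"fmt"
	"time"

	"go.uber.org/zap/zapcore"
)

func main() {
	var te zapcore.TimeEncoder
	if err := te.UnmarshalText([]byte("iso8601")); err != nil {
		panic(err)
	}

	enc := zapcore.NewJSONEncoder(zapcore.EncoderConfig{
		MessageKey: "msg",
		TimeKey:    "ts",
		EncodeTime: te,
	})
	buf, err := enc.EncodeEntry(zapcore.Entry{
		Time:    time.Date(2023, 10, 15, 11, 51, 11, 0, time.UTC),
		Message: "hello",
	}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Print(buf.String()) // {"ts":"2023-10-15T11:51:11.000Z","msg":"hello"}
}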
View File

@ -24,26 +24,23 @@ import (
"fmt"
"runtime"
"strings"
"sync"
"time"
"go.uber.org/multierr"
"go.uber.org/zap/internal/bufferpool"
"go.uber.org/zap/internal/exit"
"go.uber.org/multierr"
"go.uber.org/zap/internal/pool"
)
var (
_cePool = sync.Pool{New: func() interface{} {
// Pre-allocate some space for cores.
return &CheckedEntry{
cores: make([]Core, 4),
}
}}
)
var _cePool = pool.New(func() *CheckedEntry {
// Pre-allocate some space for cores.
return &CheckedEntry{
cores: make([]Core, 4),
}
})
func getCheckedEntry() *CheckedEntry {
ce := _cePool.Get().(*CheckedEntry)
ce := _cePool.Get()
ce.reset()
return ce
}
@ -152,6 +149,27 @@ type Entry struct {
Stack string
}
// CheckWriteHook is a custom action that may be executed after an entry is
// written.
//
// Register one on a CheckedEntry with the After method.
//
// if ce := logger.Check(...); ce != nil {
// ce = ce.After(hook)
// ce.Write(...)
// }
//
// You can configure the hook for Fatal log statements at the logger level with
// the zap.WithFatalHook option.
type CheckWriteHook interface {
// OnWrite is invoked with the CheckedEntry that was written and a list
// of fields added with that entry.
//
// The list of fields DOES NOT include fields that were already added
// to the logger with the With method.
OnWrite(*CheckedEntry, []Field)
}
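A minimal custom hook, assuming the goal is to flush a buffered WriteSyncer before the process dies on a fatal log; the type and field names are illustrative. Note that a hook installed with zap.WithFatalHook replaces the default WriteThenFatal behavior, so a hook used for Fatal entries should terminate the process itself.

package mylog

import (
	"os"

	"go.uber.org/zap/zapcore"
)

// flushThenExit is a hypothetical CheckWriteHook: flush a WriteSyncer,
// then exit, since the hook takes over the fatal-exit responsibility.
type flushThenExit struct {
	ws zapcore.WriteSyncer
}

var _ zapcore.CheckWriteHook = flushThenExit{}

func (h flushThenExit) OnWrite(_ *zapcore.CheckedEntry, _ []zapcore.Field) {
	_ = h.ws.Sync() // best-effort flush; error intentionally ignored
	os.Exit(1)
}

It would be installed with zap.New(core, zap.WithFatalHook(flushThenExit{ws: bws})).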
// CheckWriteAction indicates what action to take after a log entry is
// processed. Actions are ordered in increasing severity.
type CheckWriteAction uint8
@ -164,21 +182,36 @@ const (
WriteThenGoexit
// WriteThenPanic causes a panic after Write.
WriteThenPanic
// WriteThenFatal causes a fatal os.Exit after Write.
// WriteThenFatal causes an os.Exit(1) after Write.
WriteThenFatal
)
// OnWrite implements CheckWriteHook, keeping CheckWriteAction compatible with
// the interface that deprecates it.
func (a CheckWriteAction) OnWrite(ce *CheckedEntry, _ []Field) {
switch a {
case WriteThenGoexit:
runtime.Goexit()
case WriteThenPanic:
panic(ce.Message)
case WriteThenFatal:
exit.With(1)
}
}
var _ CheckWriteHook = CheckWriteAction(0)
// CheckedEntry is an Entry together with a collection of Cores that have
// already agreed to log it.
//
// CheckedEntry references should be created by calling AddCore or Should on a
// CheckedEntry references should be created by calling AddCore or After on a
// nil *CheckedEntry. References are returned to a pool after Write, and MUST
// NOT be retained after calling their Write method.
type CheckedEntry struct {
Entry
ErrorOutput WriteSyncer
dirty bool // best-effort detection of pool misuse
should CheckWriteAction
after CheckWriteHook
cores []Core
}
@ -186,7 +219,7 @@ func (ce *CheckedEntry) reset() {
ce.Entry = Entry{}
ce.ErrorOutput = nil
ce.dirty = false
ce.should = WriteThenNoop
ce.after = nil
for i := range ce.cores {
// don't keep references to cores
ce.cores[i] = nil
@ -209,7 +242,7 @@ func (ce *CheckedEntry) Write(fields ...Field) {
// CheckedEntry is being used after it was returned to the pool,
// the message may be an amalgamation from multiple call sites.
fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry)
ce.ErrorOutput.Sync()
_ = ce.ErrorOutput.Sync() // ignore error
}
return
}
@ -221,20 +254,14 @@ func (ce *CheckedEntry) Write(fields ...Field) {
}
if err != nil && ce.ErrorOutput != nil {
fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err)
ce.ErrorOutput.Sync()
_ = ce.ErrorOutput.Sync() // ignore error
}
should, msg := ce.should, ce.Message
hook := ce.after
if hook != nil {
hook.OnWrite(ce, fields)
}
putCheckedEntry(ce)
switch should {
case WriteThenPanic:
panic(msg)
case WriteThenFatal:
exit.Exit()
case WriteThenGoexit:
runtime.Goexit()
}
}
// AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be
@ -252,11 +279,20 @@ func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry {
// Should sets this CheckedEntry's CheckWriteAction, which controls whether a
// Core will panic or fatal after writing this log entry. Like AddCore, it's
// safe to call on nil CheckedEntry references.
//
// Deprecated: Use [CheckedEntry.After] instead.
func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry {
return ce.After(ent, should)
}
// After sets this CheckedEntry's CheckWriteHook, which will be called after this
// log entry has been written. It's safe to call this on nil CheckedEntry
// references.
func (ce *CheckedEntry) After(ent Entry, hook CheckWriteHook) *CheckedEntry {
if ce == nil {
ce = getCheckedEntry()
ce.Entry = ent
}
ce.should = should
ce.after = hook
return ce
}

View File

@ -23,7 +23,8 @@ package zapcore
import (
"fmt"
"reflect"
"sync"
"go.uber.org/zap/internal/pool"
)
// Encodes the given error into fields of an object. A field with the given
@ -36,13 +37,13 @@ import (
// causer (from github.com/pkg/errors), a ${key}Causes field is added with an
// array of objects containing the errors this error was comprised of.
//
// {
// "error": err.Error(),
// "errorVerbose": fmt.Sprintf("%+v", err),
// "errorCauses": [
// ...
// ],
// }
// {
// "error": err.Error(),
// "errorVerbose": fmt.Sprintf("%+v", err),
// "errorCauses": [
// ...
// ],
// }
func encodeError(key string, err error, enc ObjectEncoder) (retErr error) {
// Try to capture panics (from nil references or otherwise) when calling
// the Error() method
@ -97,15 +98,18 @@ func (errs errArray) MarshalLogArray(arr ArrayEncoder) error {
}
el := newErrArrayElem(errs[i])
arr.AppendObject(el)
err := arr.AppendObject(el)
el.Free()
if err != nil {
return err
}
}
return nil
}
var _errArrayElemPool = sync.Pool{New: func() interface{} {
var _errArrayElemPool = pool.New(func() *errArrayElem {
return &errArrayElem{}
}}
})
// Encodes any error into a {"error": ...} re-using the same error-encoding logic.
//
@ -113,7 +117,7 @@ var _errArrayElemPool = sync.Pool{New: func() interface{} {
type errArrayElem struct{ err error }
func newErrArrayElem(err error) *errArrayElem {
e := _errArrayElemPool.Get().(*errArrayElem)
e := _errArrayElemPool.Get()
e.err = err
return e
}
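The field names described in the encodeError documentation above surface through zap.Error; a short sketch, where the error value is illustrative:

package main

import (
	"errors"
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	logger := zap.New(zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.AddSync(os.Stdout),
		zapcore.InfoLevel,
	))

	// A plain error yields only the "error" field; "errorVerbose" and
	// "errorCauses" appear only when the error supports %+v formatting or
	// exposes underlying causes.
	logger.Error("request failed", zap.Error(errors.New("connection reset")))
}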

View File

@ -27,6 +27,11 @@ type hooked struct {
funcs []func(Entry) error
}
var (
_ Core = (*hooked)(nil)
_ leveledEnabler = (*hooked)(nil)
)
// RegisterHooks wraps a Core and runs a collection of user-defined callback
// hooks each time a message is logged. Execution of the callbacks is blocking.
//
@ -40,6 +45,10 @@ func RegisterHooks(core Core, hooks ...func(Entry) error) Core {
}
}
func (h *hooked) Level() Level {
return LevelOf(h.Core)
}
func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
// Let the wrapped Core decide whether to log this message or not. This
// also gives the downstream a chance to register itself directly with the

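A short usage sketch for RegisterHooks: count error-or-worse entries as they pass through a wrapped core. The counter and encoder choices are illustrative.

package main

import (
	"os"
	"sync/atomic"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	var errorCount atomic.Int64

	base := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.AddSync(os.Stdout),
		zapcore.InfoLevel,
	)

	// The callback runs synchronously on every entry written by the
	// wrapped core, per the documentation above.
	core := zapcore.RegisterHooks(base, func(e zapcore.Entry) error {
		if e.Level >= zapcore.ErrorLevel {
			errorCount.Add(1)
		}
		return nil
	})

	logger := zap.New(core)
	logger.Error("something failed")
	logger.Info("errors so far", zap.Int64("count", errorCount.Load()))
}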
View File

@ -27,6 +27,11 @@ type levelFilterCore struct {
level LevelEnabler
}
var (
_ Core = (*levelFilterCore)(nil)
_ leveledEnabler = (*levelFilterCore)(nil)
)
// NewIncreaseLevelCore creates a core that can be used to increase the level of
// an existing Core. It cannot be used to decrease the logging level, as it acts
// as a filter before calling the underlying core. If level decreases the log level,
@ -45,6 +50,10 @@ func (c *levelFilterCore) Enabled(lvl Level) bool {
return c.level.Enabled(lvl)
}
func (c *levelFilterCore) Level() Level {
return LevelOf(c.level)
}
func (c *levelFilterCore) With(fields []Field) Core {
return &levelFilterCore{c.core.With(fields), c.level}
}

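A sketch of the intended use: wrap an Info-level core so that only Warn and above pass through. Per the documentation above, the wrapper cannot lower the threshold; requesting a level more verbose than the wrapped core's is rejected with an error.

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	infoCore := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.AddSync(os.Stderr),
		zapcore.InfoLevel,
	)

	warnCore, err := zapcore.NewIncreaseLevelCore(infoCore, zapcore.WarnLevel)
	if err != nil {
		panic(err)
	}

	logger := zap.New(warnCore)
	logger.Info("filtered out by the increased level")
	logger.Warn("still logged")
}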
View File

@ -23,24 +23,20 @@ package zapcore
import (
"encoding/base64"
"math"
"sync"
"time"
"unicode/utf8"
"go.uber.org/zap/buffer"
"go.uber.org/zap/internal/bufferpool"
"go.uber.org/zap/internal/pool"
)
// For JSON-escaping; see jsonEncoder.safeAddString below.
const _hex = "0123456789abcdef"
var _jsonPool = sync.Pool{New: func() interface{} {
var _jsonPool = pool.New(func() *jsonEncoder {
return &jsonEncoder{}
}}
func getJSONEncoder() *jsonEncoder {
return _jsonPool.Get().(*jsonEncoder)
}
})
func putJSONEncoder(enc *jsonEncoder) {
if enc.reflectBuf != nil {
@ -71,7 +67,9 @@ type jsonEncoder struct {
//
// Note that the encoder doesn't deduplicate keys, so it's possible to produce
// a message like
// {"foo":"bar","foo":"baz"}
//
// {"foo":"bar","foo":"baz"}
//
// This is permitted by the JSON specification, but not encouraged. Many
// libraries will ignore duplicate key-value pairs (typically keeping the last
// pair) when unmarshaling, but users should attempt to avoid adding duplicate
@ -352,7 +350,7 @@ func (enc *jsonEncoder) Clone() Encoder {
}
func (enc *jsonEncoder) clone() *jsonEncoder {
clone := getJSONEncoder()
clone := _jsonPool.Get()
clone.EncoderConfig = enc.EncoderConfig
clone.spaced = enc.spaced
clone.openNamespaces = enc.openNamespaces
@ -488,73 +486,98 @@ func (enc *jsonEncoder) appendFloat(val float64, bitSize int) {
// Unlike the standard library's encoder, it doesn't attempt to protect the
// user from browser vulnerabilities or JSONP-related problems.
func (enc *jsonEncoder) safeAddString(s string) {
for i := 0; i < len(s); {
if enc.tryAddRuneSelf(s[i]) {
i++
continue
}
r, size := utf8.DecodeRuneInString(s[i:])
if enc.tryAddRuneError(r, size) {
i++
continue
}
enc.buf.AppendString(s[i : i+size])
i += size
}
safeAppendStringLike(
(*buffer.Buffer).AppendString,
utf8.DecodeRuneInString,
enc.buf,
s,
)
}
// safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte.
func (enc *jsonEncoder) safeAddByteString(s []byte) {
safeAppendStringLike(
(*buffer.Buffer).AppendBytes,
utf8.DecodeRune,
enc.buf,
s,
)
}
// safeAppendStringLike is a generic implementation of safeAddString and safeAddByteString.
// It appends a string or byte slice to the buffer, escaping all special characters.
func safeAppendStringLike[S []byte | string](
// appendTo appends this string-like object to the buffer.
appendTo func(*buffer.Buffer, S),
// decodeRune decodes the next rune from the string-like object
// and returns its value and width in bytes.
decodeRune func(S) (rune, int),
buf *buffer.Buffer,
s S,
) {
// The encoding logic below works by skipping over characters
// that can be safely copied as-is,
// until a character is found that needs special handling.
// At that point, we copy everything we've seen so far,
// and then handle that special character.
//
// last is the index of the last byte that was copied to the buffer.
last := 0
for i := 0; i < len(s); {
if enc.tryAddRuneSelf(s[i]) {
i++
continue
}
r, size := utf8.DecodeRune(s[i:])
if enc.tryAddRuneError(r, size) {
i++
continue
}
enc.buf.Write(s[i : i+size])
i += size
}
}
if s[i] >= utf8.RuneSelf {
// Characters >= RuneSelf may be part of a multi-byte rune.
// They need to be decoded before we can decide how to handle them.
r, size := decodeRune(s[i:])
if r != utf8.RuneError || size != 1 {
// No special handling required.
// Skip over this rune and continue.
i += size
continue
}
// tryAddRuneSelf appends b if it is valid UTF-8 character represented in a single byte.
func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool {
if b >= utf8.RuneSelf {
return false
}
if 0x20 <= b && b != '\\' && b != '"' {
enc.buf.AppendByte(b)
return true
}
switch b {
case '\\', '"':
enc.buf.AppendByte('\\')
enc.buf.AppendByte(b)
case '\n':
enc.buf.AppendByte('\\')
enc.buf.AppendByte('n')
case '\r':
enc.buf.AppendByte('\\')
enc.buf.AppendByte('r')
case '\t':
enc.buf.AppendByte('\\')
enc.buf.AppendByte('t')
default:
// Encode bytes < 0x20, except for the escape sequences above.
enc.buf.AppendString(`\u00`)
enc.buf.AppendByte(_hex[b>>4])
enc.buf.AppendByte(_hex[b&0xF])
}
return true
}
// Invalid UTF-8 sequence.
// Replace it with the Unicode replacement character.
appendTo(buf, s[last:i])
buf.AppendString(`\ufffd`)
func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool {
if r == utf8.RuneError && size == 1 {
enc.buf.AppendString(`\ufffd`)
return true
i++
last = i
} else {
// Character < RuneSelf is a single-byte UTF-8 rune.
if s[i] >= 0x20 && s[i] != '\\' && s[i] != '"' {
// No escaping necessary.
// Skip over this character and continue.
i++
continue
}
// This character needs to be escaped.
appendTo(buf, s[last:i])
switch s[i] {
case '\\', '"':
buf.AppendByte('\\')
buf.AppendByte(s[i])
case '\n':
buf.AppendByte('\\')
buf.AppendByte('n')
case '\r':
buf.AppendByte('\\')
buf.AppendByte('r')
case '\t':
buf.AppendByte('\\')
buf.AppendByte('t')
default:
// Encode bytes < 0x20, except for the escape sequences above.
buf.AppendString(`\u00`)
buf.AppendByte(_hex[s[i]>>4])
buf.AppendByte(_hex[s[i]&0xF])
}
i++
last = i
}
}
return false
// add remaining
appendTo(buf, s[last:])
}
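The escaping behavior of safeAppendStringLike can be observed through the public JSON encoder; a minimal sketch, where the encoder config and message are illustrative:

package main

import (
	"fmt"

	"go.uber.org/zap/zapcore"
)

func main() {
	enc := zapcore.NewJSONEncoder(zapcore.EncoderConfig{MessageKey: "msg"})

	// Quotes, backslashes, and control characters are escaped; the invalid
	// UTF-8 byte \xff is replaced with \ufffd.
	buf, err := enc.EncodeEntry(zapcore.Entry{Message: "tab:\t quote:\" bad:\xff"}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Print(buf.String()) // {"msg":"tab:\t quote:\" bad:\ufffd"}
}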

vendor/go.uber.org/zap/zapcore/lazy_with.go (new file, generated, vendored; 54 lines)
View File

@ -0,0 +1,54 @@
// Copyright (c) 2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zapcore
import "sync"
type lazyWithCore struct {
Core
sync.Once
fields []Field
}
// NewLazyWith wraps a Core with a "lazy" Core that will only encode fields if
// the logger is written to (or is further chained in a non-lazy manner).
func NewLazyWith(core Core, fields []Field) Core {
return &lazyWithCore{
Core: core,
fields: fields,
}
}
func (d *lazyWithCore) initOnce() {
d.Once.Do(func() {
d.Core = d.Core.With(d.fields)
})
}
func (d *lazyWithCore) With(fields []Field) Core {
d.initOnce()
return d.Core.With(fields)
}
func (d *lazyWithCore) Check(e Entry, ce *CheckedEntry) *CheckedEntry {
d.initOnce()
return d.Core.Check(e, ce)
}
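A short sketch of wrapping a core with NewLazyWith directly. Note that the Field values themselves are still built eagerly here; only the core.With call (encoding them into a clone) is deferred. The field and encoder choices are illustrative.

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	base := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.AddSync(os.Stdout),
		zapcore.InfoLevel,
	)

	// base.With is not called until the first With or Check on the wrapped
	// core, per lazyWithCore.initOnce above.
	lazy := zapcore.NewLazyWith(base, []zapcore.Field{
		zap.String("component", "billing"),
	})

	logger := zap.New(lazy)
	logger.Info("fields were attached lazily")
}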

View File

@ -53,6 +53,11 @@ const (
_minLevel = DebugLevel
_maxLevel = FatalLevel
// InvalidLevel is an invalid value for Level.
//
// Core implementations may panic if they see messages of this level.
InvalidLevel = _maxLevel + 1
)
// ParseLevel parses a level based on the lower-case or all-caps ASCII
@ -67,6 +72,43 @@ func ParseLevel(text string) (Level, error) {
return level, err
}
type leveledEnabler interface {
LevelEnabler
Level() Level
}
// LevelOf reports the minimum enabled log level for the given LevelEnabler
// from Zap's supported log levels, or [InvalidLevel] if none of them are
// enabled.
//
// A LevelEnabler may implement a 'Level() Level' method to override the
// behavior of this function.
//
// func (c *core) Level() Level {
// return c.currentLevel
// }
//
// It is recommended that [Core] implementations that wrap other cores use
// LevelOf to retrieve the level of the wrapped core. For example,
//
// func (c *coreWrapper) Level() Level {
// return zapcore.LevelOf(c.wrappedCore)
// }
func LevelOf(enab LevelEnabler) Level {
if lvler, ok := enab.(leveledEnabler); ok {
return lvler.Level()
}
for lvl := _minLevel; lvl <= _maxLevel; lvl++ {
if enab.Enabled(lvl) {
return lvl
}
}
return InvalidLevel
}
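A quick check of the behavior described above (the encoder and sink are arbitrary):

package main

import (
	"fmt"
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.AddSync(os.Stderr),
		zapcore.WarnLevel,
	)

	// ioCore implements Level(), so this returns WarnLevel directly; for
	// enablers without the method, LevelOf probes Enabled level by level.
	fmt.Println(zapcore.LevelOf(core)) // warn
}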
// String returns a lower-case ASCII representation of the log level.
func (l Level) String() string {
switch l {

View File

@ -1,4 +1,4 @@
// Copyright (c) 2016 Uber Technologies, Inc.
// Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@ -21,9 +21,8 @@
package zapcore
import (
"sync/atomic"
"time"
"go.uber.org/atomic"
)
const (
@ -66,16 +65,16 @@ func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 {
tn := t.UnixNano()
resetAfter := c.resetAt.Load()
if resetAfter > tn {
return c.counter.Inc()
return c.counter.Add(1)
}
c.counter.Store(1)
newResetAfter := tn + tick.Nanoseconds()
if !c.resetAt.CAS(resetAfter, newResetAfter) {
if !c.resetAt.CompareAndSwap(resetAfter, newResetAfter) {
// We raced with another goroutine trying to reset, and it also reset
// the counter to 1, so we need to reincrement the counter.
return c.counter.Inc()
return c.counter.Add(1)
}
return 1
@ -113,12 +112,12 @@ func nopSamplingHook(Entry, SamplingDecision) {}
// This hook may be used to get visibility into the performance of the sampler.
// For example, use it to track metrics of dropped versus sampled logs.
//
// var dropped atomic.Int64
// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) {
// if dec&zapcore.LogDropped > 0 {
// dropped.Inc()
// }
// })
// var dropped atomic.Int64
// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) {
// if dec&zapcore.LogDropped > 0 {
// dropped.Inc()
// }
// })
func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption {
return optionFunc(func(s *sampler) {
s.hook = hook
@ -135,7 +134,7 @@ func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption {
//
// For example,
//
// core = NewSamplerWithOptions(core, time.Second, 10, 5)
// core = NewSamplerWithOptions(core, time.Second, 10, 5)
//
// This will log the first 10 log entries with the same level and message
// in a one second interval as-is. Following that, it will allow through
@ -175,6 +174,11 @@ type sampler struct {
hook func(Entry, SamplingDecision)
}
var (
_ Core = (*sampler)(nil)
_ leveledEnabler = (*sampler)(nil)
)
// NewSampler creates a Core that samples incoming entries, which
// caps the CPU and I/O load of logging while attempting to preserve a
// representative subset of your logs.
@ -192,6 +196,10 @@ func NewSampler(core Core, tick time.Duration, first, thereafter int) Core {
return NewSamplerWithOptions(core, tick, first, thereafter)
}
func (s *sampler) Level() Level {
return LevelOf(s.Core)
}
func (s *sampler) With(fields []Field) Core {
return &sampler{
Core: s.Core.With(fields),

View File

@ -1,4 +1,4 @@
// Copyright (c) 2016 Uber Technologies, Inc.
// Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@ -24,6 +24,11 @@ import "go.uber.org/multierr"
type multiCore []Core
var (
_ leveledEnabler = multiCore(nil)
_ Core = multiCore(nil)
)
// NewTee creates a Core that duplicates log entries into two or more
// underlying Cores.
//
@ -48,6 +53,16 @@ func (mc multiCore) With(fields []Field) Core {
return clone
}
func (mc multiCore) Level() Level {
minLvl := _maxLevel // mc is never empty
for i := range mc {
if lvl := LevelOf(mc[i]); lvl < minLvl {
minLvl = lvl
}
}
return minLvl
}
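The minimum rule above means a Tee reports the most verbose level any of its children will accept; a brief sketch (encoder and sink arbitrary):

package main

import (
	"fmt"
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
	ws := zapcore.AddSync(os.Stdout)

	tee := zapcore.NewTee(
		zapcore.NewCore(enc, ws, zapcore.DebugLevel),
		zapcore.NewCore(enc, ws, zapcore.WarnLevel),
	)

	fmt.Println(zapcore.LevelOf(tee)) // debug
}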
func (mc multiCore) Enabled(lvl Level) bool {
for i := range mc {
if mc[i].Enabled(lvl) {