chore: upgrade dependencies

2023-10-15 11:51:11 +02:00
parent 156cc5204d
commit 49aaa38f82
99 changed files with 2799 additions and 2511 deletions

77
vendor/go.uber.org/zap/.golangci.yml generated vendored Normal file
View File

@ -0,0 +1,77 @@
output:
# Make output more digestible with quickfix in vim/emacs/etc.
sort-results: true
print-issued-lines: false
linters:
# We'll track the golangci-lint default linters manually
# instead of letting them change without our control.
disable-all: true
enable:
# golangci-lint defaults:
- errcheck
- gosimple
- govet
- ineffassign
- staticcheck
- unused
# Our own extras:
- gofmt
- nolintlint # lints nolint directives
- revive
linters-settings:
govet:
# These govet checks are disabled by default, but they're useful.
enable:
- nilness
- reflectvaluecompare
- sortslice
- unusedwrite
errcheck:
exclude-functions:
# These methods cannot fail.
# They operate on an in-memory buffer.
- (*go.uber.org/zap/buffer.Buffer).Write
- (*go.uber.org/zap/buffer.Buffer).WriteByte
- (*go.uber.org/zap/buffer.Buffer).WriteString
- (*go.uber.org/zap/zapio.Writer).Close
- (*go.uber.org/zap/zapio.Writer).Sync
- (*go.uber.org/zap/zapio.Writer).Write
# Write to zapio.Writer cannot fail,
# so io.WriteString on it cannot fail.
- io.WriteString(*go.uber.org/zap/zapio.Writer)
# Writing a plain string to a fmt.State cannot fail.
- io.WriteString(fmt.State)
issues:
# Print all issues reported by all linters.
max-issues-per-linter: 0
max-same-issues: 0
# Don't ignore some of the issues that golangci-lint considers okay.
# This includes documenting all exported entities.
exclude-use-default: false
exclude-rules:
# Don't warn on unused parameters.
# Parameter names are useful; replacing them with '_' is undesirable.
- linters: [revive]
text: 'unused-parameter: parameter \S+ seems to be unused, consider removing or renaming it as _'
# staticcheck already has smarter checks for empty blocks.
# revive's empty-block linter has false positives.
# For example, as of writing this, the following is not allowed.
# for foo() { }
- linters: [revive]
text: 'empty-block: this block is empty, you can remove it'
# Ignore logger.Sync() errcheck failures in example_test.go
# since those are intended to be uncomplicated examples.
- linters: [errcheck]
path: example_test.go
text: 'Error return value of `logger.Sync` is not checked'

293
vendor/go.uber.org/zap/CHANGELOG.md generated vendored
View File

@ -1,7 +1,91 @@
# Changelog
All notable changes to this project will be documented in this file.
This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## 1.26.0 (14 Sep 2023)
Enhancements:
* [#1319][]: Add `WithLazy` method to `Logger` which lazily evaluates the structured
context.
* [#1350][]: String encoding is much (~50%) faster now.
Thanks to @jquirke, @cdvr1993 for their contributions to this release.
[#1319]: https://github.com/uber-go/zap/pull/1319
[#1350]: https://github.com/uber-go/zap/pull/1350
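For reference, a minimal sketch of the `WithLazy` addition noted above, assuming the zap v1.26.0 API; the `request_id` field is illustrative only:

```go
package main

import "go.uber.org/zap"

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	// WithLazy defers encoding of the fields until the child logger
	// actually writes an entry, so the cost is skipped entirely if it
	// never logs.
	child := logger.WithLazy(zap.String("request_id", "abc-123"))
	child.Info("handling request")
}
```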
## 1.25.0 (1 Aug 2023)
This release contains several improvements including performance, API additions,
and two new experimental packages whose APIs are unstable and may change in the
future.
Enhancements:
* [#1246][]: Add `zap/exp/zapslog` package for integration with slog.
* [#1273][]: Add `Name` to `Logger` which returns the Logger's name if one is set.
* [#1281][]: Add `zap/exp/expfield` package which contains helper methods
`Str` and `Strs` for constructing String-like zap.Fields.
* [#1310][]: Reduce stack size on `Any`.
Thanks to @knight42, @dzakaammar, @bcspragu, and @rexywork for their contributions
to this release.
[#1246]: https://github.com/uber-go/zap/pull/1246
[#1273]: https://github.com/uber-go/zap/pull/1273
[#1281]: https://github.com/uber-go/zap/pull/1281
[#1310]: https://github.com/uber-go/zap/pull/1310
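A small sketch of the `Logger.Name` addition from this release; the logger name "checkout" is illustrative:

```go
package main

import (
	"fmt"

	"go.uber.org/zap"
)

func main() {
	logger := zap.NewExample().Named("checkout")
	defer logger.Sync()

	// Name reports the name assigned via Named, or "" if none was set.
	fmt.Println(logger.Name()) // checkout
}
```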
## 1.24.0 (30 Nov 2022)
Enhancements:
* [#1148][]: Add `Level` to both `Logger` and `SugaredLogger` that reports the
current minimum enabled log level.
* [#1185][]: `SugaredLogger` turns errors to zap.Error automatically.
Thanks to @Abirdcfly, @craigpastro, @nnnkkk7, and @sashamelentyev for their
contributions to this release.
[#1148]: https://github.com/uber-go/zap/pull/1148
[#1185]: https://github.com/uber-go/zap/pull/1185
## 1.23.0 (24 Aug 2022)
Enhancements:
* [#1147][]: Add a `zapcore.LevelOf` function to determine the level of a
`LevelEnabler` or `Core`.
* [#1155][]: Add `zap.Stringers` field constructor to log arrays of objects
that implement `String() string`.
[#1147]: https://github.com/uber-go/zap/pull/1147
[#1155]: https://github.com/uber-go/zap/pull/1155
## 1.22.0 (8 Aug 2022)
Enhancements:
* [#1071][]: Add `zap.Objects` and `zap.ObjectValues` field constructors to log
arrays of objects. With these two constructors, you don't need to implement
`zapcore.ArrayMarshaler` for use with `zap.Array` if those objects implement
`zapcore.ObjectMarshaler`.
* [#1079][]: Add `SugaredLogger.WithOptions` to build a copy of an existing
`SugaredLogger` with the provided options applied.
* [#1080][]: Add `*ln` variants to `SugaredLogger` for each log level.
These functions provide a string joining behavior similar to `fmt.Println`.
* [#1088][]: Add `zap.WithFatalHook` option to control the behavior of the
logger for `Fatal`-level log entries. This defaults to exiting the program.
* [#1108][]: Add a `zap.Must` function that you can use with `NewProduction` or
`NewDevelopment` to panic if the system was unable to build the logger.
* [#1118][]: Add a `Logger.Log` method that allows specifying the log level for
a statement dynamically.
Thanks to @cardil, @craigpastro, @sashamelentyev, @shota3506, and @zhupeijun
for their contributions to this release.
[#1071]: https://github.com/uber-go/zap/pull/1071
[#1079]: https://github.com/uber-go/zap/pull/1079
[#1080]: https://github.com/uber-go/zap/pull/1080
[#1088]: https://github.com/uber-go/zap/pull/1088
[#1108]: https://github.com/uber-go/zap/pull/1108
[#1118]: https://github.com/uber-go/zap/pull/1118
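A brief sketch combining the `zap.Must` and `Logger.Log` additions from this release; the field names and values are illustrative:

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Must panics if NewProduction fails, trimming the usual
	// error-handling boilerplate from logger initialization.
	logger := zap.Must(zap.NewProduction())
	defer logger.Sync()

	// Log chooses the level at the call site instead of baking it
	// into the method name.
	level := zapcore.WarnLevel
	logger.Log(level, "disk usage above threshold", zap.Int("percent", 91))
}
```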
## 1.21.0 (7 Feb 2022)
@ -123,6 +207,16 @@ Enhancements:
Thanks to @ash2k, @FMLS, @jimmystewpot, @Oncilla, @tsoslow, @tylitianrui, @withshubh, and @wziww for their contributions to this release.
[#865]: https://github.com/uber-go/zap/pull/865
[#867]: https://github.com/uber-go/zap/pull/867
[#881]: https://github.com/uber-go/zap/pull/881
[#903]: https://github.com/uber-go/zap/pull/903
[#912]: https://github.com/uber-go/zap/pull/912
[#913]: https://github.com/uber-go/zap/pull/913
[#928]: https://github.com/uber-go/zap/pull/928
[#931]: https://github.com/uber-go/zap/pull/931
[#936]: https://github.com/uber-go/zap/pull/936
## 1.16.0 (1 Sep 2020)
Bugfixes:
@ -144,6 +238,17 @@ Enhancements:
Thanks to @SteelPhase, @tmshn, @lixingwang, @wyxloading, @moul, @segevfiner, @andy-retailnext and @jcorbin for their contributions to this release.
[#629]: https://github.com/uber-go/zap/pull/629
[#697]: https://github.com/uber-go/zap/pull/697
[#828]: https://github.com/uber-go/zap/pull/828
[#835]: https://github.com/uber-go/zap/pull/835
[#843]: https://github.com/uber-go/zap/pull/843
[#844]: https://github.com/uber-go/zap/pull/844
[#852]: https://github.com/uber-go/zap/pull/852
[#854]: https://github.com/uber-go/zap/pull/854
[#861]: https://github.com/uber-go/zap/pull/861
[#862]: https://github.com/uber-go/zap/pull/862
## 1.15.0 (23 Apr 2020)
Bugfixes:
@ -160,6 +265,11 @@ Enhancements:
Thanks to @danielbprice for their contributions to this release.
[#804]: https://github.com/uber-go/zap/pull/804
[#812]: https://github.com/uber-go/zap/pull/812
[#806]: https://github.com/uber-go/zap/pull/806
[#813]: https://github.com/uber-go/zap/pull/813
## 1.14.1 (14 Mar 2020)
Bugfixes:
@ -172,6 +282,10 @@ Bugfixes:
Thanks to @YashishDua for their contributions to this release.
[#791]: https://github.com/uber-go/zap/pull/791
[#795]: https://github.com/uber-go/zap/pull/795
[#799]: https://github.com/uber-go/zap/pull/799
## 1.14.0 (20 Feb 2020)
Enhancements:
@ -182,6 +296,11 @@ Enhancements:
Thanks to @caibirdme for their contributions to this release.
[#771]: https://github.com/uber-go/zap/pull/771
[#773]: https://github.com/uber-go/zap/pull/773
[#775]: https://github.com/uber-go/zap/pull/775
[#786]: https://github.com/uber-go/zap/pull/786
## 1.13.0 (13 Nov 2019)
Enhancements:
@ -190,11 +309,15 @@ Enhancements:
Thanks to @jbizzle for their contributions to this release.
[#758]: https://github.com/uber-go/zap/pull/758
## 1.12.0 (29 Oct 2019)
Enhancements:
* [#751][]: Migrate to Go modules.
[#751]: https://github.com/uber-go/zap/pull/751
## 1.11.0 (21 Oct 2019)
Enhancements:
@ -203,6 +326,9 @@ Enhancements:
Thanks to @juicemia, @uhthomas for their contributions to this release.
[#725]: https://github.com/uber-go/zap/pull/725
[#736]: https://github.com/uber-go/zap/pull/736
## 1.10.0 (29 Apr 2019)
Bugfixes:
@ -220,12 +346,20 @@ Enhancements:
Thanks to @iaroslav-ciupin, @lelenanam, @joa, @NWilson for their contributions
to this release.
[#657]: https://github.com/uber-go/zap/pull/657
[#706]: https://github.com/uber-go/zap/pull/706
[#610]: https://github.com/uber-go/zap/pull/610
[#675]: https://github.com/uber-go/zap/pull/675
[#704]: https://github.com/uber-go/zap/pull/704
## v1.9.1 (06 Aug 2018)
Bugfixes:
* [#614][]: MapObjectEncoder should not ignore empty slices.
[#614]: https://github.com/uber-go/zap/pull/614
## v1.9.0 (19 Jul 2018)
Enhancements:
@ -235,6 +369,10 @@ Enhancements:
Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and
@dimroc for their contributions to this release.
[#602]: https://github.com/uber-go/zap/pull/602
[#572]: https://github.com/uber-go/zap/pull/572
[#606]: https://github.com/uber-go/zap/pull/606
## v1.8.0 (13 Apr 2018)
Enhancements:
@ -248,11 +386,18 @@ Bugfixes:
Thanks to @DiSiqueira and @djui for their contributions to this release.
[#508]: https://github.com/uber-go/zap/pull/508
[#518]: https://github.com/uber-go/zap/pull/518
[#577]: https://github.com/uber-go/zap/pull/577
[#574]: https://github.com/uber-go/zap/pull/574
## v1.7.1 (25 Sep 2017)
Bugfixes:
* [#504][]: Store strings when using AddByteString with the map encoder.
[#504]: https://github.com/uber-go/zap/pull/504
## v1.7.0 (21 Sep 2017)
Enhancements:
@ -260,6 +405,8 @@ Enhancements:
* [#487][]: Add `NewStdLogAt`, which extends `NewStdLog` by allowing the user
to specify the level of the logged messages.
[#487]: https://github.com/uber-go/zap/pull/487
## v1.6.0 (30 Aug 2017)
Enhancements:
@ -268,6 +415,9 @@ Enhancements:
* [#490][]: Add a `ContextMap` method to observer logs for simpler
field validation in tests.
[#490]: https://github.com/uber-go/zap/pull/490
[#491]: https://github.com/uber-go/zap/pull/491
## v1.5.0 (22 Jul 2017)
Enhancements:
@ -281,6 +431,11 @@ Bugfixes:
Thanks to @richard-tunein and @pavius for their contributions to this release.
[#477]: https://github.com/uber-go/zap/pull/477
[#465]: https://github.com/uber-go/zap/pull/465
[#460]: https://github.com/uber-go/zap/pull/460
[#470]: https://github.com/uber-go/zap/pull/470
## v1.4.1 (08 Jun 2017)
This release fixes two bugs.
@ -290,6 +445,9 @@ Bugfixes:
* [#435][]: Support a variety of case conventions when unmarshaling levels.
* [#444][]: Fix a panic in the observer.
[#435]: https://github.com/uber-go/zap/pull/435
[#444]: https://github.com/uber-go/zap/pull/444
## v1.4.0 (12 May 2017)
This release adds a few small features and is fully backward-compatible.
@ -302,6 +460,10 @@ Enhancements:
* [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a
variety of operations a bit simpler.
[#424]: https://github.com/uber-go/zap/pull/424
[#425]: https://github.com/uber-go/zap/pull/425
[#431]: https://github.com/uber-go/zap/pull/431
## v1.3.0 (25 Apr 2017)
This release adds an enhancement to zap's testing helpers as well as the
@ -313,6 +475,9 @@ Enhancements:
particularly useful when testing the `SugaredLogger`.
* [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`.
[#415]: https://github.com/uber-go/zap/pull/415
[#416]: https://github.com/uber-go/zap/pull/416
## v1.2.0 (13 Apr 2017)
This release adds a gRPC compatibility wrapper. It is fully backward-compatible.
@ -322,6 +487,8 @@ Enhancements:
* [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements
`grpclog.Logger`.
[#402]: https://github.com/uber-go/zap/pull/402
## v1.1.0 (31 Mar 2017)
This release fixes two bugs and adds some enhancements to zap's testing helpers.
@ -339,6 +506,10 @@ Enhancements:
Thanks to @moitias for contributing to this release.
[#385]: https://github.com/uber-go/zap/pull/385
[#396]: https://github.com/uber-go/zap/pull/396
[#386]: https://github.com/uber-go/zap/pull/386
## v1.0.0 (14 Mar 2017)
This is zap's first stable release. All exported APIs are now final, and no
@ -384,6 +555,20 @@ Enhancements:
Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their
contributions to this release.
[#366]: https://github.com/uber-go/zap/pull/366
[#364]: https://github.com/uber-go/zap/pull/364
[#371]: https://github.com/uber-go/zap/pull/371
[#362]: https://github.com/uber-go/zap/pull/362
[#369]: https://github.com/uber-go/zap/pull/369
[#347]: https://github.com/uber-go/zap/pull/347
[#373]: https://github.com/uber-go/zap/pull/373
[#348]: https://github.com/uber-go/zap/pull/348
[#327]: https://github.com/uber-go/zap/pull/327
[#376]: https://github.com/uber-go/zap/pull/376
[#346]: https://github.com/uber-go/zap/pull/346
[#365]: https://github.com/uber-go/zap/pull/365
[#372]: https://github.com/uber-go/zap/pull/372
## v1.0.0-rc.3 (7 Mar 2017)
This is the third release candidate for zap's stable release. There are no
@ -405,6 +590,11 @@ Enhancements:
Thanks to @ansel1 and @suyash for their contributions to this release.
[#339]: https://github.com/uber-go/zap/pull/339
[#307]: https://github.com/uber-go/zap/pull/307
[#353]: https://github.com/uber-go/zap/pull/353
[#311]: https://github.com/uber-go/zap/pull/311
## v1.0.0-rc.2 (21 Feb 2017)
This is the second release candidate for zap's stable release. It includes two
@ -442,6 +632,15 @@ Enhancements:
Thanks to @skipor and @chapsuk for their contributions to this release.
[#316]: https://github.com/uber-go/zap/pull/316
[#309]: https://github.com/uber-go/zap/pull/309
[#317]: https://github.com/uber-go/zap/pull/317
[#321]: https://github.com/uber-go/zap/pull/321
[#325]: https://github.com/uber-go/zap/pull/325
[#333]: https://github.com/uber-go/zap/pull/333
[#326]: https://github.com/uber-go/zap/pull/326
[#300]: https://github.com/uber-go/zap/pull/300
## v1.0.0-rc.1 (14 Feb 2017)
This is the first release candidate for zap's stable release. There are multiple
@ -470,95 +669,3 @@ backward compatibility concerns and all functionality is new.
Early zap adopters should pin to the 0.1.x minor version until they're ready to
upgrade to the upcoming stable release.
[#316]: https://github.com/uber-go/zap/pull/316
[#309]: https://github.com/uber-go/zap/pull/309
[#317]: https://github.com/uber-go/zap/pull/317
[#321]: https://github.com/uber-go/zap/pull/321
[#325]: https://github.com/uber-go/zap/pull/325
[#333]: https://github.com/uber-go/zap/pull/333
[#326]: https://github.com/uber-go/zap/pull/326
[#300]: https://github.com/uber-go/zap/pull/300
[#339]: https://github.com/uber-go/zap/pull/339
[#307]: https://github.com/uber-go/zap/pull/307
[#353]: https://github.com/uber-go/zap/pull/353
[#311]: https://github.com/uber-go/zap/pull/311
[#366]: https://github.com/uber-go/zap/pull/366
[#364]: https://github.com/uber-go/zap/pull/364
[#371]: https://github.com/uber-go/zap/pull/371
[#362]: https://github.com/uber-go/zap/pull/362
[#369]: https://github.com/uber-go/zap/pull/369
[#347]: https://github.com/uber-go/zap/pull/347
[#373]: https://github.com/uber-go/zap/pull/373
[#348]: https://github.com/uber-go/zap/pull/348
[#327]: https://github.com/uber-go/zap/pull/327
[#376]: https://github.com/uber-go/zap/pull/376
[#346]: https://github.com/uber-go/zap/pull/346
[#365]: https://github.com/uber-go/zap/pull/365
[#372]: https://github.com/uber-go/zap/pull/372
[#385]: https://github.com/uber-go/zap/pull/385
[#396]: https://github.com/uber-go/zap/pull/396
[#386]: https://github.com/uber-go/zap/pull/386
[#402]: https://github.com/uber-go/zap/pull/402
[#415]: https://github.com/uber-go/zap/pull/415
[#416]: https://github.com/uber-go/zap/pull/416
[#424]: https://github.com/uber-go/zap/pull/424
[#425]: https://github.com/uber-go/zap/pull/425
[#431]: https://github.com/uber-go/zap/pull/431
[#435]: https://github.com/uber-go/zap/pull/435
[#444]: https://github.com/uber-go/zap/pull/444
[#477]: https://github.com/uber-go/zap/pull/477
[#465]: https://github.com/uber-go/zap/pull/465
[#460]: https://github.com/uber-go/zap/pull/460
[#470]: https://github.com/uber-go/zap/pull/470
[#487]: https://github.com/uber-go/zap/pull/487
[#490]: https://github.com/uber-go/zap/pull/490
[#491]: https://github.com/uber-go/zap/pull/491
[#504]: https://github.com/uber-go/zap/pull/504
[#508]: https://github.com/uber-go/zap/pull/508
[#518]: https://github.com/uber-go/zap/pull/518
[#577]: https://github.com/uber-go/zap/pull/577
[#574]: https://github.com/uber-go/zap/pull/574
[#602]: https://github.com/uber-go/zap/pull/602
[#572]: https://github.com/uber-go/zap/pull/572
[#606]: https://github.com/uber-go/zap/pull/606
[#614]: https://github.com/uber-go/zap/pull/614
[#657]: https://github.com/uber-go/zap/pull/657
[#706]: https://github.com/uber-go/zap/pull/706
[#610]: https://github.com/uber-go/zap/pull/610
[#675]: https://github.com/uber-go/zap/pull/675
[#704]: https://github.com/uber-go/zap/pull/704
[#725]: https://github.com/uber-go/zap/pull/725
[#736]: https://github.com/uber-go/zap/pull/736
[#751]: https://github.com/uber-go/zap/pull/751
[#758]: https://github.com/uber-go/zap/pull/758
[#771]: https://github.com/uber-go/zap/pull/771
[#773]: https://github.com/uber-go/zap/pull/773
[#775]: https://github.com/uber-go/zap/pull/775
[#786]: https://github.com/uber-go/zap/pull/786
[#791]: https://github.com/uber-go/zap/pull/791
[#795]: https://github.com/uber-go/zap/pull/795
[#799]: https://github.com/uber-go/zap/pull/799
[#804]: https://github.com/uber-go/zap/pull/804
[#812]: https://github.com/uber-go/zap/pull/812
[#806]: https://github.com/uber-go/zap/pull/806
[#813]: https://github.com/uber-go/zap/pull/813
[#629]: https://github.com/uber-go/zap/pull/629
[#697]: https://github.com/uber-go/zap/pull/697
[#828]: https://github.com/uber-go/zap/pull/828
[#835]: https://github.com/uber-go/zap/pull/835
[#843]: https://github.com/uber-go/zap/pull/843
[#844]: https://github.com/uber-go/zap/pull/844
[#852]: https://github.com/uber-go/zap/pull/852
[#854]: https://github.com/uber-go/zap/pull/854
[#861]: https://github.com/uber-go/zap/pull/861
[#862]: https://github.com/uber-go/zap/pull/862
[#865]: https://github.com/uber-go/zap/pull/865
[#867]: https://github.com/uber-go/zap/pull/867
[#881]: https://github.com/uber-go/zap/pull/881
[#903]: https://github.com/uber-go/zap/pull/903
[#912]: https://github.com/uber-go/zap/pull/912
[#913]: https://github.com/uber-go/zap/pull/913
[#928]: https://github.com/uber-go/zap/pull/928
[#931]: https://github.com/uber-go/zap/pull/931
[#936]: https://github.com/uber-go/zap/pull/936

View File

@ -16,7 +16,7 @@ you to accept the CLA when you open your pull request.
[Fork][fork], then clone the repository:
```
```bash
mkdir -p $GOPATH/src/go.uber.org
cd $GOPATH/src/go.uber.org
git clone git@github.com:your_github_username/zap.git
@ -27,21 +27,16 @@ git fetch upstream
Make sure that the tests and the linters pass:
```
```bash
make test
make lint
```
If you're not using the minor version of Go specified in the Makefile's
`LINTABLE_MINOR_VERSIONS` variable, `make lint` doesn't do anything. This is
fine, but it means that you'll only discover lint failures after you open your
pull request.
## Making Changes
Start by creating a new branch for your changes:
```
```bash
cd $GOPATH/src/go.uber.org/zap
git checkout master
git fetch upstream
@ -52,22 +47,22 @@ git checkout -b cool_new_feature
Make your changes, then ensure that `make lint` and `make test` still pass. If
you're satisfied with your changes, push them to your fork.
```
```bash
git push origin cool_new_feature
```
Then use the GitHub UI to open a pull request.
At this point, you're waiting on us to review your changes. We *try* to respond
At this point, you're waiting on us to review your changes. We _try_ to respond
to issues and pull requests within a few business days, and we may suggest some
improvements or alternatives. Once your changes are approved, one of the
project maintainers will merge them.
We're much more likely to approve your changes if you:
* Add tests for new functionality.
* Write a [good commit message][commit-message].
* Maintain backward compatibility.
- Add tests for new functionality.
- Write a [good commit message][commit-message].
- Maintain backward compatibility.
[fork]: https://github.com/uber-go/zap/fork
[open-issue]: https://github.com/uber-go/zap/issues/new

83
vendor/go.uber.org/zap/Makefile generated vendored
View File

@ -1,50 +1,51 @@
export GOBIN ?= $(shell pwd)/bin
# Directory containing the Makefile.
PROJECT_ROOT = $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
GOLINT = $(GOBIN)/golint
STATICCHECK = $(GOBIN)/staticcheck
export GOBIN ?= $(PROJECT_ROOT)/bin
export PATH := $(GOBIN):$(PATH)
GOVULNCHECK = $(GOBIN)/govulncheck
BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem
# Directories containing independent Go modules.
#
# We track coverage only for the main module.
MODULE_DIRS = . ./benchmarks ./zapgrpc/internal/test
MODULE_DIRS = . ./exp ./benchmarks ./zapgrpc/internal/test
# Many Go tools take file globs or directories as arguments instead of packages.
GO_FILES := $(shell \
find . '(' -path '*/.*' -o -path './vendor' ')' -prune \
-o -name '*.go' -print | cut -b3-)
# Directories that we want to track coverage for.
COVER_DIRS = . ./exp
.PHONY: all
all: lint test
.PHONY: lint
lint: $(GOLINT) $(STATICCHECK)
@rm -rf lint.log
@echo "Checking formatting..."
@gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log
@echo "Checking vet..."
@$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go vet ./... 2>&1) &&) true | tee -a lint.log
@echo "Checking lint..."
@$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(GOLINT) ./... 2>&1) &&) true | tee -a lint.log
@echo "Checking staticcheck..."
@$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(STATICCHECK) ./... 2>&1) &&) true | tee -a lint.log
@echo "Checking for unresolved FIXMEs..."
@git grep -i fixme | grep -v -e Makefile | tee -a lint.log
@echo "Checking for license headers..."
@./checklicense.sh | tee -a lint.log
@[ ! -s lint.log ]
@echo "Checking 'go mod tidy'..."
@make tidy
@if ! git diff --quiet; then \
echo "'go mod tidy' resulted in changes or working tree is dirty:"; \
git --no-pager diff; \
fi
lint: golangci-lint tidy-lint license-lint
$(GOLINT):
cd tools && go install golang.org/x/lint/golint
.PHONY: golangci-lint
golangci-lint:
@$(foreach mod,$(MODULE_DIRS), \
(cd $(mod) && \
echo "[lint] golangci-lint: $(mod)" && \
golangci-lint run --path-prefix $(mod)) &&) true
$(STATICCHECK):
cd tools && go install honnef.co/go/tools/cmd/staticcheck
.PHONY: tidy
tidy:
@$(foreach dir,$(MODULE_DIRS), \
(cd $(dir) && go mod tidy) &&) true
.PHONY: tidy-lint
tidy-lint:
@$(foreach mod,$(MODULE_DIRS), \
(cd $(mod) && \
echo "[lint] tidy: $(mod)" && \
go mod tidy && \
git diff --exit-code -- go.mod go.sum) &&) true
.PHONY: license-lint
license-lint:
./checklicense.sh
$(GOVULNCHECK):
cd tools && go install golang.org/x/vuln/cmd/govulncheck
.PHONY: test
test:
@ -52,8 +53,10 @@ test:
.PHONY: cover
cover:
go test -race -coverprofile=cover.out -coverpkg=./... ./...
go tool cover -html=cover.out -o cover.html
@$(foreach dir,$(COVER_DIRS), ( \
cd $(dir) && \
go test -race -coverprofile=cover.out -coverpkg=./... ./... \
&& go tool cover -html=cover.out -o cover.html) &&) true
.PHONY: bench
BENCH ?= .
@ -68,6 +71,6 @@ updatereadme:
rm -f README.md
cat .readme.tmpl | go run internal/readme/readme.go > README.md
.PHONY: tidy
tidy:
@$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go mod tidy) &&) true
.PHONY: vulncheck
vulncheck: $(GOVULNCHECK)
$(GOVULNCHECK) ./...

47
vendor/go.uber.org/zap/README.md generated vendored
View File

@ -66,38 +66,41 @@ Log a message and 10 fields:
| Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: |
| :zap: zap | 2900 ns/op | +0% | 5 allocs/op
| :zap: zap (sugared) | 3475 ns/op | +20% | 10 allocs/op
| zerolog | 10639 ns/op | +267% | 32 allocs/op
| go-kit | 14434 ns/op | +398% | 59 allocs/op
| logrus | 17104 ns/op | +490% | 81 allocs/op
| apex/log | 32424 ns/op | +1018% | 66 allocs/op
| log15 | 33579 ns/op | +1058% | 76 allocs/op
| :zap: zap | 1744 ns/op | +0% | 5 allocs/op
| :zap: zap (sugared) | 2483 ns/op | +42% | 10 allocs/op
| zerolog | 918 ns/op | -47% | 1 allocs/op
| go-kit | 5590 ns/op | +221% | 57 allocs/op
| slog | 5640 ns/op | +223% | 40 allocs/op
| apex/log | 21184 ns/op | +1115% | 63 allocs/op
| logrus | 24338 ns/op | +1296% | 79 allocs/op
| log15 | 26054 ns/op | +1394% | 74 allocs/op
Log a message with a logger that already has 10 fields of context:
| Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: |
| :zap: zap | 373 ns/op | +0% | 0 allocs/op
| :zap: zap (sugared) | 452 ns/op | +21% | 1 allocs/op
| zerolog | 288 ns/op | -23% | 0 allocs/op
| go-kit | 11785 ns/op | +3060% | 58 allocs/op
| logrus | 19629 ns/op | +5162% | 70 allocs/op
| log15 | 21866 ns/op | +5762% | 72 allocs/op
| apex/log | 30890 ns/op | +8182% | 55 allocs/op
| :zap: zap | 193 ns/op | +0% | 0 allocs/op
| :zap: zap (sugared) | 227 ns/op | +18% | 1 allocs/op
| zerolog | 81 ns/op | -58% | 0 allocs/op
| slog | 322 ns/op | +67% | 0 allocs/op
| go-kit | 5377 ns/op | +2686% | 56 allocs/op
| apex/log | 19518 ns/op | +10013% | 53 allocs/op
| log15 | 19812 ns/op | +10165% | 70 allocs/op
| logrus | 21997 ns/op | +11297% | 68 allocs/op
Log a static string, without any context or `printf`-style templating:
| Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: |
| :zap: zap | 381 ns/op | +0% | 0 allocs/op
| :zap: zap (sugared) | 410 ns/op | +8% | 1 allocs/op
| zerolog | 369 ns/op | -3% | 0 allocs/op
| standard library | 385 ns/op | +1% | 2 allocs/op
| go-kit | 606 ns/op | +59% | 11 allocs/op
| logrus | 1730 ns/op | +354% | 25 allocs/op
| apex/log | 1998 ns/op | +424% | 7 allocs/op
| log15 | 4546 ns/op | +1093% | 22 allocs/op
| :zap: zap | 165 ns/op | +0% | 0 allocs/op
| :zap: zap (sugared) | 212 ns/op | +28% | 1 allocs/op
| zerolog | 95 ns/op | -42% | 0 allocs/op
| slog | 296 ns/op | +79% | 0 allocs/op
| go-kit | 415 ns/op | +152% | 9 allocs/op
| standard library | 422 ns/op | +156% | 2 allocs/op
| apex/log | 1601 ns/op | +870% | 5 allocs/op
| logrus | 3017 ns/op | +1728% | 23 allocs/op
| log15 | 3469 ns/op | +2002% | 20 allocs/op
## Development Status: Stable

127
vendor/go.uber.org/zap/array.go generated vendored
View File

@ -21,6 +21,7 @@
package zap
import (
"fmt"
"time"
"go.uber.org/zap/zapcore"
@ -94,11 +95,137 @@ func Int8s(key string, nums []int8) Field {
return Array(key, int8s(nums))
}
// Objects constructs a field with the given key, holding a list of the
// provided objects that can be marshaled by Zap.
//
// Note that these objects must implement zapcore.ObjectMarshaler directly.
// That is, if you're trying to marshal a []Request, the MarshalLogObject
// method must be declared on the Request type, not its pointer (*Request).
// If it's on the pointer, use ObjectValues.
//
// Given an object that implements MarshalLogObject on the value receiver, you
// can log a slice of those objects with Objects like so:
//
// type Author struct{ ... }
// func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error
//
// var authors []Author = ...
// logger.Info("loading article", zap.Objects("authors", authors))
//
// Similarly, given a type that implements MarshalLogObject on its pointer
// receiver, you can log a slice of pointers to that object with Objects like
// so:
//
// type Request struct{ ... }
// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
//
// var requests []*Request = ...
// logger.Info("sending requests", zap.Objects("requests", requests))
//
// If instead, you have a slice of values of such an object, use the
// ObjectValues constructor.
//
// var requests []Request = ...
// logger.Info("sending requests", zap.ObjectValues("requests", requests))
func Objects[T zapcore.ObjectMarshaler](key string, values []T) Field {
return Array(key, objects[T](values))
}
type objects[T zapcore.ObjectMarshaler] []T
func (os objects[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
for _, o := range os {
if err := arr.AppendObject(o); err != nil {
return err
}
}
return nil
}
// ObjectMarshalerPtr is a constraint that specifies that the given type
// implements zapcore.ObjectMarshaler on a pointer receiver.
type ObjectMarshalerPtr[T any] interface {
*T
zapcore.ObjectMarshaler
}
// ObjectValues constructs a field with the given key, holding a list of the
// provided objects, where pointers to these objects can be marshaled by Zap.
//
// Note that pointers to these objects must implement zapcore.ObjectMarshaler.
// That is, if you're trying to marshal a []Request, the MarshalLogObject
// method must be declared on the *Request type, not the value (Request).
// If it's on the value, use Objects.
//
// Given an object that implements MarshalLogObject on the pointer receiver,
// you can log a slice of those objects with ObjectValues like so:
//
// type Request struct{ ... }
// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
//
// var requests []Request = ...
// logger.Info("sending requests", zap.ObjectValues("requests", requests))
//
// If instead, you have a slice of pointers of such an object, use the Objects
// field constructor.
//
// var requests []*Request = ...
// logger.Info("sending requests", zap.Objects("requests", requests))
func ObjectValues[T any, P ObjectMarshalerPtr[T]](key string, values []T) Field {
return Array(key, objectValues[T, P](values))
}
type objectValues[T any, P ObjectMarshalerPtr[T]] []T
func (os objectValues[T, P]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
for i := range os {
// It is necessary for us to explicitly reference the "P" type.
// We cannot simply pass "&os[i]" to AppendObject because its type
// is "*T", which the type system does not consider as
// implementing ObjectMarshaler.
// Only the type "P" satisfies ObjectMarshaler, which we have
// to convert "*T" to explicitly.
var p P = &os[i]
if err := arr.AppendObject(p); err != nil {
return err
}
}
return nil
}
// Strings constructs a field that carries a slice of strings.
func Strings(key string, ss []string) Field {
return Array(key, stringArray(ss))
}
// Stringers constructs a field with the given key, holding a list of the
// output provided by the value's String method.
//
// Given an object that implements String on the value receiver, you
// can log a slice of those objects with Stringers like so:
//
// type Request struct{ ... }
// func (a Request) String() string
//
// var requests []Request = ...
// logger.Info("sending requests", zap.Stringers("requests", requests))
//
// Note that these objects must implement fmt.Stringer directly.
// That is, if you're trying to marshal a []Request, the String method
// must be declared on the Request type, not its pointer (*Request).
func Stringers[T fmt.Stringer](key string, values []T) Field {
return Array(key, stringers[T](values))
}
type stringers[T fmt.Stringer] []T
func (os stringers[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
for _, o := range os {
arr.AppendString(o.String())
}
return nil
}
// Times constructs a field that carries a slice of time.Times.
func Times(key string, ts []time.Time) Field {
return Array(key, times(ts))

View File

@ -42,6 +42,11 @@ func (b *Buffer) AppendByte(v byte) {
b.bs = append(b.bs, v)
}
// AppendBytes writes the given slice of bytes to the Buffer.
func (b *Buffer) AppendBytes(v []byte) {
b.bs = append(b.bs, v...)
}
// AppendString writes a string to the Buffer.
func (b *Buffer) AppendString(s string) {
b.bs = append(b.bs, s...)

View File

@ -20,25 +20,29 @@
package buffer
import "sync"
import (
"go.uber.org/zap/internal/pool"
)
// A Pool is a type-safe wrapper around a sync.Pool.
type Pool struct {
p *sync.Pool
p *pool.Pool[*Buffer]
}
// NewPool constructs a new Pool.
func NewPool() Pool {
return Pool{p: &sync.Pool{
New: func() interface{} {
return &Buffer{bs: make([]byte, 0, _size)}
},
}}
return Pool{
p: pool.New(func() *Buffer {
return &Buffer{
bs: make([]byte, 0, _size),
}
}),
}
}
// Get retrieves a Buffer from the pool, creating one if necessary.
func (p Pool) Get() *Buffer {
buf := p.p.Get().(*Buffer)
buf := p.p.Get()
buf.Reset()
buf.pool = p
return buf
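A minimal usage sketch of the public `go.uber.org/zap/buffer` pool, including the new `AppendBytes` helper added above; the string contents are illustrative:

```go
package main

import (
	"fmt"

	"go.uber.org/zap/buffer"
)

func main() {
	p := buffer.NewPool()

	buf := p.Get()
	buf.AppendString("status=")
	buf.AppendBytes([]byte("ok")) // the slice-append helper added above
	fmt.Println(buf.String())     // status=ok

	// Free returns the Buffer to the pool for reuse.
	buf.Free()
}
```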

88
vendor/go.uber.org/zap/config.go generated vendored
View File

@ -21,7 +21,7 @@
package zap
import (
"fmt"
"errors"
"sort"
"time"
@ -95,6 +95,32 @@ type Config struct {
// NewProductionEncoderConfig returns an opinionated EncoderConfig for
// production environments.
//
// Messages encoded with this configuration will be JSON-formatted
// and will have the following keys by default:
//
// - "level": The logging level (e.g. "info", "error").
// - "ts": The current time in number of seconds since the Unix epoch.
// - "msg": The message passed to the log statement.
// - "caller": If available, a short path to the file and line number
// where the log statement was issued.
// The logger configuration determines whether this field is captured.
// - "stacktrace": If available, a stack trace from the line
// where the log statement was issued.
// The logger configuration determines whether this field is captured.
//
// By default, the following formats are used for different types:
//
// - Time is formatted as a floating-point number of seconds since the Unix
// epoch.
// - Duration is formatted as a floating-point number of seconds.
//
// You may change these by setting the appropriate fields in the returned
// object.
// For example, use the following to change the time encoding format:
//
// cfg := zap.NewProductionEncoderConfig()
// cfg.EncodeTime = zapcore.ISO8601TimeEncoder
func NewProductionEncoderConfig() zapcore.EncoderConfig {
return zapcore.EncoderConfig{
TimeKey: "ts",
@ -112,11 +138,22 @@ func NewProductionEncoderConfig() zapcore.EncoderConfig {
}
}
// NewProductionConfig is a reasonable production logging configuration.
// Logging is enabled at InfoLevel and above.
// NewProductionConfig builds a reasonable default production logging
// configuration.
// Logging is enabled at InfoLevel and above, and uses a JSON encoder.
// Logs are written to standard error.
// Stacktraces are included on logs of ErrorLevel and above.
// DPanicLevel logs will not panic, but will write a stacktrace.
//
// It uses a JSON encoder, writes to standard error, and enables sampling.
// Stacktraces are automatically included on logs of ErrorLevel and above.
// Sampling is enabled at 100:100 by default: after the first 100 log
// entries with the same level and message in a given second, only every
// 100th such entry is logged for the rest of that second.
// You may disable this behavior by setting Sampling to nil.
//
// See [NewProductionEncoderConfig] for information
// on the default encoder configuration.
func NewProductionConfig() Config {
return Config{
Level: NewAtomicLevelAt(InfoLevel),
@ -134,6 +171,32 @@ func NewProductionConfig() Config {
// NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for
// development environments.
//
// Messages encoded with this configuration will use Zap's console encoder,
// which is intended to print human-readable output.
// It will print log messages with the following information:
//
// - The log level (e.g. "INFO", "ERROR").
// - The time in ISO8601 format (e.g. "2017-01-01T12:00:00Z").
// - The message passed to the log statement.
// - If available, a short path to the file and line number
// where the log statement was issued.
// The logger configuration determines whether this field is captured.
// - If available, a stacktrace from the line
// where the log statement was issued.
// The logger configuration determines whether this field is captured.
//
// By default, the following formats are used for different types:
//
// - Time is formatted in ISO8601 format (e.g. "2017-01-01T12:00:00Z").
// - Duration is formatted as a string (e.g. "1.234s").
//
// You may change these by setting the appropriate fields in the returned
// object.
// For example, use the following to change the time encoding format:
//
// cfg := zap.NewDevelopmentEncoderConfig()
// cfg.EncodeTime = zapcore.ISO8601TimeEncoder
func NewDevelopmentEncoderConfig() zapcore.EncoderConfig {
return zapcore.EncoderConfig{
// Keys can be anything except the empty string.
@ -152,12 +215,15 @@ func NewDevelopmentEncoderConfig() zapcore.EncoderConfig {
}
}
// NewDevelopmentConfig is a reasonable development logging configuration.
// Logging is enabled at DebugLevel and above.
// NewDevelopmentConfig builds a reasonable default development logging
// configuration.
// Logging is enabled at DebugLevel and above, and uses a console encoder.
// Logs are written to standard error.
// Stacktraces are included on logs of WarnLevel and above.
// DPanicLevel logs will panic.
//
// It enables development mode (which makes DPanicLevel logs panic), uses a
// console encoder, writes to standard error, and disables sampling.
// Stacktraces are automatically included on logs of WarnLevel and above.
// See [NewDevelopmentEncoderConfig] for information
// on the default encoder configuration.
func NewDevelopmentConfig() Config {
return Config{
Level: NewAtomicLevelAt(DebugLevel),
@ -182,7 +248,7 @@ func (cfg Config) Build(opts ...Option) (*Logger, error) {
}
if cfg.Level == (AtomicLevel{}) {
return nil, fmt.Errorf("missing Level")
return nil, errors.New("missing Level")
}
log := New(
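A minimal sketch tying together the defaults documented above: disabling sampling and overriding the time encoder on the production config. The log message is illustrative:

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	cfg := zap.NewProductionConfig()

	// Disable sampling and switch to ISO8601 timestamps, overriding the
	// defaults documented above.
	cfg.Sampling = nil
	cfg.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder

	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	logger.Info("service started")
}
```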

60
vendor/go.uber.org/zap/doc.go generated vendored
View File

@ -32,7 +32,7 @@
// they need to count every allocation and when they'd prefer a more familiar,
// loosely typed API.
//
// Choosing a Logger
// # Choosing a Logger
//
// In contexts where performance is nice, but not critical, use the
// SugaredLogger. It's 4-10x faster than other structured logging packages and
@ -41,14 +41,15 @@
// variadic number of key-value pairs. (For more advanced use cases, they also
// accept strongly typed fields - see the SugaredLogger.With documentation for
// details.)
// sugar := zap.NewExample().Sugar()
// defer sugar.Sync()
// sugar.Infow("failed to fetch URL",
// "url", "http://example.com",
// "attempt", 3,
// "backoff", time.Second,
// )
// sugar.Infof("failed to fetch URL: %s", "http://example.com")
//
// sugar := zap.NewExample().Sugar()
// defer sugar.Sync()
// sugar.Infow("failed to fetch URL",
// "url", "http://example.com",
// "attempt", 3,
// "backoff", time.Second,
// )
// sugar.Infof("failed to fetch URL: %s", "http://example.com")
//
// By default, loggers are unbuffered. However, since zap's low-level APIs
// allow buffering, calling Sync before letting your process exit is a good
@ -57,32 +58,35 @@
// In the rare contexts where every microsecond and every allocation matter,
// use the Logger. It's even faster than the SugaredLogger and allocates far
// less, but it only supports strongly-typed, structured logging.
// logger := zap.NewExample()
// defer logger.Sync()
// logger.Info("failed to fetch URL",
// zap.String("url", "http://example.com"),
// zap.Int("attempt", 3),
// zap.Duration("backoff", time.Second),
// )
//
// logger := zap.NewExample()
// defer logger.Sync()
// logger.Info("failed to fetch URL",
// zap.String("url", "http://example.com"),
// zap.Int("attempt", 3),
// zap.Duration("backoff", time.Second),
// )
//
// Choosing between the Logger and SugaredLogger doesn't need to be an
// application-wide decision: converting between the two is simple and
// inexpensive.
// logger := zap.NewExample()
// defer logger.Sync()
// sugar := logger.Sugar()
// plain := sugar.Desugar()
//
// Configuring Zap
// logger := zap.NewExample()
// defer logger.Sync()
// sugar := logger.Sugar()
// plain := sugar.Desugar()
//
// # Configuring Zap
//
// The simplest way to build a Logger is to use zap's opinionated presets:
// NewExample, NewProduction, and NewDevelopment. These presets build a logger
// with a single function call:
// logger, err := zap.NewProduction()
// if err != nil {
// log.Fatalf("can't initialize zap logger: %v", err)
// }
// defer logger.Sync()
//
// logger, err := zap.NewProduction()
// if err != nil {
// log.Fatalf("can't initialize zap logger: %v", err)
// }
// defer logger.Sync()
//
// Presets are fine for small projects, but larger projects and organizations
// naturally require a bit more customization. For most users, zap's Config
@ -94,7 +98,7 @@
// go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration
// example for sample code.
//
// Extending Zap
// # Extending Zap
//
// The zap package itself is a relatively thin wrapper around the interfaces
// in go.uber.org/zap/zapcore. Extending zap to support a new encoding (e.g.,
@ -106,7 +110,7 @@
// Similarly, package authors can use the high-performance Encoder and Core
// implementations in the zapcore package to build their own loggers.
//
// Frequently Asked Questions
// # Frequently Asked Questions
//
// An FAQ covering everything from installation errors to design decisions is
// available at https://github.com/uber-go/zap/blob/master/FAQ.md.

2
vendor/go.uber.org/zap/encoder.go generated vendored
View File

@ -63,7 +63,7 @@ func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapco
func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
if encoderConfig.TimeKey != "" && encoderConfig.EncodeTime == nil {
return nil, fmt.Errorf("missing EncodeTime in EncoderConfig")
return nil, errors.New("missing EncodeTime in EncoderConfig")
}
_encoderMutex.RLock()

14
vendor/go.uber.org/zap/error.go generated vendored
View File

@ -21,14 +21,13 @@
package zap
import (
"sync"
"go.uber.org/zap/internal/pool"
"go.uber.org/zap/zapcore"
)
var _errArrayElemPool = sync.Pool{New: func() interface{} {
var _errArrayElemPool = pool.New(func() *errArrayElem {
return &errArrayElem{}
}}
})
// Error is shorthand for the common idiom NamedError("error", err).
func Error(err error) Field {
@ -60,11 +59,14 @@ func (errs errArray) MarshalLogArray(arr zapcore.ArrayEncoder) error {
// potentially an "errorVerbose" attribute, we need to wrap it in a
// type that implements LogObjectMarshaler. To prevent this from
// allocating, pool the wrapper type.
elem := _errArrayElemPool.Get().(*errArrayElem)
elem := _errArrayElemPool.Get()
elem.error = errs[i]
arr.AppendObject(elem)
err := arr.AppendObject(elem)
elem.error = nil
_errArrayElemPool.Put(elem)
if err != nil {
return err
}
}
return nil
}

194
vendor/go.uber.org/zap/field.go generated vendored
View File

@ -25,6 +25,7 @@ import (
"math"
"time"
"go.uber.org/zap/internal/stacktrace"
"go.uber.org/zap/zapcore"
)
@ -374,7 +375,7 @@ func StackSkip(key string, skip int) Field {
// from expanding the zapcore.Field union struct to include a byte slice. Since
// taking a stacktrace is already so expensive (~10us), the extra allocation
// is okay.
return String(key, takeStacktrace(skip+1)) // skip StackSkip
return String(key, stacktrace.Take(skip+1)) // skip StackSkip
}
// Duration constructs a field with the given key and value. The encoder
@ -410,6 +411,63 @@ func Inline(val zapcore.ObjectMarshaler) Field {
}
}
// Dict constructs a field containing the provided key-value pairs.
// It acts similar to [Object], but with the fields specified as arguments.
func Dict(key string, val ...Field) Field {
return dictField(key, val)
}
// We need a function with the signature (string, T) for zap.Any.
func dictField(key string, val []Field) Field {
return Object(key, dictObject(val))
}
type dictObject []Field
func (d dictObject) MarshalLogObject(enc zapcore.ObjectEncoder) error {
for _, f := range d {
f.AddTo(enc)
}
return nil
}
// We discovered an issue where zap.Any can cause a performance degradation
// when used in new goroutines.
//
// This happens because the compiler assigns 4.8kb (one zap.Field per arm of
// switch statement) of stack space for zap.Any when it takes the form:
//
// switch v := v.(type) {
// case string:
// return String(key, v)
// case int:
// return Int(key, v)
// // ...
// default:
// return Reflect(key, v)
// }
//
// To avoid this, we use the type switch to assign a value to a single local variable
// and then call a function on it.
// The local variable is just a function reference so it doesn't allocate
// when converted to an interface{}.
//
// A fair bit of experimentation went into this.
// See also:
//
// - https://github.com/uber-go/zap/pull/1301
// - https://github.com/uber-go/zap/pull/1303
// - https://github.com/uber-go/zap/pull/1304
// - https://github.com/uber-go/zap/pull/1305
// - https://github.com/uber-go/zap/pull/1308
type anyFieldC[T any] func(string, T) Field
func (f anyFieldC[T]) Any(key string, val any) Field {
v, _ := val.(T)
// val is guaranteed to be a T, except when it's nil.
return f(key, v)
}
// Any takes a key and an arbitrary value and chooses the best way to represent
// them as a field, falling back to a reflection-based approach only if
// necessary.
@ -418,132 +476,138 @@ func Inline(val zapcore.ObjectMarshaler) Field {
// them. To minimize surprises, []byte values are treated as binary blobs, byte
// values are treated as uint8, and runes are always treated as integers.
func Any(key string, value interface{}) Field {
switch val := value.(type) {
var c interface{ Any(string, any) Field }
switch value.(type) {
case zapcore.ObjectMarshaler:
return Object(key, val)
c = anyFieldC[zapcore.ObjectMarshaler](Object)
case zapcore.ArrayMarshaler:
return Array(key, val)
c = anyFieldC[zapcore.ArrayMarshaler](Array)
case []Field:
c = anyFieldC[[]Field](dictField)
case bool:
return Bool(key, val)
c = anyFieldC[bool](Bool)
case *bool:
return Boolp(key, val)
c = anyFieldC[*bool](Boolp)
case []bool:
return Bools(key, val)
c = anyFieldC[[]bool](Bools)
case complex128:
return Complex128(key, val)
c = anyFieldC[complex128](Complex128)
case *complex128:
return Complex128p(key, val)
c = anyFieldC[*complex128](Complex128p)
case []complex128:
return Complex128s(key, val)
c = anyFieldC[[]complex128](Complex128s)
case complex64:
return Complex64(key, val)
c = anyFieldC[complex64](Complex64)
case *complex64:
return Complex64p(key, val)
c = anyFieldC[*complex64](Complex64p)
case []complex64:
return Complex64s(key, val)
c = anyFieldC[[]complex64](Complex64s)
case float64:
return Float64(key, val)
c = anyFieldC[float64](Float64)
case *float64:
return Float64p(key, val)
c = anyFieldC[*float64](Float64p)
case []float64:
return Float64s(key, val)
c = anyFieldC[[]float64](Float64s)
case float32:
return Float32(key, val)
c = anyFieldC[float32](Float32)
case *float32:
return Float32p(key, val)
c = anyFieldC[*float32](Float32p)
case []float32:
return Float32s(key, val)
c = anyFieldC[[]float32](Float32s)
case int:
return Int(key, val)
c = anyFieldC[int](Int)
case *int:
return Intp(key, val)
c = anyFieldC[*int](Intp)
case []int:
return Ints(key, val)
c = anyFieldC[[]int](Ints)
case int64:
return Int64(key, val)
c = anyFieldC[int64](Int64)
case *int64:
return Int64p(key, val)
c = anyFieldC[*int64](Int64p)
case []int64:
return Int64s(key, val)
c = anyFieldC[[]int64](Int64s)
case int32:
return Int32(key, val)
c = anyFieldC[int32](Int32)
case *int32:
return Int32p(key, val)
c = anyFieldC[*int32](Int32p)
case []int32:
return Int32s(key, val)
c = anyFieldC[[]int32](Int32s)
case int16:
return Int16(key, val)
c = anyFieldC[int16](Int16)
case *int16:
return Int16p(key, val)
c = anyFieldC[*int16](Int16p)
case []int16:
return Int16s(key, val)
c = anyFieldC[[]int16](Int16s)
case int8:
return Int8(key, val)
c = anyFieldC[int8](Int8)
case *int8:
return Int8p(key, val)
c = anyFieldC[*int8](Int8p)
case []int8:
return Int8s(key, val)
c = anyFieldC[[]int8](Int8s)
case string:
return String(key, val)
c = anyFieldC[string](String)
case *string:
return Stringp(key, val)
c = anyFieldC[*string](Stringp)
case []string:
return Strings(key, val)
c = anyFieldC[[]string](Strings)
case uint:
return Uint(key, val)
c = anyFieldC[uint](Uint)
case *uint:
return Uintp(key, val)
c = anyFieldC[*uint](Uintp)
case []uint:
return Uints(key, val)
c = anyFieldC[[]uint](Uints)
case uint64:
return Uint64(key, val)
c = anyFieldC[uint64](Uint64)
case *uint64:
return Uint64p(key, val)
c = anyFieldC[*uint64](Uint64p)
case []uint64:
return Uint64s(key, val)
c = anyFieldC[[]uint64](Uint64s)
case uint32:
return Uint32(key, val)
c = anyFieldC[uint32](Uint32)
case *uint32:
return Uint32p(key, val)
c = anyFieldC[*uint32](Uint32p)
case []uint32:
return Uint32s(key, val)
c = anyFieldC[[]uint32](Uint32s)
case uint16:
return Uint16(key, val)
c = anyFieldC[uint16](Uint16)
case *uint16:
return Uint16p(key, val)
c = anyFieldC[*uint16](Uint16p)
case []uint16:
return Uint16s(key, val)
c = anyFieldC[[]uint16](Uint16s)
case uint8:
return Uint8(key, val)
c = anyFieldC[uint8](Uint8)
case *uint8:
return Uint8p(key, val)
c = anyFieldC[*uint8](Uint8p)
case []byte:
return Binary(key, val)
c = anyFieldC[[]byte](Binary)
case uintptr:
return Uintptr(key, val)
c = anyFieldC[uintptr](Uintptr)
case *uintptr:
return Uintptrp(key, val)
c = anyFieldC[*uintptr](Uintptrp)
case []uintptr:
return Uintptrs(key, val)
c = anyFieldC[[]uintptr](Uintptrs)
case time.Time:
return Time(key, val)
c = anyFieldC[time.Time](Time)
case *time.Time:
return Timep(key, val)
c = anyFieldC[*time.Time](Timep)
case []time.Time:
return Times(key, val)
c = anyFieldC[[]time.Time](Times)
case time.Duration:
return Duration(key, val)
c = anyFieldC[time.Duration](Duration)
case *time.Duration:
return Durationp(key, val)
c = anyFieldC[*time.Duration](Durationp)
case []time.Duration:
return Durations(key, val)
c = anyFieldC[[]time.Duration](Durations)
case error:
return NamedError(key, val)
c = anyFieldC[error](NamedError)
case []error:
return Errors(key, val)
c = anyFieldC[[]error](Errors)
case fmt.Stringer:
return Stringer(key, val)
c = anyFieldC[fmt.Stringer](Stringer)
default:
return Reflect(key, val)
c = anyFieldC[any](Reflect)
}
return c.Any(key, value)
}
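For context, a small usage sketch of the `Dict` constructor and the `Any` dispatch shown in this file; the field names and values are illustrative:

```go
package main

import "go.uber.org/zap"

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	logger.Info("user logged in",
		// Dict groups related fields under one key without defining a
		// zapcore.ObjectMarshaler type.
		zap.Dict("user",
			zap.String("name", "jane"),
			zap.Int("id", 42),
		),
		// Any dispatches to the matching typed constructor via the
		// switch shown above, so this becomes an Int field.
		zap.Any("attempt", 3),
	)
}
```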

View File

@ -22,6 +22,7 @@ package zap
import (
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
@ -32,22 +33,23 @@ import (
// ServeHTTP is a simple JSON endpoint that can report on or change the current
// logging level.
//
// GET
// # GET
//
// The GET request returns a JSON description of the current logging level like:
// {"level":"info"}
//
// PUT
// {"level":"info"}
//
// # PUT
//
// The PUT request changes the logging level. It is perfectly safe to change the
// logging level while a program is running. Two content types are supported:
//
// Content-Type: application/x-www-form-urlencoded
// Content-Type: application/x-www-form-urlencoded
//
// With this content type, the level can be provided through the request body or
// a query parameter. The log level is URL encoded like:
//
// level=debug
// level=debug
//
// The request body takes precedence over the query parameter, if both are
// specified.
@ -55,19 +57,25 @@ import (
// This content type is the default for a curl PUT request. Following are two
// example curl requests that both set the logging level to debug.
//
// curl -X PUT localhost:8080/log/level?level=debug
// curl -X PUT localhost:8080/log/level -d level=debug
// curl -X PUT localhost:8080/log/level?level=debug
// curl -X PUT localhost:8080/log/level -d level=debug
//
// For any other content type, the payload is expected to be JSON encoded and
// look like:
//
// {"level":"info"}
// {"level":"info"}
//
// An example curl request could look like this:
//
// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}'
//
// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}'
func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if err := lvl.serveHTTP(w, r); err != nil {
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "internal error: %v", err)
}
}
func (lvl AtomicLevel) serveHTTP(w http.ResponseWriter, r *http.Request) error {
type errorResponse struct {
Error string `json:"error"`
}
@ -79,19 +87,20 @@ func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case http.MethodGet:
enc.Encode(payload{Level: lvl.Level()})
return enc.Encode(payload{Level: lvl.Level()})
case http.MethodPut:
requestedLvl, err := decodePutRequest(r.Header.Get("Content-Type"), r)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
enc.Encode(errorResponse{Error: err.Error()})
return
return enc.Encode(errorResponse{Error: err.Error()})
}
lvl.SetLevel(requestedLvl)
enc.Encode(payload{Level: lvl.Level()})
return enc.Encode(payload{Level: lvl.Level()})
default:
w.WriteHeader(http.StatusMethodNotAllowed)
enc.Encode(errorResponse{
return enc.Encode(errorResponse{
Error: "Only GET and PUT are supported.",
})
}
@ -108,7 +117,7 @@ func decodePutRequest(contentType string, r *http.Request) (zapcore.Level, error
func decodePutURL(r *http.Request) (zapcore.Level, error) {
lvl := r.FormValue("level")
if lvl == "" {
return 0, fmt.Errorf("must specify logging level")
return 0, errors.New("must specify logging level")
}
var l zapcore.Level
if err := l.UnmarshalText([]byte(lvl)); err != nil {
@ -125,8 +134,7 @@ func decodePutJSON(body io.Reader) (zapcore.Level, error) {
return 0, fmt.Errorf("malformed request body: %v", err)
}
if pld.Level == nil {
return 0, fmt.Errorf("must specify logging level")
return 0, errors.New("must specify logging level")
}
return *pld.Level, nil
}
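A minimal sketch of wiring the handler above into an HTTP server; the route and address are arbitrary choices, not part of zap:

```go
package main

import (
	"net/http"

	"go.uber.org/zap"
)

func main() {
	level := zap.NewAtomicLevelAt(zap.InfoLevel)

	// AtomicLevel implements http.Handler, so it can be mounted directly
	// to expose the GET/PUT level endpoint documented above.
	mux := http.NewServeMux()
	mux.Handle("/log/level", level)

	_ = http.ListenAndServe("localhost:8080", mux)
}
```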

View File

@ -24,24 +24,25 @@ package exit
import "os"
var real = func() { os.Exit(1) }
var _exit = os.Exit
// Exit normally terminates the process by calling os.Exit(1). If the package
// is stubbed, it instead records a call in the testing spy.
func Exit() {
real()
// With terminates the process by calling os.Exit(code). If the package is
// stubbed, it instead records a call in the testing spy.
func With(code int) {
_exit(code)
}
// A StubbedExit is a testing fake for os.Exit.
type StubbedExit struct {
Exited bool
prev func()
Code int
prev func(code int)
}
// Stub substitutes a fake for the call to os.Exit(1).
func Stub() *StubbedExit {
s := &StubbedExit{prev: real}
real = s.exit
s := &StubbedExit{prev: _exit}
_exit = s.exit
return s
}
@ -56,9 +57,10 @@ func WithStub(f func()) *StubbedExit {
// Unstub restores the previous exit function.
func (se *StubbedExit) Unstub() {
real = se.prev
_exit = se.prev
}
func (se *StubbedExit) exit() {
func (se *StubbedExit) exit(code int) {
se.Exited = true
se.Code = code
}

37
vendor/go.uber.org/zap/internal/level_enabler.go generated vendored Normal file
View File

@ -0,0 +1,37 @@
// Copyright (c) 2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// Package internal and its subpackages hold types and functionality
// that are not part of Zap's public API.
package internal
import "go.uber.org/zap/zapcore"
// LeveledEnabler is an interface satisfied by LevelEnablers that are able to
// report their own level.
//
// This interface is defined to use more conveniently in tests and non-zapcore
// packages.
// This cannot be imported from zapcore because of the cyclic dependency.
type LeveledEnabler interface {
zapcore.LevelEnabler
Level() zapcore.Level
}

58
vendor/go.uber.org/zap/internal/pool/pool.go generated vendored Normal file
View File

@ -0,0 +1,58 @@
// Copyright (c) 2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// Package pool provides internal pool utilities.
package pool
import (
"sync"
)
// A Pool is a generic wrapper around [sync.Pool] to provide strongly-typed
// object pooling.
//
// Note that SA6002 (ref: https://staticcheck.io/docs/checks/#SA6002) will
// not be detected, so all internal pool use must take care to only store
// pointer types.
type Pool[T any] struct {
pool sync.Pool
}
// New returns a new [Pool] for T, and will use fn to construct new Ts when
// the pool is empty.
func New[T any](fn func() T) *Pool[T] {
return &Pool[T]{
pool: sync.Pool{
New: func() any {
return fn()
},
},
}
}
// Get gets a T from the pool, or creates a new one if the pool is empty.
func (p *Pool[T]) Get() T {
return p.pool.Get().(T)
}
// Put returns x into the pool.
func (p *Pool[T]) Put(x T) {
p.pool.Put(x)
}
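
A short usage sketch, assuming a pointer element type as the SA6002 note above requires; the buffer type is just an example.

package pool_test

import (
	"bytes"
	"fmt"

	"go.uber.org/zap/internal/pool"
)

func ExamplePool() {
	// The constructor runs only when the pool is empty.
	p := pool.New(func() *bytes.Buffer { return new(bytes.Buffer) })

	buf := p.Get()
	buf.Reset() // pooled values may carry old contents
	buf.WriteString("pooled")
	fmt.Println(buf.String())
	p.Put(buf)

	// Output: pooled
}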

View File

@ -1,4 +1,4 @@
// Copyright (c) 2016 Uber Technologies, Inc.
// Copyright (c) 2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@ -18,25 +18,26 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zap
// Package stacktrace provides support for gathering stack traces
// efficiently.
package stacktrace
import (
"runtime"
"sync"
"go.uber.org/zap/buffer"
"go.uber.org/zap/internal/bufferpool"
"go.uber.org/zap/internal/pool"
)
var _stacktracePool = sync.Pool{
New: func() interface{} {
return &stacktrace{
storage: make([]uintptr, 64),
}
},
}
var _stackPool = pool.New(func() *Stack {
return &Stack{
storage: make([]uintptr, 64),
}
})
type stacktrace struct {
// Stack is a captured stack trace.
type Stack struct {
pcs []uintptr // program counters; always a subslice of storage
frames *runtime.Frames
@ -50,30 +51,30 @@ type stacktrace struct {
storage []uintptr
}
// stacktraceDepth specifies how deep of a stack trace should be captured.
type stacktraceDepth int
// Depth specifies how deep of a stack trace should be captured.
type Depth int
const (
// stacktraceFirst captures only the first frame.
stacktraceFirst stacktraceDepth = iota
// First captures only the first frame.
First Depth = iota
// stacktraceFull captures the entire call stack, allocating more
// Full captures the entire call stack, allocating more
// storage for it if needed.
stacktraceFull
Full
)
// captureStacktrace captures a stack trace of the specified depth, skipping
// Capture captures a stack trace of the specified depth, skipping
// the provided number of frames. skip=0 identifies the caller of
// captureStacktrace.
// Capture.
//
// The caller must call Free on the returned stacktrace after using it.
func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace {
stack := _stacktracePool.Get().(*stacktrace)
func Capture(skip int, depth Depth) *Stack {
stack := _stackPool.Get()
switch depth {
case stacktraceFirst:
case First:
stack.pcs = stack.storage[:1]
case stacktraceFull:
case Full:
stack.pcs = stack.storage
}
@ -87,7 +88,7 @@ func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace {
// runtime.Callers truncates the recorded stacktrace if there is no
// room in the provided slice. For the full stack trace, keep expanding
// storage until there are fewer frames than there is room.
if depth == stacktraceFull {
if depth == Full {
pcs := stack.pcs
for numFrames == len(pcs) {
pcs = make([]uintptr, len(pcs)*2)
@ -109,52 +110,56 @@ func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace {
// Free releases resources associated with this stacktrace
// and returns it back to the pool.
func (st *stacktrace) Free() {
func (st *Stack) Free() {
st.frames = nil
st.pcs = nil
_stacktracePool.Put(st)
_stackPool.Put(st)
}
// Count reports the total number of frames in this stacktrace.
// Count DOES NOT change as Next is called.
func (st *stacktrace) Count() int {
func (st *Stack) Count() int {
return len(st.pcs)
}
// Next returns the next frame in the stack trace,
// and a boolean indicating whether there are more after it.
func (st *stacktrace) Next() (_ runtime.Frame, more bool) {
func (st *Stack) Next() (_ runtime.Frame, more bool) {
return st.frames.Next()
}
func takeStacktrace(skip int) string {
stack := captureStacktrace(skip+1, stacktraceFull)
// Take returns a string representation of the current stacktrace.
//
// skip is the number of frames to skip before recording the stack trace.
// skip=0 identifies the caller of Take.
func Take(skip int) string {
stack := Capture(skip+1, Full)
defer stack.Free()
buffer := bufferpool.Get()
defer buffer.Free()
stackfmt := newStackFormatter(buffer)
stackfmt := NewFormatter(buffer)
stackfmt.FormatStack(stack)
return buffer.String()
}
// stackFormatter formats a stack trace into a readable string representation.
type stackFormatter struct {
// Formatter formats a stack trace into a readable string representation.
type Formatter struct {
b *buffer.Buffer
nonEmpty bool // whether we've written at least one frame already
}
// newStackFormatter builds a new stackFormatter.
func newStackFormatter(b *buffer.Buffer) stackFormatter {
return stackFormatter{b: b}
// NewFormatter builds a new Formatter.
func NewFormatter(b *buffer.Buffer) Formatter {
return Formatter{b: b}
}
// FormatStack formats all remaining frames in the provided stacktrace -- minus
// the final runtime.main/runtime.goexit frame.
func (sf *stackFormatter) FormatStack(stack *stacktrace) {
func (sf *Formatter) FormatStack(stack *Stack) {
// Note: On the last iteration, frames.Next() returns false, with a valid
// frame, but we ignore this frame. The last frame is a a runtime frame which
// frame, but we ignore this frame. The last frame is a runtime frame which
// adds noise, since it's only either runtime.main or runtime.goexit.
for frame, more := stack.Next(); more; frame, more = stack.Next() {
sf.FormatFrame(frame)
@ -162,7 +167,7 @@ func (sf *stackFormatter) FormatStack(stack *stacktrace) {
}
// FormatFrame formats the given frame.
func (sf *stackFormatter) FormatFrame(frame runtime.Frame) {
func (sf *Formatter) FormatFrame(frame runtime.Frame) {
if sf.nonEmpty {
sf.b.AppendByte('\n')
}

12
vendor/go.uber.org/zap/level.go generated vendored
View File

@ -21,7 +21,9 @@
package zap
import (
"go.uber.org/atomic"
"sync/atomic"
"go.uber.org/zap/internal"
"go.uber.org/zap/zapcore"
)
@ -70,12 +72,14 @@ type AtomicLevel struct {
l *atomic.Int32
}
var _ internal.LeveledEnabler = AtomicLevel{}
// NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging
// enabled.
func NewAtomicLevel() AtomicLevel {
return AtomicLevel{
l: atomic.NewInt32(int32(InfoLevel)),
}
lvl := AtomicLevel{l: new(atomic.Int32)}
lvl.l.Store(int32(InfoLevel))
return lvl
}
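
A sketch of the typical use of AtomicLevel with the new sync/atomic backing: one enabler shared by a core and adjusted at runtime. The encoder and sink choices here are illustrative.

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	lvl := zap.NewAtomicLevel() // InfoLevel by default
	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stdout),
		lvl,
	)
	logger := zap.New(core)

	logger.Debug("suppressed")   // below the current minimum level
	lvl.SetLevel(zap.DebugLevel) // takes effect immediately; safe for concurrent use
	logger.Debug("now visible")
}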
// NewAtomicLevelAt is a convenience function that creates an AtomicLevel

103
vendor/go.uber.org/zap/logger.go generated vendored
View File

@ -22,11 +22,12 @@ package zap
import (
"fmt"
"io/ioutil"
"io"
"os"
"strings"
"go.uber.org/zap/internal/bufferpool"
"go.uber.org/zap/internal/stacktrace"
"go.uber.org/zap/zapcore"
)
@ -42,7 +43,7 @@ type Logger struct {
development bool
addCaller bool
onFatal zapcore.CheckWriteAction // default is WriteThenFatal
onFatal zapcore.CheckWriteHook // default is WriteThenFatal
name string
errorOutput zapcore.WriteSyncer
@ -85,7 +86,7 @@ func New(core zapcore.Core, options ...Option) *Logger {
func NewNop() *Logger {
return &Logger{
core: zapcore.NewNopCore(),
errorOutput: zapcore.AddSync(ioutil.Discard),
errorOutput: zapcore.AddSync(io.Discard),
addStack: zapcore.FatalLevel + 1,
clock: zapcore.DefaultClock,
}
@ -107,6 +108,19 @@ func NewDevelopment(options ...Option) (*Logger, error) {
return NewDevelopmentConfig().Build(options...)
}
// Must is a helper that wraps a call to a function returning (*Logger, error)
// and panics if the error is non-nil. It is intended for use in variable
// initialization such as:
//
// var logger = zap.Must(zap.NewProduction())
func Must(logger *Logger, err error) *Logger {
if err != nil {
panic(err)
}
return logger
}
// NewExample builds a Logger that's designed for use in zap's testable
// examples. It writes DebugLevel and above logs to standard out as JSON, but
// omits the timestamp and calling function to keep example output
@ -160,7 +174,8 @@ func (log *Logger) WithOptions(opts ...Option) *Logger {
}
// With creates a child logger and adds structured context to it. Fields added
// to the child don't affect the parent, and vice versa.
// to the child don't affect the parent, and vice versa. Any fields that
// require evaluation (such as Objects) are evaluated upon invocation of With.
func (log *Logger) With(fields ...Field) *Logger {
if len(fields) == 0 {
return log
@ -170,6 +185,35 @@ func (log *Logger) With(fields ...Field) *Logger {
return l
}
// WithLazy creates a child logger and adds structured context to it lazily.
//
// The fields are evaluated only if the logger is further chained with [With]
// or is written to with any of the log level methods.
// Until that occurs, the logger may retain references to objects inside the fields,
// and logging will reflect the state of an object at the time of logging,
// not the time of WithLazy().
//
// WithLazy provides a worthwhile performance optimization for contextual loggers
// when the likelihood of using the child logger is low,
// such as error paths and rarely taken branches.
//
// Similar to [With], fields added to the child don't affect the parent, and vice versa.
func (log *Logger) WithLazy(fields ...Field) *Logger {
if len(fields) == 0 {
return log
}
return log.WithOptions(WrapCore(func(core zapcore.Core) zapcore.Core {
return zapcore.NewLazyWith(core, fields)
}))
}
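
A sketch of the error-path pattern the comment above describes; the handler and its parameters are hypothetical.

package main

import "go.uber.org/zap"

// handle is a hypothetical request handler; it only illustrates WithLazy.
func handle(logger *zap.Logger, requestID string, user any, do func() error) {
	// The fields are not encoded here; they are evaluated only if the error
	// branch below actually logs (or the logger is chained further).
	errLogger := logger.WithLazy(zap.String("request_id", requestID), zap.Any("user", user))

	if err := do(); err != nil {
		errLogger.Error("request failed", zap.Error(err))
	}
}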
// Level reports the minimum enabled level for this logger.
//
// For NopLoggers, this is [zapcore.InvalidLevel].
func (log *Logger) Level() zapcore.Level {
return zapcore.LevelOf(log.core)
}
// Check returns a CheckedEntry if logging a message at the specified level
// is enabled. It's a completely optional optimization; in high-performance
// applications, Check can help avoid allocating a slice to hold fields.
@ -177,6 +221,16 @@ func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
return log.check(lvl, msg)
}
// Log logs a message at the specified level. The message includes any fields
// passed at the log site, as well as any fields accumulated on the logger.
// Any Fields that require evaluation (such as Objects) are evaluated upon
// invocation of Log.
func (log *Logger) Log(lvl zapcore.Level, msg string, fields ...Field) {
if ce := log.check(lvl, msg); ce != nil {
ce.Write(fields...)
}
}
// Debug logs a message at DebugLevel. The message includes any fields passed
// at the log site, as well as any fields accumulated on the logger.
func (log *Logger) Debug(msg string, fields ...Field) {
@ -253,9 +307,15 @@ func (log *Logger) Core() zapcore.Core {
return log.core
}
// Name returns the Logger's underlying name,
// or an empty string if the logger is unnamed.
func (log *Logger) Name() string {
return log.name
}
func (log *Logger) clone() *Logger {
copy := *log
return &copy
clone := *log
return &clone
}
func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
@ -285,18 +345,27 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
// Set up any required terminal behavior.
switch ent.Level {
case zapcore.PanicLevel:
ce = ce.Should(ent, zapcore.WriteThenPanic)
ce = ce.After(ent, zapcore.WriteThenPanic)
case zapcore.FatalLevel:
onFatal := log.onFatal
// Noop is the default value for CheckWriteAction, and it leads to
// continued execution after a Fatal which is unexpected.
if onFatal == zapcore.WriteThenNoop {
// nil or WriteThenNoop will lead to continued execution after
// a Fatal log entry, which is unexpected. For example,
//
// f, err := os.Open(..)
// if err != nil {
// log.Fatal("cannot open", zap.Error(err))
// }
// fmt.Println(f.Name())
//
// The f.Name() will panic if we continue execution after the
// log.Fatal.
if onFatal == nil || onFatal == zapcore.WriteThenNoop {
onFatal = zapcore.WriteThenFatal
}
ce = ce.Should(ent, onFatal)
ce = ce.After(ent, onFatal)
case zapcore.DPanicLevel:
if log.development {
ce = ce.Should(ent, zapcore.WriteThenPanic)
ce = ce.After(ent, zapcore.WriteThenPanic)
}
}
@ -317,17 +386,17 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
// Adding the caller or stack trace requires capturing the callers of
// this function. We'll share information between these two.
stackDepth := stacktraceFirst
stackDepth := stacktrace.First
if addStack {
stackDepth = stacktraceFull
stackDepth = stacktrace.Full
}
stack := captureStacktrace(log.callerSkip+callerSkipOffset, stackDepth)
stack := stacktrace.Capture(log.callerSkip+callerSkipOffset, stackDepth)
defer stack.Free()
if stack.Count() == 0 {
if log.addCaller {
fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC())
log.errorOutput.Sync()
_ = log.errorOutput.Sync()
}
return ce
}
@ -348,7 +417,7 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
buffer := bufferpool.Get()
defer buffer.Free()
stackfmt := newStackFormatter(buffer)
stackfmt := stacktrace.NewFormatter(buffer)
// We've already extracted the first frame, so format that
// separately and defer to stackfmt for the rest.

21
vendor/go.uber.org/zap/options.go generated vendored
View File

@ -133,9 +133,28 @@ func IncreaseLevel(lvl zapcore.LevelEnabler) Option {
}
// OnFatal sets the action to take on fatal logs.
//
// Deprecated: Use [WithFatalHook] instead.
func OnFatal(action zapcore.CheckWriteAction) Option {
return WithFatalHook(action)
}
// WithFatalHook sets a CheckWriteHook to run on fatal logs.
// Zap will call this hook after writing a log statement with a Fatal level.
//
// For example, the following builds a logger that will exit the current
// goroutine after writing a fatal log message, but it will not exit the
// program.
//
// zap.New(core, zap.WithFatalHook(zapcore.WriteThenGoexit))
//
// It is important that the provided CheckWriteHook stops the control flow at
// the current statement to meet expectations of callers of the logger.
// We recommend calling os.Exit or runtime.Goexit inside custom hooks at
// minimum.
func WithFatalHook(hook zapcore.CheckWriteHook) Option {
return optionFunc(func(log *Logger) {
log.onFatal = action
log.onFatal = hook
})
}
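
A sketch of a custom hook along the lines recommended above: it flushes a secondary sink and then exits, so control flow still stops after a Fatal log. The type and constructor names are hypothetical.

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// flushThenExit is a hypothetical CheckWriteHook that syncs an extra sink
// before terminating the process.
type flushThenExit struct{ sink zapcore.WriteSyncer }

func (h flushThenExit) OnWrite(*zapcore.CheckedEntry, []zapcore.Field) {
	_ = h.sink.Sync()
	os.Exit(1)
}

func newFatalAwareLogger(core zapcore.Core, sink zapcore.WriteSyncer) *zap.Logger {
	return zap.New(core, zap.WithFatalHook(flushThenExit{sink: sink}))
}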

101
vendor/go.uber.org/zap/sink.go generated vendored
View File

@ -1,4 +1,4 @@
// Copyright (c) 2016 Uber Technologies, Inc.
// Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@ -26,6 +26,7 @@ import (
"io"
"net/url"
"os"
"path/filepath"
"strings"
"sync"
@ -34,23 +35,7 @@ import (
const schemeFile = "file"
var (
_sinkMutex sync.RWMutex
_sinkFactories map[string]func(*url.URL) (Sink, error) // keyed by scheme
)
func init() {
resetSinkRegistry()
}
func resetSinkRegistry() {
_sinkMutex.Lock()
defer _sinkMutex.Unlock()
_sinkFactories = map[string]func(*url.URL) (Sink, error){
schemeFile: newFileSink,
}
}
var _sinkRegistry = newSinkRegistry()
// Sink defines the interface to write to and close logger destinations.
type Sink interface {
@ -58,10 +43,6 @@ type Sink interface {
io.Closer
}
type nopCloserSink struct{ zapcore.WriteSyncer }
func (nopCloserSink) Close() error { return nil }
type errSinkNotFound struct {
scheme string
}
@ -70,16 +51,30 @@ func (e *errSinkNotFound) Error() string {
return fmt.Sprintf("no sink found for scheme %q", e.scheme)
}
// RegisterSink registers a user-supplied factory for all sinks with a
// particular scheme.
//
// All schemes must be ASCII, valid under section 3.1 of RFC 3986
// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already
// have a factory registered. Zap automatically registers a factory for the
// "file" scheme.
func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
_sinkMutex.Lock()
defer _sinkMutex.Unlock()
type nopCloserSink struct{ zapcore.WriteSyncer }
func (nopCloserSink) Close() error { return nil }
type sinkRegistry struct {
mu sync.Mutex
factories map[string]func(*url.URL) (Sink, error) // keyed by scheme
openFile func(string, int, os.FileMode) (*os.File, error) // type matches os.OpenFile
}
func newSinkRegistry() *sinkRegistry {
sr := &sinkRegistry{
factories: make(map[string]func(*url.URL) (Sink, error)),
openFile: os.OpenFile,
}
// Infallible operation: the registry is empty, so we can't have a conflict.
_ = sr.RegisterSink(schemeFile, sr.newFileSinkFromURL)
return sr
}
// RegisterScheme registers the given factory for the specific scheme.
func (sr *sinkRegistry) RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
sr.mu.Lock()
defer sr.mu.Unlock()
if scheme == "" {
return errors.New("can't register a sink factory for empty string")
@ -88,14 +83,22 @@ func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
if err != nil {
return fmt.Errorf("%q is not a valid scheme: %v", scheme, err)
}
if _, ok := _sinkFactories[normalized]; ok {
if _, ok := sr.factories[normalized]; ok {
return fmt.Errorf("sink factory already registered for scheme %q", normalized)
}
_sinkFactories[normalized] = factory
sr.factories[normalized] = factory
return nil
}
func newSink(rawURL string) (Sink, error) {
func (sr *sinkRegistry) newSink(rawURL string) (Sink, error) {
// URL parsing doesn't work well for Windows paths such as `c:\log.txt`, as scheme is set to
// the drive, and path is unset unless `c:/log.txt` is used.
// To avoid Windows-specific URL handling, we instead check IsAbs to open as a file.
// filepath.IsAbs is OS-specific, so IsAbs('c:/log.txt') is false outside of Windows.
if filepath.IsAbs(rawURL) {
return sr.newFileSinkFromPath(rawURL)
}
u, err := url.Parse(rawURL)
if err != nil {
return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err)
@ -104,16 +107,27 @@ func newSink(rawURL string) (Sink, error) {
u.Scheme = schemeFile
}
_sinkMutex.RLock()
factory, ok := _sinkFactories[u.Scheme]
_sinkMutex.RUnlock()
sr.mu.Lock()
factory, ok := sr.factories[u.Scheme]
sr.mu.Unlock()
if !ok {
return nil, &errSinkNotFound{u.Scheme}
}
return factory(u)
}
func newFileSink(u *url.URL) (Sink, error) {
// RegisterSink registers a user-supplied factory for all sinks with a
// particular scheme.
//
// All schemes must be ASCII, valid under section 3.1 of RFC 3986
// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already
// have a factory registered. Zap automatically registers a factory for the
// "file" scheme.
func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
return _sinkRegistry.RegisterSink(scheme, factory)
}
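
A sketch of registering a custom scheme through this wrapper; the in-memory sink below is hypothetical and mainly useful in tests. Once registered, paths such as "memory://" resolve to it.

package main

import (
	"bytes"
	"net/url"

	"go.uber.org/zap"
)

// memorySink is a hypothetical Sink that keeps log output in memory.
type memorySink struct{ bytes.Buffer }

func (*memorySink) Sync() error  { return nil }
func (*memorySink) Close() error { return nil }

func init() {
	if err := zap.RegisterSink("memory", func(*url.URL) (zap.Sink, error) {
		return new(memorySink), nil
	}); err != nil {
		panic(err)
	}
}

func main() {
	ws, cleanup, err := zap.Open("memory://")
	if err != nil {
		panic(err)
	}
	defer cleanup()
	_, _ = ws.Write([]byte("captured in memory\n"))
}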
func (sr *sinkRegistry) newFileSinkFromURL(u *url.URL) (Sink, error) {
if u.User != nil {
return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u)
}
@ -130,13 +144,18 @@ func newFileSink(u *url.URL) (Sink, error) {
if hn := u.Hostname(); hn != "" && hn != "localhost" {
return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u)
}
switch u.Path {
return sr.newFileSinkFromPath(u.Path)
}
func (sr *sinkRegistry) newFileSinkFromPath(path string) (Sink, error) {
switch path {
case "stdout":
return nopCloserSink{os.Stdout}, nil
case "stderr":
return nopCloserSink{os.Stderr}, nil
}
return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
return sr.openFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0o666)
}
func normalizeScheme(s string) (string, error) {

200
vendor/go.uber.org/zap/sugar.go generated vendored
View File

@ -31,6 +31,7 @@ import (
const (
_oddNumberErrMsg = "Ignored key without a value."
_nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys."
_multipleErrMsg = "Multiple errors without a key."
)
// A SugaredLogger wraps the base Logger functionality in a slower, but less
@ -38,10 +39,19 @@ const (
// method.
//
// Unlike the Logger, the SugaredLogger doesn't insist on structured logging.
// For each log level, it exposes three methods: one for loosely-typed
// structured logging, one for println-style formatting, and one for
// printf-style formatting. For example, SugaredLoggers can produce InfoLevel
// output with Infow ("info with" structured context), Info, or Infof.
// For each log level, it exposes four methods:
//
// - methods named after the log level for log.Print-style logging
// - methods ending in "w" for loosely-typed structured logging
// - methods ending in "f" for log.Printf-style logging
// - methods ending in "ln" for log.Println-style logging
//
// For example, the methods for InfoLevel are:
//
// Info(...any) Print-style logging
// Infow(...any) Structured logging (read as "info with")
// Infof(string, ...any) Printf-style logging
// Infoln(...any) Println-style logging
type SugaredLogger struct {
base *Logger
}
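
A brief sketch of the four flavors for InfoLevel described above.

package main

import "go.uber.org/zap"

func main() {
	sugar := zap.NewExample().Sugar()
	defer func() { _ = sugar.Sync() }()

	sugar.Info("connected to ", "db")                 // Print-style
	sugar.Infof("connected to %s in %dms", "db", 5)   // Printf-style
	sugar.Infoln("connected", "to", "db")             // Println-style: spaces always added
	sugar.Infow("connected", "target", "db", "ms", 5) // structured "info with" key-value pairs
}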
@ -61,27 +71,40 @@ func (s *SugaredLogger) Named(name string) *SugaredLogger {
return &SugaredLogger{base: s.base.Named(name)}
}
// WithOptions clones the current SugaredLogger, applies the supplied Options,
// and returns the result. It's safe to use concurrently.
func (s *SugaredLogger) WithOptions(opts ...Option) *SugaredLogger {
base := s.base.clone()
for _, opt := range opts {
opt.apply(base)
}
return &SugaredLogger{base: base}
}
// With adds a variadic number of fields to the logging context. It accepts a
// mix of strongly-typed Field objects and loosely-typed key-value pairs. When
// processing pairs, the first element of the pair is used as the field key
// and the second as the field value.
//
// For example,
// sugaredLogger.With(
// "hello", "world",
// "failure", errors.New("oh no"),
// Stack(),
// "count", 42,
// "user", User{Name: "alice"},
// )
//
// sugaredLogger.With(
// "hello", "world",
// "failure", errors.New("oh no"),
// Stack(),
// "count", 42,
// "user", User{Name: "alice"},
// )
//
// is the equivalent of
// unsugared.With(
// String("hello", "world"),
// String("failure", "oh no"),
// Stack(),
// Int("count", 42),
// Object("user", User{Name: "alice"}),
// )
//
// unsugared.With(
// String("hello", "world"),
// String("failure", "oh no"),
// Stack(),
// Int("count", 42),
// Object("user", User{Name: "alice"}),
// )
//
// Note that the keys in key-value pairs should be strings. In development,
// passing a non-string key panics. In production, the logger is more
@ -92,74 +115,95 @@ func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger {
return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)}
}
// Debug uses fmt.Sprint to construct and log a message.
// Level reports the minimum enabled level for this logger.
//
// For NopLoggers, this is [zapcore.InvalidLevel].
func (s *SugaredLogger) Level() zapcore.Level {
return zapcore.LevelOf(s.base.core)
}
// Debug logs the provided arguments at [DebugLevel].
// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Debug(args ...interface{}) {
s.log(DebugLevel, "", args, nil)
}
// Info uses fmt.Sprint to construct and log a message.
// Info logs the provided arguments at [InfoLevel].
// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Info(args ...interface{}) {
s.log(InfoLevel, "", args, nil)
}
// Warn uses fmt.Sprint to construct and log a message.
// Warn logs the provided arguments at [WarnLevel].
// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Warn(args ...interface{}) {
s.log(WarnLevel, "", args, nil)
}
// Error uses fmt.Sprint to construct and log a message.
// Error logs the provided arguments at [ErrorLevel].
// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Error(args ...interface{}) {
s.log(ErrorLevel, "", args, nil)
}
// DPanic uses fmt.Sprint to construct and log a message. In development, the
// logger then panics. (See DPanicLevel for details.)
// DPanic logs the provided arguments at [DPanicLevel].
// In development, the logger then panics. (See [DPanicLevel] for details.)
// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) DPanic(args ...interface{}) {
s.log(DPanicLevel, "", args, nil)
}
// Panic uses fmt.Sprint to construct and log a message, then panics.
// Panic constructs a message with the provided arguments and panics.
// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Panic(args ...interface{}) {
s.log(PanicLevel, "", args, nil)
}
// Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit.
// Fatal constructs a message with the provided arguments and calls os.Exit.
// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Fatal(args ...interface{}) {
s.log(FatalLevel, "", args, nil)
}
// Debugf uses fmt.Sprintf to log a templated message.
// Debugf formats the message according to the format specifier
// and logs it at [DebugLevel].
func (s *SugaredLogger) Debugf(template string, args ...interface{}) {
s.log(DebugLevel, template, args, nil)
}
// Infof uses fmt.Sprintf to log a templated message.
// Infof formats the message according to the format specifier
// and logs it at [InfoLevel].
func (s *SugaredLogger) Infof(template string, args ...interface{}) {
s.log(InfoLevel, template, args, nil)
}
// Warnf uses fmt.Sprintf to log a templated message.
// Warnf formats the message according to the format specifier
// and logs it at [WarnLevel].
func (s *SugaredLogger) Warnf(template string, args ...interface{}) {
s.log(WarnLevel, template, args, nil)
}
// Errorf uses fmt.Sprintf to log a templated message.
// Errorf formats the message according to the format specifier
// and logs it at [ErrorLevel].
func (s *SugaredLogger) Errorf(template string, args ...interface{}) {
s.log(ErrorLevel, template, args, nil)
}
// DPanicf uses fmt.Sprintf to log a templated message. In development, the
// logger then panics. (See DPanicLevel for details.)
// DPanicf formats the message according to the format specifier
// and logs it at [DPanicLevel].
// In development, the logger then panics. (See [DPanicLevel] for details.)
func (s *SugaredLogger) DPanicf(template string, args ...interface{}) {
s.log(DPanicLevel, template, args, nil)
}
// Panicf uses fmt.Sprintf to log a templated message, then panics.
// Panicf formats the message according to the format specifier
// and panics.
func (s *SugaredLogger) Panicf(template string, args ...interface{}) {
s.log(PanicLevel, template, args, nil)
}
// Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit.
// Fatalf formats the message according to the format specifier
// and calls os.Exit.
func (s *SugaredLogger) Fatalf(template string, args ...interface{}) {
s.log(FatalLevel, template, args, nil)
}
@ -168,7 +212,8 @@ func (s *SugaredLogger) Fatalf(template string, args ...interface{}) {
// pairs are treated as they are in With.
//
// When debug-level logging is disabled, this is much faster than
// s.With(keysAndValues).Debug(msg)
//
// s.With(keysAndValues).Debug(msg)
func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) {
s.log(DebugLevel, msg, nil, keysAndValues)
}
@ -210,11 +255,55 @@ func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) {
s.log(FatalLevel, msg, nil, keysAndValues)
}
// Debugln logs a message at [DebugLevel].
// Spaces are always added between arguments.
func (s *SugaredLogger) Debugln(args ...interface{}) {
s.logln(DebugLevel, args, nil)
}
// Infoln logs a message at [InfoLevel].
// Spaces are always added between arguments.
func (s *SugaredLogger) Infoln(args ...interface{}) {
s.logln(InfoLevel, args, nil)
}
// Warnln logs a message at [WarnLevel].
// Spaces are always added between arguments.
func (s *SugaredLogger) Warnln(args ...interface{}) {
s.logln(WarnLevel, args, nil)
}
// Errorln logs a message at [ErrorLevel].
// Spaces are always added between arguments.
func (s *SugaredLogger) Errorln(args ...interface{}) {
s.logln(ErrorLevel, args, nil)
}
// DPanicln logs a message at [DPanicLevel].
// In development, the logger then panics. (See [DPanicLevel] for details.)
// Spaces are always added between arguments.
func (s *SugaredLogger) DPanicln(args ...interface{}) {
s.logln(DPanicLevel, args, nil)
}
// Panicln logs a message at [PanicLevel] and panics.
// Spaces are always added between arguments.
func (s *SugaredLogger) Panicln(args ...interface{}) {
s.logln(PanicLevel, args, nil)
}
// Fatalln logs a message at [FatalLevel] and calls os.Exit.
// Spaces are always added between arguments.
func (s *SugaredLogger) Fatalln(args ...interface{}) {
s.logln(FatalLevel, args, nil)
}
// Sync flushes any buffered log entries.
func (s *SugaredLogger) Sync() error {
return s.base.Sync()
}
// log message with Sprint, Sprintf, or neither.
func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) {
// If logging at this level is completely disabled, skip the overhead of
// string formatting.
@ -228,6 +317,18 @@ func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interf
}
}
// logln message with Sprintln
func (s *SugaredLogger) logln(lvl zapcore.Level, fmtArgs []interface{}, context []interface{}) {
if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) {
return
}
msg := getMessageln(fmtArgs)
if ce := s.base.Check(lvl, msg); ce != nil {
ce.Write(s.sweetenFields(context)...)
}
}
// getMessage format with Sprint, Sprintf, or neither.
func getMessage(template string, fmtArgs []interface{}) string {
if len(fmtArgs) == 0 {
@ -246,15 +347,24 @@ func getMessage(template string, fmtArgs []interface{}) string {
return fmt.Sprint(fmtArgs...)
}
// getMessageln format with Sprintln.
func getMessageln(fmtArgs []interface{}) string {
msg := fmt.Sprintln(fmtArgs...)
return msg[:len(msg)-1]
}
func (s *SugaredLogger) sweetenFields(args []interface{}) []Field {
if len(args) == 0 {
return nil
}
// Allocate enough space for the worst case; if users pass only structured
// fields, we shouldn't penalize them with extra allocations.
fields := make([]Field, 0, len(args))
var invalid invalidPairs
var (
// Allocate enough space for the worst case; if users pass only structured
// fields, we shouldn't penalize them with extra allocations.
fields = make([]Field, 0, len(args))
invalid invalidPairs
seenError bool
)
for i := 0; i < len(args); {
// This is a strongly-typed field. Consume it and move on.
@ -264,6 +374,18 @@ func (s *SugaredLogger) sweetenFields(args []interface{}) []Field {
continue
}
// If it is an error, consume it and move on.
if err, ok := args[i].(error); ok {
if !seenError {
seenError = true
fields = append(fields, Error(err))
} else {
s.base.Error(_multipleErrMsg, Error(err))
}
i++
continue
}
// Make sure this element isn't a dangling key.
if i == len(args)-1 {
s.base.Error(_oddNumberErrMsg, Any("ignored", args[i]))

23
vendor/go.uber.org/zap/writer.go generated vendored
View File

@ -1,4 +1,4 @@
// Copyright (c) 2016 Uber Technologies, Inc.
// Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@ -23,7 +23,6 @@ package zap
import (
"fmt"
"io"
"io/ioutil"
"go.uber.org/zap/zapcore"
@ -49,40 +48,40 @@ import (
// os.Stdout and os.Stderr. When specified without a scheme, relative file
// paths also work.
func Open(paths ...string) (zapcore.WriteSyncer, func(), error) {
writers, close, err := open(paths)
writers, closeAll, err := open(paths)
if err != nil {
return nil, nil, err
}
writer := CombineWriteSyncers(writers...)
return writer, close, nil
return writer, closeAll, nil
}
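
A sketch of combining two destinations through Open; the file path and encoder choice are illustrative.

package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	ws, closeAll, err := zap.Open("stderr", "/tmp/app.log")
	if err != nil {
		panic(err)
	}
	defer closeAll()

	enc := zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig())
	logger := zap.New(zapcore.NewCore(enc, ws, zap.InfoLevel))
	logger.Info("written to stderr and the file")
}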
func open(paths []string) ([]zapcore.WriteSyncer, func(), error) {
writers := make([]zapcore.WriteSyncer, 0, len(paths))
closers := make([]io.Closer, 0, len(paths))
close := func() {
closeAll := func() {
for _, c := range closers {
c.Close()
_ = c.Close()
}
}
var openErr error
for _, path := range paths {
sink, err := newSink(path)
sink, err := _sinkRegistry.newSink(path)
if err != nil {
openErr = multierr.Append(openErr, fmt.Errorf("couldn't open sink %q: %v", path, err))
openErr = multierr.Append(openErr, fmt.Errorf("open sink %q: %w", path, err))
continue
}
writers = append(writers, sink)
closers = append(closers, sink)
}
if openErr != nil {
close()
return writers, nil, openErr
closeAll()
return nil, nil, openErr
}
return writers, close, nil
return writers, closeAll, nil
}
// CombineWriteSyncers is a utility that combines multiple WriteSyncers into a
@ -93,7 +92,7 @@ func open(paths []string) ([]zapcore.WriteSyncer, func(), error) {
// using zapcore.NewMultiWriteSyncer and zapcore.Lock individually.
func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer {
if len(writers) == 0 {
return zapcore.AddSync(ioutil.Discard)
return zapcore.AddSync(io.Discard)
}
return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...))
}

View File

@ -43,6 +43,37 @@ const (
//
// BufferedWriteSyncer is safe for concurrent use. You don't need to use
// zapcore.Lock for WriteSyncers with BufferedWriteSyncer.
//
// To set up a BufferedWriteSyncer, construct a WriteSyncer for your log
// destination (*os.File is a valid WriteSyncer), wrap it with
// BufferedWriteSyncer, and defer a Stop() call for when you no longer need the
// object.
//
// func main() {
// ws := ... // your log destination
// bws := &zapcore.BufferedWriteSyncer{WS: ws}
// defer bws.Stop()
//
// // ...
// core := zapcore.NewCore(enc, bws, lvl)
// logger := zap.New(core)
//
// // ...
// }
//
// By default, a BufferedWriteSyncer will buffer up to 256 kilobytes of logs,
// waiting at most 30 seconds between flushes.
// You can customize these parameters by setting the Size or FlushInterval
// fields.
// For example, the following buffers up to 512 kB of logs before flushing them
// to Stderr, with a maximum of one minute between each flush.
//
// ws := &BufferedWriteSyncer{
// WS: os.Stderr,
// Size: 512 * 1024, // 512 kB
// FlushInterval: time.Minute,
// }
// defer ws.Stop()
type BufferedWriteSyncer struct {
// WS is the WriteSyncer around which BufferedWriteSyncer will buffer
// writes.

View File

@ -22,20 +22,20 @@ package zapcore
import (
"fmt"
"sync"
"go.uber.org/zap/buffer"
"go.uber.org/zap/internal/bufferpool"
"go.uber.org/zap/internal/pool"
)
var _sliceEncoderPool = sync.Pool{
New: func() interface{} {
return &sliceArrayEncoder{elems: make([]interface{}, 0, 2)}
},
}
var _sliceEncoderPool = pool.New(func() *sliceArrayEncoder {
return &sliceArrayEncoder{
elems: make([]interface{}, 0, 2),
}
})
func getSliceEncoder() *sliceArrayEncoder {
return _sliceEncoderPool.Get().(*sliceArrayEncoder)
return _sliceEncoderPool.Get()
}
func putSliceEncoder(e *sliceArrayEncoder) {

View File

@ -69,6 +69,15 @@ type ioCore struct {
out WriteSyncer
}
var (
_ Core = (*ioCore)(nil)
_ leveledEnabler = (*ioCore)(nil)
)
func (c *ioCore) Level() Level {
return LevelOf(c.LevelEnabler)
}
func (c *ioCore) With(fields []Field) Core {
clone := c.clone()
addFields(clone.enc, fields)
@ -93,9 +102,9 @@ func (c *ioCore) Write(ent Entry, fields []Field) error {
return err
}
if ent.Level > ErrorLevel {
// Since we may be crashing the program, sync the output. Ignore Sync
// errors, pending a clean solution to issue #370.
c.Sync()
// Since we may be crashing the program, sync the output.
// Ignore Sync errors, pending a clean solution to issue #370.
_ = c.Sync()
}
return nil
}

View File

@ -188,10 +188,13 @@ func (e *TimeEncoder) UnmarshalText(text []byte) error {
// UnmarshalYAML unmarshals YAML to a TimeEncoder.
// If value is an object with a "layout" field, it will be unmarshaled to TimeEncoder with given layout.
// timeEncoder:
// layout: 06/01/02 03:04pm
//
// timeEncoder:
// layout: 06/01/02 03:04pm
//
// If value is string, it uses UnmarshalText.
// timeEncoder: iso8601
//
// timeEncoder: iso8601
func (e *TimeEncoder) UnmarshalYAML(unmarshal func(interface{}) error) error {
var o struct {
Layout string `json:"layout" yaml:"layout"`

View File

@ -24,26 +24,23 @@ import (
"fmt"
"runtime"
"strings"
"sync"
"time"
"go.uber.org/multierr"
"go.uber.org/zap/internal/bufferpool"
"go.uber.org/zap/internal/exit"
"go.uber.org/multierr"
"go.uber.org/zap/internal/pool"
)
var (
_cePool = sync.Pool{New: func() interface{} {
// Pre-allocate some space for cores.
return &CheckedEntry{
cores: make([]Core, 4),
}
}}
)
var _cePool = pool.New(func() *CheckedEntry {
// Pre-allocate some space for cores.
return &CheckedEntry{
cores: make([]Core, 4),
}
})
func getCheckedEntry() *CheckedEntry {
ce := _cePool.Get().(*CheckedEntry)
ce := _cePool.Get()
ce.reset()
return ce
}
@ -152,6 +149,27 @@ type Entry struct {
Stack string
}
// CheckWriteHook is a custom action that may be executed after an entry is
// written.
//
// Register one on a CheckedEntry with the After method.
//
// if ce := logger.Check(...); ce != nil {
// ce = ce.After(hook)
// ce.Write(...)
// }
//
// You can configure the hook for Fatal log statements at the logger level with
// the zap.WithFatalHook option.
type CheckWriteHook interface {
// OnWrite is invoked with the CheckedEntry that was written and a list
// of fields added with that entry.
//
// The list of fields DOES NOT include fields that were already added
// to the logger with the With method.
OnWrite(*CheckedEntry, []Field)
}
// CheckWriteAction indicates what action to take after a log entry is
// processed. Actions are ordered in increasing severity.
type CheckWriteAction uint8
@ -164,21 +182,36 @@ const (
WriteThenGoexit
// WriteThenPanic causes a panic after Write.
WriteThenPanic
// WriteThenFatal causes a fatal os.Exit after Write.
// WriteThenFatal causes an os.Exit(1) after Write.
WriteThenFatal
)
// OnWrite implements the OnWrite method to keep CheckWriteAction compatible
// with the new CheckWriteHook interface which deprecates CheckWriteAction.
func (a CheckWriteAction) OnWrite(ce *CheckedEntry, _ []Field) {
switch a {
case WriteThenGoexit:
runtime.Goexit()
case WriteThenPanic:
panic(ce.Message)
case WriteThenFatal:
exit.With(1)
}
}
var _ CheckWriteHook = CheckWriteAction(0)
// CheckedEntry is an Entry together with a collection of Cores that have
// already agreed to log it.
//
// CheckedEntry references should be created by calling AddCore or Should on a
// CheckedEntry references should be created by calling AddCore or After on a
// nil *CheckedEntry. References are returned to a pool after Write, and MUST
// NOT be retained after calling their Write method.
type CheckedEntry struct {
Entry
ErrorOutput WriteSyncer
dirty bool // best-effort detection of pool misuse
should CheckWriteAction
after CheckWriteHook
cores []Core
}
@ -186,7 +219,7 @@ func (ce *CheckedEntry) reset() {
ce.Entry = Entry{}
ce.ErrorOutput = nil
ce.dirty = false
ce.should = WriteThenNoop
ce.after = nil
for i := range ce.cores {
// don't keep references to cores
ce.cores[i] = nil
@ -209,7 +242,7 @@ func (ce *CheckedEntry) Write(fields ...Field) {
// CheckedEntry is being used after it was returned to the pool,
// the message may be an amalgamation from multiple call sites.
fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry)
ce.ErrorOutput.Sync()
_ = ce.ErrorOutput.Sync() // ignore error
}
return
}
@ -221,20 +254,14 @@ func (ce *CheckedEntry) Write(fields ...Field) {
}
if err != nil && ce.ErrorOutput != nil {
fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err)
ce.ErrorOutput.Sync()
_ = ce.ErrorOutput.Sync() // ignore error
}
should, msg := ce.should, ce.Message
hook := ce.after
if hook != nil {
hook.OnWrite(ce, fields)
}
putCheckedEntry(ce)
switch should {
case WriteThenPanic:
panic(msg)
case WriteThenFatal:
exit.Exit()
case WriteThenGoexit:
runtime.Goexit()
}
}
// AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be
@ -252,11 +279,20 @@ func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry {
// Should sets this CheckedEntry's CheckWriteAction, which controls whether a
// Core will panic or fatal after writing this log entry. Like AddCore, it's
// safe to call on nil CheckedEntry references.
//
// Deprecated: Use [CheckedEntry.After] instead.
func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry {
return ce.After(ent, should)
}
// After sets this CheckedEntry's CheckWriteHook, which will be called after this
// log entry has been written. It's safe to call this on nil CheckedEntry
// references.
func (ce *CheckedEntry) After(ent Entry, hook CheckWriteHook) *CheckedEntry {
if ce == nil {
ce = getCheckedEntry()
ce.Entry = ent
}
ce.should = should
ce.after = hook
return ce
}

View File

@ -23,7 +23,8 @@ package zapcore
import (
"fmt"
"reflect"
"sync"
"go.uber.org/zap/internal/pool"
)
// Encodes the given error into fields of an object. A field with the given
@ -36,13 +37,13 @@ import (
// causer (from github.com/pkg/errors), a ${key}Causes field is added with an
// array of objects containing the errors this error was comprised of.
//
// {
// "error": err.Error(),
// "errorVerbose": fmt.Sprintf("%+v", err),
// "errorCauses": [
// ...
// ],
// }
// {
// "error": err.Error(),
// "errorVerbose": fmt.Sprintf("%+v", err),
// "errorCauses": [
// ...
// ],
// }
func encodeError(key string, err error, enc ObjectEncoder) (retErr error) {
// Try to capture panics (from nil references or otherwise) when calling
// the Error() method
@ -97,15 +98,18 @@ func (errs errArray) MarshalLogArray(arr ArrayEncoder) error {
}
el := newErrArrayElem(errs[i])
arr.AppendObject(el)
err := arr.AppendObject(el)
el.Free()
if err != nil {
return err
}
}
return nil
}
var _errArrayElemPool = sync.Pool{New: func() interface{} {
var _errArrayElemPool = pool.New(func() *errArrayElem {
return &errArrayElem{}
}}
})
// Encodes any error into a {"error": ...} re-using the same errors logic.
//
@ -113,7 +117,7 @@ var _errArrayElemPool = sync.Pool{New: func() interface{} {
type errArrayElem struct{ err error }
func newErrArrayElem(err error) *errArrayElem {
e := _errArrayElemPool.Get().(*errArrayElem)
e := _errArrayElemPool.Get()
e.err = err
return e
}

View File

@ -27,6 +27,11 @@ type hooked struct {
funcs []func(Entry) error
}
var (
_ Core = (*hooked)(nil)
_ leveledEnabler = (*hooked)(nil)
)
// RegisterHooks wraps a Core and runs a collection of user-defined callback
// hooks each time a message is logged. Execution of the callbacks is blocking.
//
@ -40,6 +45,10 @@ func RegisterHooks(core Core, hooks ...func(Entry) error) Core {
}
}
func (h *hooked) Level() Level {
return LevelOf(h.Core)
}
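
A sketch of the RegisterHooks pattern described above; the error counter is hypothetical.

package main

import (
	"sync/atomic"

	"go.uber.org/zap/zapcore"
)

// countErrors wraps core so every entry at ErrorLevel or above bumps a counter.
func countErrors(core zapcore.Core, errorCount *atomic.Int64) zapcore.Core {
	return zapcore.RegisterHooks(core, func(e zapcore.Entry) error {
		if e.Level >= zapcore.ErrorLevel {
			errorCount.Add(1)
		}
		return nil
	})
}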
func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
// Let the wrapped Core decide whether to log this message or not. This
// also gives the downstream a chance to register itself directly with the

View File

@ -27,6 +27,11 @@ type levelFilterCore struct {
level LevelEnabler
}
var (
_ Core = (*levelFilterCore)(nil)
_ leveledEnabler = (*levelFilterCore)(nil)
)
// NewIncreaseLevelCore creates a core that can be used to increase the level of
// an existing Core. It cannot be used to decrease the logging level, as it acts
// as a filter before calling the underlying core. If level decreases the log level,
@ -45,6 +50,10 @@ func (c *levelFilterCore) Enabled(lvl Level) bool {
return c.level.Enabled(lvl)
}
func (c *levelFilterCore) Level() Level {
return LevelOf(c.level)
}
func (c *levelFilterCore) With(fields []Field) Core {
return &levelFilterCore{c.core.With(fields), c.level}
}

View File

@ -23,24 +23,20 @@ package zapcore
import (
"encoding/base64"
"math"
"sync"
"time"
"unicode/utf8"
"go.uber.org/zap/buffer"
"go.uber.org/zap/internal/bufferpool"
"go.uber.org/zap/internal/pool"
)
// For JSON-escaping; see jsonEncoder.safeAddString below.
const _hex = "0123456789abcdef"
var _jsonPool = sync.Pool{New: func() interface{} {
var _jsonPool = pool.New(func() *jsonEncoder {
return &jsonEncoder{}
}}
func getJSONEncoder() *jsonEncoder {
return _jsonPool.Get().(*jsonEncoder)
}
})
func putJSONEncoder(enc *jsonEncoder) {
if enc.reflectBuf != nil {
@ -71,7 +67,9 @@ type jsonEncoder struct {
//
// Note that the encoder doesn't deduplicate keys, so it's possible to produce
// a message like
// {"foo":"bar","foo":"baz"}
//
// {"foo":"bar","foo":"baz"}
//
// This is permitted by the JSON specification, but not encouraged. Many
// libraries will ignore duplicate key-value pairs (typically keeping the last
// pair) when unmarshaling, but users should attempt to avoid adding duplicate
@ -352,7 +350,7 @@ func (enc *jsonEncoder) Clone() Encoder {
}
func (enc *jsonEncoder) clone() *jsonEncoder {
clone := getJSONEncoder()
clone := _jsonPool.Get()
clone.EncoderConfig = enc.EncoderConfig
clone.spaced = enc.spaced
clone.openNamespaces = enc.openNamespaces
@ -488,73 +486,98 @@ func (enc *jsonEncoder) appendFloat(val float64, bitSize int) {
// Unlike the standard library's encoder, it doesn't attempt to protect the
// user from browser vulnerabilities or JSONP-related problems.
func (enc *jsonEncoder) safeAddString(s string) {
for i := 0; i < len(s); {
if enc.tryAddRuneSelf(s[i]) {
i++
continue
}
r, size := utf8.DecodeRuneInString(s[i:])
if enc.tryAddRuneError(r, size) {
i++
continue
}
enc.buf.AppendString(s[i : i+size])
i += size
}
safeAppendStringLike(
(*buffer.Buffer).AppendString,
utf8.DecodeRuneInString,
enc.buf,
s,
)
}
// safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte.
func (enc *jsonEncoder) safeAddByteString(s []byte) {
safeAppendStringLike(
(*buffer.Buffer).AppendBytes,
utf8.DecodeRune,
enc.buf,
s,
)
}
// safeAppendStringLike is a generic implementation of safeAddString and safeAddByteString.
// It appends a string or byte slice to the buffer, escaping all special characters.
func safeAppendStringLike[S []byte | string](
// appendTo appends this string-like object to the buffer.
appendTo func(*buffer.Buffer, S),
// decodeRune decodes the next rune from the string-like object
// and returns its value and width in bytes.
decodeRune func(S) (rune, int),
buf *buffer.Buffer,
s S,
) {
// The encoding logic below works by skipping over characters
// that can be safely copied as-is,
// until a character is found that needs special handling.
// At that point, we copy everything we've seen so far,
// and then handle that special character.
//
// last is the index of the last byte that was copied to the buffer.
last := 0
for i := 0; i < len(s); {
if enc.tryAddRuneSelf(s[i]) {
i++
continue
}
r, size := utf8.DecodeRune(s[i:])
if enc.tryAddRuneError(r, size) {
i++
continue
}
enc.buf.Write(s[i : i+size])
i += size
}
}
if s[i] >= utf8.RuneSelf {
// Character >= RuneSelf may be part of a multi-byte rune.
// They need to be decoded before we can decide how to handle them.
r, size := decodeRune(s[i:])
if r != utf8.RuneError || size != 1 {
// No special handling required.
// Skip over this rune and continue.
i += size
continue
}
// tryAddRuneSelf appends b if it is valid UTF-8 character represented in a single byte.
func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool {
if b >= utf8.RuneSelf {
return false
}
if 0x20 <= b && b != '\\' && b != '"' {
enc.buf.AppendByte(b)
return true
}
switch b {
case '\\', '"':
enc.buf.AppendByte('\\')
enc.buf.AppendByte(b)
case '\n':
enc.buf.AppendByte('\\')
enc.buf.AppendByte('n')
case '\r':
enc.buf.AppendByte('\\')
enc.buf.AppendByte('r')
case '\t':
enc.buf.AppendByte('\\')
enc.buf.AppendByte('t')
default:
// Encode bytes < 0x20, except for the escape sequences above.
enc.buf.AppendString(`\u00`)
enc.buf.AppendByte(_hex[b>>4])
enc.buf.AppendByte(_hex[b&0xF])
}
return true
}
// Invalid UTF-8 sequence.
// Replace it with the Unicode replacement character.
appendTo(buf, s[last:i])
buf.AppendString(`\ufffd`)
func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool {
if r == utf8.RuneError && size == 1 {
enc.buf.AppendString(`\ufffd`)
return true
i++
last = i
} else {
// Character < RuneSelf is a single-byte UTF-8 rune.
if s[i] >= 0x20 && s[i] != '\\' && s[i] != '"' {
// No escaping necessary.
// Skip over this character and continue.
i++
continue
}
// This character needs to be escaped.
appendTo(buf, s[last:i])
switch s[i] {
case '\\', '"':
buf.AppendByte('\\')
buf.AppendByte(s[i])
case '\n':
buf.AppendByte('\\')
buf.AppendByte('n')
case '\r':
buf.AppendByte('\\')
buf.AppendByte('r')
case '\t':
buf.AppendByte('\\')
buf.AppendByte('t')
default:
// Encode bytes < 0x20, except for the escape sequences above.
buf.AppendString(`\u00`)
buf.AppendByte(_hex[s[i]>>4])
buf.AppendByte(_hex[s[i]&0xF])
}
i++
last = i
}
}
return false
// add remaining
appendTo(buf, s[last:])
}

54
vendor/go.uber.org/zap/zapcore/lazy_with.go generated vendored Normal file
View File

@ -0,0 +1,54 @@
// Copyright (c) 2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zapcore
import "sync"
type lazyWithCore struct {
Core
sync.Once
fields []Field
}
// NewLazyWith wraps a Core with a "lazy" Core that will only encode fields if
// the logger is written to (or is further chained in a non-lazy manner).
func NewLazyWith(core Core, fields []Field) Core {
return &lazyWithCore{
Core: core,
fields: fields,
}
}
func (d *lazyWithCore) initOnce() {
d.Once.Do(func() {
d.Core = d.Core.With(d.fields)
})
}
func (d *lazyWithCore) With(fields []Field) Core {
d.initOnce()
return d.Core.With(fields)
}
func (d *lazyWithCore) Check(e Entry, ce *CheckedEntry) *CheckedEntry {
d.initOnce()
return d.Core.Check(e, ce)
}

View File

@ -53,6 +53,11 @@ const (
_minLevel = DebugLevel
_maxLevel = FatalLevel
// InvalidLevel is an invalid value for Level.
//
// Core implementations may panic if they see messages of this level.
InvalidLevel = _maxLevel + 1
)
// ParseLevel parses a level based on the lower-case or all-caps ASCII
@ -67,6 +72,43 @@ func ParseLevel(text string) (Level, error) {
return level, err
}
type leveledEnabler interface {
LevelEnabler
Level() Level
}
// LevelOf reports the minimum enabled log level for the given LevelEnabler
// from Zap's supported log levels, or [InvalidLevel] if none of them are
// enabled.
//
// A LevelEnabler may implement a 'Level() Level' method to override the
// behavior of this function.
//
// func (c *core) Level() Level {
// return c.currentLevel
// }
//
// It is recommended that [Core] implementations that wrap other cores use
// LevelOf to retrieve the level of the wrapped core. For example,
//
// func (c *coreWrapper) Level() Level {
// return zapcore.LevelOf(c.wrappedCore)
// }
func LevelOf(enab LevelEnabler) Level {
if lvler, ok := enab.(leveledEnabler); ok {
return lvler.Level()
}
for lvl := _minLevel; lvl <= _maxLevel; lvl++ {
if enab.Enabled(lvl) {
return lvl
}
}
return InvalidLevel
}
// String returns a lower-case ASCII representation of the log level.
func (l Level) String() string {
switch l {

View File

@ -1,4 +1,4 @@
// Copyright (c) 2016 Uber Technologies, Inc.
// Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@ -21,9 +21,8 @@
package zapcore
import (
"sync/atomic"
"time"
"go.uber.org/atomic"
)
const (
@ -66,16 +65,16 @@ func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 {
tn := t.UnixNano()
resetAfter := c.resetAt.Load()
if resetAfter > tn {
return c.counter.Inc()
return c.counter.Add(1)
}
c.counter.Store(1)
newResetAfter := tn + tick.Nanoseconds()
if !c.resetAt.CAS(resetAfter, newResetAfter) {
if !c.resetAt.CompareAndSwap(resetAfter, newResetAfter) {
// We raced with another goroutine trying to reset, and it also reset
// the counter to 1, so we need to reincrement the counter.
return c.counter.Inc()
return c.counter.Add(1)
}
return 1
@ -113,12 +112,12 @@ func nopSamplingHook(Entry, SamplingDecision) {}
// This hook may be used to get visibility into the performance of the sampler.
// For example, use it to track metrics of dropped versus sampled logs.
//
// var dropped atomic.Int64
// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) {
// if dec&zapcore.LogDropped > 0 {
// dropped.Inc()
// }
// })
// var dropped atomic.Int64
// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) {
// if dec&zapcore.LogDropped > 0 {
// dropped.Inc()
// }
// })
func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption {
return optionFunc(func(s *sampler) {
s.hook = hook
@ -135,7 +134,7 @@ func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption {
//
// For example,
//
// core = NewSamplerWithOptions(core, time.Second, 10, 5)
// core = NewSamplerWithOptions(core, time.Second, 10, 5)
//
// This will log the first 10 log entries with the same level and message
// in a one second interval as-is. Following that, it will allow through
@ -175,6 +174,11 @@ type sampler struct {
hook func(Entry, SamplingDecision)
}
var (
_ Core = (*sampler)(nil)
_ leveledEnabler = (*sampler)(nil)
)
// NewSampler creates a Core that samples incoming entries, which
// caps the CPU and I/O load of logging while attempting to preserve a
// representative subset of your logs.
@ -192,6 +196,10 @@ func NewSampler(core Core, tick time.Duration, first, thereafter int) Core {
return NewSamplerWithOptions(core, tick, first, thereafter)
}
func (s *sampler) Level() Level {
return LevelOf(s.Core)
}
func (s *sampler) With(fields []Field) Core {
return &sampler{
Core: s.Core.With(fields),

View File

@ -1,4 +1,4 @@
// Copyright (c) 2016 Uber Technologies, Inc.
// Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@ -24,6 +24,11 @@ import "go.uber.org/multierr"
type multiCore []Core
var (
_ leveledEnabler = multiCore(nil)
_ Core = multiCore(nil)
)
// NewTee creates a Core that duplicates log entries into two or more
// underlying Cores.
//
@ -48,6 +53,16 @@ func (mc multiCore) With(fields []Field) Core {
return clone
}
func (mc multiCore) Level() Level {
minLvl := _maxLevel // mc is never empty
for i := range mc {
if lvl := LevelOf(mc[i]); lvl < minLvl {
minLvl = lvl
}
}
return minLvl
}
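
A sketch of NewTee together with the Level method above: entries are duplicated into each core, and Level reports the minimum across them. The destinations here are illustrative.

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
	stdout := zapcore.NewCore(enc, zapcore.Lock(os.Stdout), zap.InfoLevel)
	stderr := zapcore.NewCore(enc, zapcore.Lock(os.Stderr), zap.ErrorLevel)

	tee := zapcore.NewTee(stdout, stderr)
	logger := zap.New(tee)
	logger.Info("only on stdout")
	logger.Error("on both")

	_ = zapcore.LevelOf(tee) // InfoLevel: the minimum across the two cores
}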
func (mc multiCore) Enabled(lvl Level) bool {
for i := range mc {
if mc[i].Enabled(lvl) {