chore: upgrade dependencies
vendor/go.opentelemetry.io/otel/sdk/metric/export/aggregation/aggregation.go (new file, generated, vendored, +119 lines)
@@ -0,0 +1,119 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package aggregation // import "go.opentelemetry.io/otel/sdk/metric/export/aggregation"

import (
	"fmt"
	"time"

	"go.opentelemetry.io/otel/sdk/metric/number"
)

// These interfaces describe the various ways to access state from an
// Aggregation.

type (
	// Aggregation is an interface returned by the Aggregator
	// containing an interval of metric data.
	Aggregation interface {
		// Kind returns a short identifying string to identify
		// the Aggregator that was used to produce the
		// Aggregation (e.g., "Sum").
		Kind() Kind
	}

	// Sum returns an aggregated sum.
	Sum interface {
		Aggregation
		Sum() (number.Number, error)
	}

	// Count returns the number of values that were aggregated.
	Count interface {
		Aggregation
		Count() (uint64, error)
	}

	// LastValue returns the latest value that was aggregated.
	LastValue interface {
		Aggregation
		LastValue() (number.Number, time.Time, error)
	}

	// Buckets represents histogram buckets boundaries and counts.
	//
	// For a Histogram with N defined boundaries, e.g., [x, y, z],
	// there are N+1 counts: [-inf, x), [x, y), [y, z), [z, +inf).
	Buckets struct {
		// Boundaries are floating point numbers, even when
		// aggregating integers.
		Boundaries []float64

		// Counts holds the count in each bucket.
		Counts []uint64
	}

	// Histogram returns the count of events in pre-determined buckets.
	Histogram interface {
		Aggregation
		Count() (uint64, error)
		Sum() (number.Number, error)
		Histogram() (Buckets, error)
	}
)

type (
	// Kind is a short name for the Aggregator that produces an
	// Aggregation, used for descriptive purpose only. Kind is a
	// string to allow user-defined Aggregators.
	//
	// When deciding how to handle an Aggregation, Exporters are
	// encouraged to decide based on conversion to the above
	// interfaces based on strength, not on Kind value, when
	// deciding how to expose metric data. This enables
	// user-supplied Aggregators to replace builtin Aggregators.
	//
	// For example, test for a Histogram before testing for a
	// Sum, and so on.
	Kind string
)

// Kind description constants.
const (
	SumKind       Kind = "Sum"
	HistogramKind Kind = "Histogram"
	LastValueKind Kind = "Lastvalue"
)

// Sentinel errors for Aggregation interface.
var (
	ErrNegativeInput    = fmt.Errorf("negative value is out of range for this instrument")
	ErrNaNInput         = fmt.Errorf("NaN value is an invalid input")
	ErrInconsistentType = fmt.Errorf("inconsistent aggregator types")

	// ErrNoCumulativeToDelta is returned when requesting delta
	// export kind for a precomputed sum instrument.
	ErrNoCumulativeToDelta = fmt.Errorf("cumulative to delta not implemented")

	// ErrNoData is returned when (due to a race with collection)
	// the Aggregator is check-pointed before the first value is set.
	// The aggregator should simply be skipped in this case.
	ErrNoData = fmt.Errorf("no data collected by this aggregator")
)

// String returns the string value of Kind.
func (k Kind) String() string {
	return string(k)
}
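The Kind documentation above tells exporters to down-cast an Aggregation by interface strength (Histogram before Sum, and so on) rather than branching on the Kind string, so that user-supplied Aggregators keep working. A minimal sketch of that pattern; printAggregation is a hypothetical helper and is not part of the vendored package:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
)

// printAggregation checks the richer Histogram interface before Sum and
// falls back to LastValue, so an unfamiliar Kind value is still handled
// as long as the Aggregation implements one of the access interfaces.
func printAggregation(agg aggregation.Aggregation) {
	switch v := agg.(type) {
	case aggregation.Histogram:
		if buckets, err := v.Histogram(); err == nil {
			fmt.Printf("histogram with %d buckets\n", len(buckets.Counts))
		}
	case aggregation.Sum:
		if sum, err := v.Sum(); err == nil {
			fmt.Printf("sum (raw number): %v\n", sum)
		}
	case aggregation.LastValue:
		if val, ts, err := v.LastValue(); err == nil {
			fmt.Printf("last value %v at %v\n", val, ts)
		}
	default:
		fmt.Printf("unhandled aggregation kind %q\n", agg.Kind())
	}
}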
vendor/go.opentelemetry.io/otel/sdk/metric/export/aggregation/temporality.go (new file, generated, vendored, +117 lines)
@@ -0,0 +1,117 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:generate stringer -type=Temporality

package aggregation // import "go.opentelemetry.io/otel/sdk/metric/export/aggregation"

import (
	"go.opentelemetry.io/otel/sdk/metric/sdkapi"
)

// Temporality indicates the temporal aggregation exported by an exporter.
// These bits may be OR-d together when multiple exporters are in use.
type Temporality uint8

const (
	// CumulativeTemporality indicates that an Exporter expects a
	// Cumulative Aggregation.
	CumulativeTemporality Temporality = 1

	// DeltaTemporality indicates that an Exporter expects a
	// Delta Aggregation.
	DeltaTemporality Temporality = 2
)

// Includes returns whether t includes support for the other temporality.
func (t Temporality) Includes(other Temporality) bool {
	return t&other != 0
}

// MemoryRequired returns whether an exporter of this temporality requires
// memory to export correctly.
func (t Temporality) MemoryRequired(mkind sdkapi.InstrumentKind) bool {
	switch mkind {
	case sdkapi.HistogramInstrumentKind, sdkapi.GaugeObserverInstrumentKind,
		sdkapi.CounterInstrumentKind, sdkapi.UpDownCounterInstrumentKind:
		// Delta-oriented instruments:
		return t.Includes(CumulativeTemporality)

	case sdkapi.CounterObserverInstrumentKind, sdkapi.UpDownCounterObserverInstrumentKind:
		// Cumulative-oriented instruments:
		return t.Includes(DeltaTemporality)
	}
	// Something unexpected is happening--we could panic. This
	// will become an error when the exporter tries to access a
	// checkpoint, presumably, so let it be.
	return false
}

type (
	constantTemporalitySelector  Temporality
	statelessTemporalitySelector struct{}
)

var (
	_ TemporalitySelector = constantTemporalitySelector(0)
	_ TemporalitySelector = statelessTemporalitySelector{}
)

// ConstantTemporalitySelector returns a TemporalitySelector that returns
// a constant Temporality.
func ConstantTemporalitySelector(t Temporality) TemporalitySelector {
	return constantTemporalitySelector(t)
}

// CumulativeTemporalitySelector returns a TemporalitySelector that
// always returns CumulativeTemporality.
func CumulativeTemporalitySelector() TemporalitySelector {
	return ConstantTemporalitySelector(CumulativeTemporality)
}

// DeltaTemporalitySelector returns a TemporalitySelector that
// always returns DeltaTemporality.
func DeltaTemporalitySelector() TemporalitySelector {
	return ConstantTemporalitySelector(DeltaTemporality)
}

// StatelessTemporalitySelector returns a TemporalitySelector that
// always returns the Temporality that avoids long-term memory
// requirements.
func StatelessTemporalitySelector() TemporalitySelector {
	return statelessTemporalitySelector{}
}

// TemporalityFor implements TemporalitySelector.
func (c constantTemporalitySelector) TemporalityFor(_ *sdkapi.Descriptor, _ Kind) Temporality {
	return Temporality(c)
}

// TemporalityFor implements TemporalitySelector.
func (s statelessTemporalitySelector) TemporalityFor(desc *sdkapi.Descriptor, kind Kind) Temporality {
	if kind == SumKind && desc.InstrumentKind().PrecomputedSum() {
		return CumulativeTemporality
	}
	return DeltaTemporality
}

// TemporalitySelector is a sub-interface of Exporter used to indicate
// whether the Processor should compute Delta or Cumulative
// Aggregations.
type TemporalitySelector interface {
	// TemporalityFor should return the correct Temporality that
	// should be used when exporting data for the given metric
	// instrument and Aggregator kind.
	TemporalityFor(descriptor *sdkapi.Descriptor, aggregationKind Kind) Temporality
}
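Because Temporality is a bit set, a controller feeding several exporters can OR their requested temporalities together and test them with Includes. A small hypothetical usage sketch, assuming only the vendored package above:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
)

func main() {
	// A controller serving two exporters could OR their temporalities together.
	needed := aggregation.CumulativeTemporality | aggregation.DeltaTemporality

	fmt.Println(needed.Includes(aggregation.DeltaTemporality))                            // true
	fmt.Println(aggregation.CumulativeTemporality.Includes(aggregation.DeltaTemporality)) // false

	// The selector constructors wrap either a fixed choice or the stateless policy.
	var sel aggregation.TemporalitySelector = aggregation.StatelessTemporalitySelector()
	_ = sel // in real use, passed to a processor or exporter configuration
}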
vendor/go.opentelemetry.io/otel/sdk/metric/export/aggregation/temporality_string.go (new file, generated, vendored, +25 lines)
@@ -0,0 +1,25 @@
// Code generated by "stringer -type=Temporality"; DO NOT EDIT.

package aggregation

import "strconv"

func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[CumulativeTemporality-1]
	_ = x[DeltaTemporality-2]
}

const _Temporality_name = "CumulativeTemporalityDeltaTemporality"

var _Temporality_index = [...]uint8{0, 21, 37}

func (i Temporality) String() string {
	i -= 1
	if i >= Temporality(len(_Temporality_index)-1) {
		return "Temporality(" + strconv.FormatInt(int64(i+1), 10) + ")"
	}
	return _Temporality_name[_Temporality_index[i]:_Temporality_index[i+1]]
}
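The generated String method maps the 1-based constants into the packed name string and falls back to a numeric form for values outside the known range. A tiny illustrative snippet, assuming the vendored package above:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
)

func main() {
	fmt.Println(aggregation.CumulativeTemporality) // CumulativeTemporality
	fmt.Println(aggregation.DeltaTemporality)      // DeltaTemporality
	fmt.Println(aggregation.Temporality(7))        // Temporality(7)
}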
vendor/go.opentelemetry.io/otel/sdk/metric/export/metric.go (new file, generated, vendored, +280 lines)
@@ -0,0 +1,280 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package export // import "go.opentelemetry.io/otel/sdk/metric/export"

import (
	"context"
	"sync"
	"time"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/instrumentation"
	"go.opentelemetry.io/otel/sdk/metric/aggregator"
	"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
	"go.opentelemetry.io/otel/sdk/metric/sdkapi"
	"go.opentelemetry.io/otel/sdk/resource"
)

// Processor is responsible for deciding which kind of aggregation to
// use (via AggregatorSelector), gathering exported results from the
// SDK during collection, and deciding over which dimensions to group
// the exported data.
//
// The SDK supports binding only one of these interfaces, as it has
// the sole responsibility of determining which Aggregator to use for
// each record.
//
// The embedded AggregatorSelector interface is called (concurrently)
// in instrumentation context to select the appropriate Aggregator for
// an instrument.
//
// The `Process` method is called during collection in a
// single-threaded context from the SDK, after the aggregator is
// checkpointed, allowing the processor to build the set of metrics
// currently being exported.
type Processor interface {
	// AggregatorSelector is responsible for selecting the
	// concrete type of Aggregator used for a metric in the SDK.
	//
	// This may be a static decision based on fields of the
	// Descriptor, or it could use an external configuration
	// source to customize the treatment of each metric
	// instrument.
	//
	// The result from AggregatorSelector.AggregatorFor should be
	// the same type for a given Descriptor or else nil. The same
	// type should be returned for a given descriptor, because
	// Aggregators only know how to Merge with their own type. If
	// the result is nil, the metric instrument will be disabled.
	//
	// Note that the SDK only calls AggregatorFor when new records
	// require an Aggregator. This does not provide a way to
	// disable metrics with active records.
	AggregatorSelector

	// Process is called by the SDK once per internal record, passing the
	// export Accumulation (a Descriptor, the corresponding attributes, and
	// the checkpointed Aggregator). This call has no Context argument because
	// it is expected to perform only computation. An SDK is not expected to
	// call exporters from within Process; use a controller for that (see
	// ./controllers/{pull,push}).
	Process(accum Accumulation) error
}

// AggregatorSelector supports selecting the kind of Aggregator to
// use at runtime for a specific metric instrument.
type AggregatorSelector interface {
	// AggregatorFor allocates a variable number of aggregators of
	// a kind suitable for the requested export. This method
	// initializes a `...*Aggregator`, to support making a single
	// allocation.
	//
	// When the call returns without initializing the *Aggregator
	// to a non-nil value, the metric instrument is explicitly
	// disabled.
	//
	// This must return a consistent type to avoid confusion in
	// later stages of the metrics export process, i.e., when
	// Merging or Checkpointing aggregators for a specific
	// instrument.
	//
	// Note: This is context-free because the aggregator should
	// not relate to the incoming context. This call should not
	// block.
	AggregatorFor(descriptor *sdkapi.Descriptor, aggregator ...*aggregator.Aggregator)
}

// Checkpointer is the interface used by a Controller to coordinate
// the Processor with Accumulator(s) and Exporter(s). The
// StartCollection() and FinishCollection() methods start and finish a
// collection interval. Controllers call the Accumulator(s) during
// collection to process Accumulations.
type Checkpointer interface {
	// Processor processes metric data for export. The Process
	// method is bracketed by StartCollection and FinishCollection
	// calls. The embedded AggregatorSelector can be called at
	// any time.
	Processor

	// Reader returns the current data set. This may be
	// called before and after collection. The
	// implementation is required to return the same value
	// throughout its lifetime, since Reader exposes a
	// sync.Locker interface. The caller is responsible for
	// locking the Reader before initiating collection.
	Reader() Reader

	// StartCollection begins a collection interval.
	StartCollection()

	// FinishCollection ends a collection interval.
	FinishCollection() error
}

// CheckpointerFactory is an interface for producing configured
// Checkpointer instances.
type CheckpointerFactory interface {
	NewCheckpointer() Checkpointer
}

// Exporter handles presentation of the checkpoint of aggregate
// metrics. This is the final stage of a metrics export pipeline,
// where metric data are formatted for a specific system.
type Exporter interface {
	// Export is called immediately after completing a collection
	// pass in the SDK.
	//
	// The Context comes from the controller that initiated
	// collection.
	//
	// The InstrumentationLibraryReader interface refers to the
	// Processor that just completed collection.
	Export(ctx context.Context, resource *resource.Resource, reader InstrumentationLibraryReader) error

	// TemporalitySelector is an interface used by the Processor
	// in deciding whether to compute Delta or Cumulative
	// Aggregations when passing Records to this Exporter.
	aggregation.TemporalitySelector
}

// InstrumentationLibraryReader is an interface for exporters to iterate
// over one instrumentation library of metric data at a time.
type InstrumentationLibraryReader interface {
	// ForEach calls the passed function once per instrumentation library,
	// allowing the caller to emit metrics grouped by the library that
	// produced them.
	ForEach(readerFunc func(instrumentation.Library, Reader) error) error
}

// Reader allows a controller to access a complete checkpoint of
// aggregated metrics from the Processor for a single library of
// metric data. This is passed to the Exporter which may then use
// ForEach to iterate over the collection of aggregated metrics.
type Reader interface {
	// ForEach iterates over aggregated checkpoints for all
	// metrics that were updated during the last collection
	// period. Each aggregated checkpoint returned by the
	// function parameter may return an error.
	//
	// The TemporalitySelector argument is used to determine
	// whether the Record is computed using Delta or Cumulative
	// aggregation.
	//
	// ForEach tolerates ErrNoData silently, as this is
	// expected from the Meter implementation. Any other kind
	// of error will immediately halt ForEach and return
	// the error to the caller.
	ForEach(tempSelector aggregation.TemporalitySelector, recordFunc func(Record) error) error

	// Locker supports locking the checkpoint set. Collection
	// into the checkpoint set cannot take place (in case of a
	// stateful processor) while it is locked.
	//
	// The Processor attached to the Accumulator MUST be called
	// with the lock held.
	sync.Locker

	// RLock acquires a read lock corresponding to this Locker.
	RLock()
	// RUnlock releases a read lock corresponding to this Locker.
	RUnlock()
}

// Metadata contains the common elements for exported metric data that
// are shared by the Accumulator->Processor and Processor->Exporter
// steps.
type Metadata struct {
	descriptor *sdkapi.Descriptor
	attrs      *attribute.Set
}

// Accumulation contains the exported data for a single metric instrument
// and attribute set, as prepared by an Accumulator for the Processor.
type Accumulation struct {
	Metadata
	aggregator aggregator.Aggregator
}

// Record contains the exported data for a single metric instrument
// and attribute set, as prepared by the Processor for the Exporter.
// This includes the effective start and end time for the aggregation.
type Record struct {
	Metadata
	aggregation aggregation.Aggregation
	start       time.Time
	end         time.Time
}

// Descriptor describes the metric instrument being exported.
func (m Metadata) Descriptor() *sdkapi.Descriptor {
	return m.descriptor
}

// Attributes returns the attribute set associated with the instrument and the
// aggregated data.
func (m Metadata) Attributes() *attribute.Set {
	return m.attrs
}

// NewAccumulation allows Accumulator implementations to construct new
// Accumulations to send to Processors. The Descriptor, attributes, and
// Aggregator represent aggregate metric events received over a single
// collection period.
func NewAccumulation(descriptor *sdkapi.Descriptor, attrs *attribute.Set, aggregator aggregator.Aggregator) Accumulation {
	return Accumulation{
		Metadata: Metadata{
			descriptor: descriptor,
			attrs:      attrs,
		},
		aggregator: aggregator,
	}
}

// Aggregator returns the checkpointed aggregator. It is safe to
// access the checkpointed state without locking.
func (r Accumulation) Aggregator() aggregator.Aggregator {
	return r.aggregator
}

// NewRecord allows Processor implementations to construct export records.
// The Descriptor, attributes, and Aggregator represent aggregate metric
// events received over a single collection period.
func NewRecord(descriptor *sdkapi.Descriptor, attrs *attribute.Set, aggregation aggregation.Aggregation, start, end time.Time) Record {
	return Record{
		Metadata: Metadata{
			descriptor: descriptor,
			attrs:      attrs,
		},
		aggregation: aggregation,
		start:       start,
		end:         end,
	}
}

// Aggregation returns the aggregation, an interface to the record and
// its aggregator, dependent on the kind of both the input and exporter.
func (r Record) Aggregation() aggregation.Aggregation {
	return r.aggregation
}

// StartTime is the start time of the interval covered by this aggregation.
func (r Record) StartTime() time.Time {
	return r.start
}

// EndTime is the end time of the interval covered by this aggregation.
func (r Record) EndTime() time.Time {
	return r.end
}
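Taken together, these interfaces describe the shape of a custom exporter: embed a TemporalitySelector, then walk libraries and records inside Export. Below is a minimal hypothetical stdout exporter sketch; logExporter and newLogExporter are illustrative names and are not part of this commit:

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/sdk/instrumentation"
	"go.opentelemetry.io/otel/sdk/metric/export"
	"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
	"go.opentelemetry.io/otel/sdk/resource"
)

// logExporter is an illustrative Exporter that prints each Record.
type logExporter struct {
	// Embedding a TemporalitySelector satisfies the temporality half of the
	// Exporter interface; here we always request cumulative data.
	aggregation.TemporalitySelector
}

func newLogExporter() export.Exporter {
	return &logExporter{
		TemporalitySelector: aggregation.CumulativeTemporalitySelector(),
	}
}

// Export walks one instrumentation library at a time, then each Record
// within it, as described by InstrumentationLibraryReader and Reader.
func (e *logExporter) Export(_ context.Context, res *resource.Resource, ilr export.InstrumentationLibraryReader) error {
	return ilr.ForEach(func(lib instrumentation.Library, reader export.Reader) error {
		// The exporter itself is the TemporalitySelector passed to ForEach.
		return reader.ForEach(e, func(rec export.Record) error {
			fmt.Printf("%s %s/%s %s [%v, %v]\n",
				res, lib.Name, rec.Descriptor().Name(),
				rec.Aggregation().Kind(),
				rec.StartTime(), rec.EndTime())
			return nil
		})
	})
}

In real use a controller would typically hold the Reader lock during collection and call Export once per collection interval.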