chore: upgrade dependencies

2022-06-08 00:07:52 +02:00
parent e22bdf96d9
commit 1e6966495c
185 changed files with 5385 additions and 4081 deletions

View File

@@ -15,19 +15,81 @@
package aggregator // import "go.opentelemetry.io/otel/sdk/metric/aggregator"
import (
"context"
"fmt"
"math"
"go.opentelemetry.io/otel/metric/number"
"go.opentelemetry.io/otel/metric/sdkapi"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
"go.opentelemetry.io/otel/sdk/metric/number"
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
)
// Aggregator implements a specific aggregation behavior, e.g., a
// behavior to track a sequence of updates to an instrument. Counter
// instruments commonly use a simple Sum aggregator, but for the
// distribution instruments (Histogram, GaugeObserver) there are a
// number of possible aggregators with different cost and accuracy
// tradeoffs.
//
// Note that any Aggregator may be attached to any instrument--this is
// the result of the OpenTelemetry API/SDK separation. It is possible
// to attach a Sum aggregator to a Histogram instrument.
type Aggregator interface {
// Aggregation returns an Aggregation interface to access the
// current state of this Aggregator. The caller is
// responsible for synchronization and must not call any of the
// other methods in this interface concurrently while using
// the Aggregation.
Aggregation() aggregation.Aggregation
// Update receives a new measured value and incorporates it
// into the aggregation. Update() calls may be called
// concurrently.
//
// Descriptor.NumberKind() should be consulted to determine
// whether the provided number is an int64 or float64.
//
// The Context argument comes from user-level code and could be
// inspected for a `correlation.Map` or `trace.SpanContext`.
Update(ctx context.Context, number number.Number, descriptor *sdkapi.Descriptor) error
// SynchronizedMove is called during collection to finish one
// period of aggregation by atomically saving the
// currently-updating state into the argument Aggregator AND
// resetting the current value to the zero state.
//
// SynchronizedMove() is called concurrently with Update(). These
// two methods must be synchronized with respect to each
// other, for correctness.
//
// After saving a synchronized copy, the Aggregator can be converted
// into one or more of the interfaces in the `aggregation` sub-package,
// according to the kind of Aggregator that was selected.
//
// This method will return an InconsistentAggregatorError if
// this Aggregator cannot be copied into the destination due
// to an incompatible type.
//
// This call has no Context argument because it is expected to
// perform only computation.
//
// When called with a nil `destination`, this Aggregator is reset
// and the current value is discarded.
SynchronizedMove(destination Aggregator, descriptor *sdkapi.Descriptor) error
// Merge combines the checkpointed state from the argument
// Aggregator into this Aggregator. Merge is not synchronized
// with respect to Update or SynchronizedMove.
//
// The owner of an Aggregator being merged is responsible for
// synchronization of both Aggregator states.
Merge(aggregator Aggregator, descriptor *sdkapi.Descriptor) error
}
// NewInconsistentAggregatorError formats an error describing an attempt to
// Checkpoint or Merge different-type aggregators. The result can be unwrapped as
// an ErrInconsistentType.
func NewInconsistentAggregatorError(a1, a2 export.Aggregator) error {
func NewInconsistentAggregatorError(a1, a2 Aggregator) error {
return fmt.Errorf("%w: %T and %T", aggregation.ErrInconsistentType, a1, a2)
}
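
The SynchronizedMove/Merge contract described above is easiest to see in a small collection-cycle sketch. The helper below is illustrative only (checkpointAndMerge is not part of this package); it assumes three aggregators of the same concrete type and a matching descriptor, using only the interface methods defined here.

func checkpointAndMerge(active, checkpoint, cumulative Aggregator, desc *sdkapi.Descriptor) error {
	// Atomically capture the in-flight state into checkpoint and reset
	// active, synchronizing with concurrent Update() calls.
	if err := active.SynchronizedMove(checkpoint, desc); err != nil {
		return err
	}
	// Fold the checkpointed delta into a long-lived cumulative aggregator.
	// Merge is not synchronized with Update or SynchronizedMove, so the
	// caller owns both aggregators at this point.
	return cumulative.Merge(checkpoint, desc)
}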

View File

@@ -19,11 +19,10 @@ import (
"sort"
"sync"
"go.opentelemetry.io/otel/metric/number"
"go.opentelemetry.io/otel/metric/sdkapi"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/aggregator"
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
"go.opentelemetry.io/otel/sdk/metric/number"
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
)
// Note: This code uses a Mutex to govern access to the exclusive
@@ -89,7 +88,7 @@ var defaultFloat64ExplicitBoundaries = []float64{.005, .01, .025, .05, .1, .25,
const defaultInt64ExplicitBoundaryMultiplier = 1e6
// defaultInt64ExplicitBoundaries applies a multiplier to the default
// float64 boundaries: [ 5K, 10K, 25K, ..., 2.5M, 5M, 10M ]
// float64 boundaries: [ 5K, 10K, 25K, ..., 2.5M, 5M, 10M ].
var defaultInt64ExplicitBoundaries = func(bounds []float64) (asint []float64) {
for _, f := range bounds {
asint = append(asint, defaultInt64ExplicitBoundaryMultiplier*f)
@@ -97,7 +96,7 @@ var defaultInt64ExplicitBoundaries = func(bounds []float64) (asint []float64) {
return
}(defaultFloat64ExplicitBoundaries)
var _ export.Aggregator = &Aggregator{}
var _ aggregator.Aggregator = &Aggregator{}
var _ aggregation.Sum = &Aggregator{}
var _ aggregation.Count = &Aggregator{}
var _ aggregation.Histogram = &Aggregator{}
@@ -174,7 +173,7 @@ func (c *Aggregator) Histogram() (aggregation.Buckets, error) {
// the empty set. Since no locks are taken, there is a chance that
// the independent Sum, Count and Bucket Count are not consistent with each
// other.
func (c *Aggregator) SynchronizedMove(oa export.Aggregator, desc *sdkapi.Descriptor) error {
func (c *Aggregator) SynchronizedMove(oa aggregator.Aggregator, desc *sdkapi.Descriptor) error {
o, _ := oa.(*Aggregator)
if oa != nil && o == nil {
@@ -254,7 +253,7 @@ func (c *Aggregator) Update(_ context.Context, number number.Number, desc *sdkap
}
// Merge combines two histograms that have the same buckets into a single one.
func (c *Aggregator) Merge(oa export.Aggregator, desc *sdkapi.Descriptor) error {
func (c *Aggregator) Merge(oa aggregator.Aggregator, desc *sdkapi.Descriptor) error {
o, _ := oa.(*Aggregator)
if o == nil {
return aggregator.NewInconsistentAggregatorError(c, oa)

View File

@@ -20,11 +20,10 @@ import (
"time"
"unsafe"
"go.opentelemetry.io/otel/metric/number"
"go.opentelemetry.io/otel/metric/sdkapi"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/aggregator"
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
"go.opentelemetry.io/otel/sdk/metric/number"
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
)
type (
@@ -43,15 +42,14 @@ type (
// value needs to be aligned for 64-bit atomic operations.
value number.Number
// timestamp indicates when this record was submitted.
// this can be used to pick a winner when multiple
// records contain lastValue data for the same labels due
// to races.
// timestamp indicates when this record was submitted. This can be
// used to pick a winner when multiple records contain lastValue data
// for the same attributes due to races.
timestamp time.Time
}
)
var _ export.Aggregator = &Aggregator{}
var _ aggregator.Aggregator = &Aggregator{}
var _ aggregation.LastValue = &Aggregator{}
// An unset lastValue has zero timestamp and zero value.
@@ -92,7 +90,7 @@ func (g *Aggregator) LastValue() (number.Number, time.Time, error) {
}
// SynchronizedMove atomically saves the current value.
func (g *Aggregator) SynchronizedMove(oa export.Aggregator, _ *sdkapi.Descriptor) error {
func (g *Aggregator) SynchronizedMove(oa aggregator.Aggregator, _ *sdkapi.Descriptor) error {
if oa == nil {
atomic.StorePointer(&g.value, unsafe.Pointer(unsetLastValue))
return nil
@@ -117,7 +115,7 @@ func (g *Aggregator) Update(_ context.Context, number number.Number, desc *sdkap
// Merge combines state from two aggregators. The most-recently set
// value is chosen.
func (g *Aggregator) Merge(oa export.Aggregator, desc *sdkapi.Descriptor) error {
func (g *Aggregator) Merge(oa aggregator.Aggregator, desc *sdkapi.Descriptor) error {
o, _ := oa.(*Aggregator)
if o == nil {
return aggregator.NewInconsistentAggregatorError(g, oa)

View File

@@ -17,11 +17,10 @@ package sum // import "go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
import (
"context"
"go.opentelemetry.io/otel/metric/number"
"go.opentelemetry.io/otel/metric/sdkapi"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/aggregator"
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
"go.opentelemetry.io/otel/sdk/metric/number"
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
)
// Aggregator aggregates counter events.
@@ -31,7 +30,7 @@ type Aggregator struct {
value number.Number
}
var _ export.Aggregator = &Aggregator{}
var _ aggregator.Aggregator = &Aggregator{}
var _ aggregation.Sum = &Aggregator{}
// New returns a new counter aggregator implemented by atomic
@@ -59,7 +58,7 @@ func (c *Aggregator) Sum() (number.Number, error) {
// SynchronizedMove atomically saves the current value into oa and resets the
// current sum to zero.
func (c *Aggregator) SynchronizedMove(oa export.Aggregator, _ *sdkapi.Descriptor) error {
func (c *Aggregator) SynchronizedMove(oa aggregator.Aggregator, _ *sdkapi.Descriptor) error {
if oa == nil {
c.value.SetRawAtomic(0)
return nil
@@ -79,7 +78,7 @@ func (c *Aggregator) Update(_ context.Context, num number.Number, desc *sdkapi.D
}
// Merge combines two counters by adding their sums.
func (c *Aggregator) Merge(oa export.Aggregator, desc *sdkapi.Descriptor) error {
func (c *Aggregator) Merge(oa aggregator.Aggregator, desc *sdkapi.Descriptor) error {
o, _ := oa.(*Aggregator)
if o == nil {
return aggregator.NewInconsistentAggregatorError(c, oa)

View File

@@ -16,6 +16,7 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric"
import "unsafe"
// Deprecated: will be removed soon.
func AtomicFieldOffsets() map[string]uintptr {
return map[string]uintptr{
"record.refMapped.value": unsafe.Offsetof(record{}.refMapped.value),

View File

@@ -18,7 +18,7 @@ import (
"time"
"go.opentelemetry.io/otel"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/metric/export"
"go.opentelemetry.io/otel/sdk/resource"
)
@@ -62,7 +62,7 @@ type config struct {
// Option is the interface that applies the value to a configuration option.
type Option interface {
// apply sets the Option value of a Config.
apply(*config)
apply(config) config
}
// WithResource sets the Resource configuration option of a Config by merging it
@@ -73,12 +73,13 @@ func WithResource(r *resource.Resource) Option {
type resourceOption struct{ *resource.Resource }
func (o resourceOption) apply(cfg *config) {
func (o resourceOption) apply(cfg config) config {
res, err := resource.Merge(cfg.Resource, o.Resource)
if err != nil {
otel.Handle(err)
}
cfg.Resource = res
return cfg
}
// WithCollectPeriod sets the CollectPeriod configuration option of a Config.
@@ -88,8 +89,9 @@ func WithCollectPeriod(period time.Duration) Option {
type collectPeriodOption time.Duration
func (o collectPeriodOption) apply(cfg *config) {
func (o collectPeriodOption) apply(cfg config) config {
cfg.CollectPeriod = time.Duration(o)
return cfg
}
// WithCollectTimeout sets the CollectTimeout configuration option of a Config.
@@ -99,8 +101,9 @@ func WithCollectTimeout(timeout time.Duration) Option {
type collectTimeoutOption time.Duration
func (o collectTimeoutOption) apply(cfg *config) {
func (o collectTimeoutOption) apply(cfg config) config {
cfg.CollectTimeout = time.Duration(o)
return cfg
}
// WithExporter sets the exporter configuration option of a Config.
@@ -110,8 +113,9 @@ func WithExporter(exporter export.Exporter) Option {
type exporterOption struct{ exporter export.Exporter }
func (o exporterOption) apply(cfg *config) {
func (o exporterOption) apply(cfg config) config {
cfg.Exporter = o.exporter
return cfg
}
// WithPushTimeout sets the PushTimeout configuration option of a Config.
@@ -121,6 +125,7 @@ func WithPushTimeout(timeout time.Duration) Option {
type pushTimeoutOption time.Duration
func (o pushTimeoutOption) apply(cfg *config) {
func (o pushTimeoutOption) apply(cfg config) config {
cfg.PushTimeout = time.Duration(o)
return cfg
}
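
For reference, a self-contained sketch of the value-based option pattern this file now uses (all names below are illustrative, not part of the SDK): each option returns a modified copy of the config rather than mutating it through a pointer.

import "time"

type settings struct{ collectPeriod time.Duration }

// settingsOption mirrors the Option interface above: apply takes a value and
// returns the updated value.
type settingsOption interface{ apply(settings) settings }

type withPeriod time.Duration

func (o withPeriod) apply(s settings) settings {
	s.collectPeriod = time.Duration(o)
	return s
}

func newSettings(opts ...settingsOption) settings {
	s := settings{collectPeriod: 10 * time.Second} // defaults
	for _, opt := range opts {
		s = opt.apply(s) // reassign: options no longer mutate in place
	}
	return s
}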

View File

@@ -21,12 +21,13 @@ import (
"time"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/internal/metric/registry"
"go.opentelemetry.io/otel/metric"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/instrumentation"
sdk "go.opentelemetry.io/otel/sdk/metric"
controllerTime "go.opentelemetry.io/otel/sdk/metric/controller/time"
"go.opentelemetry.io/otel/sdk/metric/export"
"go.opentelemetry.io/otel/sdk/metric/registry"
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
"go.opentelemetry.io/otel/sdk/resource"
)
@@ -99,7 +100,7 @@ func (c *Controller) Meter(instrumentationName string, opts ...metric.MeterOptio
library: library,
}))
}
return metric.WrapMeterImpl(m.(*registry.UniqueInstrumentMeterImpl))
return sdkapi.WrapMeterImpl(m.(*registry.UniqueInstrumentMeterImpl))
}
type accumulatorCheckpointer struct {
@@ -108,17 +109,19 @@ type accumulatorCheckpointer struct {
library instrumentation.Library
}
var _ sdkapi.MeterImpl = &accumulatorCheckpointer{}
// New constructs a Controller using the provided checkpointer factory
// and options (including optional exporter) to configure a metric
// export pipeline.
func New(checkpointerFactory export.CheckpointerFactory, opts ...Option) *Controller {
c := &config{
c := config{
CollectPeriod: DefaultPeriod,
CollectTimeout: DefaultPeriod,
PushTimeout: DefaultPeriod,
}
for _, opt := range opts {
opt.apply(c)
c = opt.apply(c)
}
if c.Resource == nil {
c.Resource = resource.Default()

View File

@@ -39,15 +39,15 @@ instrument callbacks.
Internal Structure
Each observer also has its own kind of record stored in the SDK. This
record contains a set of recorders for every specific label set used in the
callback.
record contains a set of recorders for every specific attribute set used in
the callback.
A sync.Map maintains the mapping of current instruments and label sets to
internal records. To find a record, the SDK consults the Map to
locate an existing record, otherwise it constructs a new record. The SDK
maintains a count of the number of references to each record, ensuring
that records are not reclaimed from the Map while they are still active
from the user's perspective.
A sync.Map maintains the mapping of current instruments and attribute sets to
internal records. To find a record, the SDK consults the Map to locate an
existing record, otherwise it constructs a new record. The SDK maintains a
count of the number of references to each record, ensuring that records are
not reclaimed from the Map while they are still active from the user's
perspective.
Metric collection is performed via a single-threaded call to Collect that
sweeps through all records in the SDK, checkpointing their state. When a
@@ -65,7 +65,7 @@ Export Pipeline
While the SDK serves to maintain a current set of records and
coordinate collection, the behavior of a metrics export pipeline is
configured through the export types in
go.opentelemetry.io/otel/sdk/export/metric. It is important to keep
go.opentelemetry.io/otel/sdk/metric/export. It is important to keep
in mind the context these interfaces are called from. There are two
contexts: instrumentation context, where a user-level goroutine
enters the SDK resulting in a new record, and collection context,
@@ -106,11 +106,6 @@ Processor implementations are provided, the "defaultkeys" Processor groups
aggregate metrics by their recommended Descriptor.Keys(), the
"simple" Processor aggregates metrics at full dimensionality.
LabelEncoder is an optional optimization that allows an exporter to
provide the serialization logic for labels. This allows avoiding
duplicate serialization of labels, once as a unique key in the SDK (or
Processor) and once in the exporter.
Reader is an interface between the Processor and the Exporter.
After completing a collection pass, the Processor.Reader() method
returns a Reader, which the Exporter uses to iterate over all
@@ -118,10 +113,7 @@ the updated metrics.
Record is a struct containing the state of an individual exported
metric. This is the result of one collection interface for one
instrument and one label set.
Labels is a struct containing an ordered set of labels, the
corresponding unique encoding, and the encoder that produced it.
instrument and one attribute set.
Exporter is the final stage of an export pipeline. It is called with
a Reader capable of enumerating all the updated metrics.

View File

@@ -0,0 +1,119 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package aggregation // import "go.opentelemetry.io/otel/sdk/metric/export/aggregation"
import (
"fmt"
"time"
"go.opentelemetry.io/otel/sdk/metric/number"
)
// These interfaces describe the various ways to access state from an
// Aggregation.
type (
// Aggregation is an interface returned by the Aggregator
// containing an interval of metric data.
Aggregation interface {
// Kind returns a short identifying string to identify
// the Aggregator that was used to produce the
// Aggregation (e.g., "Sum").
Kind() Kind
}
// Sum returns an aggregated sum.
Sum interface {
Aggregation
Sum() (number.Number, error)
}
// Count returns the number of values that were aggregated.
Count interface {
Aggregation
Count() (uint64, error)
}
// LastValue returns the latest value that was aggregated.
LastValue interface {
Aggregation
LastValue() (number.Number, time.Time, error)
}
// Buckets represents histogram buckets boundaries and counts.
//
// For a Histogram with N defined boundaries, e.g., [x, y, z], there
// are N+1 counts: [-inf, x), [x, y), [y, z), [z, +inf).
Buckets struct {
// Boundaries are floating point numbers, even when
// aggregating integers.
Boundaries []float64
// Counts holds the count in each bucket.
Counts []uint64
}
// Histogram returns the count of events in pre-determined buckets.
Histogram interface {
Aggregation
Count() (uint64, error)
Sum() (number.Number, error)
Histogram() (Buckets, error)
}
)
type (
// Kind is a short name for the Aggregator that produces an
// Aggregation, used for descriptive purposes only. Kind is a
// string to allow user-defined Aggregators.
//
// When deciding how to expose metric data, Exporters are
// encouraged to decide based on conversion to the above
// interfaces, in order of strength, rather than on the Kind
// value. This enables
// user-supplied Aggregators to replace builtin Aggregators.
//
// For example, test for a Histogram before testing for a
// Sum, and so on.
Kind string
)
// Kind description constants.
const (
SumKind Kind = "Sum"
HistogramKind Kind = "Histogram"
LastValueKind Kind = "Lastvalue"
)
// Sentinel errors for Aggregation interface.
var (
ErrNegativeInput = fmt.Errorf("negative value is out of range for this instrument")
ErrNaNInput = fmt.Errorf("NaN value is an invalid input")
ErrInconsistentType = fmt.Errorf("inconsistent aggregator types")
// ErrNoCumulativeToDelta is returned when requesting delta
// export kind for a precomputed sum instrument.
ErrNoCumulativeToDelta = fmt.Errorf("cumulative to delta not implemented")
// ErrNoData is returned when (due to a race with collection)
// the Aggregator is check-pointed before the first value is set.
// The aggregator should simply be skipped in this case.
ErrNoData = fmt.Errorf("no data collected by this aggregator")
)
// String returns the string value of Kind.
func (k Kind) String() string {
return string(k)
}
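
As the Kind comment suggests, exporters should inspect an Aggregation by converting to these interfaces in order of strength rather than switching on Kind. A minimal sketch (describe is a hypothetical helper, not part of this package):

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
	"go.opentelemetry.io/otel/sdk/metric/number"
)

func describe(agg aggregation.Aggregation, nkind number.Kind) string {
	// Strongest interface first, so a user-supplied Histogram aggregator is
	// not mistaken for a plain Sum.
	switch a := agg.(type) {
	case aggregation.Histogram:
		if b, err := a.Histogram(); err == nil {
			return fmt.Sprintf("histogram with %d buckets", len(b.Counts))
		}
	case aggregation.LastValue:
		if v, _, err := a.LastValue(); err == nil {
			return "last value " + v.Emit(nkind)
		}
	case aggregation.Sum:
		if s, err := a.Sum(); err == nil {
			return "sum " + s.Emit(nkind)
		}
	}
	return "unrecognized aggregation " + string(agg.Kind())
}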

View File

@@ -0,0 +1,117 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:generate stringer -type=Temporality
package aggregation // import "go.opentelemetry.io/otel/sdk/metric/export/aggregation"
import (
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
)
// Temporality indicates the temporal aggregation exported by an exporter.
// These bits may be OR-d together when multiple exporters are in use.
type Temporality uint8
const (
// CumulativeTemporality indicates that an Exporter expects a
// Cumulative Aggregation.
CumulativeTemporality Temporality = 1
// DeltaTemporality indicates that an Exporter expects a
// Delta Aggregation.
DeltaTemporality Temporality = 2
)
// Includes reports whether t includes support for the other temporality.
func (t Temporality) Includes(other Temporality) bool {
return t&other != 0
}
// MemoryRequired returns whether an exporter of this temporality requires
// memory to export correctly.
func (t Temporality) MemoryRequired(mkind sdkapi.InstrumentKind) bool {
switch mkind {
case sdkapi.HistogramInstrumentKind, sdkapi.GaugeObserverInstrumentKind,
sdkapi.CounterInstrumentKind, sdkapi.UpDownCounterInstrumentKind:
// Delta-oriented instruments:
return t.Includes(CumulativeTemporality)
case sdkapi.CounterObserverInstrumentKind, sdkapi.UpDownCounterObserverInstrumentKind:
// Cumulative-oriented instruments:
return t.Includes(DeltaTemporality)
}
// Something unexpected is happening--we could panic. This
// will become an error when the exporter tries to access a
// checkpoint, presumably, so let it be.
return false
}
type (
constantTemporalitySelector Temporality
statelessTemporalitySelector struct{}
)
var (
_ TemporalitySelector = constantTemporalitySelector(0)
_ TemporalitySelector = statelessTemporalitySelector{}
)
// ConstantTemporalitySelector returns a TemporalitySelector that returns
// a constant Temporality.
func ConstantTemporalitySelector(t Temporality) TemporalitySelector {
return constantTemporalitySelector(t)
}
// CumulativeTemporalitySelector returns a TemporalitySelector that
// always returns CumulativeTemporality.
func CumulativeTemporalitySelector() TemporalitySelector {
return ConstantTemporalitySelector(CumulativeTemporality)
}
// DeltaTemporalitySelector returns a TemporalitySelector that
// always returns DeltaTemporality.
func DeltaTemporalitySelector() TemporalitySelector {
return ConstantTemporalitySelector(DeltaTemporality)
}
// StatelessTemporalitySelector returns a TemporalitySelector that
// always returns the Temporality that avoids long-term memory
// requirements.
func StatelessTemporalitySelector() TemporalitySelector {
return statelessTemporalitySelector{}
}
// TemporalityFor implements TemporalitySelector.
func (c constantTemporalitySelector) TemporalityFor(_ *sdkapi.Descriptor, _ Kind) Temporality {
return Temporality(c)
}
// TemporalityFor implements TemporalitySelector.
func (s statelessTemporalitySelector) TemporalityFor(desc *sdkapi.Descriptor, kind Kind) Temporality {
if kind == SumKind && desc.InstrumentKind().PrecomputedSum() {
return CumulativeTemporality
}
return DeltaTemporality
}
// TemporalitySelector is a sub-interface of Exporter used to indicate
// whether the Processor should compute Delta or Cumulative
// Aggregations.
type TemporalitySelector interface {
// TemporalityFor should return the correct Temporality that
// should be used when exporting data for the given metric
// instrument and Aggregator kind.
TemporalityFor(descriptor *sdkapi.Descriptor, aggregationKind Kind) Temporality
}
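
A short illustrative snippet of the Temporality bit set and the memory rule above (values chosen only to show the behavior):

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
	"go.opentelemetry.io/otel/sdk/metric/sdkapi"
)

func temporalityDemo() {
	// Multiple exporters' requirements can be OR-d into one bit set.
	both := aggregation.CumulativeTemporality | aggregation.DeltaTemporality
	fmt.Println(both.Includes(aggregation.DeltaTemporality)) // true

	// Exporting deltas for a cumulative-oriented instrument (an observer's
	// precomputed sum) forces the processor to keep state between rounds.
	fmt.Println(aggregation.DeltaTemporality.MemoryRequired(
		sdkapi.CounterObserverInstrumentKind)) // true
}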

View File

@@ -0,0 +1,25 @@
// Code generated by "stringer -type=Temporality"; DO NOT EDIT.
package aggregation
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[CumulativeTemporality-1]
_ = x[DeltaTemporality-2]
}
const _Temporality_name = "CumulativeTemporalityDeltaTemporality"
var _Temporality_index = [...]uint8{0, 21, 37}
func (i Temporality) String() string {
i -= 1
if i >= Temporality(len(_Temporality_index)-1) {
return "Temporality(" + strconv.FormatInt(int64(i+1), 10) + ")"
}
return _Temporality_name[_Temporality_index[i]:_Temporality_index[i+1]]
}

View File

@@ -0,0 +1,280 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package export // import "go.opentelemetry.io/otel/sdk/metric/export"
import (
"context"
"sync"
"time"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/metric/aggregator"
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
"go.opentelemetry.io/otel/sdk/resource"
)
// Processor is responsible for deciding which kind of aggregation to
// use (via AggregatorSelector), gathering exported results from the
// SDK during collection, and deciding over which dimensions to group
// the exported data.
//
// The SDK supports binding only one of these interfaces, as it has
// the sole responsibility of determining which Aggregator to use for
// each record.
//
// The embedded AggregatorSelector interface is called (concurrently)
// in instrumentation context to select the appropriate Aggregator for
// an instrument.
//
// The `Process` method is called during collection in a
// single-threaded context from the SDK, after the aggregator is
// checkpointed, allowing the processor to build the set of metrics
// currently being exported.
type Processor interface {
// AggregatorSelector is responsible for selecting the
// concrete type of Aggregator used for a metric in the SDK.
//
// This may be a static decision based on fields of the
// Descriptor, or it could use an external configuration
// source to customize the treatment of each metric
// instrument.
//
// The result from AggregatorSelector.AggregatorFor should be
// the same type for a given Descriptor or else nil, because
// Aggregators only know how to Merge with their own type. If
// the result is nil, the metric instrument will be disabled.
//
// Note that the SDK only calls AggregatorFor when new records
// require an Aggregator. This does not provide a way to
// disable metrics with active records.
AggregatorSelector
// Process is called by the SDK once per internal record, passing the
// export Accumulation (a Descriptor, the corresponding attributes, and
// the checkpointed Aggregator). This call has no Context argument because
// it is expected to perform only computation. An SDK is not expected to
// call exporters from within Process; use a controller for that (see
// ./controllers/{pull,push}).
Process(accum Accumulation) error
}
// AggregatorSelector supports selecting the kind of Aggregator to
// use at runtime for a specific metric instrument.
type AggregatorSelector interface {
// AggregatorFor allocates a variable number of aggregators of
// a kind suitable for the requested export. This method
// initializes a `...*Aggregator`, to support making a single
// allocation.
//
// When the call returns without initializing the *Aggregator
// to a non-nil value, the metric instrument is explicitly
// disabled.
//
// This must return a consistent type to avoid confusion in
// later stages of the metrics export process, i.e., when
// Merging or Checkpointing aggregators for a specific
// instrument.
//
// Note: This is context-free because the aggregator should
// not relate to the incoming context. This call should not
// block.
AggregatorFor(descriptor *sdkapi.Descriptor, aggregator ...*aggregator.Aggregator)
}
// Checkpointer is the interface used by a Controller to coordinate
// the Processor with Accumulator(s) and Exporter(s). The
// StartCollection() and FinishCollection() methods start and finish a
// collection interval. Controllers call the Accumulator(s) during
// collection to process Accumulations.
type Checkpointer interface {
// Processor processes metric data for export. The Process
// method is bracketed by StartCollection and FinishCollection
// calls. The embedded AggregatorSelector can be called at
// any time.
Processor
// Reader returns the current data set. This may be
// called before and after collection. The
// implementation is required to return the same value
// throughout its lifetime, since Reader exposes a
// sync.Locker interface. The caller is responsible for
// locking the Reader before initiating collection.
Reader() Reader
// StartCollection begins a collection interval.
StartCollection()
// FinishCollection ends a collection interval.
FinishCollection() error
}
// CheckpointerFactory is an interface for producing configured
// Checkpointer instances.
type CheckpointerFactory interface {
NewCheckpointer() Checkpointer
}
// Exporter handles presentation of the checkpoint of aggregate
// metrics. This is the final stage of a metrics export pipeline,
// where metric data are formatted for a specific system.
type Exporter interface {
// Export is called immediately after completing a collection
// pass in the SDK.
//
// The Context comes from the controller that initiated
// collection.
//
// The InstrumentationLibraryReader interface refers to the
// Processor that just completed collection.
Export(ctx context.Context, resource *resource.Resource, reader InstrumentationLibraryReader) error
// TemporalitySelector is an interface used by the Processor
// in deciding whether to compute Delta or Cumulative
// Aggregations when passing Records to this Exporter.
aggregation.TemporalitySelector
}
// InstrumentationLibraryReader is an interface for exporters to iterate
// over one instrumentation library of metric data at a time.
type InstrumentationLibraryReader interface {
// ForEach calls the passed function once per instrumentation library,
// allowing the caller to emit metrics grouped by the library that
// produced them.
ForEach(readerFunc func(instrumentation.Library, Reader) error) error
}
// Reader allows a controller to access a complete checkpoint of
// aggregated metrics from the Processor for a single library of
// metric data. This is passed to the Exporter which may then use
// ForEach to iterate over the collection of aggregated metrics.
type Reader interface {
// ForEach iterates over aggregated checkpoints for all
// metrics that were updated during the last collection
// period. The function parameter may return an error for each
// aggregated checkpoint it is passed.
//
// The TemporalitySelector argument is used to determine
// whether the Record is computed using Delta or Cumulative
// aggregation.
//
// ForEach tolerates ErrNoData silently, as this is
// expected from the Meter implementation. Any other kind
// of error will immediately halt ForEach and return
// the error to the caller.
ForEach(tempSelector aggregation.TemporalitySelector, recordFunc func(Record) error) error
// Locker supports locking the checkpoint set. Collection
// into the checkpoint set cannot take place (in case of a
// stateful processor) while it is locked.
//
// The Processor attached to the Accumulator MUST be called
// with the lock held.
sync.Locker
// RLock acquires a read lock corresponding to this Locker.
RLock()
// RUnlock releases a read lock corresponding to this Locker.
RUnlock()
}
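
A hedged sketch of an Exporter draining the reader hierarchy; logExporter is illustrative, and it embeds a TemporalitySelector so the Exporter interface is satisfied:

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/sdk/instrumentation"
	"go.opentelemetry.io/otel/sdk/metric/export"
	"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
	"go.opentelemetry.io/otel/sdk/resource"
)

type logExporter struct {
	aggregation.TemporalitySelector // supplies TemporalityFor
}

func (e *logExporter) Export(_ context.Context, _ *resource.Resource, ilr export.InstrumentationLibraryReader) error {
	// Outer loop: one Reader per instrumentation library.
	return ilr.ForEach(func(lib instrumentation.Library, reader export.Reader) error {
		// Inner loop: one Record per updated instrument/attribute set.
		return reader.ForEach(e, func(rec export.Record) error {
			fmt.Printf("%s %s: %s aggregation\n",
				lib.Name, rec.Descriptor().Name(), rec.Aggregation().Kind())
			return nil
		})
	})
}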
// Metadata contains the common elements for exported metric data that
// are shared by the Accumulator->Processor and Processor->Exporter
// steps.
type Metadata struct {
descriptor *sdkapi.Descriptor
attrs *attribute.Set
}
// Accumulation contains the exported data for a single metric instrument
// and attribute set, as prepared by an Accumulator for the Processor.
type Accumulation struct {
Metadata
aggregator aggregator.Aggregator
}
// Record contains the exported data for a single metric instrument
// and attribute set, as prepared by the Processor for the Exporter.
// This includes the effective start and end time for the aggregation.
type Record struct {
Metadata
aggregation aggregation.Aggregation
start time.Time
end time.Time
}
// Descriptor describes the metric instrument being exported.
func (m Metadata) Descriptor() *sdkapi.Descriptor {
return m.descriptor
}
// Attributes returns the attribute set associated with the instrument and the
// aggregated data.
func (m Metadata) Attributes() *attribute.Set {
return m.attrs
}
// NewAccumulation allows Accumulator implementations to construct new
// Accumulations to send to Processors. The Descriptor, attributes, and
// Aggregator represent aggregate metric events received over a single
// collection period.
func NewAccumulation(descriptor *sdkapi.Descriptor, attrs *attribute.Set, aggregator aggregator.Aggregator) Accumulation {
return Accumulation{
Metadata: Metadata{
descriptor: descriptor,
attrs: attrs,
},
aggregator: aggregator,
}
}
// Aggregator returns the checkpointed aggregator. It is safe to
// access the checkpointed state without locking.
func (r Accumulation) Aggregator() aggregator.Aggregator {
return r.aggregator
}
// NewRecord allows Processor implementations to construct export records.
// The Descriptor, attributes, and Aggregator represent aggregate metric
// events received over a single collection period.
func NewRecord(descriptor *sdkapi.Descriptor, attrs *attribute.Set, aggregation aggregation.Aggregation, start, end time.Time) Record {
return Record{
Metadata: Metadata{
descriptor: descriptor,
attrs: attrs,
},
aggregation: aggregation,
start: start,
end: end,
}
}
// Aggregation returns the aggregation, an interface to the record and
// its aggregator, dependent on the kind of both the input and exporter.
func (r Record) Aggregation() aggregation.Aggregation {
return r.aggregation
}
// StartTime is the start time of the interval covered by this aggregation.
func (r Record) StartTime() time.Time {
return r.start
}
// EndTime is the end time of the interval covered by this aggregation.
func (r Record) EndTime() time.Time {
return r.end
}

View File

@@ -0,0 +1,23 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package number provides a number abstraction for instruments that
support either int64 or float64 input values.
This package is currently in a pre-GA phase. Backwards incompatible changes
may be introduced in subsequent minor version releases as we work to track the
evolving OpenTelemetry specification and user feedback.
*/
package number // import "go.opentelemetry.io/otel/sdk/metric/number"

View File

@@ -0,0 +1,24 @@
// Code generated by "stringer -type=Kind"; DO NOT EDIT.
package number
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[Int64Kind-0]
_ = x[Float64Kind-1]
}
const _Kind_name = "Int64KindFloat64Kind"
var _Kind_index = [...]uint8{0, 9, 20}
func (i Kind) String() string {
if i < 0 || i >= Kind(len(_Kind_index)-1) {
return "Kind(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _Kind_name[_Kind_index[i]:_Kind_index[i+1]]
}

View File

@@ -0,0 +1,538 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package number // import "go.opentelemetry.io/otel/sdk/metric/number"
//go:generate stringer -type=Kind
import (
"fmt"
"math"
"sync/atomic"
"go.opentelemetry.io/otel/internal"
)
// Kind describes the data type of the Number.
type Kind int8
const (
// Int64Kind means that the Number stores int64.
Int64Kind Kind = iota
// Float64Kind means that the Number stores float64.
Float64Kind
)
// Zero returns a zero value for a given Kind.
func (k Kind) Zero() Number {
switch k {
case Int64Kind:
return NewInt64Number(0)
case Float64Kind:
return NewFloat64Number(0.)
default:
return Number(0)
}
}
// Minimum returns the minimum representable value
// for a given Kind.
func (k Kind) Minimum() Number {
switch k {
case Int64Kind:
return NewInt64Number(math.MinInt64)
case Float64Kind:
return NewFloat64Number(-1. * math.MaxFloat64)
default:
return Number(0)
}
}
// Maximum returns the maximum representable value
// for a given Kind.
func (k Kind) Maximum() Number {
switch k {
case Int64Kind:
return NewInt64Number(math.MaxInt64)
case Float64Kind:
return NewFloat64Number(math.MaxFloat64)
default:
return Number(0)
}
}
// Number represents either an integral or a floating point value. It
// needs to be accompanied with a source of Kind that describes
// the actual type of the value stored within Number.
type Number uint64
// - constructors
// NewNumberFromRaw creates a new Number from a raw value.
func NewNumberFromRaw(r uint64) Number {
return Number(r)
}
// NewInt64Number creates an integral Number.
func NewInt64Number(i int64) Number {
return NewNumberFromRaw(internal.Int64ToRaw(i))
}
// NewFloat64Number creates a floating point Number.
func NewFloat64Number(f float64) Number {
return NewNumberFromRaw(internal.Float64ToRaw(f))
}
// NewNumberSignChange returns a number with the same magnitude and
// the opposite sign. `kind` must describe the kind of number in `nn`.
func NewNumberSignChange(kind Kind, nn Number) Number {
switch kind {
case Int64Kind:
return NewInt64Number(-nn.AsInt64())
case Float64Kind:
return NewFloat64Number(-nn.AsFloat64())
}
return nn
}
// - as x
// AsNumber gets the Number.
func (n *Number) AsNumber() Number {
return *n
}
// AsRaw gets the uninterpreted raw value. Might be useful for some
// atomic operations.
func (n *Number) AsRaw() uint64 {
return uint64(*n)
}
// AsInt64 assumes that the value contains an int64 and returns it as
// such.
func (n *Number) AsInt64() int64 {
return internal.RawToInt64(n.AsRaw())
}
// AsFloat64 assumes that the measurement value contains a float64 and
// returns it as such.
func (n *Number) AsFloat64() float64 {
return internal.RawToFloat64(n.AsRaw())
}
// - as x atomic
// AsNumberAtomic gets the Number atomically.
func (n *Number) AsNumberAtomic() Number {
return NewNumberFromRaw(n.AsRawAtomic())
}
// AsRawAtomic gets the uninterpreted raw value atomically. Might be
// useful for some atomic operations.
func (n *Number) AsRawAtomic() uint64 {
return atomic.LoadUint64(n.AsRawPtr())
}
// AsInt64Atomic assumes that the number contains an int64 and returns
// it as such atomically.
func (n *Number) AsInt64Atomic() int64 {
return atomic.LoadInt64(n.AsInt64Ptr())
}
// AsFloat64Atomic assumes that the measurement value contains a
// float64 and returns it as such atomically.
func (n *Number) AsFloat64Atomic() float64 {
return internal.RawToFloat64(n.AsRawAtomic())
}
// - as x ptr
// AsRawPtr gets the pointer to the raw, uninterpreted raw
// value. Might be useful for some atomic operations.
func (n *Number) AsRawPtr() *uint64 {
return (*uint64)(n)
}
// AsInt64Ptr assumes that the number contains an int64 and returns a
// pointer to it.
func (n *Number) AsInt64Ptr() *int64 {
return internal.RawPtrToInt64Ptr(n.AsRawPtr())
}
// AsFloat64Ptr assumes that the number contains a float64 and returns a
// pointer to it.
func (n *Number) AsFloat64Ptr() *float64 {
return internal.RawPtrToFloat64Ptr(n.AsRawPtr())
}
// - coerce
// CoerceToInt64 casts the number to int64. May result in
// data/precision loss.
func (n *Number) CoerceToInt64(kind Kind) int64 {
switch kind {
case Int64Kind:
return n.AsInt64()
case Float64Kind:
return int64(n.AsFloat64())
default:
// you get what you deserve
return 0
}
}
// CoerceToFloat64 casts the number to float64. May result in
// data/precision loss.
func (n *Number) CoerceToFloat64(kind Kind) float64 {
switch kind {
case Int64Kind:
return float64(n.AsInt64())
case Float64Kind:
return n.AsFloat64()
default:
// you get what you deserve
return 0
}
}
// - set
// SetNumber sets the number to the passed number. Both should be of
// the same kind.
func (n *Number) SetNumber(nn Number) {
*n.AsRawPtr() = nn.AsRaw()
}
// SetRaw sets the number to the passed raw value. Both number and the
// raw number should represent the same kind.
func (n *Number) SetRaw(r uint64) {
*n.AsRawPtr() = r
}
// SetInt64 assumes that the number contains an int64 and sets it to
// the passed value.
func (n *Number) SetInt64(i int64) {
*n.AsInt64Ptr() = i
}
// SetFloat64 assumes that the number contains a float64 and sets it
// to the passed value.
func (n *Number) SetFloat64(f float64) {
*n.AsFloat64Ptr() = f
}
// - set atomic
// SetNumberAtomic sets the number to the passed number
// atomically. Both should be of the same kind.
func (n *Number) SetNumberAtomic(nn Number) {
atomic.StoreUint64(n.AsRawPtr(), nn.AsRaw())
}
// SetRawAtomic sets the number to the passed raw value
// atomically. Both number and the raw number should represent the
// same kind.
func (n *Number) SetRawAtomic(r uint64) {
atomic.StoreUint64(n.AsRawPtr(), r)
}
// SetInt64Atomic assumes that the number contains an int64 and sets
// it to the passed value atomically.
func (n *Number) SetInt64Atomic(i int64) {
atomic.StoreInt64(n.AsInt64Ptr(), i)
}
// SetFloat64Atomic assumes that the number contains a float64 and
// sets it to the passed value atomically.
func (n *Number) SetFloat64Atomic(f float64) {
atomic.StoreUint64(n.AsRawPtr(), internal.Float64ToRaw(f))
}
// - swap
// SwapNumber sets the number to the passed number and returns the old
// number. Both this number and the passed number should be of the
// same kind.
func (n *Number) SwapNumber(nn Number) Number {
old := *n
n.SetNumber(nn)
return old
}
// SwapRaw sets the number to the passed raw value and returns the old
// raw value. Both number and the raw number should represent the same
// kind.
func (n *Number) SwapRaw(r uint64) uint64 {
old := n.AsRaw()
n.SetRaw(r)
return old
}
// SwapInt64 assumes that the number contains an int64, sets it to the
// passed value and returns the old int64 value.
func (n *Number) SwapInt64(i int64) int64 {
old := n.AsInt64()
n.SetInt64(i)
return old
}
// SwapFloat64 assumes that the number contains a float64, sets it to
// the passed value and returns the old float64 value.
func (n *Number) SwapFloat64(f float64) float64 {
old := n.AsFloat64()
n.SetFloat64(f)
return old
}
// - swap atomic
// SwapNumberAtomic sets the number to the passed number and returns
// the old number atomically. Both this number and the passed number
// should be of the same kind.
func (n *Number) SwapNumberAtomic(nn Number) Number {
return NewNumberFromRaw(atomic.SwapUint64(n.AsRawPtr(), nn.AsRaw()))
}
// SwapRawAtomic sets the number to the passed raw value and returns
// the old raw value atomically. Both number and the raw number should
// represent the same kind.
func (n *Number) SwapRawAtomic(r uint64) uint64 {
return atomic.SwapUint64(n.AsRawPtr(), r)
}
// SwapInt64Atomic assumes that the number contains an int64, sets it
// to the passed value and returns the old int64 value atomically.
func (n *Number) SwapInt64Atomic(i int64) int64 {
return atomic.SwapInt64(n.AsInt64Ptr(), i)
}
// SwapFloat64Atomic assumes that the number contains a float64, sets
// it to the passed value and returns the old float64 value
// atomically.
func (n *Number) SwapFloat64Atomic(f float64) float64 {
return internal.RawToFloat64(atomic.SwapUint64(n.AsRawPtr(), internal.Float64ToRaw(f)))
}
// - add
// AddNumber assumes that this and the passed number are of the passed
// kind and adds the passed number to this number.
func (n *Number) AddNumber(kind Kind, nn Number) {
switch kind {
case Int64Kind:
n.AddInt64(nn.AsInt64())
case Float64Kind:
n.AddFloat64(nn.AsFloat64())
}
}
// AddRaw assumes that this number and the passed raw value are of the
// passed kind and adds the passed raw value to this number.
func (n *Number) AddRaw(kind Kind, r uint64) {
n.AddNumber(kind, NewNumberFromRaw(r))
}
// AddInt64 assumes that the number contains an int64 and adds the
// passed int64 to it.
func (n *Number) AddInt64(i int64) {
*n.AsInt64Ptr() += i
}
// AddFloat64 assumes that the number contains a float64 and adds the
// passed float64 to it.
func (n *Number) AddFloat64(f float64) {
*n.AsFloat64Ptr() += f
}
// - add atomic
// AddNumberAtomic assumes that this and the passed number are of the
// passed kind and adds the passed number to this number atomically.
func (n *Number) AddNumberAtomic(kind Kind, nn Number) {
switch kind {
case Int64Kind:
n.AddInt64Atomic(nn.AsInt64())
case Float64Kind:
n.AddFloat64Atomic(nn.AsFloat64())
}
}
// AddRawAtomic assumes that this number and the passed raw value are
// of the passed kind and adds the passed raw value to this number
// atomically.
func (n *Number) AddRawAtomic(kind Kind, r uint64) {
n.AddNumberAtomic(kind, NewNumberFromRaw(r))
}
// AddInt64Atomic assumes that the number contains an int64 and adds
// the passed int64 to it atomically.
func (n *Number) AddInt64Atomic(i int64) {
atomic.AddInt64(n.AsInt64Ptr(), i)
}
// AddFloat64Atomic assumes that the number contains a float64 and
// adds the passed float64 to it atomically.
func (n *Number) AddFloat64Atomic(f float64) {
for {
o := n.AsFloat64Atomic()
if n.CompareAndSwapFloat64(o, o+f) {
break
}
}
}
// - compare and swap (atomic only)
// CompareAndSwapNumber does the atomic CAS operation on this
// number. This number and passed old and new numbers should be of the
// same kind.
func (n *Number) CompareAndSwapNumber(on, nn Number) bool {
return atomic.CompareAndSwapUint64(n.AsRawPtr(), on.AsRaw(), nn.AsRaw())
}
// CompareAndSwapRaw does the atomic CAS operation on this
// number. This number and passed old and new raw values should be of
// the same kind.
func (n *Number) CompareAndSwapRaw(or, nr uint64) bool {
return atomic.CompareAndSwapUint64(n.AsRawPtr(), or, nr)
}
// CompareAndSwapInt64 assumes that this number contains an int64 and
// does the atomic CAS operation on it.
func (n *Number) CompareAndSwapInt64(oi, ni int64) bool {
return atomic.CompareAndSwapInt64(n.AsInt64Ptr(), oi, ni)
}
// CompareAndSwapFloat64 assumes that this number contains a float64 and
// does the atomic CAS operation on it.
func (n *Number) CompareAndSwapFloat64(of, nf float64) bool {
return atomic.CompareAndSwapUint64(n.AsRawPtr(), internal.Float64ToRaw(of), internal.Float64ToRaw(nf))
}
// - compare
// CompareNumber compares two Numbers given their kind. Both numbers
// should have the same kind. This returns:
// 0 if the numbers are equal
// -1 if the subject `n` is less than the argument `nn`
// +1 if the subject `n` is greater than the argument `nn`
func (n *Number) CompareNumber(kind Kind, nn Number) int {
switch kind {
case Int64Kind:
return n.CompareInt64(nn.AsInt64())
case Float64Kind:
return n.CompareFloat64(nn.AsFloat64())
default:
// you get what you deserve
return 0
}
}
// CompareRaw compares two numbers, where one is input as a raw
// uint64, interpreting both values as a `kind` of number.
func (n *Number) CompareRaw(kind Kind, r uint64) int {
return n.CompareNumber(kind, NewNumberFromRaw(r))
}
// CompareInt64 assumes that the Number contains an int64 and performs
// a comparison between the value and the other value. It returns the
// typical result of the compare function: -1 if the value is less
// than the other, 0 if both are equal, 1 if the value is greater than
// the other.
func (n *Number) CompareInt64(i int64) int {
this := n.AsInt64()
if this < i {
return -1
} else if this > i {
return 1
}
return 0
}
// CompareFloat64 assumes that the Number contains a float64 and
// performs a comparison between the value and the other value. It
// returns the typical result of the compare function: -1 if the value
// is less than the other, 0 if both are equal, 1 if the value is
// greater than the other.
//
// Do not compare NaN values.
func (n *Number) CompareFloat64(f float64) int {
this := n.AsFloat64()
if this < f {
return -1
} else if this > f {
return 1
}
return 0
}
// - relations to zero
// IsPositive returns true if the actual value is greater than zero.
func (n *Number) IsPositive(kind Kind) bool {
return n.compareWithZero(kind) > 0
}
// IsNegative returns true if the actual value is less than zero.
func (n *Number) IsNegative(kind Kind) bool {
return n.compareWithZero(kind) < 0
}
// IsZero returns true if the actual value is equal to zero.
func (n *Number) IsZero(kind Kind) bool {
return n.compareWithZero(kind) == 0
}
// - misc
// Emit returns a string representation of the raw value of the
// Number. A %d is used for integral values, %f for floating point
// values.
func (n *Number) Emit(kind Kind) string {
switch kind {
case Int64Kind:
return fmt.Sprintf("%d", n.AsInt64())
case Float64Kind:
return fmt.Sprintf("%f", n.AsFloat64())
default:
return ""
}
}
// AsInterface returns the number as an interface{}, typically used
// for Kind-correct JSON conversion.
func (n *Number) AsInterface(kind Kind) interface{} {
switch kind {
case Int64Kind:
return n.AsInt64()
case Float64Kind:
return n.AsFloat64()
default:
return math.NaN()
}
}
// - private stuff
func (n *Number) compareWithZero(kind Kind) int {
switch kind {
case Int64Kind:
return n.CompareInt64(0)
case Float64Kind:
return n.CompareFloat64(0.)
default:
// you get what you deserve
return 0
}
}
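
A short illustrative use of the Kind-aware API from a consumer of this package (values chosen arbitrarily):

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/metric/number"
)

func numberDemo() {
	n := number.NewFloat64Number(1.5)
	n.AddFloat64Atomic(2.5) // safe to call from concurrent goroutines

	fmt.Println(n.Emit(number.Float64Kind))          // "4.000000"
	fmt.Println(n.CoerceToInt64(number.Float64Kind)) // 4 (precision may be lost)
}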

View File

@@ -21,9 +21,10 @@ import (
"time"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric/sdkapi"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/aggregator"
"go.opentelemetry.io/otel/sdk/metric/export"
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
)
type (
@@ -51,8 +52,8 @@ type (
}
stateValue struct {
// labels corresponds to the stateKey.distinct field.
labels *attribute.Set
// attrs corresponds to the stateKey.distinct field.
attrs *attribute.Set
// updated indicates the last sequence number when this value had
// Process() called by an accumulator.
@@ -74,12 +75,12 @@ type (
// (if !currentOwned) or it refers to an Aggregator
// owned by the processor used to accumulate multiple
// values in a single collection round.
current export.Aggregator
current aggregator.Aggregator
// cumulative, if non-nil, refers to an Aggregator owned
// by the processor used to store the last cumulative
// value.
cumulative export.Aggregator
cumulative aggregator.Aggregator
}
state struct {
@@ -131,7 +132,7 @@ type factory struct {
func NewFactory(aselector export.AggregatorSelector, tselector aggregation.TemporalitySelector, opts ...Option) export.CheckpointerFactory {
var config config
for _, opt := range opts {
opt.applyProcessor(&config)
config = opt.applyProcessor(config)
}
return factory{
aselector: aselector,
@@ -166,7 +167,7 @@ func (b *Processor) Process(accum export.Accumulation) error {
desc := accum.Descriptor()
key := stateKey{
descriptor: desc,
distinct: accum.Labels().Equivalent(),
distinct: accum.Attributes().Equivalent(),
}
agg := accum.Aggregator()
@@ -176,7 +177,7 @@ func (b *Processor) Process(accum export.Accumulation) error {
stateful := b.TemporalityFor(desc, agg.Aggregation().Kind()).MemoryRequired(desc.InstrumentKind())
newValue := &stateValue{
labels: accum.Labels(),
attrs: accum.Attributes(),
updated: b.state.finishedCollection,
stateful: stateful,
current: agg,
@@ -229,7 +230,7 @@ func (b *Processor) Process(accum export.Accumulation) error {
// indicating that the stateKey for Accumulation has already
// been seen in the same collection. When this happens, it
// implies that multiple Accumulators are being used, or that
// a single Accumulator has been configured with a label key
// a single Accumulator has been configured with an attribute key
// filter.
if !sameCollection {
@@ -369,7 +370,7 @@ func (b *state) ForEach(exporter aggregation.TemporalitySelector, f func(export.
if err := f(export.NewRecord(
key.descriptor,
value.labels,
value.attrs,
agg,
start,
b.intervalEnd,

View File

@@ -16,27 +16,27 @@ package basic // import "go.opentelemetry.io/otel/sdk/metric/processor/basic"
// config contains the options for configuring a basic metric processor.
type config struct {
// Memory controls whether the processor remembers metric
// instruments and label sets that were previously reported.
// When Memory is true, Reader.ForEach() will visit
// metrics that were not updated in the most recent interval.
// Memory controls whether the processor remembers metric instruments and
// attribute sets that were previously reported. When Memory is true,
// Reader.ForEach() will visit metrics that were not updated in the most
// recent interval.
Memory bool
}
type Option interface {
applyProcessor(*config)
applyProcessor(config) config
}
// WithMemory sets the memory behavior of a Processor. If this is
// true, the processor will report metric instruments and label sets
// that were previously reported but not updated in the most recent
// interval.
// WithMemory sets the memory behavior of a Processor. If this is true, the
// processor will report metric instruments and attribute sets that were
// previously reported but not updated in the most recent interval.
func WithMemory(memory bool) Option {
return memoryOption(memory)
}
type memoryOption bool
func (m memoryOption) applyProcessor(cfg *config) {
func (m memoryOption) applyProcessor(cfg config) config {
cfg.Memory = bool(m)
return cfg
}
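
A hedged usage sketch of the option above: constructing a basic processor factory that remembers previously-reported attribute sets. The selector arguments are assumed to come from elsewhere (e.g. the simple selector package and a TemporalitySelector).

import (
	"go.opentelemetry.io/otel/sdk/metric/export"
	"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
	"go.opentelemetry.io/otel/sdk/metric/processor/basic"
)

func newFactory(asel export.AggregatorSelector, tsel aggregation.TemporalitySelector) export.CheckpointerFactory {
	// WithMemory(true) makes Reader.ForEach revisit instruments that were
	// not updated in the most recent collection interval.
	return basic.NewFactory(asel, tsel, basic.WithMemory(true))
}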

View File

@@ -0,0 +1,24 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package registry provides a non-standalone implementation of
MeterProvider that adds uniqueness checking for instrument descriptors
on top of the MeterProvider it wraps.
This package is currently in a pre-GA phase. Backwards incompatible changes
may be introduced in subsequent minor version releases as we work to track the
evolving OpenTelemetry specification and user feedback.
*/
package registry // import "go.opentelemetry.io/otel/sdk/metric/registry"

View File

@@ -0,0 +1,138 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package registry // import "go.opentelemetry.io/otel/sdk/metric/registry"
import (
"context"
"fmt"
"sync"
"go.opentelemetry.io/otel/metric/instrument"
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
)
// UniqueInstrumentMeterImpl implements the sdkapi.MeterImpl interface, adding
// uniqueness checking for instrument descriptors.
type UniqueInstrumentMeterImpl struct {
lock sync.Mutex
impl sdkapi.MeterImpl
state map[string]sdkapi.InstrumentImpl
}
var _ sdkapi.MeterImpl = (*UniqueInstrumentMeterImpl)(nil)
// ErrMetricKindMismatch is the standard error for mismatched metric
// instrument definitions.
var ErrMetricKindMismatch = fmt.Errorf(
"a metric was already registered by this name with another kind or number type")
// NewUniqueInstrumentMeterImpl returns a wrapped sdkapi.MeterImpl
// with the addition of instrument name uniqueness checking.
func NewUniqueInstrumentMeterImpl(impl sdkapi.MeterImpl) *UniqueInstrumentMeterImpl {
return &UniqueInstrumentMeterImpl{
impl: impl,
state: map[string]sdkapi.InstrumentImpl{},
}
}
// MeterImpl gives the caller access to the underlying MeterImpl
// used by this UniqueInstrumentMeterImpl.
func (u *UniqueInstrumentMeterImpl) MeterImpl() sdkapi.MeterImpl {
return u.impl
}
// NewMetricKindMismatchError formats an error that describes a
// mismatched metric instrument definition.
func NewMetricKindMismatchError(desc sdkapi.Descriptor) error {
return fmt.Errorf("metric %s registered as %s %s: %w",
desc.Name(),
desc.NumberKind(),
desc.InstrumentKind(),
ErrMetricKindMismatch)
}
// Compatible determines whether two sdkapi.Descriptors are considered
// the same for the purpose of uniqueness checking.
func Compatible(candidate, existing sdkapi.Descriptor) bool {
return candidate.InstrumentKind() == existing.InstrumentKind() &&
candidate.NumberKind() == existing.NumberKind()
}
// checkUniqueness returns an ErrMetricKindMismatch error if there is
// a conflict between a descriptor that was already registered and the
// `descriptor` argument. If there is an existing compatible
// registration, this returns the already-registered instrument. If
// there is no conflict and no prior registration, returns (nil, nil).
func (u *UniqueInstrumentMeterImpl) checkUniqueness(descriptor sdkapi.Descriptor) (sdkapi.InstrumentImpl, error) {
impl, ok := u.state[descriptor.Name()]
if !ok {
return nil, nil
}
if !Compatible(descriptor, impl.Descriptor()) {
return nil, NewMetricKindMismatchError(impl.Descriptor())
}
return impl, nil
}
// NewSyncInstrument implements sdkapi.MeterImpl.
func (u *UniqueInstrumentMeterImpl) NewSyncInstrument(descriptor sdkapi.Descriptor) (sdkapi.SyncImpl, error) {
u.lock.Lock()
defer u.lock.Unlock()
impl, err := u.checkUniqueness(descriptor)
if err != nil {
return nil, err
} else if impl != nil {
return impl.(sdkapi.SyncImpl), nil
}
syncInst, err := u.impl.NewSyncInstrument(descriptor)
if err != nil {
return nil, err
}
u.state[descriptor.Name()] = syncInst
return syncInst, nil
}
// NewAsyncInstrument implements sdkapi.MeterImpl.
func (u *UniqueInstrumentMeterImpl) NewAsyncInstrument(descriptor sdkapi.Descriptor) (sdkapi.AsyncImpl, error) {
u.lock.Lock()
defer u.lock.Unlock()
impl, err := u.checkUniqueness(descriptor)
if err != nil {
return nil, err
} else if impl != nil {
return impl.(sdkapi.AsyncImpl), nil
}
asyncInst, err := u.impl.NewAsyncInstrument(descriptor)
if err != nil {
return nil, err
}
u.state[descriptor.Name()] = asyncInst
return asyncInst, nil
}
func (u *UniqueInstrumentMeterImpl) RegisterCallback(insts []instrument.Asynchronous, callback func(context.Context)) error {
u.lock.Lock()
defer u.lock.Unlock()
return u.impl.RegisterCallback(insts, callback)
}
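A hedged sketch of the uniqueness check in use; impl is assumed to be an existing sdkapi.MeterImpl (for example the SDK Accumulator), and errors.Is comes from the standard library errors package:
// Hypothetical usage, not part of this change.
unique := NewUniqueInstrumentMeterImpl(impl)
intCounter := sdkapi.NewDescriptor("requests", sdkapi.CounterInstrumentKind, number.Int64Kind, "", "")
floatCounter := sdkapi.NewDescriptor("requests", sdkapi.CounterInstrumentKind, number.Float64Kind, "", "")
// The first registration succeeds and is remembered by name.
_, _ = unique.NewSyncInstrument(intCounter)
// The second uses the same name with a different number kind and is rejected.
_, err := unique.NewSyncInstrument(floatCounter)
// errors.Is(err, ErrMetricKindMismatch) == true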

View File

@@ -23,11 +23,11 @@ import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
internal "go.opentelemetry.io/otel/internal/metric"
"go.opentelemetry.io/otel/metric/number"
"go.opentelemetry.io/otel/metric/sdkapi"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/metric/instrument"
"go.opentelemetry.io/otel/sdk/metric/aggregator"
"go.opentelemetry.io/otel/sdk/metric/export"
"go.opentelemetry.io/otel/sdk/metric/number"
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
)
type (
@@ -44,10 +44,8 @@ type (
// current maps `mapkey` to *record.
current sync.Map
// asyncInstruments is a set of
// `*asyncInstrument` instances
asyncLock sync.Mutex
asyncInstruments *internal.AsyncInstrumentState
callbackLock sync.Mutex
callbacks map[*callback]struct{}
// currentEpoch is the current epoch number. It is
// incremented in `Collect()`.
@@ -58,19 +56,27 @@ type (
// collectLock prevents simultaneous calls to Collect().
collectLock sync.Mutex
}
// asyncSortSlice has a single purpose - as a temporary
// place for sorting during labels creation to avoid
// allocation. It is cleared after use.
asyncSortSlice attribute.Sortable
callback struct {
insts map[*asyncInstrument]struct{}
f func(context.Context)
}
asyncContextKey struct{}
asyncInstrument struct {
baseInstrument
instrument.Asynchronous
}
syncInstrument struct {
instrument
baseInstrument
instrument.Synchronous
}
// mapkey uniquely describes a metric instrument in terms of
// its InstrumentID and the encoded form of its labels.
// mapkey uniquely describes a metric instrument in terms of its
// InstrumentID and the encoded form of its attributes.
mapkey struct {
descriptor *sdkapi.Descriptor
ordered attribute.Distinct
@@ -92,62 +98,41 @@ type (
// supports checking for no updates during a round.
collectedCount int64
// storage is the stored label set for this record,
// except in cases where a label set is shared due to
// batch recording.
storage attribute.Set
// attrs is the stored attribute set for this record, except in cases
// where an attribute set is shared due to batch recording.
attrs attribute.Set
// labels is the processed label set for this record.
// this may refer to the `storage` field in another
// record if this label set is shared resulting from
// `RecordBatch`.
labels *attribute.Set
// sortSlice has a single purpose - as a temporary
// place for sorting during labels creation to avoid
// allocation.
// sortSlice has a single purpose - as a temporary place for sorting
// during attributes creation to avoid allocation.
sortSlice attribute.Sortable
// inst is a pointer to the corresponding instrument.
inst *syncInstrument
inst *baseInstrument
// current implements the actual RecordOne() API,
// depending on the type of aggregation. If nil, the
// metric was disabled by the exporter.
current export.Aggregator
checkpoint export.Aggregator
current aggregator.Aggregator
checkpoint aggregator.Aggregator
}
instrument struct {
baseInstrument struct {
meter *Accumulator
descriptor sdkapi.Descriptor
}
asyncInstrument struct {
instrument
// recorders maps ordered labels to the pair of
// labelset and recorder
recorders map[attribute.Distinct]*labeledRecorder
}
labeledRecorder struct {
observedEpoch int64
labels *attribute.Set
observed export.Aggregator
}
)
var (
_ sdkapi.MeterImpl = &Accumulator{}
_ sdkapi.AsyncImpl = &asyncInstrument{}
_ sdkapi.SyncImpl = &syncInstrument{}
// ErrUninitializedInstrument is returned when an uninitialized instrument is used.
ErrUninitializedInstrument = fmt.Errorf("use of an uninitialized instrument")
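// ErrBadInstrument is returned when an instrument from another SDK implementation is used.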
ErrBadInstrument = fmt.Errorf("use of an instrument from another SDK")
)
func (inst *instrument) Descriptor() sdkapi.Descriptor {
return inst.descriptor
func (b *baseInstrument) Descriptor() sdkapi.Descriptor {
return b.descriptor
}
func (a *asyncInstrument) Implementation() interface{} {
@@ -158,77 +143,24 @@ func (s *syncInstrument) Implementation() interface{} {
return s
}
func (a *asyncInstrument) observe(num number.Number, labels *attribute.Set) {
if err := aggregator.RangeTest(num, &a.descriptor); err != nil {
otel.Handle(err)
return
}
recorder := a.getRecorder(labels)
if recorder == nil {
// The instrument is disabled according to the
// AggregatorSelector.
return
}
if err := recorder.Update(context.Background(), num, &a.descriptor); err != nil {
otel.Handle(err)
return
}
}
func (a *asyncInstrument) getRecorder(labels *attribute.Set) export.Aggregator {
lrec, ok := a.recorders[labels.Equivalent()]
if ok {
// Note: SynchronizedMove(nil) can't return an error
_ = lrec.observed.SynchronizedMove(nil, &a.descriptor)
lrec.observedEpoch = a.meter.currentEpoch
a.recorders[labels.Equivalent()] = lrec
return lrec.observed
}
var rec export.Aggregator
a.meter.processor.AggregatorFor(&a.descriptor, &rec)
if a.recorders == nil {
a.recorders = make(map[attribute.Distinct]*labeledRecorder)
}
// This may store nil recorder in the map, thus disabling the
// asyncInstrument for the labelset for good. This is intentional,
// but will be revisited later.
a.recorders[labels.Equivalent()] = &labeledRecorder{
observed: rec,
labels: labels,
observedEpoch: a.meter.currentEpoch,
}
return rec
}
// acquireHandle gets or creates a `*record` corresponding to `kvs`,
// the input labels. The second argument `labels` is passed in to
// support re-use of the orderedLabels computed by a previous
// measurement in the same batch. This performs two allocations
// in the common case.
func (s *syncInstrument) acquireHandle(kvs []attribute.KeyValue, labelPtr *attribute.Set) *record {
var rec *record
var equiv attribute.Distinct
// the input attributes.
func (b *baseInstrument) acquireHandle(kvs []attribute.KeyValue) *record {
if labelPtr == nil {
// This memory allocation may not be used, but it's
// needed for the `sortSlice` field, to avoid an
// allocation while sorting.
rec = &record{}
rec.storage = attribute.NewSetWithSortable(kvs, &rec.sortSlice)
rec.labels = &rec.storage
equiv = rec.storage.Equivalent()
} else {
equiv = labelPtr.Equivalent()
}
// This memory allocation may not be used, but it's
// needed for the `sortSlice` field, to avoid an
// allocation while sorting.
rec := &record{}
rec.attrs = attribute.NewSetWithSortable(kvs, &rec.sortSlice)
// Create lookup key for sync.Map (one allocation, as this
// passes through an interface{})
mk := mapkey{
descriptor: &s.descriptor,
ordered: equiv,
descriptor: &b.descriptor,
ordered: rec.attrs.Equivalent(),
}
if actual, ok := s.meter.current.Load(mk); ok {
if actual, ok := b.meter.current.Load(mk); ok {
// Existing record case.
existingRec := actual.(*record)
if existingRec.refMapped.ref() {
@@ -239,19 +171,15 @@ func (s *syncInstrument) acquireHandle(kvs []attribute.KeyValue, labelPtr *attri
// This entry is no longer mapped, try to add a new entry.
}
if rec == nil {
rec = &record{}
rec.labels = labelPtr
}
rec.refMapped = refcountMapped{value: 2}
rec.inst = s
rec.inst = b
s.meter.processor.AggregatorFor(&s.descriptor, &rec.current, &rec.checkpoint)
b.meter.processor.AggregatorFor(&b.descriptor, &rec.current, &rec.checkpoint)
for {
// Load/Store: there's a memory allocation to place `mk` into
// an interface here.
if actual, loaded := s.meter.current.LoadOrStore(mk, rec); loaded {
if actual, loaded := b.meter.current.LoadOrStore(mk, rec); loaded {
// Existing record case. Do not modify rec here: if taking a reference
// on the existing record fails, the loop retries and rec is reused to
// avoid a new allocation.
oldRec := actual.(*record)
@@ -278,11 +206,22 @@ func (s *syncInstrument) acquireHandle(kvs []attribute.KeyValue, labelPtr *attri
}
}
// RecordOne captures a single synchronous metric event.
//
// The input array `kvs` may be sorted in place as a side effect of this call.
func (s *syncInstrument) RecordOne(ctx context.Context, num number.Number, kvs []attribute.KeyValue) {
h := s.acquireHandle(kvs, nil)
h := s.acquireHandle(kvs)
defer h.unbind()
h.RecordOne(ctx, num)
h.captureOne(ctx, num)
}
// ObserveOne captures a single asynchronous metric event.
// The input array `attrs` may be sorted in place as a side effect of this call.
func (a *asyncInstrument) ObserveOne(ctx context.Context, num number.Number, attrs []attribute.KeyValue) {
h := a.acquireHandle(attrs)
defer h.unbind()
h.captureOne(ctx, num)
}
// NewAccumulator constructs a new Accumulator for the given
@@ -296,15 +235,17 @@ func (s *syncInstrument) RecordOne(ctx context.Context, num number.Number, kvs [
// own periodic collection.
func NewAccumulator(processor export.Processor) *Accumulator {
return &Accumulator{
processor: processor,
asyncInstruments: internal.NewAsyncInstrumentState(),
processor: processor,
callbacks: map[*callback]struct{}{},
}
}
var _ sdkapi.MeterImpl = &Accumulator{}
// NewSyncInstrument implements sdkapi.MetricImpl.
func (m *Accumulator) NewSyncInstrument(descriptor sdkapi.Descriptor) (sdkapi.SyncImpl, error) {
return &syncInstrument{
instrument: instrument{
baseInstrument: baseInstrument{
descriptor: descriptor,
meter: m,
},
@@ -312,19 +253,40 @@ func (m *Accumulator) NewSyncInstrument(descriptor sdkapi.Descriptor) (sdkapi.Sy
}
// NewAsyncInstrument implements sdkapi.MetricImpl.
func (m *Accumulator) NewAsyncInstrument(descriptor sdkapi.Descriptor, runner sdkapi.AsyncRunner) (sdkapi.AsyncImpl, error) {
func (m *Accumulator) NewAsyncInstrument(descriptor sdkapi.Descriptor) (sdkapi.AsyncImpl, error) {
a := &asyncInstrument{
instrument: instrument{
baseInstrument: baseInstrument{
descriptor: descriptor,
meter: m,
},
}
m.asyncLock.Lock()
defer m.asyncLock.Unlock()
m.asyncInstruments.Register(a, runner)
return a, nil
}
func (m *Accumulator) RegisterCallback(insts []instrument.Asynchronous, f func(context.Context)) error {
cb := &callback{
insts: map[*asyncInstrument]struct{}{},
f: f,
}
for _, inst := range insts {
impl, ok := inst.(sdkapi.AsyncImpl)
if !ok {
return ErrBadInstrument
}
ai, err := m.fromAsync(impl)
if err != nil {
return err
}
cb.insts[ai] = struct{}{}
}
m.callbackLock.Lock()
defer m.callbackLock.Unlock()
m.callbacks[cb] = struct{}{}
return nil
}
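// Hypothetical sketch of the callback flow above (not part of this change):
// gauge and readCPU are illustrative names, and acc is an *Accumulator.
//
//	gauge, err := acc.NewAsyncInstrument(sdkapi.NewDescriptor(
//		"cpu.utilization", sdkapi.GaugeObserverInstrumentKind, number.Float64Kind, "", ""))
//	if err != nil {
//		return err
//	}
//	err = acc.RegisterCallback([]instrument.Asynchronous{gauge}, func(ctx context.Context) {
//		// Invoked once per Collect(); report the current value.
//		gauge.ObserveOne(ctx, number.NewFloat64Number(readCPU()), nil)
//	})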
// Collect traverses the list of active records and observers and
// exports data for each active instrument. Collect() may not be
// called concurrently.
@@ -337,14 +299,14 @@ func (m *Accumulator) Collect(ctx context.Context) int {
m.collectLock.Lock()
defer m.collectLock.Unlock()
checkpointed := m.observeAsyncInstruments(ctx)
checkpointed += m.collectSyncInstruments()
m.runAsyncCallbacks(ctx)
checkpointed := m.collectInstruments()
m.currentEpoch++
return checkpointed
}
func (m *Accumulator) collectSyncInstruments() int {
func (m *Accumulator) collectInstruments() int {
checkpointed := 0
m.current.Range(func(key interface{}, value interface{}) bool {
@@ -387,35 +349,17 @@ func (m *Accumulator) collectSyncInstruments() int {
return checkpointed
}
// CollectAsync implements internal.AsyncCollector.
// The order of the input array `kvs` may be sorted after the function is called.
func (m *Accumulator) CollectAsync(kv []attribute.KeyValue, obs ...sdkapi.Observation) {
labels := attribute.NewSetWithSortable(kv, &m.asyncSortSlice)
func (m *Accumulator) runAsyncCallbacks(ctx context.Context) {
m.callbackLock.Lock()
defer m.callbackLock.Unlock()
for _, ob := range obs {
if a := m.fromAsync(ob.AsyncImpl()); a != nil {
a.observe(ob.Number(), &labels)
}
ctx = context.WithValue(ctx, asyncContextKey{}, m)
for cb := range m.callbacks {
cb.f(ctx)
}
}
func (m *Accumulator) observeAsyncInstruments(ctx context.Context) int {
m.asyncLock.Lock()
defer m.asyncLock.Unlock()
asyncCollected := 0
m.asyncInstruments.Run(ctx, m)
for _, inst := range m.asyncInstruments.Instruments() {
if a := m.fromAsync(inst); a != nil {
asyncCollected += m.checkpointAsync(a)
}
}
return asyncCollected
}
func (m *Accumulator) checkpointRecord(r *record) int {
if r.current == nil {
return 0
@@ -426,7 +370,7 @@ func (m *Accumulator) checkpointRecord(r *record) int {
return 0
}
a := export.NewAccumulation(&r.inst.descriptor, r.labels, r.checkpoint)
a := export.NewAccumulation(&r.inst.descriptor, &r.attrs, r.checkpoint)
err = m.processor.Process(a)
if err != nil {
otel.Handle(err)
@@ -434,63 +378,7 @@ func (m *Accumulator) checkpointRecord(r *record) int {
return 1
}
func (m *Accumulator) checkpointAsync(a *asyncInstrument) int {
if len(a.recorders) == 0 {
return 0
}
checkpointed := 0
for encodedLabels, lrec := range a.recorders {
lrec := lrec
epochDiff := m.currentEpoch - lrec.observedEpoch
if epochDiff == 0 {
if lrec.observed != nil {
a := export.NewAccumulation(&a.descriptor, lrec.labels, lrec.observed)
err := m.processor.Process(a)
if err != nil {
otel.Handle(err)
}
checkpointed++
}
} else if epochDiff > 1 {
// This is second collection cycle with no
// observations for this labelset. Remove the
// recorder.
delete(a.recorders, encodedLabels)
}
}
if len(a.recorders) == 0 {
a.recorders = nil
}
return checkpointed
}
// RecordBatch enters a batch of metric events.
// The order of the input array `kvs` may be sorted after the function is called.
func (m *Accumulator) RecordBatch(ctx context.Context, kvs []attribute.KeyValue, measurements ...sdkapi.Measurement) {
// Labels will be computed the first time acquireHandle is
// called. Subsequent calls to acquireHandle will re-use the
// previously computed value instead of recomputing the
// ordered labels.
var labelsPtr *attribute.Set
for i, meas := range measurements {
s := m.fromSync(meas.SyncImpl())
if s == nil {
continue
}
h := s.acquireHandle(kvs, labelsPtr)
// Re-use labels for the next measurement.
if i == 0 {
labelsPtr = h.labels
}
defer h.unbind()
h.RecordOne(ctx, meas.Number())
}
}
// RecordOne implements sdkapi.SyncImpl.
func (r *record) RecordOne(ctx context.Context, num number.Number) {
func (r *record) captureOne(ctx context.Context, num number.Number) {
if r.current == nil {
// The instrument is disabled according to the AggregatorSelector.
return
@@ -515,30 +403,20 @@ func (r *record) unbind() {
func (r *record) mapkey() mapkey {
return mapkey{
descriptor: &r.inst.descriptor,
ordered: r.labels.Equivalent(),
ordered: r.attrs.Equivalent(),
}
}
// fromSync gets a sync implementation object, checking for
// uninitialized instruments and instruments created by another SDK.
func (m *Accumulator) fromSync(sync sdkapi.SyncImpl) *syncInstrument {
if sync != nil {
if inst, ok := sync.Implementation().(*syncInstrument); ok {
return inst
}
}
otel.Handle(ErrUninitializedInstrument)
return nil
}
// fromAsync gets an async implementation object, checking for
// uninitialized instruments and instruments created by another SDK.
func (m *Accumulator) fromAsync(async sdkapi.AsyncImpl) *asyncInstrument {
if async != nil {
if inst, ok := async.Implementation().(*asyncInstrument); ok {
return inst
}
func (m *Accumulator) fromAsync(async sdkapi.AsyncImpl) (*asyncInstrument, error) {
if async == nil {
return nil, ErrUninitializedInstrument
}
otel.Handle(ErrUninitializedInstrument)
return nil
inst, ok := async.Implementation().(*asyncInstrument)
if !ok {
return nil, ErrBadInstrument
}
return inst, nil
}
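A hedged end-to-end sketch of the synchronous path through this file; proc is assumed to be an export.Processor (for example the basic processor with a simple selector) and ctx a context.Context:
// Hypothetical usage, not part of this change.
acc := NewAccumulator(proc)
counter, err := acc.NewSyncInstrument(sdkapi.NewDescriptor(
	"requests", sdkapi.CounterInstrumentKind, number.Int64Kind, "", ""))
if err != nil {
	// handle the error
}
counter.RecordOne(ctx, number.NewInt64Number(1),
	[]attribute.KeyValue{attribute.String("route", "/")})
// Collect checkpoints each active record and forwards one Accumulation
// per (instrument, attribute set) pair to the processor.
_ = acc.Collect(ctx)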

View File

@@ -0,0 +1,70 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sdkapi // import "go.opentelemetry.io/otel/sdk/metric/sdkapi"
import (
"go.opentelemetry.io/otel/metric/unit"
"go.opentelemetry.io/otel/sdk/metric/number"
)
// Descriptor contains all the settings that describe an instrument,
// including its name, metric kind, number kind, and the configurable
// options.
type Descriptor struct {
name string
instrumentKind InstrumentKind
numberKind number.Kind
description string
unit unit.Unit
}
// NewDescriptor returns a Descriptor with the given contents.
func NewDescriptor(name string, ikind InstrumentKind, nkind number.Kind, description string, unit unit.Unit) Descriptor {
return Descriptor{
name: name,
instrumentKind: ikind,
numberKind: nkind,
description: description,
unit: unit,
}
}
// Name returns the metric instrument's name.
func (d Descriptor) Name() string {
return d.name
}
// InstrumentKind returns the specific kind of instrument.
func (d Descriptor) InstrumentKind() InstrumentKind {
return d.instrumentKind
}
// Description provides a human-readable description of the metric
// instrument.
func (d Descriptor) Description() string {
return d.description
}
// Unit describes the units of the metric instrument. Unitless
// metrics return the empty string.
func (d Descriptor) Unit() unit.Unit {
return d.unit
}
// NumberKind returns whether this instrument is declared over int64
// or float64 values.
func (d Descriptor) NumberKind() number.Kind {
return d.numberKind
}
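A brief sketch of constructing a Descriptor and reading it back; the name, description, and unit values are illustrative:
// Hypothetical usage, not part of this change.
d := NewDescriptor(
	"http.server.duration",              // name
	HistogramInstrumentKind,             // instrument kind
	number.Float64Kind,                  // number kind
	"duration of inbound HTTP requests", // description
	unit.Milliseconds,                   // unit
)
_ = d.Name()           // "http.server.duration"
_ = d.InstrumentKind() // HistogramInstrumentKind
_ = d.NumberKind()     // Float64Kind
_ = d.Unit()           // "ms"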

View File

@@ -0,0 +1,80 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:generate stringer -type=InstrumentKind
package sdkapi // import "go.opentelemetry.io/otel/sdk/metric/sdkapi"
// InstrumentKind describes the kind of instrument.
type InstrumentKind int8
const (
// HistogramInstrumentKind indicates a Histogram instrument.
HistogramInstrumentKind InstrumentKind = iota
// GaugeObserverInstrumentKind indicates a GaugeObserver instrument.
GaugeObserverInstrumentKind
// CounterInstrumentKind indicates a Counter instrument.
CounterInstrumentKind
// UpDownCounterInstrumentKind indicates an UpDownCounter instrument.
UpDownCounterInstrumentKind
// CounterObserverInstrumentKind indicates a CounterObserver instrument.
CounterObserverInstrumentKind
// UpDownCounterObserverInstrumentKind indicates an UpDownCounterObserver
// instrument.
UpDownCounterObserverInstrumentKind
)
// Synchronous returns whether this is a synchronous kind of instrument.
func (k InstrumentKind) Synchronous() bool {
switch k {
case CounterInstrumentKind, UpDownCounterInstrumentKind, HistogramInstrumentKind:
return true
}
return false
}
// Asynchronous returns whether this is an asynchronous kind of instrument.
func (k InstrumentKind) Asynchronous() bool {
return !k.Synchronous()
}
// Adding returns whether this kind of instrument adds its inputs (as opposed to Grouping).
func (k InstrumentKind) Adding() bool {
switch k {
case CounterInstrumentKind, UpDownCounterInstrumentKind, CounterObserverInstrumentKind, UpDownCounterObserverInstrumentKind:
return true
}
return false
}
// Grouping returns whether this kind of instrument groups its inputs (as opposed to Adding).
func (k InstrumentKind) Grouping() bool {
return !k.Adding()
}
// Monotonic returns whether this kind of instrument exposes a non-decreasing sum.
func (k InstrumentKind) Monotonic() bool {
switch k {
case CounterInstrumentKind, CounterObserverInstrumentKind:
return true
}
return false
}
// PrecomputedSum returns whether this kind of instrument receives precomputed sums.
func (k InstrumentKind) PrecomputedSum() bool {
return k.Adding() && k.Asynchronous()
}
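A short sketch of how the predicates above combine for two representative kinds:
// Hypothetical usage, not part of this change.
k := CounterObserverInstrumentKind
_ = k.Asynchronous()   // true
_ = k.Adding()         // true
_ = k.Monotonic()      // true
_ = k.PrecomputedSum() // true: Adding() && Asynchronous()
h := HistogramInstrumentKind
_ = h.Synchronous()    // true
_ = h.Grouping()       // true: !Adding()
_ = h.PrecomputedSum() // false: synchronous kinds never report precomputed sums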

View File

@@ -0,0 +1,28 @@
// Code generated by "stringer -type=InstrumentKind"; DO NOT EDIT.
package sdkapi
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[HistogramInstrumentKind-0]
_ = x[GaugeObserverInstrumentKind-1]
_ = x[CounterInstrumentKind-2]
_ = x[UpDownCounterInstrumentKind-3]
_ = x[CounterObserverInstrumentKind-4]
_ = x[UpDownCounterObserverInstrumentKind-5]
}
const _InstrumentKind_name = "HistogramInstrumentKindGaugeObserverInstrumentKindCounterInstrumentKindUpDownCounterInstrumentKindCounterObserverInstrumentKindUpDownCounterObserverInstrumentKind"
var _InstrumentKind_index = [...]uint8{0, 23, 50, 71, 98, 127, 162}
func (i InstrumentKind) String() string {
if i < 0 || i >= InstrumentKind(len(_InstrumentKind_index)-1) {
return "InstrumentKind(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _InstrumentKind_name[_InstrumentKind_index[i]:_InstrumentKind_index[i+1]]
}

View File

@@ -0,0 +1,83 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sdkapi // import "go.opentelemetry.io/otel/sdk/metric/sdkapi"
import (
"context"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric/instrument"
"go.opentelemetry.io/otel/sdk/metric/number"
)
type noopInstrument struct {
descriptor Descriptor
}
type noopSyncInstrument struct {
noopInstrument
instrument.Synchronous
}
type noopAsyncInstrument struct {
noopInstrument
instrument.Asynchronous
}
var _ SyncImpl = noopSyncInstrument{}
var _ AsyncImpl = noopAsyncInstrument{}
// NewNoopSyncInstrument returns a No-op implementation of the
// synchronous instrument interface.
func NewNoopSyncInstrument() SyncImpl {
return noopSyncInstrument{
noopInstrument: noopInstrument{
descriptor: Descriptor{
instrumentKind: CounterInstrumentKind,
},
},
}
}
// NewNoopAsyncInstrument returns a No-op implementation of the
// asynchronous instrument interface.
func NewNoopAsyncInstrument() AsyncImpl {
return noopAsyncInstrument{
noopInstrument: noopInstrument{
descriptor: Descriptor{
instrumentKind: CounterObserverInstrumentKind,
},
},
}
}
func (noopInstrument) Implementation() interface{} {
return nil
}
func (n noopInstrument) Descriptor() Descriptor {
return n.descriptor
}
func (noopSyncInstrument) RecordOne(context.Context, number.Number, []attribute.KeyValue) {
}
func (noopAsyncInstrument) ObserveOne(context.Context, number.Number, []attribute.KeyValue) {
}
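A brief sketch of the no-op instruments; they accept measurements and discard them:
// Hypothetical usage, not part of this change.
s := NewNoopSyncInstrument()
s.RecordOne(context.Background(), number.NewInt64Number(1), nil)
a := NewNoopAsyncInstrument()
a.ObserveOne(context.Background(), number.NewFloat64Number(0), nil)
_ = a.Descriptor().InstrumentKind() // CounterObserverInstrumentKind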

View File

@@ -0,0 +1,162 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sdkapi // import "go.opentelemetry.io/otel/sdk/metric/sdkapi"
import (
"context"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric/instrument"
"go.opentelemetry.io/otel/sdk/metric/number"
)
// MeterImpl is the interface an SDK must implement to supply a Meter
// implementation.
type MeterImpl interface {
// NewSyncInstrument returns a newly constructed
// synchronous instrument implementation or an error, should
// one occur.
NewSyncInstrument(descriptor Descriptor) (SyncImpl, error)
// NewAsyncInstrument returns a newly constructed
// asynchronous instrument implementation or an error, should
// one occur.
NewAsyncInstrument(descriptor Descriptor) (AsyncImpl, error)
// RegisterCallback registers the callback to be invoked during
// collection for the given asynchronous instruments.
RegisterCallback(insts []instrument.Asynchronous, callback func(context.Context)) error
}
// InstrumentImpl is a common interface for synchronous and
// asynchronous instruments.
type InstrumentImpl interface {
// Implementation returns the underlying implementation of the
// instrument, which allows the implementation to gain access
// to its own representation especially from a `Measurement`.
Implementation() interface{}
// Descriptor returns a copy of the instrument's Descriptor.
Descriptor() Descriptor
}
// SyncImpl is the implementation-level interface to a generic
// synchronous instrument (e.g., Histogram and Counter instruments).
type SyncImpl interface {
InstrumentImpl
instrument.Synchronous
// RecordOne captures a single synchronous metric event.
RecordOne(ctx context.Context, number number.Number, attrs []attribute.KeyValue)
}
// AsyncImpl is an implementation-level interface to an
// asynchronous instrument (e.g., Observer instruments).
type AsyncImpl interface {
InstrumentImpl
instrument.Asynchronous
// ObserveOne captures a single asynchronous metric event.
ObserveOne(ctx context.Context, number number.Number, attrs []attribute.KeyValue)
}
// AsyncRunner is expected to convert into an AsyncSingleRunner or an
// AsyncBatchRunner. SDKs will encounter an error if the AsyncRunner
// does not satisfy one of these interfaces.
type AsyncRunner interface {
// AnyRunner is a non-exported method with no functional use
// other than to make this a non-empty interface.
AnyRunner()
}
// AsyncSingleRunner is an interface implemented by single-observer
// callbacks.
type AsyncSingleRunner interface {
// Run accepts a single instrument and function for capturing
// observations of that instrument. Each call to the function
// receives one captured observation. (The function accepts
// multiple observations so the same implementation can be
// used for batch runners.)
Run(ctx context.Context, single AsyncImpl, capture func([]attribute.KeyValue, ...Observation))
AsyncRunner
}
// AsyncBatchRunner is an interface implemented by batch-observer
// callbacks.
type AsyncBatchRunner interface {
// Run accepts a function for capturing observations of
// multiple instruments.
Run(ctx context.Context, capture func([]attribute.KeyValue, ...Observation))
AsyncRunner
}
// NewMeasurement constructs a single measurement, a binding between
// a synchronous instrument and a number.
func NewMeasurement(instrument SyncImpl, number number.Number) Measurement {
return Measurement{
instrument: instrument,
number: number,
}
}
// Measurement is a low-level type used with synchronous instruments
// as a direct interface to the SDK.
type Measurement struct {
// number needs to be aligned for 64-bit atomic operations.
number number.Number
instrument SyncImpl
}
// SyncImpl returns the instrument that created this measurement.
// This returns an implementation-level object for use by the SDK;
// users should not refer to it.
func (m Measurement) SyncImpl() SyncImpl {
return m.instrument
}
// Number returns a number recorded in this measurement.
func (m Measurement) Number() number.Number {
return m.number
}
// NewObservation constructs a single observation, a binding between
// an asynchronous instrument and a number.
func NewObservation(instrument AsyncImpl, number number.Number) Observation {
return Observation{
instrument: instrument,
number: number,
}
}
// Observation is a low-level type used with asynchronous instruments
// as a direct interface to the SDK.
type Observation struct {
// number needs to be aligned for 64-bit atomic operations.
number number.Number
instrument AsyncImpl
}
// AsyncImpl returns the instrument that created this observation.
// This returns an implementation-level object for use by the SDK;
// users should not refer to it.
func (m Observation) AsyncImpl() AsyncImpl {
return m.instrument
}
// Number returns a number recorded in this observation.
func (m Observation) Number() number.Number {
return m.number
}
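A brief sketch of both value types; syncInst and asyncInst are assumed to be existing SyncImpl and AsyncImpl values:
// Hypothetical usage, not part of this change.
m := NewMeasurement(syncInst, number.NewInt64Number(5))
_ = m.SyncImpl() // the originating synchronous instrument
_ = m.Number()   // 5
o := NewObservation(asyncInst, number.NewFloat64Number(0.25))
_ = o.AsyncImpl() // the originating asynchronous instrument
_ = o.Number()    // 0.25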

View File

@@ -0,0 +1,181 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sdkapi // import "go.opentelemetry.io/otel/sdk/metric/sdkapi"
import (
"context"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/instrument"
"go.opentelemetry.io/otel/metric/instrument/asyncfloat64"
"go.opentelemetry.io/otel/metric/instrument/asyncint64"
"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
"go.opentelemetry.io/otel/metric/instrument/syncint64"
"go.opentelemetry.io/otel/sdk/metric/number"
)
type (
meter struct{ MeterImpl }
sfMeter struct{ meter }
siMeter struct{ meter }
afMeter struct{ meter }
aiMeter struct{ meter }
iAdder struct{ SyncImpl }
fAdder struct{ SyncImpl }
iRecorder struct{ SyncImpl }
fRecorder struct{ SyncImpl }
iObserver struct{ AsyncImpl }
fObserver struct{ AsyncImpl }
)
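// WrapMeterImpl wraps the provided MeterImpl as a metric.Meter.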
func WrapMeterImpl(impl MeterImpl) metric.Meter {
return meter{impl}
}
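// UnwrapMeterImpl returns the MeterImpl wrapped by a Meter produced by
// WrapMeterImpl, or nil if the Meter was constructed some other way.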
func UnwrapMeterImpl(m metric.Meter) MeterImpl {
mm, ok := m.(meter)
if !ok {
return nil
}
return mm.MeterImpl
}
func (m meter) AsyncFloat64() asyncfloat64.InstrumentProvider {
return afMeter{m}
}
func (m meter) AsyncInt64() asyncint64.InstrumentProvider {
return aiMeter{m}
}
func (m meter) SyncFloat64() syncfloat64.InstrumentProvider {
return sfMeter{m}
}
func (m meter) SyncInt64() syncint64.InstrumentProvider {
return siMeter{m}
}
func (m meter) RegisterCallback(insts []instrument.Asynchronous, cb func(ctx context.Context)) error {
return m.MeterImpl.RegisterCallback(insts, cb)
}
func (m meter) newSync(name string, ikind InstrumentKind, nkind number.Kind, opts []instrument.Option) (SyncImpl, error) {
cfg := instrument.NewConfig(opts...)
return m.NewSyncInstrument(NewDescriptor(name, ikind, nkind, cfg.Description(), cfg.Unit()))
}
func (m meter) newAsync(name string, ikind InstrumentKind, nkind number.Kind, opts []instrument.Option) (AsyncImpl, error) {
cfg := instrument.NewConfig(opts...)
return m.NewAsyncInstrument(NewDescriptor(name, ikind, nkind, cfg.Description(), cfg.Unit()))
}
func (m afMeter) Counter(name string, opts ...instrument.Option) (asyncfloat64.Counter, error) {
inst, err := m.newAsync(name, CounterObserverInstrumentKind, number.Float64Kind, opts)
return fObserver{inst}, err
}
func (m afMeter) UpDownCounter(name string, opts ...instrument.Option) (asyncfloat64.UpDownCounter, error) {
inst, err := m.newAsync(name, UpDownCounterObserverInstrumentKind, number.Float64Kind, opts)
return fObserver{inst}, err
}
func (m afMeter) Gauge(name string, opts ...instrument.Option) (asyncfloat64.Gauge, error) {
inst, err := m.newAsync(name, GaugeObserverInstrumentKind, number.Float64Kind, opts)
return fObserver{inst}, err
}
func (m aiMeter) Counter(name string, opts ...instrument.Option) (asyncint64.Counter, error) {
inst, err := m.newAsync(name, CounterObserverInstrumentKind, number.Int64Kind, opts)
return iObserver{inst}, err
}
func (m aiMeter) UpDownCounter(name string, opts ...instrument.Option) (asyncint64.UpDownCounter, error) {
inst, err := m.newAsync(name, UpDownCounterObserverInstrumentKind, number.Int64Kind, opts)
return iObserver{inst}, err
}
func (m aiMeter) Gauge(name string, opts ...instrument.Option) (asyncint64.Gauge, error) {
inst, err := m.newAsync(name, GaugeObserverInstrumentKind, number.Int64Kind, opts)
return iObserver{inst}, err
}
func (m sfMeter) Counter(name string, opts ...instrument.Option) (syncfloat64.Counter, error) {
inst, err := m.newSync(name, CounterInstrumentKind, number.Float64Kind, opts)
return fAdder{inst}, err
}
func (m sfMeter) UpDownCounter(name string, opts ...instrument.Option) (syncfloat64.UpDownCounter, error) {
inst, err := m.newSync(name, UpDownCounterInstrumentKind, number.Float64Kind, opts)
return fAdder{inst}, err
}
func (m sfMeter) Histogram(name string, opts ...instrument.Option) (syncfloat64.Histogram, error) {
inst, err := m.newSync(name, HistogramInstrumentKind, number.Float64Kind, opts)
return fRecorder{inst}, err
}
func (m siMeter) Counter(name string, opts ...instrument.Option) (syncint64.Counter, error) {
inst, err := m.newSync(name, CounterInstrumentKind, number.Int64Kind, opts)
return iAdder{inst}, err
}
func (m siMeter) UpDownCounter(name string, opts ...instrument.Option) (syncint64.UpDownCounter, error) {
inst, err := m.newSync(name, UpDownCounterInstrumentKind, number.Int64Kind, opts)
return iAdder{inst}, err
}
func (m siMeter) Histogram(name string, opts ...instrument.Option) (syncint64.Histogram, error) {
inst, err := m.newSync(name, HistogramInstrumentKind, number.Int64Kind, opts)
return iRecorder{inst}, err
}
func (a fAdder) Add(ctx context.Context, value float64, attrs ...attribute.KeyValue) {
if a.SyncImpl != nil {
a.SyncImpl.RecordOne(ctx, number.NewFloat64Number(value), attrs)
}
}
func (a iAdder) Add(ctx context.Context, value int64, attrs ...attribute.KeyValue) {
if a.SyncImpl != nil {
a.SyncImpl.RecordOne(ctx, number.NewInt64Number(value), attrs)
}
}
func (a fRecorder) Record(ctx context.Context, value float64, attrs ...attribute.KeyValue) {
if a.SyncImpl != nil {
a.SyncImpl.RecordOne(ctx, number.NewFloat64Number(value), attrs)
}
}
func (a iRecorder) Record(ctx context.Context, value int64, attrs ...attribute.KeyValue) {
if a.SyncImpl != nil {
a.SyncImpl.RecordOne(ctx, number.NewInt64Number(value), attrs)
}
}
func (a fObserver) Observe(ctx context.Context, value float64, attrs ...attribute.KeyValue) {
if a.AsyncImpl != nil {
a.AsyncImpl.ObserveOne(ctx, number.NewFloat64Number(value), attrs)
}
}
func (a iObserver) Observe(ctx context.Context, value int64, attrs ...attribute.KeyValue) {
if a.AsyncImpl != nil {
a.AsyncImpl.ObserveOne(ctx, number.NewInt64Number(value), attrs)
}
}
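A hedged sketch of the wrapper in use; impl is assumed to be an existing MeterImpl (for example the uniqueness-checking registry wrapper around an Accumulator) and ctx a context.Context:
// Hypothetical usage, not part of this change.
meter := WrapMeterImpl(impl)
counter, err := meter.SyncInt64().Counter("requests")
if err != nil {
	// handle the error
}
counter.Add(ctx, 1, attribute.String("route", "/"))
// UnwrapMeterImpl recovers the wrapped implementation.
_ = UnwrapMeterImpl(meter) // == impl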

View File

@@ -15,11 +15,12 @@
package simple // import "go.opentelemetry.io/otel/sdk/metric/selector/simple"
import (
"go.opentelemetry.io/otel/metric/sdkapi"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/metric/aggregator"
"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram"
"go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue"
"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
"go.opentelemetry.io/otel/sdk/metric/export"
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
)
type (
@@ -50,21 +51,21 @@ func NewWithHistogramDistribution(options ...histogram.Option) export.Aggregator
return selectorHistogram{options: options}
}
func sumAggs(aggPtrs []*export.Aggregator) {
func sumAggs(aggPtrs []*aggregator.Aggregator) {
aggs := sum.New(len(aggPtrs))
for i := range aggPtrs {
*aggPtrs[i] = &aggs[i]
}
}
func lastValueAggs(aggPtrs []*export.Aggregator) {
func lastValueAggs(aggPtrs []*aggregator.Aggregator) {
aggs := lastvalue.New(len(aggPtrs))
for i := range aggPtrs {
*aggPtrs[i] = &aggs[i]
}
}
func (selectorInexpensive) AggregatorFor(descriptor *sdkapi.Descriptor, aggPtrs ...*export.Aggregator) {
func (selectorInexpensive) AggregatorFor(descriptor *sdkapi.Descriptor, aggPtrs ...*aggregator.Aggregator) {
switch descriptor.InstrumentKind() {
case sdkapi.GaugeObserverInstrumentKind:
lastValueAggs(aggPtrs)
@@ -78,7 +79,7 @@ func (selectorInexpensive) AggregatorFor(descriptor *sdkapi.Descriptor, aggPtrs
}
}
func (s selectorHistogram) AggregatorFor(descriptor *sdkapi.Descriptor, aggPtrs ...*export.Aggregator) {
func (s selectorHistogram) AggregatorFor(descriptor *sdkapi.Descriptor, aggPtrs ...*aggregator.Aggregator) {
switch descriptor.InstrumentKind() {
case sdkapi.GaugeObserverInstrumentKind:
lastValueAggs(aggPtrs)