chore: upgrade dependencies

Date:   2022-06-08 00:07:52 +02:00
Parent: e22bdf96d9
Commit: 1e6966495c
185 changed files with 5385 additions and 4081 deletions


@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -15,19 +15,81 @@
package aggregator // import "go.opentelemetry.io/otel/sdk/metric/aggregator"
import (
"context"
"fmt"
"math"
"go.opentelemetry.io/otel/metric/number"
"go.opentelemetry.io/otel/metric/sdkapi"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
"go.opentelemetry.io/otel/sdk/metric/number"
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
)
// Aggregator implements a specific aggregation behavior, e.g., a
// behavior to track a sequence of updates to an instrument. Counter
// instruments commonly use a simple Sum aggregator, but for the
// distribution instruments (Histogram, GaugeObserver) there are a
// number of possible aggregators with different cost and accuracy
// tradeoffs.
//
// Note that any Aggregator may be attached to any instrument--this is
// the result of the OpenTelemetry API/SDK separation. It is possible
// to attach a Sum aggregator to a Histogram instrument.
type Aggregator interface {
// Aggregation returns an Aggregation interface to access the
// current state of this Aggregator. The caller is
// responsible for synchronization and must not call any of the
// other methods in this interface concurrently while using
// the Aggregation.
Aggregation() aggregation.Aggregation
// Update receives a new measured value and incorporates it
// into the aggregation. Update() calls may be called
// concurrently.
//
// Descriptor.NumberKind() should be consulted to determine
// whether the provided number is an int64 or float64.
//
// The Context argument comes from user-level code and could be
// inspected for a `correlation.Map` or `trace.SpanContext`.
Update(ctx context.Context, number number.Number, descriptor *sdkapi.Descriptor) error
// SynchronizedMove is called during collection to finish one
// period of aggregation by atomically saving the
// currently-updating state into the argument Aggregator AND
// resetting the current value to the zero state.
//
// SynchronizedMove() is called concurrently with Update(). These
// two methods must be synchronized with respect to each
// other, for correctness.
//
// After saving a synchronized copy, the Aggregator can be converted
// into one or more of the interfaces in the `aggregation` sub-package,
// according to the kind of Aggregator that was selected.
//
// This method will return an InconsistentAggregatorError if
// this Aggregator cannot be copied into the destination due
// to an incompatible type.
//
// This call has no Context argument because it is expected to
// perform only computation.
//
// When called with a nil `destination`, this Aggregator is reset
// and the current value is discarded.
SynchronizedMove(destination Aggregator, descriptor *sdkapi.Descriptor) error
// Merge combines the checkpointed state from the argument
// Aggregator into this Aggregator. Merge is not synchronized
// with respect to Update or SynchronizedMove.
//
// The owner of an Aggregator being merged is responsible for
// synchronization of both Aggregator states.
Merge(aggregator Aggregator, descriptor *sdkapi.Descriptor) error
}
// NewInconsistentAggregatorError formats an error describing an attempt to
// Checkpoint or Merge different-type aggregators. The result can be unwrapped as
// an ErrInconsistentType.
func NewInconsistentAggregatorError(a1, a2 export.Aggregator) error {
func NewInconsistentAggregatorError(a1, a2 Aggregator) error {
return fmt.Errorf("%w: %T and %T", aggregation.ErrInconsistentType, a1, a2)
}
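For orientation, here is a minimal sketch of driving an Aggregator through one collection period using the sum aggregator and the relocated number/sdkapi packages. The sdkapi.NewDescriptor call and the instrument-kind constant are assumptions about this release's sdkapi package, not part of the diff above:

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
	"go.opentelemetry.io/otel/sdk/metric/number"
	"go.opentelemetry.io/otel/sdk/metric/sdkapi"
)

func main() {
	// Descriptor for an illustrative int64 counter instrument (assumed API).
	desc := sdkapi.NewDescriptor("requests", sdkapi.CounterInstrumentKind, number.Int64Kind, "", "")

	// sum.New returns a slice so a caller can keep a current aggregator and
	// a checkpoint aggregator side by side.
	aggs := sum.New(2)
	current, checkpoint := &aggs[0], &aggs[1]

	// Instrumentation context: Update may be called concurrently.
	_ = current.Update(context.Background(), number.NewInt64Number(5), &desc)
	_ = current.Update(context.Background(), number.NewInt64Number(3), &desc)

	// Collection context: atomically save the state into the checkpoint and
	// reset the current value to zero.
	_ = current.SynchronizedMove(checkpoint, &desc)

	total, _ := checkpoint.Sum()
	fmt.Println(total.AsInt64()) // 8
}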


@@ -19,11 +19,10 @@ import (
"sort"
"sync"
"go.opentelemetry.io/otel/metric/number"
"go.opentelemetry.io/otel/metric/sdkapi"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/aggregator"
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
"go.opentelemetry.io/otel/sdk/metric/number"
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
)
// Note: This code uses a Mutex to govern access to the exclusive
@@ -89,7 +88,7 @@ var defaultFloat64ExplicitBoundaries = []float64{.005, .01, .025, .05, .1, .25,
const defaultInt64ExplicitBoundaryMultiplier = 1e6
// defaultInt64ExplicitBoundaries applies a multiplier to the default
// float64 boundaries: [ 5K, 10K, 25K, ..., 2.5M, 5M, 10M ]
// float64 boundaries: [ 5K, 10K, 25K, ..., 2.5M, 5M, 10M ].
var defaultInt64ExplicitBoundaries = func(bounds []float64) (asint []float64) {
for _, f := range bounds {
asint = append(asint, defaultInt64ExplicitBoundaryMultiplier*f)
@@ -97,7 +96,7 @@ var defaultInt64ExplicitBoundaries = func(bounds []float64) (asint []float64) {
return
}(defaultFloat64ExplicitBoundaries)
var _ export.Aggregator = &Aggregator{}
var _ aggregator.Aggregator = &Aggregator{}
var _ aggregation.Sum = &Aggregator{}
var _ aggregation.Count = &Aggregator{}
var _ aggregation.Histogram = &Aggregator{}
@@ -174,7 +173,7 @@ func (c *Aggregator) Histogram() (aggregation.Buckets, error) {
// the empty set. Since no locks are taken, there is a chance that
// the independent Sum, Count and Bucket Count are not consistent with each
// other.
func (c *Aggregator) SynchronizedMove(oa export.Aggregator, desc *sdkapi.Descriptor) error {
func (c *Aggregator) SynchronizedMove(oa aggregator.Aggregator, desc *sdkapi.Descriptor) error {
o, _ := oa.(*Aggregator)
if oa != nil && o == nil {
@@ -254,7 +253,7 @@ func (c *Aggregator) Update(_ context.Context, number number.Number, desc *sdkap
}
// Merge combines two histograms that have the same buckets into a single one.
func (c *Aggregator) Merge(oa export.Aggregator, desc *sdkapi.Descriptor) error {
func (c *Aggregator) Merge(oa aggregator.Aggregator, desc *sdkapi.Descriptor) error {
o, _ := oa.(*Aggregator)
if o == nil {
return aggregator.NewInconsistentAggregatorError(c, oa)


@@ -20,11 +20,10 @@ import (
"time"
"unsafe"
"go.opentelemetry.io/otel/metric/number"
"go.opentelemetry.io/otel/metric/sdkapi"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/aggregator"
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
"go.opentelemetry.io/otel/sdk/metric/number"
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
)
type (
@@ -43,15 +42,14 @@ type (
// value needs to be aligned for 64-bit atomic operations.
value number.Number
// timestamp indicates when this record was submitted.
// this can be used to pick a winner when multiple
// records contain lastValue data for the same labels due
// to races.
// timestamp indicates when this record was submitted. This can be
// used to pick a winner when multiple records contain lastValue data
// for the same attributes due to races.
timestamp time.Time
}
)
var _ export.Aggregator = &Aggregator{}
var _ aggregator.Aggregator = &Aggregator{}
var _ aggregation.LastValue = &Aggregator{}
// An unset lastValue has zero timestamp and zero value.
@@ -92,7 +90,7 @@ func (g *Aggregator) LastValue() (number.Number, time.Time, error) {
}
// SynchronizedMove atomically saves the current value.
func (g *Aggregator) SynchronizedMove(oa export.Aggregator, _ *sdkapi.Descriptor) error {
func (g *Aggregator) SynchronizedMove(oa aggregator.Aggregator, _ *sdkapi.Descriptor) error {
if oa == nil {
atomic.StorePointer(&g.value, unsafe.Pointer(unsetLastValue))
return nil
@@ -117,7 +115,7 @@ func (g *Aggregator) Update(_ context.Context, number number.Number, desc *sdkap
// Merge combines state from two aggregators. The most-recently set
// value is chosen.
func (g *Aggregator) Merge(oa export.Aggregator, desc *sdkapi.Descriptor) error {
func (g *Aggregator) Merge(oa aggregator.Aggregator, desc *sdkapi.Descriptor) error {
o, _ := oa.(*Aggregator)
if o == nil {
return aggregator.NewInconsistentAggregatorError(g, oa)


@@ -17,11 +17,10 @@ package sum // import "go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
import (
"context"
"go.opentelemetry.io/otel/metric/number"
"go.opentelemetry.io/otel/metric/sdkapi"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/aggregator"
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
"go.opentelemetry.io/otel/sdk/metric/number"
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
)
// Aggregator aggregates counter events.
@@ -31,7 +30,7 @@ type Aggregator struct {
value number.Number
}
var _ export.Aggregator = &Aggregator{}
var _ aggregator.Aggregator = &Aggregator{}
var _ aggregation.Sum = &Aggregator{}
// New returns a new counter aggregator implemented by atomic
@@ -59,7 +58,7 @@ func (c *Aggregator) Sum() (number.Number, error) {
// SynchronizedMove atomically saves the current value into oa and resets the
// current sum to zero.
func (c *Aggregator) SynchronizedMove(oa export.Aggregator, _ *sdkapi.Descriptor) error {
func (c *Aggregator) SynchronizedMove(oa aggregator.Aggregator, _ *sdkapi.Descriptor) error {
if oa == nil {
c.value.SetRawAtomic(0)
return nil
@@ -79,7 +78,7 @@ func (c *Aggregator) Update(_ context.Context, num number.Number, desc *sdkapi.D
}
// Merge combines two counters by adding their sums.
func (c *Aggregator) Merge(oa export.Aggregator, desc *sdkapi.Descriptor) error {
func (c *Aggregator) Merge(oa aggregator.Aggregator, desc *sdkapi.Descriptor) error {
o, _ := oa.(*Aggregator)
if o == nil {
return aggregator.NewInconsistentAggregatorError(c, oa)


@@ -16,6 +16,7 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric"
import "unsafe"
// Deprecated: will be removed soon.
func AtomicFieldOffsets() map[string]uintptr {
return map[string]uintptr{
"record.refMapped.value": unsafe.Offsetof(record{}.refMapped.value),


@@ -18,7 +18,7 @@ import (
"time"
"go.opentelemetry.io/otel"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/metric/export"
"go.opentelemetry.io/otel/sdk/resource"
)
@@ -62,7 +62,7 @@ type config struct {
// Option is the interface that applies the value to a configuration option.
type Option interface {
// apply sets the Option value of a Config.
apply(*config)
apply(config) config
}
// WithResource sets the Resource configuration option of a Config by merging it
@@ -73,12 +73,13 @@ func WithResource(r *resource.Resource) Option {
type resourceOption struct{ *resource.Resource }
func (o resourceOption) apply(cfg *config) {
func (o resourceOption) apply(cfg config) config {
res, err := resource.Merge(cfg.Resource, o.Resource)
if err != nil {
otel.Handle(err)
}
cfg.Resource = res
return cfg
}
// WithCollectPeriod sets the CollectPeriod configuration option of a Config.
@@ -88,8 +89,9 @@ func WithCollectPeriod(period time.Duration) Option {
type collectPeriodOption time.Duration
func (o collectPeriodOption) apply(cfg *config) {
func (o collectPeriodOption) apply(cfg config) config {
cfg.CollectPeriod = time.Duration(o)
return cfg
}
// WithCollectTimeout sets the CollectTimeout configuration option of a Config.
@@ -99,8 +101,9 @@ func WithCollectTimeout(timeout time.Duration) Option {
type collectTimeoutOption time.Duration
func (o collectTimeoutOption) apply(cfg *config) {
func (o collectTimeoutOption) apply(cfg config) config {
cfg.CollectTimeout = time.Duration(o)
return cfg
}
// WithExporter sets the exporter configuration option of a Config.
@@ -110,8 +113,9 @@ func WithExporter(exporter export.Exporter) Option {
type exporterOption struct{ exporter export.Exporter }
func (o exporterOption) apply(cfg *config) {
func (o exporterOption) apply(cfg config) config {
cfg.Exporter = o.exporter
return cfg
}
// WithPushTimeout sets the PushTimeout configuration option of a Config.
@@ -121,6 +125,7 @@ func WithPushTimeout(timeout time.Duration) Option {
type pushTimeoutOption time.Duration
func (o pushTimeoutOption) apply(cfg *config) {
func (o pushTimeoutOption) apply(cfg config) config {
cfg.PushTimeout = time.Duration(o)
return cfg
}
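The controller options above switch from mutating a *config to returning an updated config value. A self-contained sketch of that value-returning functional-option shape (names here are illustrative, not taken from the SDK):

package main

import (
	"fmt"
	"time"
)

// config is an illustrative stand-in for the controller's config struct.
type config struct {
	CollectPeriod time.Duration
}

// Option applies a value to a configuration option: it receives the config
// by value and returns the updated copy, matching apply(config) config above.
type Option interface {
	apply(config) config
}

type collectPeriodOption time.Duration

func (o collectPeriodOption) apply(cfg config) config {
	cfg.CollectPeriod = time.Duration(o)
	return cfg
}

// WithCollectPeriod sets the CollectPeriod option.
func WithCollectPeriod(period time.Duration) Option {
	return collectPeriodOption(period)
}

func newConfig(opts ...Option) config {
	cfg := config{CollectPeriod: 10 * time.Second} // defaults first
	for _, opt := range opts {
		cfg = opt.apply(cfg) // reassign, as in the controller's New further down
	}
	return cfg
}

func main() {
	fmt.Println(newConfig(WithCollectPeriod(30 * time.Second)).CollectPeriod) // 30s
}

Returning the config by value avoids shared-pointer mutation and lets the constructor establish its defaults before folding the options in.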


@@ -21,12 +21,13 @@ import (
"time"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/internal/metric/registry"
"go.opentelemetry.io/otel/metric"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/instrumentation"
sdk "go.opentelemetry.io/otel/sdk/metric"
controllerTime "go.opentelemetry.io/otel/sdk/metric/controller/time"
"go.opentelemetry.io/otel/sdk/metric/export"
"go.opentelemetry.io/otel/sdk/metric/registry"
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
"go.opentelemetry.io/otel/sdk/resource"
)
@@ -99,7 +100,7 @@ func (c *Controller) Meter(instrumentationName string, opts ...metric.MeterOptio
library: library,
}))
}
return metric.WrapMeterImpl(m.(*registry.UniqueInstrumentMeterImpl))
return sdkapi.WrapMeterImpl(m.(*registry.UniqueInstrumentMeterImpl))
}
type accumulatorCheckpointer struct {
@@ -108,17 +109,19 @@ type accumulatorCheckpointer struct {
library instrumentation.Library
}
var _ sdkapi.MeterImpl = &accumulatorCheckpointer{}
// New constructs a Controller using the provided checkpointer factory
// and options (including optional exporter) to configure a metric
// export pipeline.
func New(checkpointerFactory export.CheckpointerFactory, opts ...Option) *Controller {
c := &config{
c := config{
CollectPeriod: DefaultPeriod,
CollectTimeout: DefaultPeriod,
PushTimeout: DefaultPeriod,
}
for _, opt := range opts {
opt.apply(c)
c = opt.apply(c)
}
if c.Resource == nil {
c.Resource = resource.Default()


@@ -39,15 +39,15 @@ instrument callbacks.
Internal Structure
Each observer also has its own kind of record stored in the SDK. This
record contains a set of recorders for every specific label set used in the
callback.
record contains a set of recorders for every specific attribute set used in
the callback.
A sync.Map maintains the mapping of current instruments and label sets to
internal records. To find a record, the SDK consults the Map to
locate an existing record, otherwise it constructs a new record. The SDK
maintains a count of the number of references to each record, ensuring
that records are not reclaimed from the Map while they are still active
from the user's perspective.
A sync.Map maintains the mapping of current instruments and attribute sets to
internal records. To find a record, the SDK consults the Map to locate an
existing record, otherwise it constructs a new record. The SDK maintains a
count of the number of references to each record, ensuring that records are
not reclaimed from the Map while they are still active from the user's
perspective.
Metric collection is performed via a single-threaded call to Collect that
sweeps through all records in the SDK, checkpointing their state. When a
@@ -65,7 +65,7 @@ Export Pipeline
While the SDK serves to maintain a current set of records and
coordinate collection, the behavior of a metrics export pipeline is
configured through the export types in
go.opentelemetry.io/otel/sdk/export/metric. It is important to keep
go.opentelemetry.io/otel/sdk/metric/export. It is important to keep
in mind the context these interfaces are called from. There are two
contexts, instrumentation context, where a user-level goroutine that
enters the SDK resulting in a new record, and collection context,
@@ -106,11 +106,6 @@ Processor implementations are provided, the "defaultkeys" Processor groups
aggregate metrics by their recommended Descriptor.Keys(), the
"simple" Processor aggregates metrics at full dimensionality.
LabelEncoder is an optional optimization that allows an exporter to
provide the serialization logic for labels. This allows avoiding
duplicate serialization of labels, once as a unique key in the SDK (or
Processor) and once in the exporter.
Reader is an interface between the Processor and the Exporter.
After completing a collection pass, the Processor.Reader() method
returns a Reader, which the Exporter uses to iterate over all
@@ -118,10 +113,7 @@ the updated metrics.
Record is a struct containing the state of an individual exported
metric. This is the result of one collection interface for one
instrument and one label set.
Labels is a struct containing an ordered set of labels, the
corresponding unique encoding, and the encoder that produced it.
instrument and one attribute set.
Exporter is the final stage of an export pipeline. It is called with
a Reader capable of enumerating all the updated metrics.
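To make the pipeline described above concrete, a hedged wiring sketch using the post-move import paths. The selector and temporality helpers (simple.NewWithHistogramDistribution, aggregation.CumulativeTemporalitySelector) are assumed to exist with these signatures in this release; an exporter would be attached via the controller's WithExporter option:

package main

import (
	"context"
	"log"

	controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
	"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
	processor "go.opentelemetry.io/otel/sdk/metric/processor/basic"
	"go.opentelemetry.io/otel/sdk/metric/selector/simple"
)

func main() {
	// Checkpointer factory: selects an aggregator per instrument and keeps
	// the per-interval state the exporter will read.
	factory := processor.NewFactory(
		simple.NewWithHistogramDistribution(),
		aggregation.CumulativeTemporalitySelector(),
	)

	// The controller coordinates collection; without an exporter it runs in
	// pull mode, and controller.WithExporter(...) would enable push.
	ctrl := controller.New(factory)
	if err := ctrl.Start(context.Background()); err != nil {
		log.Fatal(err)
	}
	defer func() { _ = ctrl.Stop(context.Background()) }()

	// Instruments are created from the Meter returned here.
	meter := ctrl.Meter("example/doc")
	_ = meter
}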


@@ -12,13 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package aggregation // import "go.opentelemetry.io/otel/sdk/export/metric/aggregation"
package aggregation // import "go.opentelemetry.io/otel/sdk/metric/export/aggregation"
import (
"fmt"
"time"
"go.opentelemetry.io/otel/metric/number"
"go.opentelemetry.io/otel/sdk/metric/number"
)
// These interfaces describe the various ways to access state from an
@@ -46,18 +46,6 @@ type (
Count() (uint64, error)
}
// Min returns the minimum value over the set of values that were aggregated.
Min interface {
Aggregation
Min() (number.Number, error)
}
// Max returns the maximum value over the set of values that were aggregated.
Max interface {
Aggregation
Max() (number.Number, error)
}
// LastValue returns the latest value that was aggregated.
LastValue interface {
Aggregation
@@ -67,7 +55,7 @@ type (
// Buckets represents histogram buckets boundaries and counts.
//
// For a Histogram with N defined boundaries, e.g, [x, y, z].
// There are N+1 counts: [-inf, x), [x, y), [y, z), [z, +inf]
// There are N+1 counts: [-inf, x), [x, y), [y, z), [z, +inf].
Buckets struct {
// Boundaries are floating point numbers, even when
// aggregating integers.


@@ -14,10 +14,10 @@
//go:generate stringer -type=Temporality
package aggregation // import "go.opentelemetry.io/otel/sdk/export/metric/aggregation"
package aggregation // import "go.opentelemetry.io/otel/sdk/metric/export/aggregation"
import (
"go.opentelemetry.io/otel/metric/sdkapi"
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
)
// Temporality indicates the temporal aggregation exported by an exporter.


@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package metric // import "go.opentelemetry.io/otel/sdk/export/metric"
package export // import "go.opentelemetry.io/otel/sdk/metric/export"
import (
"context"
@@ -20,10 +20,10 @@ import (
"time"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric/number"
"go.opentelemetry.io/otel/metric/sdkapi"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/metric/aggregator"
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
"go.opentelemetry.io/otel/sdk/resource"
)
@@ -64,12 +64,11 @@ type Processor interface {
// disable metrics with active records.
AggregatorSelector
// Process is called by the SDK once per internal record,
// passing the export Accumulation (a Descriptor, the corresponding
// Labels, and the checkpointed Aggregator). This call has no
// Context argument because it is expected to perform only
// computation. An SDK is not expected to call exporters from
// with Process, use a controller for that (see
// Process is called by the SDK once per internal record, passing the
// export Accumulation (a Descriptor, the corresponding attributes, and
// the checkpointed Aggregator). This call has no Context argument because
// it is expected to perform only computation. An SDK is not expected to
// call exporters from within Process; use a controller for that (see
// ./controllers/{pull,push}.
Process(accum Accumulation) error
}
@@ -94,7 +93,7 @@ type AggregatorSelector interface {
// Note: This is context-free because the aggregator should
// not relate to the incoming context. This call should not
// block.
AggregatorFor(descriptor *sdkapi.Descriptor, aggregator ...*Aggregator)
AggregatorFor(descriptor *sdkapi.Descriptor, aggregator ...*aggregator.Aggregator)
}
// Checkpointer is the interface used by a Controller to coordinate
@@ -130,68 +129,6 @@ type CheckpointerFactory interface {
NewCheckpointer() Checkpointer
}
// Aggregator implements a specific aggregation behavior, e.g., a
// behavior to track a sequence of updates to an instrument. Counter
// instruments commonly use a simple Sum aggregator, but for the
// distribution instruments (Histogram, GaugeObserver) there are a
// number of possible aggregators with different cost and accuracy
// tradeoffs.
//
// Note that any Aggregator may be attached to any instrument--this is
// the result of the OpenTelemetry API/SDK separation. It is possible
// to attach a Sum aggregator to a Histogram instrument.
type Aggregator interface {
// Aggregation returns an Aggregation interface to access the
// current state of this Aggregator. The caller is
// responsible for synchronization and must not call any the
// other methods in this interface concurrently while using
// the Aggregation.
Aggregation() aggregation.Aggregation
// Update receives a new measured value and incorporates it
// into the aggregation. Update() calls may be called
// concurrently.
//
// Descriptor.NumberKind() should be consulted to determine
// whether the provided number is an int64 or float64.
//
// The Context argument comes from user-level code and could be
// inspected for a `correlation.Map` or `trace.SpanContext`.
Update(ctx context.Context, number number.Number, descriptor *sdkapi.Descriptor) error
// SynchronizedMove is called during collection to finish one
// period of aggregation by atomically saving the
// currently-updating state into the argument Aggregator AND
// resetting the current value to the zero state.
//
// SynchronizedMove() is called concurrently with Update(). These
// two methods must be synchronized with respect to each
// other, for correctness.
//
// After saving a synchronized copy, the Aggregator can be converted
// into one or more of the interfaces in the `aggregation` sub-package,
// according to kind of Aggregator that was selected.
//
// This method will return an InconsistentAggregatorError if
// this Aggregator cannot be copied into the destination due
// to an incompatible type.
//
// This call has no Context argument because it is expected to
// perform only computation.
//
// When called with a nil `destination`, this Aggregator is reset
// and the current value is discarded.
SynchronizedMove(destination Aggregator, descriptor *sdkapi.Descriptor) error
// Merge combines the checkpointed state from the argument
// Aggregator into this Aggregator. Merge is not synchronized
// with respect to Update or SynchronizedMove.
//
// The owner of an Aggregator being merged is responsible for
// synchronization of both Aggregator states.
Merge(aggregator Aggregator, descriptor *sdkapi.Descriptor) error
}
// Exporter handles presentation of the checkpoint of aggregate
// metrics. This is the final stage of a metrics export pipeline,
// where metric data are formatted for a specific system.
@@ -260,18 +197,18 @@ type Reader interface {
// steps.
type Metadata struct {
descriptor *sdkapi.Descriptor
labels *attribute.Set
attrs *attribute.Set
}
// Accumulation contains the exported data for a single metric instrument
// and label set, as prepared by an Accumulator for the Processor.
// and attribute set, as prepared by an Accumulator for the Processor.
type Accumulation struct {
Metadata
aggregator Aggregator
aggregator aggregator.Aggregator
}
// Record contains the exported data for a single metric instrument
// and label set, as prepared by the Processor for the Exporter.
// and attribute set, as prepared by the Processor for the Exporter.
// This includes the effective start and end time for the aggregation.
type Record struct {
Metadata
@@ -285,21 +222,21 @@ func (m Metadata) Descriptor() *sdkapi.Descriptor {
return m.descriptor
}
// Labels describes the labels associated with the instrument and the
// Attributes returns the attribute set associated with the instrument and the
// aggregated data.
func (m Metadata) Labels() *attribute.Set {
return m.labels
func (m Metadata) Attributes() *attribute.Set {
return m.attrs
}
// NewAccumulation allows Accumulator implementations to construct new
// Accumulations to send to Processors. The Descriptor, Labels,
// and Aggregator represent aggregate metric events received over a single
// Accumulations to send to Processors. The Descriptor, attributes, and
// Aggregator represent aggregate metric events received over a single
// collection period.
func NewAccumulation(descriptor *sdkapi.Descriptor, labels *attribute.Set, aggregator Aggregator) Accumulation {
func NewAccumulation(descriptor *sdkapi.Descriptor, attrs *attribute.Set, aggregator aggregator.Aggregator) Accumulation {
return Accumulation{
Metadata: Metadata{
descriptor: descriptor,
labels: labels,
attrs: attrs,
},
aggregator: aggregator,
}
@@ -307,18 +244,18 @@ func NewAccumulation(descriptor *sdkapi.Descriptor, labels *attribute.Set, aggre
// Aggregator returns the checkpointed aggregator. It is safe to
// access the checkpointed state without locking.
func (r Accumulation) Aggregator() Aggregator {
func (r Accumulation) Aggregator() aggregator.Aggregator {
return r.aggregator
}
// NewRecord allows Processor implementations to construct export
// records. The Descriptor, Labels, and Aggregator represent
// aggregate metric events received over a single collection period.
func NewRecord(descriptor *sdkapi.Descriptor, labels *attribute.Set, aggregation aggregation.Aggregation, start, end time.Time) Record {
// NewRecord allows Processor implementations to construct export records.
// The Descriptor, attributes, and Aggregator represent aggregate metric
// events received over a single collection period.
func NewRecord(descriptor *sdkapi.Descriptor, attrs *attribute.Set, aggregation aggregation.Aggregation, start, end time.Time) Record {
return Record{
Metadata: Metadata{
descriptor: descriptor,
labels: labels,
attrs: attrs,
},
aggregation: aggregation,
start: start,


@@ -0,0 +1,23 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package number provides a number abstraction for instruments that
either support int64 or float64 input values.
This package is currently in a pre-GA phase. Backwards incompatible changes
may be introduced in subsequent minor version releases as we work to track the
evolving OpenTelemetry specification and user feedback.
*/
package number // import "go.opentelemetry.io/otel/sdk/metric/number"


@@ -0,0 +1,24 @@
// Code generated by "stringer -type=Kind"; DO NOT EDIT.
package number
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[Int64Kind-0]
_ = x[Float64Kind-1]
}
const _Kind_name = "Int64KindFloat64Kind"
var _Kind_index = [...]uint8{0, 9, 20}
func (i Kind) String() string {
if i < 0 || i >= Kind(len(_Kind_index)-1) {
return "Kind(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _Kind_name[_Kind_index[i]:_Kind_index[i+1]]
}


@@ -0,0 +1,538 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package number // import "go.opentelemetry.io/otel/sdk/metric/number"
//go:generate stringer -type=Kind
import (
"fmt"
"math"
"sync/atomic"
"go.opentelemetry.io/otel/internal"
)
// Kind describes the data type of the Number.
type Kind int8
const (
// Int64Kind means that the Number stores int64.
Int64Kind Kind = iota
// Float64Kind means that the Number stores float64.
Float64Kind
)
// Zero returns a zero value for a given Kind.
func (k Kind) Zero() Number {
switch k {
case Int64Kind:
return NewInt64Number(0)
case Float64Kind:
return NewFloat64Number(0.)
default:
return Number(0)
}
}
// Minimum returns the minimum representable value
// for a given Kind.
func (k Kind) Minimum() Number {
switch k {
case Int64Kind:
return NewInt64Number(math.MinInt64)
case Float64Kind:
return NewFloat64Number(-1. * math.MaxFloat64)
default:
return Number(0)
}
}
// Maximum returns the maximum representable value
// for a given Kind.
func (k Kind) Maximum() Number {
switch k {
case Int64Kind:
return NewInt64Number(math.MaxInt64)
case Float64Kind:
return NewFloat64Number(math.MaxFloat64)
default:
return Number(0)
}
}
// Number represents either an integral or a floating point value. It
// needs to be accompanied with a source of Kind that describes
// the actual type of the value stored within Number.
type Number uint64
// - constructors
// NewNumberFromRaw creates a new Number from a raw value.
func NewNumberFromRaw(r uint64) Number {
return Number(r)
}
// NewInt64Number creates an integral Number.
func NewInt64Number(i int64) Number {
return NewNumberFromRaw(internal.Int64ToRaw(i))
}
// NewFloat64Number creates a floating point Number.
func NewFloat64Number(f float64) Number {
return NewNumberFromRaw(internal.Float64ToRaw(f))
}
// NewNumberSignChange returns a number with the same magnitude and
// the opposite sign. `kind` must describe the kind of number in `nn`.
func NewNumberSignChange(kind Kind, nn Number) Number {
switch kind {
case Int64Kind:
return NewInt64Number(-nn.AsInt64())
case Float64Kind:
return NewFloat64Number(-nn.AsFloat64())
}
return nn
}
// - as x
// AsNumber gets the Number.
func (n *Number) AsNumber() Number {
return *n
}
// AsRaw gets the uninterpreted raw value. Might be useful for some
// atomic operations.
func (n *Number) AsRaw() uint64 {
return uint64(*n)
}
// AsInt64 assumes that the value contains an int64 and returns it as
// such.
func (n *Number) AsInt64() int64 {
return internal.RawToInt64(n.AsRaw())
}
// AsFloat64 assumes that the measurement value contains a float64 and
// returns it as such.
func (n *Number) AsFloat64() float64 {
return internal.RawToFloat64(n.AsRaw())
}
// - as x atomic
// AsNumberAtomic gets the Number atomically.
func (n *Number) AsNumberAtomic() Number {
return NewNumberFromRaw(n.AsRawAtomic())
}
// AsRawAtomic gets the uninterpreted raw value atomically. Might be
// useful for some atomic operations.
func (n *Number) AsRawAtomic() uint64 {
return atomic.LoadUint64(n.AsRawPtr())
}
// AsInt64Atomic assumes that the number contains an int64 and returns
// it as such atomically.
func (n *Number) AsInt64Atomic() int64 {
return atomic.LoadInt64(n.AsInt64Ptr())
}
// AsFloat64Atomic assumes that the measurement value contains a
// float64 and returns it as such atomically.
func (n *Number) AsFloat64Atomic() float64 {
return internal.RawToFloat64(n.AsRawAtomic())
}
// - as x ptr
// AsRawPtr gets the pointer to the raw, uninterpreted raw
// value. Might be useful for some atomic operations.
func (n *Number) AsRawPtr() *uint64 {
return (*uint64)(n)
}
// AsInt64Ptr assumes that the number contains an int64 and returns a
// pointer to it.
func (n *Number) AsInt64Ptr() *int64 {
return internal.RawPtrToInt64Ptr(n.AsRawPtr())
}
// AsFloat64Ptr assumes that the number contains a float64 and returns a
// pointer to it.
func (n *Number) AsFloat64Ptr() *float64 {
return internal.RawPtrToFloat64Ptr(n.AsRawPtr())
}
// - coerce
// CoerceToInt64 casts the number to int64. May result in
// data/precision loss.
func (n *Number) CoerceToInt64(kind Kind) int64 {
switch kind {
case Int64Kind:
return n.AsInt64()
case Float64Kind:
return int64(n.AsFloat64())
default:
// you get what you deserve
return 0
}
}
// CoerceToFloat64 casts the number to float64. May result in
// data/precision loss.
func (n *Number) CoerceToFloat64(kind Kind) float64 {
switch kind {
case Int64Kind:
return float64(n.AsInt64())
case Float64Kind:
return n.AsFloat64()
default:
// you get what you deserve
return 0
}
}
// - set
// SetNumber sets the number to the passed number. Both should be of
// the same kind.
func (n *Number) SetNumber(nn Number) {
*n.AsRawPtr() = nn.AsRaw()
}
// SetRaw sets the number to the passed raw value. Both number and the
// raw number should represent the same kind.
func (n *Number) SetRaw(r uint64) {
*n.AsRawPtr() = r
}
// SetInt64 assumes that the number contains an int64 and sets it to
// the passed value.
func (n *Number) SetInt64(i int64) {
*n.AsInt64Ptr() = i
}
// SetFloat64 assumes that the number contains a float64 and sets it
// to the passed value.
func (n *Number) SetFloat64(f float64) {
*n.AsFloat64Ptr() = f
}
// - set atomic
// SetNumberAtomic sets the number to the passed number
// atomically. Both should be of the same kind.
func (n *Number) SetNumberAtomic(nn Number) {
atomic.StoreUint64(n.AsRawPtr(), nn.AsRaw())
}
// SetRawAtomic sets the number to the passed raw value
// atomically. Both number and the raw number should represent the
// same kind.
func (n *Number) SetRawAtomic(r uint64) {
atomic.StoreUint64(n.AsRawPtr(), r)
}
// SetInt64Atomic assumes that the number contains an int64 and sets
// it to the passed value atomically.
func (n *Number) SetInt64Atomic(i int64) {
atomic.StoreInt64(n.AsInt64Ptr(), i)
}
// SetFloat64Atomic assumes that the number contains a float64 and
// sets it to the passed value atomically.
func (n *Number) SetFloat64Atomic(f float64) {
atomic.StoreUint64(n.AsRawPtr(), internal.Float64ToRaw(f))
}
// - swap
// SwapNumber sets the number to the passed number and returns the old
// number. Both this number and the passed number should be of the
// same kind.
func (n *Number) SwapNumber(nn Number) Number {
old := *n
n.SetNumber(nn)
return old
}
// SwapRaw sets the number to the passed raw value and returns the old
// raw value. Both number and the raw number should represent the same
// kind.
func (n *Number) SwapRaw(r uint64) uint64 {
old := n.AsRaw()
n.SetRaw(r)
return old
}
// SwapInt64 assumes that the number contains an int64, sets it to the
// passed value and returns the old int64 value.
func (n *Number) SwapInt64(i int64) int64 {
old := n.AsInt64()
n.SetInt64(i)
return old
}
// SwapFloat64 assumes that the number contains an float64, sets it to
// the passed value and returns the old float64 value.
func (n *Number) SwapFloat64(f float64) float64 {
old := n.AsFloat64()
n.SetFloat64(f)
return old
}
// - swap atomic
// SwapNumberAtomic sets the number to the passed number and returns
// the old number atomically. Both this number and the passed number
// should be of the same kind.
func (n *Number) SwapNumberAtomic(nn Number) Number {
return NewNumberFromRaw(atomic.SwapUint64(n.AsRawPtr(), nn.AsRaw()))
}
// SwapRawAtomic sets the number to the passed raw value and returns
// the old raw value atomically. Both number and the raw number should
// represent the same kind.
func (n *Number) SwapRawAtomic(r uint64) uint64 {
return atomic.SwapUint64(n.AsRawPtr(), r)
}
// SwapInt64Atomic assumes that the number contains an int64, sets it
// to the passed value and returns the old int64 value atomically.
func (n *Number) SwapInt64Atomic(i int64) int64 {
return atomic.SwapInt64(n.AsInt64Ptr(), i)
}
// SwapFloat64Atomic assumes that the number contains an float64, sets
// it to the passed value and returns the old float64 value
// atomically.
func (n *Number) SwapFloat64Atomic(f float64) float64 {
return internal.RawToFloat64(atomic.SwapUint64(n.AsRawPtr(), internal.Float64ToRaw(f)))
}
// - add
// AddNumber assumes that this and the passed number are of the passed
// kind and adds the passed number to this number.
func (n *Number) AddNumber(kind Kind, nn Number) {
switch kind {
case Int64Kind:
n.AddInt64(nn.AsInt64())
case Float64Kind:
n.AddFloat64(nn.AsFloat64())
}
}
// AddRaw assumes that this number and the passed raw value are of the
// passed kind and adds the passed raw value to this number.
func (n *Number) AddRaw(kind Kind, r uint64) {
n.AddNumber(kind, NewNumberFromRaw(r))
}
// AddInt64 assumes that the number contains an int64 and adds the
// passed int64 to it.
func (n *Number) AddInt64(i int64) {
*n.AsInt64Ptr() += i
}
// AddFloat64 assumes that the number contains a float64 and adds the
// passed float64 to it.
func (n *Number) AddFloat64(f float64) {
*n.AsFloat64Ptr() += f
}
// - add atomic
// AddNumberAtomic assumes that this and the passed number are of the
// passed kind and adds the passed number to this number atomically.
func (n *Number) AddNumberAtomic(kind Kind, nn Number) {
switch kind {
case Int64Kind:
n.AddInt64Atomic(nn.AsInt64())
case Float64Kind:
n.AddFloat64Atomic(nn.AsFloat64())
}
}
// AddRawAtomic assumes that this number and the passed raw value are
// of the passed kind and adds the passed raw value to this number
// atomically.
func (n *Number) AddRawAtomic(kind Kind, r uint64) {
n.AddNumberAtomic(kind, NewNumberFromRaw(r))
}
// AddInt64Atomic assumes that the number contains an int64 and adds
// the passed int64 to it atomically.
func (n *Number) AddInt64Atomic(i int64) {
atomic.AddInt64(n.AsInt64Ptr(), i)
}
// AddFloat64Atomic assumes that the number contains a float64 and
// adds the passed float64 to it atomically.
func (n *Number) AddFloat64Atomic(f float64) {
for {
o := n.AsFloat64Atomic()
if n.CompareAndSwapFloat64(o, o+f) {
break
}
}
}
// - compare and swap (atomic only)
// CompareAndSwapNumber does the atomic CAS operation on this
// number. This number and passed old and new numbers should be of the
// same kind.
func (n *Number) CompareAndSwapNumber(on, nn Number) bool {
return atomic.CompareAndSwapUint64(n.AsRawPtr(), on.AsRaw(), nn.AsRaw())
}
// CompareAndSwapRaw does the atomic CAS operation on this
// number. This number and passed old and new raw values should be of
// the same kind.
func (n *Number) CompareAndSwapRaw(or, nr uint64) bool {
return atomic.CompareAndSwapUint64(n.AsRawPtr(), or, nr)
}
// CompareAndSwapInt64 assumes that this number contains an int64 and
// does the atomic CAS operation on it.
func (n *Number) CompareAndSwapInt64(oi, ni int64) bool {
return atomic.CompareAndSwapInt64(n.AsInt64Ptr(), oi, ni)
}
// CompareAndSwapFloat64 assumes that this number contains a float64 and
// does the atomic CAS operation on it.
func (n *Number) CompareAndSwapFloat64(of, nf float64) bool {
return atomic.CompareAndSwapUint64(n.AsRawPtr(), internal.Float64ToRaw(of), internal.Float64ToRaw(nf))
}
// - compare
// CompareNumber compares two Numbers given their kind. Both numbers
// should have the same kind. This returns:
// 0 if the numbers are equal
// -1 if the subject `n` is less than the argument `nn`
// +1 if the subject `n` is greater than the argument `nn`
func (n *Number) CompareNumber(kind Kind, nn Number) int {
switch kind {
case Int64Kind:
return n.CompareInt64(nn.AsInt64())
case Float64Kind:
return n.CompareFloat64(nn.AsFloat64())
default:
// you get what you deserve
return 0
}
}
// CompareRaw compares two numbers, where one is input as a raw
// uint64, interpreting both values as a `kind` of number.
func (n *Number) CompareRaw(kind Kind, r uint64) int {
return n.CompareNumber(kind, NewNumberFromRaw(r))
}
// CompareInt64 assumes that the Number contains an int64 and performs
// a comparison between the value and the other value. It returns the
// typical result of the compare function: -1 if the value is less
// than the other, 0 if both are equal, 1 if the value is greater than
// the other.
func (n *Number) CompareInt64(i int64) int {
this := n.AsInt64()
if this < i {
return -1
} else if this > i {
return 1
}
return 0
}
// CompareFloat64 assumes that the Number contains a float64 and
// performs a comparison between the value and the other value. It
// returns the typical result of the compare function: -1 if the value
// is less than the other, 0 if both are equal, 1 if the value is
// greater than the other.
//
// Do not compare NaN values.
func (n *Number) CompareFloat64(f float64) int {
this := n.AsFloat64()
if this < f {
return -1
} else if this > f {
return 1
}
return 0
}
// - relations to zero
// IsPositive returns true if the actual value is greater than zero.
func (n *Number) IsPositive(kind Kind) bool {
return n.compareWithZero(kind) > 0
}
// IsNegative returns true if the actual value is less than zero.
func (n *Number) IsNegative(kind Kind) bool {
return n.compareWithZero(kind) < 0
}
// IsZero returns true if the actual value is equal to zero.
func (n *Number) IsZero(kind Kind) bool {
return n.compareWithZero(kind) == 0
}
// - misc
// Emit returns a string representation of the raw value of the
// Number. A %d is used for integral values, %f for floating point
// values.
func (n *Number) Emit(kind Kind) string {
switch kind {
case Int64Kind:
return fmt.Sprintf("%d", n.AsInt64())
case Float64Kind:
return fmt.Sprintf("%f", n.AsFloat64())
default:
return ""
}
}
// AsInterface returns the number as an interface{}, typically used
// for Kind-correct JSON conversion.
func (n *Number) AsInterface(kind Kind) interface{} {
switch kind {
case Int64Kind:
return n.AsInt64()
case Float64Kind:
return n.AsFloat64()
default:
return math.NaN()
}
}
// - private stuff
func (n *Number) compareWithZero(kind Kind) int {
switch kind {
case Int64Kind:
return n.CompareInt64(0)
case Float64Kind:
return n.CompareFloat64(0.)
default:
// you get what you deserve
return 0
}
}
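A short usage sketch of the Number type; every call below is a method defined in the file above, and the caller tracks the Kind out of band:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/metric/number"
)

func main() {
	// An int64-kind Number: the Kind is not stored inside the value.
	var n number.Number
	n.SetInt64(10)
	n.AddInt64Atomic(5)
	fmt.Println(n.Emit(number.Int64Kind)) // 15

	// A float64-kind Number built from a constructor.
	f := number.NewFloat64Number(1.5)
	f.AddFloat64(2.25)
	fmt.Println(f.CoerceToInt64(number.Float64Kind)) // 3 (truncated)

	// Comparisons are interpreted according to the declared Kind.
	fmt.Println(f.CompareNumber(number.Float64Kind, number.NewFloat64Number(4))) // -1
}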


@@ -21,9 +21,10 @@ import (
"time"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric/sdkapi"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/aggregator"
"go.opentelemetry.io/otel/sdk/metric/export"
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
)
type (
@@ -51,8 +52,8 @@ type (
}
stateValue struct {
// labels corresponds to the stateKey.distinct field.
labels *attribute.Set
// attrs corresponds to the stateKey.distinct field.
attrs *attribute.Set
// updated indicates the last sequence number when this value had
// Process() called by an accumulator.
@@ -74,12 +75,12 @@ type (
// (if !currentOwned) or it refers to an Aggregator
// owned by the processor used to accumulate multiple
// values in a single collection round.
current export.Aggregator
current aggregator.Aggregator
// cumulative, if non-nil, refers to an Aggregator owned
// by the processor used to store the last cumulative
// value.
cumulative export.Aggregator
cumulative aggregator.Aggregator
}
state struct {
@@ -131,7 +132,7 @@ type factory struct {
func NewFactory(aselector export.AggregatorSelector, tselector aggregation.TemporalitySelector, opts ...Option) export.CheckpointerFactory {
var config config
for _, opt := range opts {
opt.applyProcessor(&config)
config = opt.applyProcessor(config)
}
return factory{
aselector: aselector,
@@ -166,7 +167,7 @@ func (b *Processor) Process(accum export.Accumulation) error {
desc := accum.Descriptor()
key := stateKey{
descriptor: desc,
distinct: accum.Labels().Equivalent(),
distinct: accum.Attributes().Equivalent(),
}
agg := accum.Aggregator()
@@ -176,7 +177,7 @@ func (b *Processor) Process(accum export.Accumulation) error {
stateful := b.TemporalityFor(desc, agg.Aggregation().Kind()).MemoryRequired(desc.InstrumentKind())
newValue := &stateValue{
labels: accum.Labels(),
attrs: accum.Attributes(),
updated: b.state.finishedCollection,
stateful: stateful,
current: agg,
@@ -229,7 +230,7 @@ func (b *Processor) Process(accum export.Accumulation) error {
// indicating that the stateKey for Accumulation has already
// been seen in the same collection. When this happens, it
// implies that multiple Accumulators are being used, or that
// a single Accumulator has been configured with a label key
// a single Accumulator has been configured with an attribute key
// filter.
if !sameCollection {
@@ -369,7 +370,7 @@ func (b *state) ForEach(exporter aggregation.TemporalitySelector, f func(export.
if err := f(export.NewRecord(
key.descriptor,
value.labels,
value.attrs,
agg,
start,
b.intervalEnd,


@@ -16,27 +16,27 @@ package basic // import "go.opentelemetry.io/otel/sdk/metric/processor/basic"
// config contains the options for configuring a basic metric processor.
type config struct {
// Memory controls whether the processor remembers metric
// instruments and label sets that were previously reported.
// When Memory is true, Reader.ForEach() will visit
// metrics that were not updated in the most recent interval.
// Memory controls whether the processor remembers metric instruments and
// attribute sets that were previously reported. When Memory is true,
// Reader.ForEach() will visit metrics that were not updated in the most
// recent interval.
Memory bool
}
type Option interface {
applyProcessor(*config)
applyProcessor(config) config
}
// WithMemory sets the memory behavior of a Processor. If this is
// true, the processor will report metric instruments and label sets
// that were previously reported but not updated in the most recent
// interval.
// WithMemory sets the memory behavior of a Processor. If this is true, the
// processor will report metric instruments and attribute sets that were
// previously reported but not updated in the most recent interval.
func WithMemory(memory bool) Option {
return memoryOption(memory)
}
type memoryOption bool
func (m memoryOption) applyProcessor(cfg *config) {
func (m memoryOption) applyProcessor(cfg config) config {
cfg.Memory = bool(m)
return cfg
}
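Usage follows the same value-returning shape: WithMemory is passed to NewFactory alongside the selectors. The selector helpers below are the same assumed calls as in the wiring sketch earlier:

package main

import (
	"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
	processor "go.opentelemetry.io/otel/sdk/metric/processor/basic"
	"go.opentelemetry.io/otel/sdk/metric/selector/simple"
)

func main() {
	// With Memory enabled, Reader.ForEach also visits instruments that were
	// not updated during the most recent collection interval.
	factory := processor.NewFactory(
		simple.NewWithHistogramDistribution(),
		aggregation.CumulativeTemporalitySelector(),
		processor.WithMemory(true),
	)
	_ = factory
}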


@@ -0,0 +1,24 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package registry provides a non-standalone implementation of
MeterProvider that adds uniqueness checking for instrument descriptors
on top of the MeterProvider it wraps.
This package is currently in a pre-GA phase. Backwards incompatible changes
may be introduced in subsequent minor version releases as we work to track the
evolving OpenTelemetry specification and user feedback.
*/
package registry // import "go.opentelemetry.io/otel/sdk/metric/registry"

View File

@@ -0,0 +1,138 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package registry // import "go.opentelemetry.io/otel/sdk/metric/registry"
import (
"context"
"fmt"
"sync"
"go.opentelemetry.io/otel/metric/instrument"
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
)
// UniqueInstrumentMeterImpl implements the sdkapi.MeterImpl interface, adding
// uniqueness checking for instrument descriptors.
type UniqueInstrumentMeterImpl struct {
lock sync.Mutex
impl sdkapi.MeterImpl
state map[string]sdkapi.InstrumentImpl
}
var _ sdkapi.MeterImpl = (*UniqueInstrumentMeterImpl)(nil)
// ErrMetricKindMismatch is the standard error for mismatched metric
// instrument definitions.
var ErrMetricKindMismatch = fmt.Errorf(
"a metric was already registered by this name with another kind or number type")
// NewUniqueInstrumentMeterImpl returns a wrapped sdkapi.MeterImpl
// with the addition of instrument name uniqueness checking.
func NewUniqueInstrumentMeterImpl(impl sdkapi.MeterImpl) *UniqueInstrumentMeterImpl {
return &UniqueInstrumentMeterImpl{
impl: impl,
state: map[string]sdkapi.InstrumentImpl{},
}
}
// MeterImpl gives the caller access to the underlying MeterImpl
// used by this UniqueInstrumentMeterImpl.
func (u *UniqueInstrumentMeterImpl) MeterImpl() sdkapi.MeterImpl {
return u.impl
}
// NewMetricKindMismatchError formats an error that describes a
// mismatched metric instrument definition.
func NewMetricKindMismatchError(desc sdkapi.Descriptor) error {
return fmt.Errorf("metric %s registered as %s %s: %w",
desc.Name(),
desc.NumberKind(),
desc.InstrumentKind(),
ErrMetricKindMismatch)
}
// Compatible determines whether two sdkapi.Descriptors are considered
// the same for the purpose of uniqueness checking.
func Compatible(candidate, existing sdkapi.Descriptor) bool {
return candidate.InstrumentKind() == existing.InstrumentKind() &&
candidate.NumberKind() == existing.NumberKind()
}
// checkUniqueness returns an ErrMetricKindMismatch error if there is
// a conflict between a descriptor that was already registered and the
// `descriptor` argument. If there is an existing compatible
// registration, this returns the already-registered instrument. If
// there is no conflict and no prior registration, returns (nil, nil).
func (u *UniqueInstrumentMeterImpl) checkUniqueness(descriptor sdkapi.Descriptor) (sdkapi.InstrumentImpl, error) {
impl, ok := u.state[descriptor.Name()]
if !ok {
return nil, nil
}
if !Compatible(descriptor, impl.Descriptor()) {
return nil, NewMetricKindMismatchError(impl.Descriptor())
}
return impl, nil
}
// NewSyncInstrument implements sdkapi.MeterImpl.
func (u *UniqueInstrumentMeterImpl) NewSyncInstrument(descriptor sdkapi.Descriptor) (sdkapi.SyncImpl, error) {
u.lock.Lock()
defer u.lock.Unlock()
impl, err := u.checkUniqueness(descriptor)
if err != nil {
return nil, err
} else if impl != nil {
return impl.(sdkapi.SyncImpl), nil
}
syncInst, err := u.impl.NewSyncInstrument(descriptor)
if err != nil {
return nil, err
}
u.state[descriptor.Name()] = syncInst
return syncInst, nil
}
// NewAsyncInstrument implements sdkapi.MeterImpl.
func (u *UniqueInstrumentMeterImpl) NewAsyncInstrument(descriptor sdkapi.Descriptor) (sdkapi.AsyncImpl, error) {
u.lock.Lock()
defer u.lock.Unlock()
impl, err := u.checkUniqueness(descriptor)
if err != nil {
return nil, err
} else if impl != nil {
return impl.(sdkapi.AsyncImpl), nil
}
asyncInst, err := u.impl.NewAsyncInstrument(descriptor)
if err != nil {
return nil, err
}
u.state[descriptor.Name()] = asyncInst
return asyncInst, nil
}
func (u *UniqueInstrumentMeterImpl) RegisterCallback(insts []instrument.Asynchronous, callback func(context.Context)) error {
u.lock.Lock()
defer u.lock.Unlock()
return u.impl.RegisterCallback(insts, callback)
}

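A hypothetical usage sketch of the wrapper above; impl stands for any existing sdkapi.MeterImpl (for example, the Accumulator further down in this diff) and the descriptor names are illustrative:

package example

import (
    "errors"
    "fmt"

    "go.opentelemetry.io/otel/sdk/metric/number"
    "go.opentelemetry.io/otel/sdk/metric/registry"
    "go.opentelemetry.io/otel/sdk/metric/sdkapi"
)

func demo(impl sdkapi.MeterImpl) {
    unique := registry.NewUniqueInstrumentMeterImpl(impl)

    first := sdkapi.NewDescriptor("requests", sdkapi.CounterInstrumentKind, number.Int64Kind, "", "")
    if _, err := unique.NewSyncInstrument(first); err != nil {
        fmt.Println("unexpected:", err)
    }

    // Same name, different kind and number type: the wrapper rejects it.
    conflict := sdkapi.NewDescriptor("requests", sdkapi.HistogramInstrumentKind, number.Float64Kind, "", "")
    _, err := unique.NewSyncInstrument(conflict)
    fmt.Println(errors.Is(err, registry.ErrMetricKindMismatch)) // true
}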
View File

@@ -23,11 +23,11 @@ import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
internal "go.opentelemetry.io/otel/internal/metric"
"go.opentelemetry.io/otel/metric/number"
"go.opentelemetry.io/otel/metric/sdkapi"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/metric/instrument"
"go.opentelemetry.io/otel/sdk/metric/aggregator"
"go.opentelemetry.io/otel/sdk/metric/export"
"go.opentelemetry.io/otel/sdk/metric/number"
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
)
type (
@@ -44,10 +44,8 @@ type (
// current maps `mapkey` to *record.
current sync.Map
// asyncInstruments is a set of
// `*asyncInstrument` instances
asyncLock sync.Mutex
asyncInstruments *internal.AsyncInstrumentState
callbackLock sync.Mutex
callbacks map[*callback]struct{}
// currentEpoch is the current epoch number. It is
// incremented in `Collect()`.
@@ -58,19 +56,27 @@ type (
// collectLock prevents simultaneous calls to Collect().
collectLock sync.Mutex
}
// asyncSortSlice has a single purpose - as a temporary
// place for sorting during labels creation to avoid
// allocation. It is cleared after use.
asyncSortSlice attribute.Sortable
callback struct {
insts map[*asyncInstrument]struct{}
f func(context.Context)
}
asyncContextKey struct{}
asyncInstrument struct {
baseInstrument
instrument.Asynchronous
}
syncInstrument struct {
instrument
baseInstrument
instrument.Synchronous
}
// mapkey uniquely describes a metric instrument in terms of
// its InstrumentID and the encoded form of its labels.
// mapkey uniquely describes a metric instrument in terms of its
// InstrumentID and the encoded form of its attributes.
mapkey struct {
descriptor *sdkapi.Descriptor
ordered attribute.Distinct
@@ -92,62 +98,41 @@ type (
// supports checking for no updates during a round.
collectedCount int64
// storage is the stored label set for this record,
// except in cases where a label set is shared due to
// batch recording.
storage attribute.Set
// attrs is the stored attribute set for this record, except in cases
// where an attribute set is shared due to batch recording.
attrs attribute.Set
// labels is the processed label set for this record.
// this may refer to the `storage` field in another
// record if this label set is shared resulting from
// `RecordBatch`.
labels *attribute.Set
// sortSlice has a single purpose - as a temporary
// place for sorting during labels creation to avoid
// allocation.
// sortSlice has a single purpose - as a temporary place for sorting
// during attributes creation to avoid allocation.
sortSlice attribute.Sortable
// inst is a pointer to the corresponding instrument.
inst *syncInstrument
inst *baseInstrument
// current implements the actual RecordOne() API,
// depending on the type of aggregation. If nil, the
// metric was disabled by the exporter.
current export.Aggregator
checkpoint export.Aggregator
current aggregator.Aggregator
checkpoint aggregator.Aggregator
}
instrument struct {
baseInstrument struct {
meter *Accumulator
descriptor sdkapi.Descriptor
}
asyncInstrument struct {
instrument
// recorders maps ordered labels to the pair of
// labelset and recorder
recorders map[attribute.Distinct]*labeledRecorder
}
labeledRecorder struct {
observedEpoch int64
labels *attribute.Set
observed export.Aggregator
}
)
var (
_ sdkapi.MeterImpl = &Accumulator{}
_ sdkapi.AsyncImpl = &asyncInstrument{}
_ sdkapi.SyncImpl = &syncInstrument{}
// ErrUninitializedInstrument is returned when an instrument is used when uninitialized.
ErrUninitializedInstrument = fmt.Errorf("use of an uninitialized instrument")
ErrBadInstrument = fmt.Errorf("use of an instrument from another SDK")
)
func (inst *instrument) Descriptor() sdkapi.Descriptor {
return inst.descriptor
func (b *baseInstrument) Descriptor() sdkapi.Descriptor {
return b.descriptor
}
func (a *asyncInstrument) Implementation() interface{} {
@@ -158,77 +143,24 @@ func (s *syncInstrument) Implementation() interface{} {
return s
}
func (a *asyncInstrument) observe(num number.Number, labels *attribute.Set) {
if err := aggregator.RangeTest(num, &a.descriptor); err != nil {
otel.Handle(err)
return
}
recorder := a.getRecorder(labels)
if recorder == nil {
// The instrument is disabled according to the
// AggregatorSelector.
return
}
if err := recorder.Update(context.Background(), num, &a.descriptor); err != nil {
otel.Handle(err)
return
}
}
func (a *asyncInstrument) getRecorder(labels *attribute.Set) export.Aggregator {
lrec, ok := a.recorders[labels.Equivalent()]
if ok {
// Note: SynchronizedMove(nil) can't return an error
_ = lrec.observed.SynchronizedMove(nil, &a.descriptor)
lrec.observedEpoch = a.meter.currentEpoch
a.recorders[labels.Equivalent()] = lrec
return lrec.observed
}
var rec export.Aggregator
a.meter.processor.AggregatorFor(&a.descriptor, &rec)
if a.recorders == nil {
a.recorders = make(map[attribute.Distinct]*labeledRecorder)
}
// This may store nil recorder in the map, thus disabling the
// asyncInstrument for the labelset for good. This is intentional,
// but will be revisited later.
a.recorders[labels.Equivalent()] = &labeledRecorder{
observed: rec,
labels: labels,
observedEpoch: a.meter.currentEpoch,
}
return rec
}
// acquireHandle gets or creates a `*record` corresponding to `kvs`,
// the input labels. The second argument `labels` is passed in to
// support re-use of the orderedLabels computed by a previous
// measurement in the same batch. This performs two allocations
// in the common case.
func (s *syncInstrument) acquireHandle(kvs []attribute.KeyValue, labelPtr *attribute.Set) *record {
var rec *record
var equiv attribute.Distinct
// the input attributes.
func (b *baseInstrument) acquireHandle(kvs []attribute.KeyValue) *record {
if labelPtr == nil {
// This memory allocation may not be used, but it's
// needed for the `sortSlice` field, to avoid an
// allocation while sorting.
rec = &record{}
rec.storage = attribute.NewSetWithSortable(kvs, &rec.sortSlice)
rec.labels = &rec.storage
equiv = rec.storage.Equivalent()
} else {
equiv = labelPtr.Equivalent()
}
// This memory allocation may not be used, but it's
// needed for the `sortSlice` field, to avoid an
// allocation while sorting.
rec := &record{}
rec.attrs = attribute.NewSetWithSortable(kvs, &rec.sortSlice)
// Create lookup key for sync.Map (one allocation, as this
// passes through an interface{})
mk := mapkey{
descriptor: &s.descriptor,
ordered: equiv,
descriptor: &b.descriptor,
ordered: rec.attrs.Equivalent(),
}
if actual, ok := s.meter.current.Load(mk); ok {
if actual, ok := b.meter.current.Load(mk); ok {
// Existing record case.
existingRec := actual.(*record)
if existingRec.refMapped.ref() {
@@ -239,19 +171,15 @@ func (s *syncInstrument) acquireHandle(kvs []attribute.KeyValue, labelPtr *attri
// This entry is no longer mapped, try to add a new entry.
}
if rec == nil {
rec = &record{}
rec.labels = labelPtr
}
rec.refMapped = refcountMapped{value: 2}
rec.inst = s
rec.inst = b
s.meter.processor.AggregatorFor(&s.descriptor, &rec.current, &rec.checkpoint)
b.meter.processor.AggregatorFor(&b.descriptor, &rec.current, &rec.checkpoint)
for {
// Load/Store: there's a memory allocation to place `mk` into
// an interface here.
if actual, loaded := s.meter.current.LoadOrStore(mk, rec); loaded {
if actual, loaded := b.meter.current.LoadOrStore(mk, rec); loaded {
// Existing record case. Cannot change rec here because if fail
// will try to add rec again to avoid new allocations.
oldRec := actual.(*record)
@@ -278,11 +206,22 @@ func (s *syncInstrument) acquireHandle(kvs []attribute.KeyValue, labelPtr *attri
}
}
// RecordOne captures a single synchronous metric event.
//
// The input array `kvs` may be sorted in place as a side effect of this call.
func (s *syncInstrument) RecordOne(ctx context.Context, num number.Number, kvs []attribute.KeyValue) {
h := s.acquireHandle(kvs, nil)
h := s.acquireHandle(kvs)
defer h.unbind()
h.RecordOne(ctx, num)
h.captureOne(ctx, num)
}
// ObserveOne captures a single asynchronous metric event.
// The input array `attrs` may be sorted in place as a side effect of this call.
func (a *asyncInstrument) ObserveOne(ctx context.Context, num number.Number, attrs []attribute.KeyValue) {
h := a.acquireHandle(attrs)
defer h.unbind()
h.captureOne(ctx, num)
}
// NewAccumulator constructs a new Accumulator for the given
@@ -296,15 +235,17 @@ func (s *syncInstrument) RecordOne(ctx context.Context, num number.Number, kvs [
// own periodic collection.
func NewAccumulator(processor export.Processor) *Accumulator {
return &Accumulator{
processor: processor,
asyncInstruments: internal.NewAsyncInstrumentState(),
processor: processor,
callbacks: map[*callback]struct{}{},
}
}
var _ sdkapi.MeterImpl = &Accumulator{}
// NewSyncInstrument implements sdkapi.MetricImpl.
func (m *Accumulator) NewSyncInstrument(descriptor sdkapi.Descriptor) (sdkapi.SyncImpl, error) {
return &syncInstrument{
instrument: instrument{
baseInstrument: baseInstrument{
descriptor: descriptor,
meter: m,
},
@@ -312,19 +253,40 @@ func (m *Accumulator) NewSyncInstrument(descriptor sdkapi.Descriptor) (sdkapi.Sy
}
// NewAsyncInstrument implements sdkapi.MetricImpl.
func (m *Accumulator) NewAsyncInstrument(descriptor sdkapi.Descriptor, runner sdkapi.AsyncRunner) (sdkapi.AsyncImpl, error) {
func (m *Accumulator) NewAsyncInstrument(descriptor sdkapi.Descriptor) (sdkapi.AsyncImpl, error) {
a := &asyncInstrument{
instrument: instrument{
baseInstrument: baseInstrument{
descriptor: descriptor,
meter: m,
},
}
m.asyncLock.Lock()
defer m.asyncLock.Unlock()
m.asyncInstruments.Register(a, runner)
return a, nil
}
func (m *Accumulator) RegisterCallback(insts []instrument.Asynchronous, f func(context.Context)) error {
cb := &callback{
insts: map[*asyncInstrument]struct{}{},
f: f,
}
for _, inst := range insts {
impl, ok := inst.(sdkapi.AsyncImpl)
if !ok {
return ErrBadInstrument
}
ai, err := m.fromAsync(impl)
if err != nil {
return err
}
cb.insts[ai] = struct{}{}
}
m.callbackLock.Lock()
defer m.callbackLock.Unlock()
m.callbacks[cb] = struct{}{}
return nil
}
// Collect traverses the list of active records and observers and
// exports data for each active instrument. Collect() may not be
// called concurrently.
@@ -337,14 +299,14 @@ func (m *Accumulator) Collect(ctx context.Context) int {
m.collectLock.Lock()
defer m.collectLock.Unlock()
checkpointed := m.observeAsyncInstruments(ctx)
checkpointed += m.collectSyncInstruments()
m.runAsyncCallbacks(ctx)
checkpointed := m.collectInstruments()
m.currentEpoch++
return checkpointed
}
func (m *Accumulator) collectSyncInstruments() int {
func (m *Accumulator) collectInstruments() int {
checkpointed := 0
m.current.Range(func(key interface{}, value interface{}) bool {
@@ -387,35 +349,17 @@ func (m *Accumulator) collectSyncInstruments() int {
return checkpointed
}
// CollectAsync implements internal.AsyncCollector.
// The order of the input array `kvs` may be sorted after the function is called.
func (m *Accumulator) CollectAsync(kv []attribute.KeyValue, obs ...sdkapi.Observation) {
labels := attribute.NewSetWithSortable(kv, &m.asyncSortSlice)
func (m *Accumulator) runAsyncCallbacks(ctx context.Context) {
m.callbackLock.Lock()
defer m.callbackLock.Unlock()
for _, ob := range obs {
if a := m.fromAsync(ob.AsyncImpl()); a != nil {
a.observe(ob.Number(), &labels)
}
ctx = context.WithValue(ctx, asyncContextKey{}, m)
for cb := range m.callbacks {
cb.f(ctx)
}
}
func (m *Accumulator) observeAsyncInstruments(ctx context.Context) int {
m.asyncLock.Lock()
defer m.asyncLock.Unlock()
asyncCollected := 0
m.asyncInstruments.Run(ctx, m)
for _, inst := range m.asyncInstruments.Instruments() {
if a := m.fromAsync(inst); a != nil {
asyncCollected += m.checkpointAsync(a)
}
}
return asyncCollected
}
func (m *Accumulator) checkpointRecord(r *record) int {
if r.current == nil {
return 0
@@ -426,7 +370,7 @@ func (m *Accumulator) checkpointRecord(r *record) int {
return 0
}
a := export.NewAccumulation(&r.inst.descriptor, r.labels, r.checkpoint)
a := export.NewAccumulation(&r.inst.descriptor, &r.attrs, r.checkpoint)
err = m.processor.Process(a)
if err != nil {
otel.Handle(err)
@@ -434,63 +378,7 @@ func (m *Accumulator) checkpointRecord(r *record) int {
return 1
}
func (m *Accumulator) checkpointAsync(a *asyncInstrument) int {
if len(a.recorders) == 0 {
return 0
}
checkpointed := 0
for encodedLabels, lrec := range a.recorders {
lrec := lrec
epochDiff := m.currentEpoch - lrec.observedEpoch
if epochDiff == 0 {
if lrec.observed != nil {
a := export.NewAccumulation(&a.descriptor, lrec.labels, lrec.observed)
err := m.processor.Process(a)
if err != nil {
otel.Handle(err)
}
checkpointed++
}
} else if epochDiff > 1 {
// This is second collection cycle with no
// observations for this labelset. Remove the
// recorder.
delete(a.recorders, encodedLabels)
}
}
if len(a.recorders) == 0 {
a.recorders = nil
}
return checkpointed
}
// RecordBatch enters a batch of metric events.
// The order of the input array `kvs` may be sorted after the function is called.
func (m *Accumulator) RecordBatch(ctx context.Context, kvs []attribute.KeyValue, measurements ...sdkapi.Measurement) {
// Labels will be computed the first time acquireHandle is
// called. Subsequent calls to acquireHandle will re-use the
// previously computed value instead of recomputing the
// ordered labels.
var labelsPtr *attribute.Set
for i, meas := range measurements {
s := m.fromSync(meas.SyncImpl())
if s == nil {
continue
}
h := s.acquireHandle(kvs, labelsPtr)
// Re-use labels for the next measurement.
if i == 0 {
labelsPtr = h.labels
}
defer h.unbind()
h.RecordOne(ctx, meas.Number())
}
}
// RecordOne implements sdkapi.SyncImpl.
func (r *record) RecordOne(ctx context.Context, num number.Number) {
func (r *record) captureOne(ctx context.Context, num number.Number) {
if r.current == nil {
// The instrument is disabled according to the AggregatorSelector.
return
@@ -515,30 +403,20 @@ func (r *record) unbind() {
func (r *record) mapkey() mapkey {
return mapkey{
descriptor: &r.inst.descriptor,
ordered: r.labels.Equivalent(),
ordered: r.attrs.Equivalent(),
}
}
// fromSync gets a sync implementation object, checking for
// uninitialized instruments and instruments created by another SDK.
func (m *Accumulator) fromSync(sync sdkapi.SyncImpl) *syncInstrument {
if sync != nil {
if inst, ok := sync.Implementation().(*syncInstrument); ok {
return inst
}
}
otel.Handle(ErrUninitializedInstrument)
return nil
}
// fromAsync gets an async implementation object, checking for
// uninitialized instruments and instruments created by another SDK.
func (m *Accumulator) fromAsync(async sdkapi.AsyncImpl) *asyncInstrument {
if async != nil {
if inst, ok := async.Implementation().(*asyncInstrument); ok {
return inst
}
func (m *Accumulator) fromAsync(async sdkapi.AsyncImpl) (*asyncInstrument, error) {
if async == nil {
return nil, ErrUninitializedInstrument
}
otel.Handle(ErrUninitializedInstrument)
return nil
inst, ok := async.Implementation().(*asyncInstrument)
if !ok {
return nil, ErrBadInstrument
}
return inst, nil
}

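A hedged sketch of the callback flow this file introduces: asynchronous instruments are no longer registered with an AsyncRunner; instead a callback is registered on the meter and executed when Collect() runs. The import path and alias for the Accumulator package are assumptions:

package example

import (
    "context"

    "go.opentelemetry.io/otel/metric/instrument"
    metricsdk "go.opentelemetry.io/otel/sdk/metric" // assumed Accumulator package path
    "go.opentelemetry.io/otel/sdk/metric/sdkapi"
)

// demo assumes accum was created with metricsdk.NewAccumulator(processor) elsewhere.
func demo(ctx context.Context, accum *metricsdk.Accumulator) error {
    meter := sdkapi.WrapMeterImpl(accum)

    queueLen, err := meter.AsyncInt64().Gauge("queue.length")
    if err != nil {
        return err
    }

    // The callback runs on every Collect(); observations now go through the
    // same acquireHandle/record path as synchronous measurements.
    err = meter.RegisterCallback([]instrument.Asynchronous{queueLen}, func(ctx context.Context) {
        queueLen.Observe(ctx, 42)
    })
    if err != nil {
        return err
    }

    accum.Collect(ctx) // runs callbacks, then checkpoints records
    return nil
}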
View File

@@ -0,0 +1,70 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sdkapi // import "go.opentelemetry.io/otel/sdk/metric/sdkapi"
import (
"go.opentelemetry.io/otel/metric/unit"
"go.opentelemetry.io/otel/sdk/metric/number"
)
// Descriptor contains all the settings that describe an instrument,
// including its name, metric kind, number kind, and the configurable
// options.
type Descriptor struct {
name string
instrumentKind InstrumentKind
numberKind number.Kind
description string
unit unit.Unit
}
// NewDescriptor returns a Descriptor with the given contents.
func NewDescriptor(name string, ikind InstrumentKind, nkind number.Kind, description string, unit unit.Unit) Descriptor {
return Descriptor{
name: name,
instrumentKind: ikind,
numberKind: nkind,
description: description,
unit: unit,
}
}
// Name returns the metric instrument's name.
func (d Descriptor) Name() string {
return d.name
}
// InstrumentKind returns the specific kind of instrument.
func (d Descriptor) InstrumentKind() InstrumentKind {
return d.instrumentKind
}
// Description provides a human-readable description of the metric
// instrument.
func (d Descriptor) Description() string {
return d.description
}
// Unit describes the units of the metric instrument. Unitless
// metrics return the empty string.
func (d Descriptor) Unit() unit.Unit {
return d.unit
}
// NumberKind returns whether this instrument is declared over int64,
// float64, or uint64 values.
func (d Descriptor) NumberKind() number.Kind {
return d.numberKind
}

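A short usage sketch of the descriptor type above; unit.Milliseconds and the printed kind names come from the sibling packages rather than this diff, so treat them as assumptions:

package main

import (
    "fmt"

    "go.opentelemetry.io/otel/metric/unit"
    "go.opentelemetry.io/otel/sdk/metric/number"
    "go.opentelemetry.io/otel/sdk/metric/sdkapi"
)

func main() {
    // Build a descriptor and read it back through the accessors above.
    d := sdkapi.NewDescriptor("http.server.duration", sdkapi.HistogramInstrumentKind,
        number.Float64Kind, "duration of inbound HTTP requests", unit.Milliseconds)

    fmt.Println(d.Name(), d.InstrumentKind(), d.NumberKind(), d.Unit())
    // e.g. http.server.duration HistogramInstrumentKind Float64Kind ms
}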
View File

@@ -0,0 +1,80 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:generate stringer -type=InstrumentKind
package sdkapi // import "go.opentelemetry.io/otel/sdk/metric/sdkapi"
// InstrumentKind describes the kind of instrument.
type InstrumentKind int8
const (
// HistogramInstrumentKind indicates a Histogram instrument.
HistogramInstrumentKind InstrumentKind = iota
// GaugeObserverInstrumentKind indicates a GaugeObserver instrument.
GaugeObserverInstrumentKind
// CounterInstrumentKind indicates a Counter instrument.
CounterInstrumentKind
// UpDownCounterInstrumentKind indicates an UpDownCounter instrument.
UpDownCounterInstrumentKind
// CounterObserverInstrumentKind indicates a CounterObserver instrument.
CounterObserverInstrumentKind
// UpDownCounterObserverInstrumentKind indicates an UpDownCounterObserver
// instrument.
UpDownCounterObserverInstrumentKind
)
// Synchronous returns whether this is a synchronous kind of instrument.
func (k InstrumentKind) Synchronous() bool {
switch k {
case CounterInstrumentKind, UpDownCounterInstrumentKind, HistogramInstrumentKind:
return true
}
return false
}
// Asynchronous returns whether this is an asynchronous kind of instrument.
func (k InstrumentKind) Asynchronous() bool {
return !k.Synchronous()
}
// Adding returns whether this kind of instrument adds its inputs (as opposed to Grouping).
func (k InstrumentKind) Adding() bool {
switch k {
case CounterInstrumentKind, UpDownCounterInstrumentKind, CounterObserverInstrumentKind, UpDownCounterObserverInstrumentKind:
return true
}
return false
}
// Grouping returns whether this kind of instrument groups its inputs (as opposed to Adding).
func (k InstrumentKind) Grouping() bool {
return !k.Adding()
}
// Monotonic returns whether this kind of instrument exposes a non-decreasing sum.
func (k InstrumentKind) Monotonic() bool {
switch k {
case CounterInstrumentKind, CounterObserverInstrumentKind:
return true
}
return false
}
// PrecomputedSum returns whether this kind of instrument receives precomputed sums.
func (k InstrumentKind) PrecomputedSum() bool {
return k.Adding() && k.Asynchronous()
}

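A quick sketch showing how the predicates above partition the kinds; the printed values follow directly from the switch statements:

package main

import (
    "fmt"

    "go.opentelemetry.io/otel/sdk/metric/sdkapi"
)

func main() {
    for _, k := range []sdkapi.InstrumentKind{
        sdkapi.CounterInstrumentKind,         // synchronous, adding, monotonic
        sdkapi.CounterObserverInstrumentKind, // asynchronous and adding, hence a precomputed sum
    } {
        fmt.Println(k, k.Synchronous(), k.Adding(), k.Monotonic(), k.PrecomputedSum())
    }
}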
View File

@@ -0,0 +1,28 @@
// Code generated by "stringer -type=InstrumentKind"; DO NOT EDIT.
package sdkapi
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[HistogramInstrumentKind-0]
_ = x[GaugeObserverInstrumentKind-1]
_ = x[CounterInstrumentKind-2]
_ = x[UpDownCounterInstrumentKind-3]
_ = x[CounterObserverInstrumentKind-4]
_ = x[UpDownCounterObserverInstrumentKind-5]
}
const _InstrumentKind_name = "HistogramInstrumentKindGaugeObserverInstrumentKindCounterInstrumentKindUpDownCounterInstrumentKindCounterObserverInstrumentKindUpDownCounterObserverInstrumentKind"
var _InstrumentKind_index = [...]uint8{0, 23, 50, 71, 98, 127, 162}
func (i InstrumentKind) String() string {
if i < 0 || i >= InstrumentKind(len(_InstrumentKind_index)-1) {
return "InstrumentKind(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _InstrumentKind_name[_InstrumentKind_index[i]:_InstrumentKind_index[i+1]]
}

View File

@@ -0,0 +1,83 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sdkapi // import "go.opentelemetry.io/otel/sdk/metric/sdkapi"
import (
"context"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric/instrument"
"go.opentelemetry.io/otel/sdk/metric/number"
)
type noopInstrument struct {
descriptor Descriptor
}
type noopSyncInstrument struct {
noopInstrument
instrument.Synchronous
}
type noopAsyncInstrument struct {
noopInstrument
instrument.Asynchronous
}
var _ SyncImpl = noopSyncInstrument{}
var _ AsyncImpl = noopAsyncInstrument{}
// NewNoopSyncInstrument returns a No-op implementation of the
// synchronous instrument interface.
func NewNoopSyncInstrument() SyncImpl {
return noopSyncInstrument{
noopInstrument: noopInstrument{
descriptor: Descriptor{
instrumentKind: CounterInstrumentKind,
},
},
}
}
// NewNoopAsyncInstrument returns a No-op implementation of the
// asynchronous instrument interface.
func NewNoopAsyncInstrument() AsyncImpl {
return noopAsyncInstrument{
noopInstrument: noopInstrument{
descriptor: Descriptor{
instrumentKind: CounterObserverInstrumentKind,
},
},
}
}
func (noopInstrument) Implementation() interface{} {
return nil
}
func (n noopInstrument) Descriptor() Descriptor {
return n.descriptor
}
func (noopSyncInstrument) RecordOne(context.Context, number.Number, []attribute.KeyValue) {
}
func (noopAsyncInstrument) ObserveOne(context.Context, number.Number, []attribute.KeyValue) {
}

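A small usage sketch of the no-op instruments above; measurements are accepted and dropped, and only the descriptor's kind is populated:

package main

import (
    "context"
    "fmt"

    "go.opentelemetry.io/otel/sdk/metric/number"
    "go.opentelemetry.io/otel/sdk/metric/sdkapi"
)

func main() {
    inst := sdkapi.NewNoopSyncInstrument()
    inst.RecordOne(context.Background(), number.NewInt64Number(1), nil) // dropped
    fmt.Println(inst.Descriptor().InstrumentKind())                     // CounterInstrumentKind
}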
View File

@@ -0,0 +1,162 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sdkapi // import "go.opentelemetry.io/otel/sdk/metric/sdkapi"
import (
"context"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric/instrument"
"go.opentelemetry.io/otel/sdk/metric/number"
)
// MeterImpl is the interface an SDK must implement to supply a Meter
// implementation.
type MeterImpl interface {
// NewSyncInstrument returns a newly constructed
// synchronous instrument implementation or an error, should
// one occur.
NewSyncInstrument(descriptor Descriptor) (SyncImpl, error)
// NewAsyncInstrument returns a newly constructed
// asynchronous instrument implementation or an error, should
// one occur.
NewAsyncInstrument(descriptor Descriptor) (AsyncImpl, error)
// RegisterCallback registers a callback to be invoked during collection
// for the given asynchronous instruments.
RegisterCallback(insts []instrument.Asynchronous, callback func(context.Context)) error
}
// InstrumentImpl is a common interface for synchronous and
// asynchronous instruments.
type InstrumentImpl interface {
// Implementation returns the underlying implementation of the
// instrument, which allows the implementation to gain access
// to its own representation especially from a `Measurement`.
Implementation() interface{}
// Descriptor returns a copy of the instrument's Descriptor.
Descriptor() Descriptor
}
// SyncImpl is the implementation-level interface to a generic
// synchronous instrument (e.g., Histogram and Counter instruments).
type SyncImpl interface {
InstrumentImpl
instrument.Synchronous
// RecordOne captures a single synchronous metric event.
RecordOne(ctx context.Context, number number.Number, attrs []attribute.KeyValue)
}
// AsyncImpl is an implementation-level interface to an
// asynchronous instrument (e.g., Observer instruments).
type AsyncImpl interface {
InstrumentImpl
instrument.Asynchronous
// ObserveOne captures a single asynchronous metric event.
ObserveOne(ctx context.Context, number number.Number, attrs []attribute.KeyValue)
}
// AsyncRunner is expected to convert into an AsyncSingleRunner or an
// AsyncBatchRunner. SDKs will encounter an error if the AsyncRunner
// does not satisfy one of these interfaces.
type AsyncRunner interface {
// AnyRunner is a non-exported method with no functional use
// other than to make this a non-empty interface.
AnyRunner()
}
// AsyncSingleRunner is an interface implemented by single-observer
// callbacks.
type AsyncSingleRunner interface {
// Run accepts a single instrument and function for capturing
// observations of that instrument. Each call to the function
// receives one captured observation. (The function accepts
// multiple observations so the same implementation can be
// used for batch runners.)
Run(ctx context.Context, single AsyncImpl, capture func([]attribute.KeyValue, ...Observation))
AsyncRunner
}
// AsyncBatchRunner is an interface implemented by batch-observer
// callbacks.
type AsyncBatchRunner interface {
// Run accepts a function for capturing observations of
// multiple instruments.
Run(ctx context.Context, capture func([]attribute.KeyValue, ...Observation))
AsyncRunner
}
// NewMeasurement constructs a single observation, a binding between
// an asynchronous instrument and a number.
func NewMeasurement(instrument SyncImpl, number number.Number) Measurement {
return Measurement{
instrument: instrument,
number: number,
}
}
// Measurement is a low-level type used with synchronous instruments
// as a direct interface to the SDK via `RecordBatch`.
type Measurement struct {
// number needs to be aligned for 64-bit atomic operations.
number number.Number
instrument SyncImpl
}
// SyncImpl returns the instrument that created this measurement.
// This returns an implementation-level object for use by the SDK;
// users should not refer to this.
func (m Measurement) SyncImpl() SyncImpl {
return m.instrument
}
// Number returns a number recorded in this measurement.
func (m Measurement) Number() number.Number {
return m.number
}
// NewObservation constructs a single observation, a binding between
// an asynchronous instrument and a number.
func NewObservation(instrument AsyncImpl, number number.Number) Observation {
return Observation{
instrument: instrument,
number: number,
}
}
// Observation is a low-level type used with asynchronous instruments
// as a direct interface to the SDK via `BatchObserver`.
type Observation struct {
// number needs to be aligned for 64-bit atomic operations.
number number.Number
instrument AsyncImpl
}
// AsyncImpl returns the instrument that created this observation.
// This returns an implementation-level object for use by the SDK;
// users should not refer to this.
func (m Observation) AsyncImpl() AsyncImpl {
return m.instrument
}
// Number returns a number recorded in this observation.
func (m Observation) Number() number.Number {
return m.number
}

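A tiny sketch of the Observation value type above, using the no-op instrument from the previous file as a stand-in for a real asynchronous instrument:

package main

import (
    "fmt"

    "go.opentelemetry.io/otel/sdk/metric/number"
    "go.opentelemetry.io/otel/sdk/metric/sdkapi"
)

func main() {
    inst := sdkapi.NewNoopAsyncInstrument()
    obs := sdkapi.NewObservation(inst, number.NewFloat64Number(3.14))

    _ = obs.Number() // the recorded value
    fmt.Println(obs.AsyncImpl().Descriptor().InstrumentKind()) // CounterObserverInstrumentKind
}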
View File

@@ -0,0 +1,181 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sdkapi // import "go.opentelemetry.io/otel/sdk/metric/sdkapi"
import (
"context"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/instrument"
"go.opentelemetry.io/otel/metric/instrument/asyncfloat64"
"go.opentelemetry.io/otel/metric/instrument/asyncint64"
"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
"go.opentelemetry.io/otel/metric/instrument/syncint64"
"go.opentelemetry.io/otel/sdk/metric/number"
)
type (
meter struct{ MeterImpl }
sfMeter struct{ meter }
siMeter struct{ meter }
afMeter struct{ meter }
aiMeter struct{ meter }
iAdder struct{ SyncImpl }
fAdder struct{ SyncImpl }
iRecorder struct{ SyncImpl }
fRecorder struct{ SyncImpl }
iObserver struct{ AsyncImpl }
fObserver struct{ AsyncImpl }
)
func WrapMeterImpl(impl MeterImpl) metric.Meter {
return meter{impl}
}
func UnwrapMeterImpl(m metric.Meter) MeterImpl {
mm, ok := m.(meter)
if !ok {
return nil
}
return mm.MeterImpl
}
func (m meter) AsyncFloat64() asyncfloat64.InstrumentProvider {
return afMeter{m}
}
func (m meter) AsyncInt64() asyncint64.InstrumentProvider {
return aiMeter{m}
}
func (m meter) SyncFloat64() syncfloat64.InstrumentProvider {
return sfMeter{m}
}
func (m meter) SyncInt64() syncint64.InstrumentProvider {
return siMeter{m}
}
func (m meter) RegisterCallback(insts []instrument.Asynchronous, cb func(ctx context.Context)) error {
return m.MeterImpl.RegisterCallback(insts, cb)
}
func (m meter) newSync(name string, ikind InstrumentKind, nkind number.Kind, opts []instrument.Option) (SyncImpl, error) {
cfg := instrument.NewConfig(opts...)
return m.NewSyncInstrument(NewDescriptor(name, ikind, nkind, cfg.Description(), cfg.Unit()))
}
func (m meter) newAsync(name string, ikind InstrumentKind, nkind number.Kind, opts []instrument.Option) (AsyncImpl, error) {
cfg := instrument.NewConfig(opts...)
return m.NewAsyncInstrument(NewDescriptor(name, ikind, nkind, cfg.Description(), cfg.Unit()))
}
func (m afMeter) Counter(name string, opts ...instrument.Option) (asyncfloat64.Counter, error) {
inst, err := m.newAsync(name, CounterObserverInstrumentKind, number.Float64Kind, opts)
return fObserver{inst}, err
}
func (m afMeter) UpDownCounter(name string, opts ...instrument.Option) (asyncfloat64.UpDownCounter, error) {
inst, err := m.newAsync(name, UpDownCounterObserverInstrumentKind, number.Float64Kind, opts)
return fObserver{inst}, err
}
func (m afMeter) Gauge(name string, opts ...instrument.Option) (asyncfloat64.Gauge, error) {
inst, err := m.newAsync(name, GaugeObserverInstrumentKind, number.Float64Kind, opts)
return fObserver{inst}, err
}
func (m aiMeter) Counter(name string, opts ...instrument.Option) (asyncint64.Counter, error) {
inst, err := m.newAsync(name, CounterObserverInstrumentKind, number.Int64Kind, opts)
return iObserver{inst}, err
}
func (m aiMeter) UpDownCounter(name string, opts ...instrument.Option) (asyncint64.UpDownCounter, error) {
inst, err := m.newAsync(name, UpDownCounterObserverInstrumentKind, number.Int64Kind, opts)
return iObserver{inst}, err
}
func (m aiMeter) Gauge(name string, opts ...instrument.Option) (asyncint64.Gauge, error) {
inst, err := m.newAsync(name, GaugeObserverInstrumentKind, number.Int64Kind, opts)
return iObserver{inst}, err
}
func (m sfMeter) Counter(name string, opts ...instrument.Option) (syncfloat64.Counter, error) {
inst, err := m.newSync(name, CounterInstrumentKind, number.Float64Kind, opts)
return fAdder{inst}, err
}
func (m sfMeter) UpDownCounter(name string, opts ...instrument.Option) (syncfloat64.UpDownCounter, error) {
inst, err := m.newSync(name, UpDownCounterInstrumentKind, number.Float64Kind, opts)
return fAdder{inst}, err
}
func (m sfMeter) Histogram(name string, opts ...instrument.Option) (syncfloat64.Histogram, error) {
inst, err := m.newSync(name, HistogramInstrumentKind, number.Float64Kind, opts)
return fRecorder{inst}, err
}
func (m siMeter) Counter(name string, opts ...instrument.Option) (syncint64.Counter, error) {
inst, err := m.newSync(name, CounterInstrumentKind, number.Int64Kind, opts)
return iAdder{inst}, err
}
func (m siMeter) UpDownCounter(name string, opts ...instrument.Option) (syncint64.UpDownCounter, error) {
inst, err := m.newSync(name, UpDownCounterInstrumentKind, number.Int64Kind, opts)
return iAdder{inst}, err
}
func (m siMeter) Histogram(name string, opts ...instrument.Option) (syncint64.Histogram, error) {
inst, err := m.newSync(name, HistogramInstrumentKind, number.Int64Kind, opts)
return iRecorder{inst}, err
}
func (a fAdder) Add(ctx context.Context, value float64, attrs ...attribute.KeyValue) {
if a.SyncImpl != nil {
a.SyncImpl.RecordOne(ctx, number.NewFloat64Number(value), attrs)
}
}
func (a iAdder) Add(ctx context.Context, value int64, attrs ...attribute.KeyValue) {
if a.SyncImpl != nil {
a.SyncImpl.RecordOne(ctx, number.NewInt64Number(value), attrs)
}
}
func (a fRecorder) Record(ctx context.Context, value float64, attrs ...attribute.KeyValue) {
if a.SyncImpl != nil {
a.SyncImpl.RecordOne(ctx, number.NewFloat64Number(value), attrs)
}
}
func (a iRecorder) Record(ctx context.Context, value int64, attrs ...attribute.KeyValue) {
if a.SyncImpl != nil {
a.SyncImpl.RecordOne(ctx, number.NewInt64Number(value), attrs)
}
}
func (a fObserver) Observe(ctx context.Context, value float64, attrs ...attribute.KeyValue) {
if a.AsyncImpl != nil {
a.AsyncImpl.ObserveOne(ctx, number.NewFloat64Number(value), attrs)
}
}
func (a iObserver) Observe(ctx context.Context, value int64, attrs ...attribute.KeyValue) {
if a.AsyncImpl != nil {
a.AsyncImpl.ObserveOne(ctx, number.NewInt64Number(value), attrs)
}
}

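A hedged sketch of adapting a low-level MeterImpl to the public metric.Meter API through the wrappers above; impl stands for any sdkapi.MeterImpl and the instrument names are illustrative:

package example

import (
    "context"

    "go.opentelemetry.io/otel/attribute"
    "go.opentelemetry.io/otel/sdk/metric/sdkapi"
)

func demo(ctx context.Context, impl sdkapi.MeterImpl) error {
    meter := sdkapi.WrapMeterImpl(impl)

    counter, err := meter.SyncInt64().Counter("jobs.completed")
    if err != nil {
        return err
    }
    counter.Add(ctx, 1, attribute.String("queue", "default"))

    histogram, err := meter.SyncFloat64().Histogram("job.duration")
    if err != nil {
        return err
    }
    histogram.Record(ctx, 0.25)
    return nil
}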
View File

@@ -15,11 +15,12 @@
package simple // import "go.opentelemetry.io/otel/sdk/metric/selector/simple"
import (
"go.opentelemetry.io/otel/metric/sdkapi"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/metric/aggregator"
"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram"
"go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue"
"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
"go.opentelemetry.io/otel/sdk/metric/export"
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
)
type (
@@ -50,21 +51,21 @@ func NewWithHistogramDistribution(options ...histogram.Option) export.Aggregator
return selectorHistogram{options: options}
}
func sumAggs(aggPtrs []*export.Aggregator) {
func sumAggs(aggPtrs []*aggregator.Aggregator) {
aggs := sum.New(len(aggPtrs))
for i := range aggPtrs {
*aggPtrs[i] = &aggs[i]
}
}
func lastValueAggs(aggPtrs []*export.Aggregator) {
func lastValueAggs(aggPtrs []*aggregator.Aggregator) {
aggs := lastvalue.New(len(aggPtrs))
for i := range aggPtrs {
*aggPtrs[i] = &aggs[i]
}
}
func (selectorInexpensive) AggregatorFor(descriptor *sdkapi.Descriptor, aggPtrs ...*export.Aggregator) {
func (selectorInexpensive) AggregatorFor(descriptor *sdkapi.Descriptor, aggPtrs ...*aggregator.Aggregator) {
switch descriptor.InstrumentKind() {
case sdkapi.GaugeObserverInstrumentKind:
lastValueAggs(aggPtrs)
@@ -78,7 +79,7 @@ func (selectorInexpensive) AggregatorFor(descriptor *sdkapi.Descriptor, aggPtrs
}
}
func (s selectorHistogram) AggregatorFor(descriptor *sdkapi.Descriptor, aggPtrs ...*export.Aggregator) {
func (s selectorHistogram) AggregatorFor(descriptor *sdkapi.Descriptor, aggPtrs ...*aggregator.Aggregator) {
switch descriptor.InstrumentKind() {
case sdkapi.GaugeObserverInstrumentKind:
lastValueAggs(aggPtrs)

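A short sketch of the selector with the new aggregator.Aggregator slots; the selector fills the supplied slots based on the instrument kind:

package main

import (
    "fmt"

    "go.opentelemetry.io/otel/sdk/metric/aggregator"
    "go.opentelemetry.io/otel/sdk/metric/number"
    "go.opentelemetry.io/otel/sdk/metric/sdkapi"
    "go.opentelemetry.io/otel/sdk/metric/selector/simple"
)

func main() {
    sel := simple.NewWithHistogramDistribution()
    desc := sdkapi.NewDescriptor("latency", sdkapi.HistogramInstrumentKind, number.Float64Kind, "", "")

    var current, checkpoint aggregator.Aggregator
    sel.AggregatorFor(&desc, &current, &checkpoint)

    fmt.Println(current.Aggregation().Kind()) // prints the histogram aggregation kind
}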
View File

@@ -27,7 +27,7 @@ var (
ErrPartialResource = errors.New("partial resource")
)
// Detector detects OpenTelemetry resource information
// Detector detects OpenTelemetry resource information.
type Detector interface {
// DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release.

View File

@@ -22,7 +22,7 @@ import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
)
type (
@@ -92,7 +92,7 @@ func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) {
return NewWithAttributes(sd.schemaURL, sd.K.String(value)), nil
}
// Detect implements Detector
// Detect implements Detector.
func (defaultServiceNameDetector) Detect(ctx context.Context) (*Resource, error) {
return StringDetector(
semconv.SchemaURL,

View File

@@ -31,7 +31,7 @@ type config struct {
// Option is the interface that applies a configuration option.
type Option interface {
// apply sets the Option value of a config.
apply(*config)
apply(config) config
}
// WithAttributes adds attributes to the configured Resource.
@@ -56,8 +56,9 @@ type detectorsOption struct {
detectors []Detector
}
func (o detectorsOption) apply(cfg *config) {
func (o detectorsOption) apply(cfg config) config {
cfg.detectors = append(cfg.detectors, o.detectors...)
return cfg
}
// WithFromEnv adds attributes from environment variables to the configured resource.
@@ -82,8 +83,9 @@ func WithSchemaURL(schemaURL string) Option {
type schemaURLOption string
func (o schemaURLOption) apply(cfg *config) {
func (o schemaURLOption) apply(cfg config) config {
cfg.schemaURL = string(o)
return cfg
}
// WithOS adds all the OS attributes to the configured Resource.
@@ -108,7 +110,16 @@ func WithOSDescription() Option {
}
// WithProcess adds all the Process attributes to the configured Resource.
// See individual WithProcess* functions to configure specific attributes.
//
// Warning! This option will include process command line arguments. If these
// contain sensitive information it will be included in the exported resource.
//
// This option is equivalent to calling WithProcessPID,
// WithProcessExecutableName, WithProcessExecutablePath,
// WithProcessCommandArgs, WithProcessOwner, WithProcessRuntimeName,
// WithProcessRuntimeVersion, and WithProcessRuntimeDescription. See each
// option function for information about what resource attributes each
// includes.
func WithProcess() Option {
return WithDetectors(
processPIDDetector{},
@@ -141,7 +152,11 @@ func WithProcessExecutablePath() Option {
}
// WithProcessCommandArgs adds an attribute with all the command arguments (including
// the command/executable itself) as received by the process the configured Resource.
// the command/executable itself) as received by the process to the configured
// Resource.
//
// Warning! This option will include process command line arguments. If these
// contain sensitive information it will be included in the exported resource.
func WithProcessCommandArgs() Option {
return WithDetectors(processCommandArgsDetector{})
}
@@ -169,3 +184,16 @@ func WithProcessRuntimeVersion() Option {
func WithProcessRuntimeDescription() Option {
return WithDetectors(processRuntimeDescriptionDetector{})
}
// WithContainer adds all the Container attributes to the configured Resource.
// See individual WithContainer* functions to configure specific attributes.
func WithContainer() Option {
return WithDetectors(
cgroupContainerIDDetector{},
)
}
// WithContainerID adds an attribute with the id of the container to the configured Resource.
func WithContainerID() Option {
return WithDetectors(cgroupContainerIDDetector{})
}

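A usage sketch combining the existing detectors with the new container options; the environment-variable names in the comment are the conventional ones, and the output is whatever the host provides:

package main

import (
    "context"
    "fmt"

    "go.opentelemetry.io/otel/sdk/resource"
    semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
)

func main() {
    res, err := resource.New(context.Background(),
        resource.WithFromEnv(),     // OTEL_RESOURCE_ATTRIBUTES / OTEL_SERVICE_NAME
        resource.WithProcess(),     // note: includes the process command-line arguments
        resource.WithContainerID(), // reads /proc/self/cgroup
        resource.WithSchemaURL(semconv.SchemaURL),
    )
    if err != nil {
        fmt.Println("detection error:", err)
        return
    }
    fmt.Println(res) // encoded attribute set
}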
View File

@@ -0,0 +1,100 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package resource // import "go.opentelemetry.io/otel/sdk/resource"
import (
"bufio"
"context"
"errors"
"io"
"os"
"regexp"
semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
)
type containerIDProvider func() (string, error)
var (
containerID containerIDProvider = getContainerIDFromCGroup
cgroupContainerIDRe = regexp.MustCompile(`^.*/(?:.*-)?([0-9a-f]+)(?:\.|\s*$)`)
)
type cgroupContainerIDDetector struct{}
const cgroupPath = "/proc/self/cgroup"
// Detect returns a *Resource that describes the id of the container.
// If no container id is found, an empty resource will be returned.
func (cgroupContainerIDDetector) Detect(ctx context.Context) (*Resource, error) {
containerID, err := containerID()
if err != nil {
return nil, err
}
if containerID == "" {
return Empty(), nil
}
return NewWithAttributes(semconv.SchemaURL, semconv.ContainerIDKey.String(containerID)), nil
}
var (
defaultOSStat = os.Stat
osStat = defaultOSStat
defaultOSOpen = func(name string) (io.ReadCloser, error) {
return os.Open(name)
}
osOpen = defaultOSOpen
)
// getContainerIDFromCGroup returns the id of the container from the cgroup file.
// If no container id is found, an empty string will be returned.
func getContainerIDFromCGroup() (string, error) {
if _, err := osStat(cgroupPath); errors.Is(err, os.ErrNotExist) {
// File does not exist, skip
return "", nil
}
file, err := osOpen(cgroupPath)
if err != nil {
return "", err
}
defer file.Close()
return getContainerIDFromReader(file), nil
}
// getContainerIDFromReader returns the id of the container from reader.
func getContainerIDFromReader(reader io.Reader) string {
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
line := scanner.Text()
if id := getContainerIDFromLine(line); id != "" {
return id
}
}
return ""
}
// getContainerIDFromLine returns the id of the container from one string line.
func getContainerIDFromLine(line string) string {
matches := cgroupContainerIDRe.FindStringSubmatch(line)
if len(matches) <= 1 {
return ""
}
return matches[1]
}

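A standalone sketch of the cgroup line parsing above, using the same regular expression; the sample line is illustrative of a docker cgroup entry, not taken from a real host:

package main

import (
    "fmt"
    "regexp"
)

// Same expression as cgroupContainerIDRe above.
var re = regexp.MustCompile(`^.*/(?:.*-)?([0-9a-f]+)(?:\.|\s*$)`)

func main() {
    line := "13:cpuset:/docker/6a0f1b2c3d4e5f60718293a4b5c6d7e8f90a1b2c3d4e5f60718293a4b5c6d7e8"
    if m := re.FindStringSubmatch(line); len(m) > 1 {
        fmt.Println(m[1]) // the hex container id after the last path segment
    }
}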
View File

@@ -21,7 +21,7 @@ import (
"strings"
"go.opentelemetry.io/otel/attribute"
semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
)
const (
@@ -42,10 +42,10 @@ var (
// builtin.
type fromEnv struct{}
// compile time assertion that FromEnv implements Detector interface
// compile time assertion that FromEnv implements Detector interface.
var _ Detector = fromEnv{}
// Detect collects resources from environment
// Detect collects resources from environment.
func (fromEnv) Detect(context.Context) (*Resource, error) {
attrs := strings.TrimSpace(os.Getenv(resourceAttrKey))
svcName := strings.TrimSpace(os.Getenv(svcNameKey))

View File

@@ -19,7 +19,7 @@ import (
"strings"
"go.opentelemetry.io/otel/attribute"
semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
)
type osDescriptionProvider func() (string, error)

View File

@@ -22,7 +22,7 @@ import (
"path/filepath"
"runtime"
semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
)
type pidProvider func() int
@@ -39,7 +39,12 @@ var (
defaultExecutablePathProvider executablePathProvider = os.Executable
defaultCommandArgsProvider commandArgsProvider = func() []string { return os.Args }
defaultOwnerProvider ownerProvider = user.Current
defaultRuntimeNameProvider runtimeNameProvider = func() string { return runtime.Compiler }
defaultRuntimeNameProvider runtimeNameProvider = func() string {
if runtime.Compiler == "gc" {
return "go"
}
return runtime.Compiler
}
defaultRuntimeVersionProvider runtimeVersionProvider = runtime.Version
defaultRuntimeOSProvider runtimeOSProvider = func() string { return runtime.GOOS }
defaultRuntimeArchProvider runtimeArchProvider = func() string { return runtime.GOARCH }

View File

@@ -48,7 +48,7 @@ var errMergeConflictSchemaURL = errors.New("cannot merge resource due to conflic
func New(ctx context.Context, opts ...Option) (*Resource, error) {
cfg := config{}
for _, opt := range opts {
opt.apply(&cfg)
cfg = opt.apply(cfg)
}
resource, err := Detect(ctx, cfg.detectors...)
@@ -109,6 +109,17 @@ func (r *Resource) String() string {
return r.attrs.Encoded(attribute.DefaultEncoder())
}
// MarshalLog is the marshaling function used by the logging system to represent this exporter.
func (r *Resource) MarshalLog() interface{} {
return struct {
Attributes attribute.Set
SchemaURL string
}{
Attributes: r.attrs,
SchemaURL: r.schemaURL,
}
}
// Attributes returns a copy of attributes from the resource in a sorted order.
// To avoid allocating a new slice, use an iterator.
func (r *Resource) Attributes() []attribute.KeyValue {
@@ -183,7 +194,7 @@ func Merge(a, b *Resource) (*Resource, error) {
mi := attribute.NewMergeIterator(b.Set(), a.Set())
combine := make([]attribute.KeyValue, 0, a.Len()+b.Len())
for mi.Next() {
combine = append(combine, mi.Label())
combine = append(combine, mi.Attribute())
}
merged := NewWithAttributes(schemaURL, combine...)
return merged, nil