Mirror of https://codeberg.org/superseriousbusiness/gotosocial.git (synced 2024-11-28 09:58:53 +03:00)
[feature] Initial Prometheus metrics implementation (#2334)
* feat: Initial OTEL metrics
* docs: add metrics documentation
* fix: metrics endpoint conditional check
* feat: metrics endpoint basic auth
* fix: make metrics-auth-enabled default false
* fix: go fmt helpers.gen.go
* fix: add metric-related env vars to envparsing.sh
* fix: metrics docs
* fix: metrics related stuff in envparsing.sh
* fix: metrics docs
* chore: metrics docs wording
* fix: metrics stuff in envparsing?
* bump otel versions

Co-authored-by: Tsuribori <user@acertaindebian>
Co-authored-by: Tsuribori <none@example.org>
Co-authored-by: tsmethurst <tobi.smethurst@protonmail.com>
This commit is contained in:
parent 16275853eb
commit 1ba3e14b36

251 changed files with 48389 additions and 22 deletions
@@ -33,6 +33,7 @@ import (
 	apiutil "github.com/superseriousbusiness/gotosocial/internal/api/util"
 	"github.com/superseriousbusiness/gotosocial/internal/cleaner"
 	"github.com/superseriousbusiness/gotosocial/internal/gtserror"
+	"github.com/superseriousbusiness/gotosocial/internal/metrics"
 	"github.com/superseriousbusiness/gotosocial/internal/middleware"
 	tlprocessor "github.com/superseriousbusiness/gotosocial/internal/processing/timeline"
 	"github.com/superseriousbusiness/gotosocial/internal/timeline"
@@ -81,6 +82,11 @@ var Start action.GTSAction = func(ctx context.Context) error {
 		return fmt.Errorf("error initializing tracing: %w", err)
 	}

+	// Initialize Metrics
+	if err := metrics.Initialize(); err != nil {
+		return fmt.Errorf("error initializing metrics: %w", err)
+	}
+
 	// Open connection to the database
 	dbService, err := bundb.NewBunDBService(ctx, &state)
 	if err != nil {
@@ -213,13 +219,18 @@ var Start action.GTSAction = func(ctx context.Context) error {
 	if err != nil {
 		return fmt.Errorf("error creating router: %s", err)
 	}

 	middlewares := []gin.HandlerFunc{
 		middleware.AddRequestID(config.GetRequestIDHeader()), // requestID middleware must run before tracing
 	}

 	if config.GetTracingEnabled() {
 		middlewares = append(middlewares, tracing.InstrumentGin())
 	}
+
+	if config.GetMetricsEnabled() {
+		middlewares = append(middlewares, metrics.InstrumentGin())
+	}

 	middlewares = append(middlewares, []gin.HandlerFunc{
 		// note: hooks adding ctx fields must be ABOVE
 		// the logger, otherwise won't be accessible.
@@ -38,6 +38,7 @@ import (
 	"github.com/superseriousbusiness/gotosocial/internal/gtserror"
 	"github.com/superseriousbusiness/gotosocial/internal/language"
 	"github.com/superseriousbusiness/gotosocial/internal/log"
+	"github.com/superseriousbusiness/gotosocial/internal/metrics"
 	"github.com/superseriousbusiness/gotosocial/internal/middleware"
 	"github.com/superseriousbusiness/gotosocial/internal/oidc"
 	tlprocessor "github.com/superseriousbusiness/gotosocial/internal/processing/timeline"
@@ -68,6 +69,10 @@ var Start action.GTSAction = func(ctx context.Context) error {
 		return fmt.Errorf("error initializing tracing: %w", err)
 	}

+	if err := metrics.Initialize(); err != nil {
+		return fmt.Errorf("error initializing metrics: %w", err)
+	}
+
 	// Initialize caches and database
 	state.DB = testrig.NewTestDB(&state)

@@ -142,6 +147,11 @@ var Start action.GTSAction = func(ctx context.Context) error {
 	if config.GetTracingEnabled() {
 		middlewares = append(middlewares, tracing.InstrumentGin())
 	}
+
+	if config.GetMetricsEnabled() {
+		middlewares = append(middlewares, metrics.InstrumentGin())
+	}

 	middlewares = append(middlewares, []gin.HandlerFunc{
 		middleware.Logger(config.GetLogClientIP()),
 		middleware.UserAgent(),
@@ -14,3 +14,4 @@ We consider these topics advanced because applying them incorrectly does have th
 * [Process sandboxing](security/sandboxing.md)
 * [Firewall configuration](security/firewall.md)
 * [Tracing](tracing.md)
+* [Metrics](metrics.md)
docs/advanced/metrics.md (new file, 37 lines)

# Metrics

GoToSocial comes with [OpenTelemetry][otel] based metrics built in, with a pull-style Prometheus exporter. Currently the following metrics are collected:

* Go performance and runtime metrics
* Gin (HTTP) metrics
* Bun (database) metrics

How to configure metrics is explained in the [Observability configuration reference][obs].

For a quickstart, add the following to your GoToSocial configuration and restart your instance:

```yaml
metrics-enabled: true
metrics-auth-enabled: true
metrics-auth-username: some_username
metrics-auth-password: some_password
```

This will expose the metrics under the endpoint `/metrics`, protected with HTTP Basic Authentication.

The following is an example of how to configure a job for collecting the metrics in Prometheus `scrape_configs`:

```yaml
- job_name: gotosocial
  metrics_path: /metrics
  scheme: https
  basic_auth:
    username: some_username
    password: some_password
  static_configs:
  - targets:
    - example.org
```

[otel]: https://opentelemetry.io/
[obs]: ../configuration/observability.md
@@ -34,4 +34,20 @@ tracing-endpoint: ""
 # Bool. Disable TLS for the gRPC and HTTP transport protocols.
 # Default: false
 tracing-insecure-transport: false
+
+# Bool. Enable OpenTelemetry based metrics support.
+# Default: false
+metrics-enabled: false
+
+# Bool. Enable HTTP Basic Authentication for Prometheus metrics endpoint.
+# Default: false
+metrics-auth-enabled: false
+
+# String. Username for Prometheus metrics endpoint.
+# Default: ""
+metrics-auth-username: ""
+
+# String. Password for Prometheus metrics endpoint.
+# Default: ""
+metrics-auth-password: ""
 ```
@@ -793,6 +793,22 @@ tracing-endpoint: ""
 # Default: false
 tracing-insecure-transport: false
+
+# Bool. Enable OpenTelemetry based metrics support.
+# Default: false
+metrics-enabled: false
+
+# Bool. Enable HTTP Basic Authentication for Prometheus metrics endpoint.
+# Default: false
+metrics-auth-enabled: false
+
+# String. Username for Prometheus metrics endpoint.
+# Default: ""
+metrics-auth-username: ""
+
+# String. Password for Prometheus metrics endpoint.
+# Default: ""
+metrics-auth-password: ""
+
 ################################
 ##### HTTP CLIENT SETTINGS #####
 ################################
go.mod (18 changed lines)

@@ -40,6 +40,7 @@ require (
 	github.com/minio/minio-go/v7 v7.0.63
 	github.com/mitchellh/mapstructure v1.5.0
 	github.com/oklog/ulid v1.3.1
+	github.com/prometheus/client_golang v1.17.0
 	github.com/spf13/cobra v1.8.0
 	github.com/spf13/viper v1.16.0
 	github.com/stretchr/testify v1.8.4

@@ -47,6 +48,7 @@ require (
 	github.com/superseriousbusiness/exif-terminator v0.5.0
 	github.com/superseriousbusiness/oauth2/v4 v4.3.2-SSB.0.20230227143000-f4900831d6c8
 	github.com/tdewolff/minify/v2 v2.20.7
+	github.com/technologize/otel-go-contrib v1.1.0
 	github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80
 	github.com/ulule/limiter/v3 v3.11.2
 	github.com/uptrace/bun v1.1.16

@@ -56,9 +58,11 @@ require (
 	github.com/wagslane/go-password-validator v0.3.0
 	github.com/yuin/goldmark v1.6.0
 	go.opentelemetry.io/otel v1.21.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.20.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0
-	go.opentelemetry.io/otel/sdk v1.20.0
+	go.opentelemetry.io/otel/exporters/prometheus v0.43.0
+	go.opentelemetry.io/otel/sdk v1.21.0
+	go.opentelemetry.io/otel/sdk/metric v1.21.0
 	go.opentelemetry.io/otel/trace v1.21.0
 	go.uber.org/automaxprocs v1.5.3
 	golang.org/x/crypto v0.14.0

@@ -81,8 +85,10 @@ require (
 	codeberg.org/gruf/go-mangler v1.2.3 // indirect
 	codeberg.org/gruf/go-maps v1.0.3 // indirect
 	github.com/aymerick/douceur v0.2.0 // indirect
+	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/bytedance/sonic v1.9.1 // indirect
 	github.com/cenkalti/backoff/v4 v4.2.1 // indirect
+	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
 	github.com/cilium/ebpf v0.9.1 // indirect
 	github.com/containerd/cgroups/v3 v3.0.1 // indirect

@@ -131,6 +137,7 @@ require (
 	github.com/leodido/go-urn v1.2.4 // indirect
 	github.com/magiconair/properties v1.8.7 // indirect
 	github.com/mattn/go-isatty v0.0.19 // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/minio/md5-simd v1.1.2 // indirect
 	github.com/minio/sha256-simd v1.0.1 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect

@@ -139,6 +146,9 @@ require (
 	github.com/pelletier/go-toml/v2 v2.0.8 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/prometheus/client_model v0.5.0 // indirect
+	github.com/prometheus/common v0.44.0 // indirect
+	github.com/prometheus/procfs v0.11.1 // indirect
 	github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b // indirect
 	github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
 	github.com/rs/xid v1.5.0 // indirect

@@ -156,7 +166,7 @@ require (
 	github.com/uptrace/opentelemetry-go-extra/otelsql v0.2.2 // indirect
 	github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect
 	github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect
 	go.opentelemetry.io/otel/metric v1.21.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.0.0 // indirect
 	golang.org/x/arch v0.3.0 // indirect
go.sum (36 changed lines)

@@ -91,6 +91,8 @@ github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/
 github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
 github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
 github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/buckket/go-blurhash v1.1.0 h1:X5M6r0LIvwdvKiUtiNcRL2YlmOfMzYobI3VCKCZc9Do=
 github.com/buckket/go-blurhash v1.1.0/go.mod h1:aT2iqo5W9vu9GpyoLErKfTHwgODsZp3bQfXjXJUxNb8=
 github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=

@@ -99,6 +101,8 @@ github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZX
 github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
 github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
 github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
 github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=

@@ -388,6 +392,8 @@ github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APP
 github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U=
 github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
+github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
 github.com/microcosm-cc/bluemonday v1.0.26 h1:xbqSvqzQMeEHCqMi64VAs4d8uy6Mequs3rQ0k/Khz58=
 github.com/microcosm-cc/bluemonday v1.0.26/go.mod h1:JyzOCs9gkyQyjs+6h10UEVSe02CGwkhd72Xdqh78TWs=
 github.com/miekg/dns v1.1.56 h1:5imZaSeoRNvpM9SzWNhEcP9QliKiz20/dA2QabIGVnE=

@@ -432,7 +438,15 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
 github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
+github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
+github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
+github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
+github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
+github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
+github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
+github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
 github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b h1:aUNXCGgukb4gtY99imuIeoh8Vr0GSwAlYxPAhqZrpFc=
 github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b/go.mod h1:wTPjTepVu7uJBYgZ0SdWHQlIas582j6cn2jgk4DDdlg=
 github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=

@@ -499,6 +513,8 @@ github.com/tdewolff/parse/v2 v2.7.5 h1:RdcN3Ja6zAMSvnxxO047xRoWexX3RrXKi3H6EQHzX
 github.com/tdewolff/parse/v2 v2.7.5/go.mod h1:3FbJWZp3XT9OWVN3Hmfp0p/a08v4h8J9W1aghka0soA=
 github.com/tdewolff/test v1.0.11-0.20231101010635-f1265d231d52 h1:gAQliwn+zJrkjAHVcBEYW/RFvd2St4yYimisvozAYlA=
 github.com/tdewolff/test v1.0.11-0.20231101010635-f1265d231d52/go.mod h1:6DAvZliBAAnD7rhVgwaM7DE5/d9NMOAJ09SqYqeK4QE=
+github.com/technologize/otel-go-contrib v1.1.0 h1:gl9bxxJAgXFnKJzoprJOfbvNRE1k3Ky9O7ppVJDb9gg=
+github.com/technologize/otel-go-contrib v1.1.0/go.mod h1:dCN/wj2WyUO8aFZFdIN+6tfJHImjTML/8r2YVYAy3So=
 github.com/tidwall/btree v0.0.0-20191029221954-400434d76274 h1:G6Z6HvJuPjG6XfNGi/feOATzeJrfgTNJY+rGrHbA04E=
 github.com/tidwall/btree v0.0.0-20191029221954-400434d76274/go.mod h1:huei1BkDWJ3/sLXmO+bsCNELL+Bp2Kks9OLyQFkzvA8=
 github.com/tidwall/buntdb v1.1.2 h1:noCrqQXL9EKMtcdwJcmuVKSEjqu1ua99RHHgbLTEHRo=

@@ -581,16 +597,20 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
 go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
 go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 h1:DeFD0VgTZ+Cj6hxravYYZE2W4GlneVH81iAOPjZkzk8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0/go.mod h1:GijYcYmNpX1KazD5JmWGsi4P7dDTTTnfv1UbGn84MnU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 h1:gvmNvqrPYovvyRmCSygkUDyL8lC5Tl845MLEwqpxhEU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0/go.mod h1:vNUq47TGFioo+ffTSnKNdob241vePmtNZnAODKapKd0=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.20.0 h1:CsBiKCiQPdSjS+MlRiqeTI9JDDpSuk0Hb6QTRfwer8k=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.20.0/go.mod h1:CMJYNAfooOwSZSAmAeMUV1M+TXld3BiK++z9fqIm2xk=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I=
+go.opentelemetry.io/otel/exporters/prometheus v0.43.0 h1:Skkl6akzvdWweXX6LLAY29tyFSO6hWZ26uDbVGTDXe8=
+go.opentelemetry.io/otel/exporters/prometheus v0.43.0/go.mod h1:nZStMoc1H/YJpRjSx9IEX4abBMekORTLQcTUT1CgLkg=
 go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
 go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
-go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM=
+go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
-go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0=
+go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
+go.opentelemetry.io/otel/sdk/metric v1.21.0 h1:smhI5oD714d6jHE6Tie36fPx4WDFIg+Y6RfAY4ICcR0=
+go.opentelemetry.io/otel/sdk/metric v1.21.0/go.mod h1:FJ8RAsoPGv/wYMgBdUJXOm+6pzFY3YdljnXtv1SBE8Q=
 go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
 go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
 go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
@@ -140,6 +140,11 @@ type Configuration struct {
 	TracingEndpoint          string `name:"tracing-endpoint" usage:"Endpoint of your trace collector. Eg., 'localhost:4317' for gRPC, 'localhost:4318' for http"`
 	TracingInsecureTransport bool   `name:"tracing-insecure-transport" usage:"Disable TLS for the gRPC or HTTP transport protocol"`

+	MetricsEnabled      bool   `name:"metrics-enabled" usage:"Enable OpenTelemetry based metrics support."`
+	MetricsAuthEnabled  bool   `name:"metrics-auth-enabled" usage:"Enable HTTP Basic Authentication for Prometheus metrics endpoint"`
+	MetricsAuthUsername string `name:"metrics-auth-username" usage:"Username for Prometheus metrics endpoint"`
+	MetricsAuthPassword string `name:"metrics-auth-password" usage:"Password for Prometheus metrics endpoint"`
+
 	SMTPHost     string `name:"smtp-host" usage:"Host of the smtp server. Eg., 'smtp.eu.mailgun.org'"`
 	SMTPPort     int    `name:"smtp-port" usage:"Port of the smtp server. Eg., 587"`
 	SMTPUsername string `name:"smtp-username" usage:"Username to authenticate with the smtp server as. Eg., 'postmaster@mail.example.org'"`
@@ -121,6 +121,9 @@ var Defaults = Configuration{
 	TracingEndpoint:          "",
 	TracingInsecureTransport: false,

+	MetricsEnabled:     false,
+	MetricsAuthEnabled: false,
+
 	SyslogEnabled:  false,
 	SyslogProtocol: "udp",
 	SyslogAddress:  "localhost:514",
@@ -2100,6 +2100,106 @@ func GetTracingInsecureTransport() bool { return global.GetTracingInsecureTransp
 // SetTracingInsecureTransport safely sets the value for global configuration 'TracingInsecureTransport' field
 func SetTracingInsecureTransport(v bool) { global.SetTracingInsecureTransport(v) }
+
+// GetMetricsEnabled safely fetches the Configuration value for state's 'MetricsEnabled' field
+func (st *ConfigState) GetMetricsEnabled() (v bool) {
+	st.mutex.RLock()
+	v = st.config.MetricsEnabled
+	st.mutex.RUnlock()
+	return
+}
+
+// SetMetricsEnabled safely sets the Configuration value for state's 'MetricsEnabled' field
+func (st *ConfigState) SetMetricsEnabled(v bool) {
+	st.mutex.Lock()
+	defer st.mutex.Unlock()
+	st.config.MetricsEnabled = v
+	st.reloadToViper()
+}
+
+// MetricsEnabledFlag returns the flag name for the 'MetricsEnabled' field
+func MetricsEnabledFlag() string { return "metrics-enabled" }
+
+// GetMetricsEnabled safely fetches the value for global configuration 'MetricsEnabled' field
+func GetMetricsEnabled() bool { return global.GetMetricsEnabled() }
+
+// SetMetricsEnabled safely sets the value for global configuration 'MetricsEnabled' field
+func SetMetricsEnabled(v bool) { global.SetMetricsEnabled(v) }
+
+// GetMetricsAuthEnabled safely fetches the Configuration value for state's 'MetricsAuthEnabled' field
+func (st *ConfigState) GetMetricsAuthEnabled() (v bool) {
+	st.mutex.RLock()
+	v = st.config.MetricsAuthEnabled
+	st.mutex.RUnlock()
+	return
+}
+
+// SetMetricsAuthEnabled safely sets the Configuration value for state's 'MetricsAuthEnabled' field
+func (st *ConfigState) SetMetricsAuthEnabled(v bool) {
+	st.mutex.Lock()
+	defer st.mutex.Unlock()
+	st.config.MetricsAuthEnabled = v
+	st.reloadToViper()
+}
+
+// MetricsAuthEnabledFlag returns the flag name for the 'MetricsAuthEnabled' field
+func MetricsAuthEnabledFlag() string { return "metrics-auth-enabled" }
+
+// GetMetricsAuthEnabled safely fetches the value for global configuration 'MetricsAuthEnabled' field
+func GetMetricsAuthEnabled() bool { return global.GetMetricsAuthEnabled() }
+
+// SetMetricsAuthEnabled safely sets the value for global configuration 'MetricsAuthEnabled' field
+func SetMetricsAuthEnabled(v bool) { global.SetMetricsAuthEnabled(v) }
+
+// GetMetricsAuthUsername safely fetches the Configuration value for state's 'MetricsAuthUsername' field
+func (st *ConfigState) GetMetricsAuthUsername() (v string) {
+	st.mutex.RLock()
+	v = st.config.MetricsAuthUsername
+	st.mutex.RUnlock()
+	return
+}
+
+// SetMetricsAuthUsername safely sets the Configuration value for state's 'MetricsAuthUsername' field
+func (st *ConfigState) SetMetricsAuthUsername(v string) {
+	st.mutex.Lock()
+	defer st.mutex.Unlock()
+	st.config.MetricsAuthUsername = v
+	st.reloadToViper()
+}
+
+// MetricsAuthUsernameFlag returns the flag name for the 'MetricsAuthUsername' field
+func MetricsAuthUsernameFlag() string { return "metrics-auth-username" }
+
+// GetMetricsAuthUsername safely fetches the value for global configuration 'MetricsAuthUsername' field
+func GetMetricsAuthUsername() string { return global.GetMetricsAuthUsername() }
+
+// SetMetricsAuthUsername safely sets the value for global configuration 'MetricsAuthUsername' field
+func SetMetricsAuthUsername(v string) { global.SetMetricsAuthUsername(v) }
+
+// GetMetricsAuthPassword safely fetches the Configuration value for state's 'MetricsAuthPassword' field
+func (st *ConfigState) GetMetricsAuthPassword() (v string) {
+	st.mutex.RLock()
+	v = st.config.MetricsAuthPassword
+	st.mutex.RUnlock()
+	return
+}
+
+// SetMetricsAuthPassword safely sets the Configuration value for state's 'MetricsAuthPassword' field
+func (st *ConfigState) SetMetricsAuthPassword(v string) {
+	st.mutex.Lock()
+	defer st.mutex.Unlock()
+	st.config.MetricsAuthPassword = v
+	st.reloadToViper()
+}
+
+// MetricsAuthPasswordFlag returns the flag name for the 'MetricsAuthPassword' field
+func MetricsAuthPasswordFlag() string { return "metrics-auth-password" }
+
+// GetMetricsAuthPassword safely fetches the value for global configuration 'MetricsAuthPassword' field
+func GetMetricsAuthPassword() string { return global.GetMetricsAuthPassword() }
+
+// SetMetricsAuthPassword safely sets the value for global configuration 'MetricsAuthPassword' field
+func SetMetricsAuthPassword(v string) { global.SetMetricsAuthPassword(v) }
+
 // GetSMTPHost safely fetches the Configuration value for state's 'SMTPHost' field
 func (st *ConfigState) GetSMTPHost() (v string) {
 	st.mutex.RLock()
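The block above is generated accessor code and follows the same pattern as the existing tracing fields: a mutex-guarded getter/setter pair on ConfigState plus package-level wrappers around the global state. A hypothetical snippet, purely to illustrate the call sites added here (not part of the commit):

```go
package main

import "github.com/superseriousbusiness/gotosocial/internal/config"

func main() {
	// Package-level helpers read and write the global ConfigState under its mutex.
	config.SetMetricsEnabled(true)
	config.SetMetricsAuthEnabled(true)
	config.SetMetricsAuthUsername("some_username")
	config.SetMetricsAuthPassword("some_password")

	if config.GetMetricsEnabled() && config.GetMetricsAuthEnabled() {
		// Flag-name helpers are also generated, e.g. "metrics-enabled".
		_ = config.MetricsEnabledFlag()
	}
}
```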
@@ -40,6 +40,7 @@ import (
 	"github.com/superseriousbusiness/gotosocial/internal/db/bundb/migrations"
 	"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
 	"github.com/superseriousbusiness/gotosocial/internal/log"
+	"github.com/superseriousbusiness/gotosocial/internal/metrics"
 	"github.com/superseriousbusiness/gotosocial/internal/state"
 	"github.com/superseriousbusiness/gotosocial/internal/tracing"
 	"github.com/uptrace/bun"

@@ -142,6 +143,9 @@ func NewBunDBService(ctx context.Context, state *state.State) (db.DB, error) {
 	if config.GetTracingEnabled() {
 		db.AddQueryHook(tracing.InstrumentBun())
 	}
+	if config.GetMetricsEnabled() {
+		db.AddQueryHook(metrics.InstrumentBun())
+	}

 	// table registration is needed for many-to-many, see:
 	// https://bun.uptrace.dev/orm/many-to-many-relation/
internal/metrics/metrics.go (new file, 82 lines)

// GoToSocial
// Copyright (C) GoToSocial Authors admin@gotosocial.org
// SPDX-License-Identifier: AGPL-3.0-or-later
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

//go:build !nometrics

package metrics

import (
	"errors"

	"github.com/gin-gonic/gin"
	"github.com/superseriousbusiness/gotosocial/internal/config"
	"github.com/technologize/otel-go-contrib/otelginmetrics"
	"github.com/uptrace/bun"
	"github.com/uptrace/bun/extra/bunotel"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/prometheus"
	sdk "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)

const (
	serviceName = "GoToSocial"
)

func Initialize() error {
	if !config.GetMetricsEnabled() {
		return nil
	}

	if config.GetMetricsAuthEnabled() {
		if config.GetMetricsAuthPassword() == "" || config.GetMetricsAuthUsername() == "" {
			return errors.New("metrics-auth-username and metrics-auth-password must be set when metrics-auth-enabled is true")
		}
	}

	r, _ := resource.Merge(
		resource.Default(),
		resource.NewWithAttributes(
			semconv.SchemaURL,
			semconv.ServiceName(serviceName),
		),
	)

	prometheusExporter, err := prometheus.New()
	if err != nil {
		return err
	}

	meterProvider := sdk.NewMeterProvider(
		sdk.WithResource(r),
		sdk.WithReader(prometheusExporter),
	)
	otel.SetMeterProvider(meterProvider)

	return nil
}

func InstrumentGin() gin.HandlerFunc {
	return otelginmetrics.Middleware(serviceName)
}

func InstrumentBun() bun.QueryHook {
	return bunotel.NewQueryHook(
		bunotel.WithMeterProvider(otel.GetMeterProvider()),
	)
}
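Elsewhere in this commit the three exported functions above are wired in as follows: Initialize() at startup, InstrumentGin() as router middleware, InstrumentBun() as a bun query hook, and the /metrics route served via promhttp. A rough standalone sketch of the same composition, outside GoToSocial's own config handling (the service name and port below are placeholders):

```go
package main

import (
	"log"

	"github.com/gin-gonic/gin"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/technologize/otel-go-contrib/otelginmetrics"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/prometheus"
	sdk "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	// The Prometheus exporter is a pull-style reader for the meter provider;
	// by default it registers with the default Prometheus registry, which is
	// what promhttp.Handler() serves.
	exporter, err := prometheus.New()
	if err != nil {
		log.Fatal(err)
	}
	otel.SetMeterProvider(sdk.NewMeterProvider(sdk.WithReader(exporter)))

	r := gin.New()
	r.Use(otelginmetrics.Middleware("example-service")) // per-route HTTP metrics
	r.GET("/metrics", gin.WrapH(promhttp.Handler()))    // scrape endpoint
	r.GET("/hello", func(c *gin.Context) { c.String(200, "hello") })

	log.Fatal(r.Run(":8080"))
}
```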
internal/metrics/no_metrics.go (new file, 43 lines)

// GoToSocial
// Copyright (C) GoToSocial Authors admin@gotosocial.org
// SPDX-License-Identifier: AGPL-3.0-or-later
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

//go:build nometrics

package metrics

import (
	"errors"

	"github.com/gin-gonic/gin"
	"github.com/superseriousbusiness/gotosocial/internal/config"
	"github.com/uptrace/bun"
)

func Initialize() error {
	if config.GetMetricsEnabled() {
		return errors.New("metrics was disabled at build time")
	}
	return nil
}

func InstrumentGin() gin.HandlerFunc {
	return func(c *gin.Context) {}
}

func InstrumentBun() bun.QueryHook {
	return nil
}
internal/web/metrics.go (new file, 33 lines)

// GoToSocial
// Copyright (C) GoToSocial Authors admin@gotosocial.org
// SPDX-License-Identifier: AGPL-3.0-or-later
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package web

import (
	"github.com/gin-gonic/gin"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

const (
	metricsPath = "/metrics"
	metricsUser = "metrics"
)

func (m *Module) metricsGETHandler(c *gin.Context) {
	h := promhttp.Handler()
	h.ServeHTTP(c.Writer, c.Request)
}
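metricsGETHandler above simply adapts the stock promhttp handler to gin; it serves whatever has been registered with the default Prometheus registry, which is where the OTel Prometheus exporter from metrics.go registers by default. A minimal sketch (not part of this commit) exercising the same handler shape with httptest:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gin-gonic/gin"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	gin.SetMode(gin.TestMode)
	r := gin.New()

	// Same shape as metricsGETHandler: hand the request straight to promhttp.
	r.GET("/metrics", func(c *gin.Context) {
		promhttp.Handler().ServeHTTP(c.Writer, c.Request)
	})

	w := httptest.NewRecorder()
	req := httptest.NewRequest(http.MethodGet, "/metrics", nil)
	r.ServeHTTP(w, req)

	// Expect 200 and the text exposition format; the client_golang default
	// registry already includes the go_* and process_* collectors.
	fmt.Println(w.Code)
	fmt.Println(w.Header().Get("Content-Type"))
}
```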
@@ -110,6 +110,19 @@ func (m *Module) Route(r *router.Router, mi ...gin.HandlerFunc) {
 	r.AttachHandler(http.MethodGet, domainBlockListPath, m.domainBlockListGETHandler)
 	r.AttachHandler(http.MethodGet, tagsPath, m.tagGETHandler)

+	// Prometheus metrics export endpoint
+	if config.GetMetricsEnabled() {
+		metricsGroup := r.AttachGroup(metricsPath)
+		metricsGroup.Use(mi...)
+		// Attach basic auth if enabled
+		if config.GetMetricsAuthEnabled() {
+			metricsGroup.Use(gin.BasicAuth(gin.Accounts{
+				config.GetMetricsAuthUsername(): config.GetMetricsAuthPassword(),
+			}))
+		}
+		metricsGroup.Handle(http.MethodGet, "", m.metricsGETHandler)
+	}
+
 	// Attach redirects from old endpoints to current ones for backwards compatibility
 	r.AttachHandler(http.MethodGet, "/auth/edit", func(c *gin.Context) { c.Redirect(http.StatusMovedPermanently, userPanelPath) })
 	r.AttachHandler(http.MethodGet, "/user", func(c *gin.Context) { c.Redirect(http.StatusMovedPermanently, userPanelPath) })
@@ -99,6 +99,7 @@ nav:
         - "advanced/security/sandboxing.md"
         - "advanced/security/firewall.md"
         - "advanced/tracing.md"
+        - "advanced/metrics.md"

     - "Admin":
         - "admin/settings.md"
@@ -18,6 +18,7 @@ GO_GCFLAGS=${GO_GCFLAGS-}
 # - kvformat: enables prettier output of log fields (slightly better performance)
 # - timetzdata: embed timezone database inside binary (allow setting local time inside Docker containers, at cost of 450KB)
 # - notracing: disables compiling-in otel tracing support (reduced binary size, better performance)
+# - nometrics: disables compiling-in otel metrics support (reduced binary size, better performance)
 # - noerrcaller: disables caller function prefix in errors (slightly better performance, at cost of err readability)
 # - debug: enables /debug/pprof endpoint (adds debug, at performance cost)
 # - debugenv: enables /debug/pprof endpoint if DEBUG=1 env during runtime (adds debug, at performance cost)
@@ -110,6 +110,10 @@ EXPECT=$(cat << "EOF"
   "media-image-max-size": 420,
   "media-remote-cache-days": 30,
   "media-video-max-size": 420,
+  "metrics-auth-enabled": false,
+  "metrics-auth-password": "",
+  "metrics-auth-username": "",
+  "metrics-enabled": false,
   "oidc-admin-groups": [
     "steamy"
   ],

@@ -219,6 +223,8 @@ GTS_MEDIA_DESCRIPTION_MAX_CHARS=5000 \
 GTS_MEDIA_REMOTE_CACHE_DAYS=30 \
 GTS_MEDIA_EMOJI_LOCAL_MAX_SIZE=420 \
 GTS_MEDIA_EMOJI_REMOTE_MAX_SIZE=420 \
+GTS_METRICS_AUTH_ENABLED=false \
+GTS_METRICS_ENABLED=false \
 GTS_STORAGE_BACKEND='local' \
 GTS_STORAGE_LOCAL_BASE_PATH='/root/store' \
 GTS_STORAGE_S3_ACCESS_KEY='minio' \
@@ -132,6 +132,9 @@ var testDefaults = config.Configuration{
 	TracingTransport:         "grpc",
 	TracingInsecureTransport: true,

+	MetricsEnabled:     false,
+	MetricsAuthEnabled: false,
+
 	SyslogEnabled:  false,
 	SyslogProtocol: "udp",
 	SyslogAddress:  "localhost:514",
vendor/github.com/beorn7/perks/LICENSE (new vendored file, 20 lines)

Copyright (C) 2013 Blake Mizerany

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
vendor/github.com/beorn7/perks/quantile/exampledata.txt (new vendored file, 2388 lines; diff suppressed because it is too large)
vendor/github.com/beorn7/perks/quantile/stream.go (new vendored file, 316 lines; listing truncated)

// Package quantile computes approximate quantiles over an unbounded data
// stream within low memory and CPU bounds.
//
// A small amount of accuracy is traded to achieve the above properties.
//
// Multiple streams can be merged before calling Query to generate a single set
// of results. This is meaningful when the streams represent the same type of
// data. See Merge and Samples.
//
// For more detailed information about the algorithm used, see:
//
// Effective Computation of Biased Quantiles over Data Streams
//
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
package quantile

import (
	"math"
	"sort"
)

// Sample holds an observed value and meta information for compression. JSON
// tags have been added for convenience.
type Sample struct {
	Value float64 `json:",string"`
	Width float64 `json:",string"`
	Delta float64 `json:",string"`
}

// Samples represents a slice of samples. It implements sort.Interface.
type Samples []Sample

func (a Samples) Len() int           { return len(a) }
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a Samples) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

type invariant func(s *stream, r float64) float64

// NewLowBiased returns an initialized Stream for low-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the lower ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewLowBiased(epsilon float64) *Stream {
	ƒ := func(s *stream, r float64) float64 {
		return 2 * epsilon * r
	}
	return newStream(ƒ)
}

// NewHighBiased returns an initialized Stream for high-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the higher ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewHighBiased(epsilon float64) *Stream {
	ƒ := func(s *stream, r float64) float64 {
		return 2 * epsilon * (s.n - r)
	}
	return newStream(ƒ)
}

// NewTargeted returns an initialized Stream concerned with a particular set of
// quantile values that are supplied a priori. Knowing these a priori reduces
// space and computation time. The targets map maps the desired quantiles to
// their absolute errors, i.e. the true quantile of a value returned by a query
// is guaranteed to be within (Quantile±Epsilon).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
func NewTargeted(targetMap map[float64]float64) *Stream {
	// Convert map to slice to avoid slow iterations on a map.
	// ƒ is called on the hot path, so converting the map to a slice
	// beforehand results in significant CPU savings.
	targets := targetMapToSlice(targetMap)

	ƒ := func(s *stream, r float64) float64 {
		var m = math.MaxFloat64
		var f float64
		for _, t := range targets {
			if t.quantile*s.n <= r {
				f = (2 * t.epsilon * r) / t.quantile
			} else {
				f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
			}
			if f < m {
				m = f
			}
		}
		return m
	}
	return newStream(ƒ)
}

type target struct {
	quantile float64
	epsilon  float64
}

func targetMapToSlice(targetMap map[float64]float64) []target {
	targets := make([]target, 0, len(targetMap))

	for quantile, epsilon := range targetMap {
		t := target{
			quantile: quantile,
			epsilon:  epsilon,
		}
		targets = append(targets, t)
	}

	return targets
}

// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
	*stream
	b      Samples
	sorted bool
}

func newStream(ƒ invariant) *Stream {
	x := &stream{ƒ: ƒ}
	return &Stream{x, make(Samples, 0, 500), true}
}

// Insert inserts v into the stream.
func (s *Stream) Insert(v float64) {
	s.insert(Sample{Value: v, Width: 1})
}

func (s *Stream) insert(sample Sample) {
	s.b = append(s.b, sample)
	s.sorted = false
	if len(s.b) == cap(s.b) {
		s.flush()
	}
}

// Query returns the computed qth percentiles value. If s was created with
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
// will return an unspecified result.
func (s *Stream) Query(q float64) float64 {
	if !s.flushed() {
		// Fast path when there hasn't been enough data for a flush;
		// this also yields better accuracy for small sets of data.
		l := len(s.b)
		if l == 0 {
			return 0
		}
		i := int(math.Ceil(float64(l) * q))
		if i > 0 {
			i -= 1
		}
		s.maybeSort()
		return s.b[i].Value
	}
	s.flush()
	return s.stream.query(q)
}

// Merge merges samples into the underlying streams samples. This is handy when
// merging multiple streams from separate threads, database shards, etc.
//
// ATTENTION: This method is broken and does not yield correct results. The
// underlying algorithm is not capable of merging streams correctly.
func (s *Stream) Merge(samples Samples) {
	sort.Sort(samples)
	s.stream.merge(samples)
}

// Reset reinitializes and clears the list reusing the samples buffer memory.
func (s *Stream) Reset() {
	s.stream.reset()
	s.b = s.b[:0]
}

// Samples returns stream samples held by s.
func (s *Stream) Samples() Samples {
	if !s.flushed() {
		return s.b
	}
	s.flush()
	return s.stream.samples()
}

// Count returns the total number of samples observed in the stream
// since initialization.
func (s *Stream) Count() int {
	return len(s.b) + s.stream.count()
}

func (s *Stream) flush() {
	s.maybeSort()
	s.stream.merge(s.b)
	s.b = s.b[:0]
}

func (s *Stream) maybeSort() {
	if !s.sorted {
		s.sorted = true
		sort.Sort(s.b)
	}
}

func (s *Stream) flushed() bool {
	return len(s.stream.l) > 0
}

type stream struct {
	n float64
	l []Sample
	ƒ invariant
}

func (s *stream) reset() {
	s.l = s.l[:0]
|
s.n = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stream) insert(v float64) {
|
||||||
|
s.merge(Samples{{v, 1, 0}})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stream) merge(samples Samples) {
|
||||||
|
// TODO(beorn7): This tries to merge not only individual samples, but
|
||||||
|
// whole summaries. The paper doesn't mention merging summaries at
|
||||||
|
// all. Unittests show that the merging is inaccurate. Find out how to
|
||||||
|
// do merges properly.
|
||||||
|
var r float64
|
||||||
|
i := 0
|
||||||
|
for _, sample := range samples {
|
||||||
|
for ; i < len(s.l); i++ {
|
||||||
|
c := s.l[i]
|
||||||
|
if c.Value > sample.Value {
|
||||||
|
// Insert at position i.
|
||||||
|
s.l = append(s.l, Sample{})
|
||||||
|
copy(s.l[i+1:], s.l[i:])
|
||||||
|
s.l[i] = Sample{
|
||||||
|
sample.Value,
|
||||||
|
sample.Width,
|
||||||
|
math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
|
||||||
|
// TODO(beorn7): How to calculate delta correctly?
|
||||||
|
}
|
||||||
|
i++
|
||||||
|
goto inserted
|
||||||
|
}
|
||||||
|
r += c.Width
|
||||||
|
}
|
||||||
|
s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
|
||||||
|
i++
|
||||||
|
inserted:
|
||||||
|
s.n += sample.Width
|
||||||
|
r += sample.Width
|
||||||
|
}
|
||||||
|
s.compress()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stream) count() int {
|
||||||
|
return int(s.n)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stream) query(q float64) float64 {
|
||||||
|
t := math.Ceil(q * s.n)
|
||||||
|
t += math.Ceil(s.ƒ(s, t) / 2)
|
||||||
|
p := s.l[0]
|
||||||
|
var r float64
|
||||||
|
for _, c := range s.l[1:] {
|
||||||
|
r += p.Width
|
||||||
|
if r+c.Width+c.Delta > t {
|
||||||
|
return p.Value
|
||||||
|
}
|
||||||
|
p = c
|
||||||
|
}
|
||||||
|
return p.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stream) compress() {
|
||||||
|
if len(s.l) < 2 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
x := s.l[len(s.l)-1]
|
||||||
|
xi := len(s.l) - 1
|
||||||
|
r := s.n - 1 - x.Width
|
||||||
|
|
||||||
|
for i := len(s.l) - 2; i >= 0; i-- {
|
||||||
|
c := s.l[i]
|
||||||
|
if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
|
||||||
|
x.Width += c.Width
|
||||||
|
s.l[xi] = x
|
||||||
|
// Remove element at i.
|
||||||
|
copy(s.l[i:], s.l[i+1:])
|
||||||
|
s.l = s.l[:len(s.l)-1]
|
||||||
|
xi -= 1
|
||||||
|
} else {
|
||||||
|
x = c
|
||||||
|
xi = i
|
||||||
|
}
|
||||||
|
r -= c.Width
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *stream) samples() Samples {
|
||||||
|
samples := make(Samples, len(s.l))
|
||||||
|
copy(samples, s.l)
|
||||||
|
return samples
|
||||||
|
}
|
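For context, a minimal sketch of how this vendored quantile package is typically consumed; the import path github.com/beorn7/perks/quantile and the target values are assumptions for illustration, not part of this diff:

```go
package main

import (
	"fmt"
	"math/rand"

	// Assumed import path for the quantile package shown above.
	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track the 50th, 90th, and 99th percentiles with absolute errors,
	// matching the NewTargeted contract documented above.
	targets := map[float64]float64{
		0.50: 0.05,
		0.90: 0.01,
		0.99: 0.001,
	}
	q := quantile.NewTargeted(targets)

	for i := 0; i < 100000; i++ {
		q.Insert(rand.Float64())
	}

	fmt.Println("p50:", q.Query(0.50))
	fmt.Println("p99:", q.Query(0.99))
	fmt.Println("count:", q.Count())
}
```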
22 vendor/github.com/cespare/xxhash/v2/LICENSE.txt generated vendored Normal file

@@ -0,0 +1,22 @@
Copyright (c) 2016 Caleb Spare

MIT License

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
72 vendor/github.com/cespare/xxhash/v2/README.md generated vendored Normal file

@@ -0,0 +1,72 @@
# xxhash

[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2)
[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml)

xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go
standard library.

This package provides a straightforward API:

```
func Sum64(b []byte) uint64
func Sum64String(s string) uint64
type Digest struct{ ... }
func New() *Digest
```

The `Digest` type implements hash.Hash64. Its key methods are:

```
func (*Digest) Write([]byte) (int, error)
func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64
```

The package is written with optimized pure Go and also contains even faster
assembly implementations for amd64 and arm64. If desired, the `purego` build tag
opts into using the Go code even on those architectures.

[xxHash]: http://cyan4973.github.io/xxHash/

## Compatibility

This package is in a module and the latest code is in version 2 of the module.
You need a version of Go with at least "minimal module compatibility" to use
github.com/cespare/xxhash/v2:

* 1.9.7+ for Go 1.9
* 1.10.3+ for Go 1.10
* Go 1.11 or later

I recommend using the latest release of Go.

## Benchmarks

Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64.

| input size | purego    | asm       |
| ---------- | --------- | --------- |
| 4 B        | 1.3 GB/s  | 1.2 GB/s  |
| 16 B       | 2.9 GB/s  | 3.5 GB/s  |
| 100 B      | 6.9 GB/s  | 8.1 GB/s  |
| 4 KB       | 11.7 GB/s | 16.7 GB/s |
| 10 MB      | 12.0 GB/s | 17.3 GB/s |

These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
CPU using the following commands under Go 1.19.2:

```
benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
```

## Projects using this package

- [InfluxDB](https://github.com/influxdata/influxdb)
- [Prometheus](https://github.com/prometheus/prometheus)
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
- [FreeCache](https://github.com/coocood/freecache)
- [FastCache](https://github.com/VictoriaMetrics/fastcache)
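A small usage sketch of the API the README describes above; the input strings are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing of a byte slice or a string.
	fmt.Printf("%016x\n", xxhash.Sum64([]byte("hello, world")))
	fmt.Printf("%016x\n", xxhash.Sum64String("hello, world"))

	// Streaming: Digest implements hash.Hash64.
	d := xxhash.New()
	d.WriteString("hello, ")
	d.WriteString("world")
	fmt.Printf("%016x\n", d.Sum64())
}
```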
10 vendor/github.com/cespare/xxhash/v2/testall.sh generated vendored Normal file

@@ -0,0 +1,10 @@
#!/bin/bash
set -eu -o pipefail

# Small convenience script for running the tests with various combinations of
# arch/tags. This assumes we're running on amd64 and have qemu available.

go test ./...
go test -tags purego ./...
GOARCH=arm64 go test
GOARCH=arm64 go test -tags purego
228 vendor/github.com/cespare/xxhash/v2/xxhash.go generated vendored Normal file

@@ -0,0 +1,228 @@
// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
// at http://cyan4973.github.io/xxHash/.
package xxhash

import (
	"encoding/binary"
	"errors"
	"math/bits"
)

const (
	prime1 uint64 = 11400714785074694791
	prime2 uint64 = 14029467366897019727
	prime3 uint64 = 1609587929392839161
	prime4 uint64 = 9650029242287828579
	prime5 uint64 = 2870177450012600261
)

// Store the primes in an array as well.
//
// The consts are used when possible in Go code to avoid MOVs but we need a
// contiguous array of the assembly code.
var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}

// Digest implements hash.Hash64.
type Digest struct {
	v1    uint64
	v2    uint64
	v3    uint64
	v4    uint64
	total uint64
	mem   [32]byte
	n     int // how much of mem is used
}

// New creates a new Digest that computes the 64-bit xxHash algorithm.
func New() *Digest {
	var d Digest
	d.Reset()
	return &d
}

// Reset clears the Digest's state so that it can be reused.
func (d *Digest) Reset() {
	d.v1 = primes[0] + prime2
	d.v2 = prime2
	d.v3 = 0
	d.v4 = -primes[0]
	d.total = 0
	d.n = 0
}

// Size always returns 8 bytes.
func (d *Digest) Size() int { return 8 }

// BlockSize always returns 32 bytes.
func (d *Digest) BlockSize() int { return 32 }

// Write adds more data to d. It always returns len(b), nil.
func (d *Digest) Write(b []byte) (n int, err error) {
	n = len(b)
	d.total += uint64(n)

	memleft := d.mem[d.n&(len(d.mem)-1):]

	if d.n+n < 32 {
		// This new data doesn't even fill the current block.
		copy(memleft, b)
		d.n += n
		return
	}

	if d.n > 0 {
		// Finish off the partial block.
		c := copy(memleft, b)
		d.v1 = round(d.v1, u64(d.mem[0:8]))
		d.v2 = round(d.v2, u64(d.mem[8:16]))
		d.v3 = round(d.v3, u64(d.mem[16:24]))
		d.v4 = round(d.v4, u64(d.mem[24:32]))
		b = b[c:]
		d.n = 0
	}

	if len(b) >= 32 {
		// One or more full blocks left.
		nw := writeBlocks(d, b)
		b = b[nw:]
	}

	// Store any remaining partial block.
	copy(d.mem[:], b)
	d.n = len(b)

	return
}

// Sum appends the current hash to b and returns the resulting slice.
func (d *Digest) Sum(b []byte) []byte {
	s := d.Sum64()
	return append(
		b,
		byte(s>>56),
		byte(s>>48),
		byte(s>>40),
		byte(s>>32),
		byte(s>>24),
		byte(s>>16),
		byte(s>>8),
		byte(s),
	)
}

// Sum64 returns the current hash.
func (d *Digest) Sum64() uint64 {
	var h uint64

	if d.total >= 32 {
		v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
		h = mergeRound(h, v1)
		h = mergeRound(h, v2)
		h = mergeRound(h, v3)
		h = mergeRound(h, v4)
	} else {
		h = d.v3 + prime5
	}

	h += d.total

	b := d.mem[:d.n&(len(d.mem)-1)]
	for ; len(b) >= 8; b = b[8:] {
		k1 := round(0, u64(b[:8]))
		h ^= k1
		h = rol27(h)*prime1 + prime4
	}
	if len(b) >= 4 {
		h ^= uint64(u32(b[:4])) * prime1
		h = rol23(h)*prime2 + prime3
		b = b[4:]
	}
	for ; len(b) > 0; b = b[1:] {
		h ^= uint64(b[0]) * prime5
		h = rol11(h) * prime1
	}

	h ^= h >> 33
	h *= prime2
	h ^= h >> 29
	h *= prime3
	h ^= h >> 32

	return h
}

const (
	magic         = "xxh\x06"
	marshaledSize = len(magic) + 8*5 + 32
)

// MarshalBinary implements the encoding.BinaryMarshaler interface.
func (d *Digest) MarshalBinary() ([]byte, error) {
	b := make([]byte, 0, marshaledSize)
	b = append(b, magic...)
	b = appendUint64(b, d.v1)
	b = appendUint64(b, d.v2)
	b = appendUint64(b, d.v3)
	b = appendUint64(b, d.v4)
	b = appendUint64(b, d.total)
	b = append(b, d.mem[:d.n]...)
	b = b[:len(b)+len(d.mem)-d.n]
	return b, nil
}

// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
func (d *Digest) UnmarshalBinary(b []byte) error {
	if len(b) < len(magic) || string(b[:len(magic)]) != magic {
		return errors.New("xxhash: invalid hash state identifier")
	}
	if len(b) != marshaledSize {
		return errors.New("xxhash: invalid hash state size")
	}
	b = b[len(magic):]
	b, d.v1 = consumeUint64(b)
	b, d.v2 = consumeUint64(b)
	b, d.v3 = consumeUint64(b)
	b, d.v4 = consumeUint64(b)
	b, d.total = consumeUint64(b)
	copy(d.mem[:], b)
	d.n = int(d.total % uint64(len(d.mem)))
	return nil
}

func appendUint64(b []byte, x uint64) []byte {
	var a [8]byte
	binary.LittleEndian.PutUint64(a[:], x)
	return append(b, a[:]...)
}

func consumeUint64(b []byte) ([]byte, uint64) {
	x := u64(b)
	return b[8:], x
}

func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }

func round(acc, input uint64) uint64 {
	acc += input * prime2
	acc = rol31(acc)
	acc *= prime1
	return acc
}

func mergeRound(acc, val uint64) uint64 {
	val = round(0, val)
	acc ^= val
	acc = acc*prime1 + prime4
	return acc
}

func rol1(x uint64) uint64  { return bits.RotateLeft64(x, 1) }
func rol7(x uint64) uint64  { return bits.RotateLeft64(x, 7) }
func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
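A hedged sketch of how the Digest state marshaling shown above can be used to checkpoint and resume a hash mid-stream; the surrounding program and inputs are illustrative, not part of this diff:

```go
package main

import (
	"fmt"
	"log"

	"github.com/cespare/xxhash/v2"
)

func main() {
	d := xxhash.New()
	d.WriteString("partial input...")

	// Snapshot the hash state mid-stream via encoding.BinaryMarshaler.
	state, err := d.MarshalBinary()
	if err != nil {
		log.Fatal(err)
	}

	// Later (or in another process), restore the state and keep writing.
	d2 := xxhash.New()
	if err := d2.UnmarshalBinary(state); err != nil {
		log.Fatal(err)
	}
	d2.WriteString("the rest")

	fmt.Printf("%016x\n", d2.Sum64())
}
```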
209 vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s generated vendored Normal file
|
@ -0,0 +1,209 @@
|
||||||
|
//go:build !appengine && gc && !purego
|
||||||
|
// +build !appengine
|
||||||
|
// +build gc
|
||||||
|
// +build !purego
|
||||||
|
|
||||||
|
#include "textflag.h"
|
||||||
|
|
||||||
|
// Registers:
|
||||||
|
#define h AX
|
||||||
|
#define d AX
|
||||||
|
#define p SI // pointer to advance through b
|
||||||
|
#define n DX
|
||||||
|
#define end BX // loop end
|
||||||
|
#define v1 R8
|
||||||
|
#define v2 R9
|
||||||
|
#define v3 R10
|
||||||
|
#define v4 R11
|
||||||
|
#define x R12
|
||||||
|
#define prime1 R13
|
||||||
|
#define prime2 R14
|
||||||
|
#define prime4 DI
|
||||||
|
|
||||||
|
#define round(acc, x) \
|
||||||
|
IMULQ prime2, x \
|
||||||
|
ADDQ x, acc \
|
||||||
|
ROLQ $31, acc \
|
||||||
|
IMULQ prime1, acc
|
||||||
|
|
||||||
|
// round0 performs the operation x = round(0, x).
|
||||||
|
#define round0(x) \
|
||||||
|
IMULQ prime2, x \
|
||||||
|
ROLQ $31, x \
|
||||||
|
IMULQ prime1, x
|
||||||
|
|
||||||
|
// mergeRound applies a merge round on the two registers acc and x.
|
||||||
|
// It assumes that prime1, prime2, and prime4 have been loaded.
|
||||||
|
#define mergeRound(acc, x) \
|
||||||
|
round0(x) \
|
||||||
|
XORQ x, acc \
|
||||||
|
IMULQ prime1, acc \
|
||||||
|
ADDQ prime4, acc
|
||||||
|
|
||||||
|
// blockLoop processes as many 32-byte blocks as possible,
|
||||||
|
// updating v1, v2, v3, and v4. It assumes that there is at least one block
|
||||||
|
// to process.
|
||||||
|
#define blockLoop() \
|
||||||
|
loop: \
|
||||||
|
MOVQ +0(p), x \
|
||||||
|
round(v1, x) \
|
||||||
|
MOVQ +8(p), x \
|
||||||
|
round(v2, x) \
|
||||||
|
MOVQ +16(p), x \
|
||||||
|
round(v3, x) \
|
||||||
|
MOVQ +24(p), x \
|
||||||
|
round(v4, x) \
|
||||||
|
ADDQ $32, p \
|
||||||
|
CMPQ p, end \
|
||||||
|
JLE loop
|
||||||
|
|
||||||
|
// func Sum64(b []byte) uint64
|
||||||
|
TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
|
||||||
|
// Load fixed primes.
|
||||||
|
MOVQ ·primes+0(SB), prime1
|
||||||
|
MOVQ ·primes+8(SB), prime2
|
||||||
|
MOVQ ·primes+24(SB), prime4
|
||||||
|
|
||||||
|
// Load slice.
|
||||||
|
MOVQ b_base+0(FP), p
|
||||||
|
MOVQ b_len+8(FP), n
|
||||||
|
LEAQ (p)(n*1), end
|
||||||
|
|
||||||
|
// The first loop limit will be len(b)-32.
|
||||||
|
SUBQ $32, end
|
||||||
|
|
||||||
|
// Check whether we have at least one block.
|
||||||
|
CMPQ n, $32
|
||||||
|
JLT noBlocks
|
||||||
|
|
||||||
|
// Set up initial state (v1, v2, v3, v4).
|
||||||
|
MOVQ prime1, v1
|
||||||
|
ADDQ prime2, v1
|
||||||
|
MOVQ prime2, v2
|
||||||
|
XORQ v3, v3
|
||||||
|
XORQ v4, v4
|
||||||
|
SUBQ prime1, v4
|
||||||
|
|
||||||
|
blockLoop()
|
||||||
|
|
||||||
|
MOVQ v1, h
|
||||||
|
ROLQ $1, h
|
||||||
|
MOVQ v2, x
|
||||||
|
ROLQ $7, x
|
||||||
|
ADDQ x, h
|
||||||
|
MOVQ v3, x
|
||||||
|
ROLQ $12, x
|
||||||
|
ADDQ x, h
|
||||||
|
MOVQ v4, x
|
||||||
|
ROLQ $18, x
|
||||||
|
ADDQ x, h
|
||||||
|
|
||||||
|
mergeRound(h, v1)
|
||||||
|
mergeRound(h, v2)
|
||||||
|
mergeRound(h, v3)
|
||||||
|
mergeRound(h, v4)
|
||||||
|
|
||||||
|
JMP afterBlocks
|
||||||
|
|
||||||
|
noBlocks:
|
||||||
|
MOVQ ·primes+32(SB), h
|
||||||
|
|
||||||
|
afterBlocks:
|
||||||
|
ADDQ n, h
|
||||||
|
|
||||||
|
ADDQ $24, end
|
||||||
|
CMPQ p, end
|
||||||
|
JG try4
|
||||||
|
|
||||||
|
loop8:
|
||||||
|
MOVQ (p), x
|
||||||
|
ADDQ $8, p
|
||||||
|
round0(x)
|
||||||
|
XORQ x, h
|
||||||
|
ROLQ $27, h
|
||||||
|
IMULQ prime1, h
|
||||||
|
ADDQ prime4, h
|
||||||
|
|
||||||
|
CMPQ p, end
|
||||||
|
JLE loop8
|
||||||
|
|
||||||
|
try4:
|
||||||
|
ADDQ $4, end
|
||||||
|
CMPQ p, end
|
||||||
|
JG try1
|
||||||
|
|
||||||
|
MOVL (p), x
|
||||||
|
ADDQ $4, p
|
||||||
|
IMULQ prime1, x
|
||||||
|
XORQ x, h
|
||||||
|
|
||||||
|
ROLQ $23, h
|
||||||
|
IMULQ prime2, h
|
||||||
|
ADDQ ·primes+16(SB), h
|
||||||
|
|
||||||
|
try1:
|
||||||
|
ADDQ $4, end
|
||||||
|
CMPQ p, end
|
||||||
|
JGE finalize
|
||||||
|
|
||||||
|
loop1:
|
||||||
|
MOVBQZX (p), x
|
||||||
|
ADDQ $1, p
|
||||||
|
IMULQ ·primes+32(SB), x
|
||||||
|
XORQ x, h
|
||||||
|
ROLQ $11, h
|
||||||
|
IMULQ prime1, h
|
||||||
|
|
||||||
|
CMPQ p, end
|
||||||
|
JL loop1
|
||||||
|
|
||||||
|
finalize:
|
||||||
|
MOVQ h, x
|
||||||
|
SHRQ $33, x
|
||||||
|
XORQ x, h
|
||||||
|
IMULQ prime2, h
|
||||||
|
MOVQ h, x
|
||||||
|
SHRQ $29, x
|
||||||
|
XORQ x, h
|
||||||
|
IMULQ ·primes+16(SB), h
|
||||||
|
MOVQ h, x
|
||||||
|
SHRQ $32, x
|
||||||
|
XORQ x, h
|
||||||
|
|
||||||
|
MOVQ h, ret+24(FP)
|
||||||
|
RET
|
||||||
|
|
||||||
|
// func writeBlocks(d *Digest, b []byte) int
|
||||||
|
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
|
||||||
|
// Load fixed primes needed for round.
|
||||||
|
MOVQ ·primes+0(SB), prime1
|
||||||
|
MOVQ ·primes+8(SB), prime2
|
||||||
|
|
||||||
|
// Load slice.
|
||||||
|
MOVQ b_base+8(FP), p
|
||||||
|
MOVQ b_len+16(FP), n
|
||||||
|
LEAQ (p)(n*1), end
|
||||||
|
SUBQ $32, end
|
||||||
|
|
||||||
|
// Load vN from d.
|
||||||
|
MOVQ s+0(FP), d
|
||||||
|
MOVQ 0(d), v1
|
||||||
|
MOVQ 8(d), v2
|
||||||
|
MOVQ 16(d), v3
|
||||||
|
MOVQ 24(d), v4
|
||||||
|
|
||||||
|
// We don't need to check the loop condition here; this function is
|
||||||
|
// always called with at least one block of data to process.
|
||||||
|
blockLoop()
|
||||||
|
|
||||||
|
// Copy vN back to d.
|
||||||
|
MOVQ v1, 0(d)
|
||||||
|
MOVQ v2, 8(d)
|
||||||
|
MOVQ v3, 16(d)
|
||||||
|
MOVQ v4, 24(d)
|
||||||
|
|
||||||
|
// The number of bytes written is p minus the old base pointer.
|
||||||
|
SUBQ b_base+8(FP), p
|
||||||
|
MOVQ p, ret+32(FP)
|
||||||
|
|
||||||
|
RET
|
183 vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s generated vendored Normal file
|
@ -0,0 +1,183 @@
|
||||||
|
//go:build !appengine && gc && !purego
|
||||||
|
// +build !appengine
|
||||||
|
// +build gc
|
||||||
|
// +build !purego
|
||||||
|
|
||||||
|
#include "textflag.h"
|
||||||
|
|
||||||
|
// Registers:
|
||||||
|
#define digest R1
|
||||||
|
#define h R2 // return value
|
||||||
|
#define p R3 // input pointer
|
||||||
|
#define n R4 // input length
|
||||||
|
#define nblocks R5 // n / 32
|
||||||
|
#define prime1 R7
|
||||||
|
#define prime2 R8
|
||||||
|
#define prime3 R9
|
||||||
|
#define prime4 R10
|
||||||
|
#define prime5 R11
|
||||||
|
#define v1 R12
|
||||||
|
#define v2 R13
|
||||||
|
#define v3 R14
|
||||||
|
#define v4 R15
|
||||||
|
#define x1 R20
|
||||||
|
#define x2 R21
|
||||||
|
#define x3 R22
|
||||||
|
#define x4 R23
|
||||||
|
|
||||||
|
#define round(acc, x) \
|
||||||
|
MADD prime2, acc, x, acc \
|
||||||
|
ROR $64-31, acc \
|
||||||
|
MUL prime1, acc
|
||||||
|
|
||||||
|
// round0 performs the operation x = round(0, x).
|
||||||
|
#define round0(x) \
|
||||||
|
MUL prime2, x \
|
||||||
|
ROR $64-31, x \
|
||||||
|
MUL prime1, x
|
||||||
|
|
||||||
|
#define mergeRound(acc, x) \
|
||||||
|
round0(x) \
|
||||||
|
EOR x, acc \
|
||||||
|
MADD acc, prime4, prime1, acc
|
||||||
|
|
||||||
|
// blockLoop processes as many 32-byte blocks as possible,
|
||||||
|
// updating v1, v2, v3, and v4. It assumes that n >= 32.
|
||||||
|
#define blockLoop() \
|
||||||
|
LSR $5, n, nblocks \
|
||||||
|
PCALIGN $16 \
|
||||||
|
loop: \
|
||||||
|
LDP.P 16(p), (x1, x2) \
|
||||||
|
LDP.P 16(p), (x3, x4) \
|
||||||
|
round(v1, x1) \
|
||||||
|
round(v2, x2) \
|
||||||
|
round(v3, x3) \
|
||||||
|
round(v4, x4) \
|
||||||
|
SUB $1, nblocks \
|
||||||
|
CBNZ nblocks, loop
|
||||||
|
|
||||||
|
// func Sum64(b []byte) uint64
|
||||||
|
TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
|
||||||
|
LDP b_base+0(FP), (p, n)
|
||||||
|
|
||||||
|
LDP ·primes+0(SB), (prime1, prime2)
|
||||||
|
LDP ·primes+16(SB), (prime3, prime4)
|
||||||
|
MOVD ·primes+32(SB), prime5
|
||||||
|
|
||||||
|
CMP $32, n
|
||||||
|
CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
|
||||||
|
BLT afterLoop
|
||||||
|
|
||||||
|
ADD prime1, prime2, v1
|
||||||
|
MOVD prime2, v2
|
||||||
|
MOVD $0, v3
|
||||||
|
NEG prime1, v4
|
||||||
|
|
||||||
|
blockLoop()
|
||||||
|
|
||||||
|
ROR $64-1, v1, x1
|
||||||
|
ROR $64-7, v2, x2
|
||||||
|
ADD x1, x2
|
||||||
|
ROR $64-12, v3, x3
|
||||||
|
ROR $64-18, v4, x4
|
||||||
|
ADD x3, x4
|
||||||
|
ADD x2, x4, h
|
||||||
|
|
||||||
|
mergeRound(h, v1)
|
||||||
|
mergeRound(h, v2)
|
||||||
|
mergeRound(h, v3)
|
||||||
|
mergeRound(h, v4)
|
||||||
|
|
||||||
|
afterLoop:
|
||||||
|
ADD n, h
|
||||||
|
|
||||||
|
TBZ $4, n, try8
|
||||||
|
LDP.P 16(p), (x1, x2)
|
||||||
|
|
||||||
|
round0(x1)
|
||||||
|
|
||||||
|
// NOTE: here and below, sequencing the EOR after the ROR (using a
|
||||||
|
// rotated register) is worth a small but measurable speedup for small
|
||||||
|
// inputs.
|
||||||
|
ROR $64-27, h
|
||||||
|
EOR x1 @> 64-27, h, h
|
||||||
|
MADD h, prime4, prime1, h
|
||||||
|
|
||||||
|
round0(x2)
|
||||||
|
ROR $64-27, h
|
||||||
|
EOR x2 @> 64-27, h, h
|
||||||
|
MADD h, prime4, prime1, h
|
||||||
|
|
||||||
|
try8:
|
||||||
|
TBZ $3, n, try4
|
||||||
|
MOVD.P 8(p), x1
|
||||||
|
|
||||||
|
round0(x1)
|
||||||
|
ROR $64-27, h
|
||||||
|
EOR x1 @> 64-27, h, h
|
||||||
|
MADD h, prime4, prime1, h
|
||||||
|
|
||||||
|
try4:
|
||||||
|
TBZ $2, n, try2
|
||||||
|
MOVWU.P 4(p), x2
|
||||||
|
|
||||||
|
MUL prime1, x2
|
||||||
|
ROR $64-23, h
|
||||||
|
EOR x2 @> 64-23, h, h
|
||||||
|
MADD h, prime3, prime2, h
|
||||||
|
|
||||||
|
try2:
|
||||||
|
TBZ $1, n, try1
|
||||||
|
MOVHU.P 2(p), x3
|
||||||
|
AND $255, x3, x1
|
||||||
|
LSR $8, x3, x2
|
||||||
|
|
||||||
|
MUL prime5, x1
|
||||||
|
ROR $64-11, h
|
||||||
|
EOR x1 @> 64-11, h, h
|
||||||
|
MUL prime1, h
|
||||||
|
|
||||||
|
MUL prime5, x2
|
||||||
|
ROR $64-11, h
|
||||||
|
EOR x2 @> 64-11, h, h
|
||||||
|
MUL prime1, h
|
||||||
|
|
||||||
|
try1:
|
||||||
|
TBZ $0, n, finalize
|
||||||
|
MOVBU (p), x4
|
||||||
|
|
||||||
|
MUL prime5, x4
|
||||||
|
ROR $64-11, h
|
||||||
|
EOR x4 @> 64-11, h, h
|
||||||
|
MUL prime1, h
|
||||||
|
|
||||||
|
finalize:
|
||||||
|
EOR h >> 33, h
|
||||||
|
MUL prime2, h
|
||||||
|
EOR h >> 29, h
|
||||||
|
MUL prime3, h
|
||||||
|
EOR h >> 32, h
|
||||||
|
|
||||||
|
MOVD h, ret+24(FP)
|
||||||
|
RET
|
||||||
|
|
||||||
|
// func writeBlocks(d *Digest, b []byte) int
|
||||||
|
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
|
||||||
|
LDP ·primes+0(SB), (prime1, prime2)
|
||||||
|
|
||||||
|
// Load state. Assume v[1-4] are stored contiguously.
|
||||||
|
MOVD d+0(FP), digest
|
||||||
|
LDP 0(digest), (v1, v2)
|
||||||
|
LDP 16(digest), (v3, v4)
|
||||||
|
|
||||||
|
LDP b_base+8(FP), (p, n)
|
||||||
|
|
||||||
|
blockLoop()
|
||||||
|
|
||||||
|
// Store updated state.
|
||||||
|
STP (v1, v2), 0(digest)
|
||||||
|
STP (v3, v4), 16(digest)
|
||||||
|
|
||||||
|
BIC $31, n
|
||||||
|
MOVD n, ret+32(FP)
|
||||||
|
RET
|
15 vendor/github.com/cespare/xxhash/v2/xxhash_asm.go generated vendored Normal file

@@ -0,0 +1,15 @@
//go:build (amd64 || arm64) && !appengine && gc && !purego
// +build amd64 arm64
// +build !appengine
// +build gc
// +build !purego

package xxhash

// Sum64 computes the 64-bit xxHash digest of b.
//
//go:noescape
func Sum64(b []byte) uint64

//go:noescape
func writeBlocks(d *Digest, b []byte) int
76 vendor/github.com/cespare/xxhash/v2/xxhash_other.go generated vendored Normal file

@@ -0,0 +1,76 @@
//go:build (!amd64 && !arm64) || appengine || !gc || purego
// +build !amd64,!arm64 appengine !gc purego

package xxhash

// Sum64 computes the 64-bit xxHash digest of b.
func Sum64(b []byte) uint64 {
	// A simpler version would be
	//   d := New()
	//   d.Write(b)
	//   return d.Sum64()
	// but this is faster, particularly for small inputs.

	n := len(b)
	var h uint64

	if n >= 32 {
		v1 := primes[0] + prime2
		v2 := prime2
		v3 := uint64(0)
		v4 := -primes[0]
		for len(b) >= 32 {
			v1 = round(v1, u64(b[0:8:len(b)]))
			v2 = round(v2, u64(b[8:16:len(b)]))
			v3 = round(v3, u64(b[16:24:len(b)]))
			v4 = round(v4, u64(b[24:32:len(b)]))
			b = b[32:len(b):len(b)]
		}
		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
		h = mergeRound(h, v1)
		h = mergeRound(h, v2)
		h = mergeRound(h, v3)
		h = mergeRound(h, v4)
	} else {
		h = prime5
	}

	h += uint64(n)

	for ; len(b) >= 8; b = b[8:] {
		k1 := round(0, u64(b[:8]))
		h ^= k1
		h = rol27(h)*prime1 + prime4
	}
	if len(b) >= 4 {
		h ^= uint64(u32(b[:4])) * prime1
		h = rol23(h)*prime2 + prime3
		b = b[4:]
	}
	for ; len(b) > 0; b = b[1:] {
		h ^= uint64(b[0]) * prime5
		h = rol11(h) * prime1
	}

	h ^= h >> 33
	h *= prime2
	h ^= h >> 29
	h *= prime3
	h ^= h >> 32

	return h
}

func writeBlocks(d *Digest, b []byte) int {
	v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
	n := len(b)
	for len(b) >= 32 {
		v1 = round(v1, u64(b[0:8:len(b)]))
		v2 = round(v2, u64(b[8:16:len(b)]))
		v3 = round(v3, u64(b[16:24:len(b)]))
		v4 = round(v4, u64(b[24:32:len(b)]))
		b = b[32:len(b):len(b)]
	}
	d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
	return n - len(b)
}
16 vendor/github.com/cespare/xxhash/v2/xxhash_safe.go generated vendored Normal file

@@ -0,0 +1,16 @@
//go:build appengine
// +build appengine

// This file contains the safe implementations of otherwise unsafe-using code.

package xxhash

// Sum64String computes the 64-bit xxHash digest of s.
func Sum64String(s string) uint64 {
	return Sum64([]byte(s))
}

// WriteString adds more data to d. It always returns len(s), nil.
func (d *Digest) WriteString(s string) (n int, err error) {
	return d.Write([]byte(s))
}
58 vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go generated vendored Normal file

@@ -0,0 +1,58 @@
//go:build !appengine
// +build !appengine

// This file encapsulates usage of unsafe.
// xxhash_safe.go contains the safe implementations.

package xxhash

import (
	"unsafe"
)

// In the future it's possible that compiler optimizations will make these
// XxxString functions unnecessary by realizing that calls such as
// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205.
// If that happens, even if we keep these functions they can be replaced with
// the trivial safe code.

// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
//
//   var b []byte
//   bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
//   bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
//   bh.Len = len(s)
//   bh.Cap = len(s)
//
// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
// weight to this sequence of expressions that any function that uses it will
// not be inlined. Instead, the functions below use a different unsafe
// conversion designed to minimize the inliner weight and allow both to be
// inlined. There is also a test (TestInlining) which verifies that these are
// inlined.
//
// See https://github.com/golang/go/issues/42739 for discussion.

// Sum64String computes the 64-bit xxHash digest of s.
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
func Sum64String(s string) uint64 {
	b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
	return Sum64(b)
}

// WriteString adds more data to d. It always returns len(s), nil.
// It may be faster than Write([]byte(s)) by avoiding a copy.
func (d *Digest) WriteString(s string) (n int, err error) {
	d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
	// d.Write always returns len(s), nil.
	// Ignoring the return output and returning these fixed values buys a
	// savings of 6 in the inliner's cost model.
	return len(s), nil
}

// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
// of the first two words is the same as the layout of a string.
type sliceHeader struct {
	s   string
	cap int
}
201 vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE generated vendored Normal file
|
@ -0,0 +1,201 @@
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright {yyyy} {name of copyright owner}
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
1 vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE generated vendored Normal file

@@ -0,0 +1 @@
Copyright 2012 Matt T. Proud (matt.proud@gmail.com)
1 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore generated vendored Normal file

@@ -0,0 +1 @@
cover.dat
7 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile generated vendored Normal file

@@ -0,0 +1,7 @@
all:

cover:
	go test -cover -v -coverprofile=cover.dat ./...
	go tool cover -func cover.dat

.PHONY: cover
75 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go generated vendored Normal file

@@ -0,0 +1,75 @@
// Copyright 2013 Matt T. Proud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pbutil

import (
	"encoding/binary"
	"errors"
	"io"

	"github.com/golang/protobuf/proto"
)

var errInvalidVarint = errors.New("invalid varint32 encountered")

// ReadDelimited decodes a message from the provided length-delimited stream,
// where the length is encoded as 32-bit varint prefix to the message body.
// It returns the total number of bytes read and any applicable error. This is
// roughly equivalent to the companion Java API's
// MessageLite#parseDelimitedFrom. As per the reader contract, this function
// calls r.Read repeatedly as required until exactly one message including its
// prefix is read and decoded (or an error has occurred). The function never
// reads more bytes from the stream than required. The function never returns
// an error if a message has been read and decoded correctly, even if the end
// of the stream has been reached in doing so. In that case, any subsequent
// calls return (0, io.EOF).
func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
	// Per AbstractParser#parsePartialDelimitedFrom with
	// CodedInputStream#readRawVarint32.
	var headerBuf [binary.MaxVarintLen32]byte
	var bytesRead, varIntBytes int
	var messageLength uint64
	for varIntBytes == 0 { // i.e. no varint has been decoded yet.
		if bytesRead >= len(headerBuf) {
			return bytesRead, errInvalidVarint
		}
		// We have to read byte by byte here to avoid reading more bytes
		// than required. Each read byte is appended to what we have
		// read before.
		newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
		if newBytesRead == 0 {
			if err != nil {
				return bytesRead, err
			}
			// A Reader should not return (0, nil), but if it does,
			// it should be treated as no-op (according to the
			// Reader contract). So let's go on...
			continue
		}
		bytesRead += newBytesRead
		// Now present everything read so far to the varint decoder and
		// see if a varint can be decoded already.
		messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
	}

	messageBuf := make([]byte, messageLength)
	newBytesRead, err := io.ReadFull(r, messageBuf)
	bytesRead += newBytesRead
	if err != nil {
		return bytesRead, err
	}

	return bytesRead, proto.Unmarshal(messageBuf, m)
}
16 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go generated vendored Normal file

@@ -0,0 +1,16 @@
// Copyright 2013 Matt T. Proud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package pbutil provides record length-delimited Protocol Buffer streaming.
package pbutil
46 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go generated vendored Normal file

@@ -0,0 +1,46 @@
// Copyright 2013 Matt T. Proud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pbutil

import (
	"encoding/binary"
	"io"

	"github.com/golang/protobuf/proto"
)

// WriteDelimited encodes and dumps a message to the provided writer prefixed
// with a 32-bit varint indicating the length of the encoded message, producing
// a length-delimited record stream, which can be used to chain together
// encoded messages of the same type together in a file. It returns the total
// number of bytes written and any applicable error. This is roughly
// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
	buffer, err := proto.Marshal(m)
	if err != nil {
		return 0, err
	}

	var buf [binary.MaxVarintLen32]byte
	encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer)))

	sync, err := w.Write(buf[:encodedLength])
	if err != nil {
		return sync, err
	}

	n, err = w.Write(buffer)
	return n + sync, err
}
201
vendor/github.com/prometheus/client_golang/LICENSE
generated
vendored
Normal file
201
vendor/github.com/prometheus/client_golang/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,201 @@
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
23
vendor/github.com/prometheus/client_golang/NOTICE
generated
vendored
Normal file
23
vendor/github.com/prometheus/client_golang/NOTICE
generated
vendored
Normal file
|
@ -0,0 +1,23 @@
|
||||||
|
Prometheus instrumentation library for Go applications
|
||||||
|
Copyright 2012-2015 The Prometheus Authors
|
||||||
|
|
||||||
|
This product includes software developed at
|
||||||
|
SoundCloud Ltd. (http://soundcloud.com/).
|
||||||
|
|
||||||
|
|
||||||
|
The following components are included in this product:
|
||||||
|
|
||||||
|
perks - a fork of https://github.com/bmizerany/perks
|
||||||
|
https://github.com/beorn7/perks
|
||||||
|
Copyright 2013-2015 Blake Mizerany, Björn Rabenstein
|
||||||
|
See https://github.com/beorn7/perks/blob/master/README.md for license details.
|
||||||
|
|
||||||
|
Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
http://github.com/golang/protobuf/
|
||||||
|
Copyright 2010 The Go Authors
|
||||||
|
See source code for license details.
|
||||||
|
|
||||||
|
Support for streaming Protocol Buffer messages for the Go language (golang).
|
||||||
|
https://github.com/matttproud/golang_protobuf_extensions
|
||||||
|
Copyright 2013 Matt T. Proud
|
||||||
|
Licensed under the Apache License, Version 2.0
|
1
vendor/github.com/prometheus/client_golang/prometheus/.gitignore
generated
vendored
Normal file
1
vendor/github.com/prometheus/client_golang/prometheus/.gitignore
generated
vendored
Normal file
|
@ -0,0 +1 @@
|
||||||
|
command-line-arguments.test
|
1
vendor/github.com/prometheus/client_golang/prometheus/README.md
generated
vendored
Normal file
1
vendor/github.com/prometheus/client_golang/prometheus/README.md
generated
vendored
Normal file
|
@ -0,0 +1 @@
|
||||||
|
See [![Go Reference](https://pkg.go.dev/badge/github.com/prometheus/client_golang/prometheus.svg)](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus).
|
38
vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go
generated
vendored
Normal file
38
vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go
generated
vendored
Normal file
|
@ -0,0 +1,38 @@
|
||||||
|
// Copyright 2021 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package prometheus
|
||||||
|
|
||||||
|
import "runtime/debug"
|
||||||
|
|
||||||
|
// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector.
|
||||||
|
// See there for documentation.
|
||||||
|
//
|
||||||
|
// Deprecated: Use collectors.NewBuildInfoCollector instead.
|
||||||
|
func NewBuildInfoCollector() Collector {
|
||||||
|
path, version, sum := "unknown", "unknown", "unknown"
|
||||||
|
if bi, ok := debug.ReadBuildInfo(); ok {
|
||||||
|
path = bi.Main.Path
|
||||||
|
version = bi.Main.Version
|
||||||
|
sum = bi.Main.Sum
|
||||||
|
}
|
||||||
|
c := &selfCollector{MustNewConstMetric(
|
||||||
|
NewDesc(
|
||||||
|
"go_build_info",
|
||||||
|
"Build information about the main Go module.",
|
||||||
|
nil, Labels{"path": path, "version": version, "checksum": sum},
|
||||||
|
),
|
||||||
|
GaugeValue, 1)}
|
||||||
|
c.init(c.self)
|
||||||
|
return c
|
||||||
|
}
|
128
vendor/github.com/prometheus/client_golang/prometheus/collector.go
generated
vendored
Normal file
128
vendor/github.com/prometheus/client_golang/prometheus/collector.go
generated
vendored
Normal file
|
@ -0,0 +1,128 @@
|
||||||
|
// Copyright 2014 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package prometheus
|
||||||
|
|
||||||
|
// Collector is the interface implemented by anything that can be used by
|
||||||
|
// Prometheus to collect metrics. A Collector has to be registered for
|
||||||
|
// collection. See Registerer.Register.
|
||||||
|
//
|
||||||
|
// The stock metrics provided by this package (Gauge, Counter, Summary,
|
||||||
|
// Histogram, Untyped) are also Collectors (which only ever collect one metric,
|
||||||
|
// namely itself). An implementer of Collector may, however, collect multiple
|
||||||
|
// metrics in a coordinated fashion and/or create metrics on the fly. Examples
|
||||||
|
// for collectors already implemented in this library are the metric vectors
|
||||||
|
// (i.e. collection of multiple instances of the same Metric but with different
|
||||||
|
// label values) like GaugeVec or SummaryVec, and the ExpvarCollector.
|
||||||
|
type Collector interface {
|
||||||
|
// Describe sends the super-set of all possible descriptors of metrics
|
||||||
|
// collected by this Collector to the provided channel and returns once
|
||||||
|
// the last descriptor has been sent. The sent descriptors fulfill the
|
||||||
|
// consistency and uniqueness requirements described in the Desc
|
||||||
|
// documentation.
|
||||||
|
//
|
||||||
|
// It is valid if one and the same Collector sends duplicate
|
||||||
|
// descriptors. Those duplicates are simply ignored. However, two
|
||||||
|
// different Collectors must not send duplicate descriptors.
|
||||||
|
//
|
||||||
|
// Sending no descriptor at all marks the Collector as “unchecked”,
|
||||||
|
// i.e. no checks will be performed at registration time, and the
|
||||||
|
// Collector may yield any Metric it sees fit in its Collect method.
|
||||||
|
//
|
||||||
|
// This method idempotently sends the same descriptors throughout the
|
||||||
|
// lifetime of the Collector. It may be called concurrently and
|
||||||
|
// therefore must be implemented in a concurrency safe way.
|
||||||
|
//
|
||||||
|
// If a Collector encounters an error while executing this method, it
|
||||||
|
// must send an invalid descriptor (created with NewInvalidDesc) to
|
||||||
|
// signal the error to the registry.
|
||||||
|
Describe(chan<- *Desc)
|
||||||
|
// Collect is called by the Prometheus registry when collecting
|
||||||
|
// metrics. The implementation sends each collected metric via the
|
||||||
|
// provided channel and returns once the last metric has been sent. The
|
||||||
|
// descriptor of each sent metric is one of those returned by Describe
|
||||||
|
// (unless the Collector is unchecked, see above). Returned metrics that
|
||||||
|
// share the same descriptor must differ in their variable label
|
||||||
|
// values.
|
||||||
|
//
|
||||||
|
// This method may be called concurrently and must therefore be
|
||||||
|
// implemented in a concurrency safe way. Blocking occurs at the expense
|
||||||
|
// of total performance of rendering all registered metrics. Ideally,
|
||||||
|
// Collector implementations support concurrent readers.
|
||||||
|
Collect(chan<- Metric)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DescribeByCollect is a helper to implement the Describe method of a custom
|
||||||
|
// Collector. It collects the metrics from the provided Collector and sends
|
||||||
|
// their descriptors to the provided channel.
|
||||||
|
//
|
||||||
|
// If a Collector collects the same metrics throughout its lifetime, its
|
||||||
|
// Describe method can simply be implemented as:
|
||||||
|
//
|
||||||
|
// func (c customCollector) Describe(ch chan<- *Desc) {
|
||||||
|
// DescribeByCollect(c, ch)
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// However, this will not work if the metrics collected change dynamically over
|
||||||
|
// the lifetime of the Collector in a way that their combined set of descriptors
|
||||||
|
// changes as well. The shortcut implementation will then violate the contract
|
||||||
|
// of the Describe method. If a Collector sometimes collects no metrics at all
|
||||||
|
// (for example vectors like CounterVec, GaugeVec, etc., which only collect
|
||||||
|
// metrics after a metric with a fully specified label set has been accessed),
|
||||||
|
// it might even get registered as an unchecked Collector (cf. the Register
|
||||||
|
// method of the Registerer interface). Hence, only use this shortcut
|
||||||
|
// implementation of Describe if you are certain to fulfill the contract.
|
||||||
|
//
|
||||||
|
// The Collector example demonstrates a use of DescribeByCollect.
|
||||||
|
func DescribeByCollect(c Collector, descs chan<- *Desc) {
|
||||||
|
metrics := make(chan Metric)
|
||||||
|
go func() {
|
||||||
|
c.Collect(metrics)
|
||||||
|
close(metrics)
|
||||||
|
}()
|
||||||
|
for m := range metrics {
|
||||||
|
descs <- m.Desc()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// selfCollector implements Collector for a single Metric so that the Metric
|
||||||
|
// collects itself. Add it as an anonymous field to a struct that implements
|
||||||
|
// Metric, and call init with the Metric itself as an argument.
|
||||||
|
type selfCollector struct {
|
||||||
|
self Metric
|
||||||
|
}
|
||||||
|
|
||||||
|
// init provides the selfCollector with a reference to the metric it is supposed
|
||||||
|
// to collect. It is usually called within the factory function to create a
|
||||||
|
// metric. See example.
|
||||||
|
func (c *selfCollector) init(self Metric) {
|
||||||
|
c.self = self
|
||||||
|
}
|
||||||
|
|
||||||
|
// Describe implements Collector.
|
||||||
|
func (c *selfCollector) Describe(ch chan<- *Desc) {
|
||||||
|
ch <- c.self.Desc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Collect implements Collector.
|
||||||
|
func (c *selfCollector) Collect(ch chan<- Metric) {
|
||||||
|
ch <- c.self
|
||||||
|
}
|
||||||
|
|
||||||
|
// collectorMetric is a metric that is also a collector.
|
||||||
|
// Because of selfCollector, most (if not all) Metrics in
|
||||||
|
// this package are also collectors.
|
||||||
|
type collectorMetric interface {
|
||||||
|
Metric
|
||||||
|
Collector
|
||||||
|
}
|
358
vendor/github.com/prometheus/client_golang/prometheus/counter.go
generated
vendored
Normal file
358
vendor/github.com/prometheus/client_golang/prometheus/counter.go
generated
vendored
Normal file
|
@ -0,0 +1,358 @@
|
||||||
|
// Copyright 2014 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package prometheus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"math"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
dto "github.com/prometheus/client_model/go"
|
||||||
|
"google.golang.org/protobuf/types/known/timestamppb"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Counter is a Metric that represents a single numerical value that only ever
|
||||||
|
// goes up. That implies that it cannot be used to count items whose number can
|
||||||
|
// also go down, e.g. the number of currently running goroutines. Those
|
||||||
|
// "counters" are represented by Gauges.
|
||||||
|
//
|
||||||
|
// A Counter is typically used to count requests served, tasks completed, errors
|
||||||
|
// occurred, etc.
|
||||||
|
//
|
||||||
|
// To create Counter instances, use NewCounter.
|
||||||
|
type Counter interface {
|
||||||
|
Metric
|
||||||
|
Collector
|
||||||
|
|
||||||
|
// Inc increments the counter by 1. Use Add to increment it by arbitrary
|
||||||
|
// non-negative values.
|
||||||
|
Inc()
|
||||||
|
// Add adds the given value to the counter. It panics if the value is <
|
||||||
|
// 0.
|
||||||
|
Add(float64)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExemplarAdder is implemented by Counters that offer the option of adding a
|
||||||
|
// value to the Counter together with an exemplar. Its AddWithExemplar method
|
||||||
|
// works like the Add method of the Counter interface but also replaces the
|
||||||
|
// currently saved exemplar (if any) with a new one, created from the provided
|
||||||
|
// value, the current time as timestamp, and the provided labels. Empty Labels
|
||||||
|
// will lead to a valid (label-less) exemplar. But if Labels is nil, the current
|
||||||
|
// exemplar is left in place. AddWithExemplar panics if the value is < 0, if any
|
||||||
|
// of the provided labels are invalid, or if the provided labels contain more
|
||||||
|
// than 128 runes in total.
|
||||||
|
type ExemplarAdder interface {
|
||||||
|
AddWithExemplar(value float64, exemplar Labels)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CounterOpts is an alias for Opts. See there for doc comments.
|
||||||
|
type CounterOpts Opts
|
||||||
|
|
||||||
|
// CounterVecOpts bundles the options to create a CounterVec metric.
|
||||||
|
// It is mandatory to set CounterOpts, see there for mandatory fields. VariableLabels
|
||||||
|
// is optional and can safely be left to its default value.
|
||||||
|
type CounterVecOpts struct {
|
||||||
|
CounterOpts
|
||||||
|
|
||||||
|
// VariableLabels are used to partition the metric vector by the given set
|
||||||
|
// of labels. Each label value will be constrained with the optional Constraint
|
||||||
|
// function, if provided.
|
||||||
|
VariableLabels ConstrainableLabels
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCounter creates a new Counter based on the provided CounterOpts.
|
||||||
|
//
|
||||||
|
// The returned implementation also implements ExemplarAdder. It is safe to
|
||||||
|
// perform the corresponding type assertion.
|
||||||
|
//
|
||||||
|
// The returned implementation tracks the counter value in two separate
|
||||||
|
// variables, a float64 and a uint64. The latter is used to track calls of the
|
||||||
|
// Inc method and calls of the Add method with a value that can be represented
|
||||||
|
// as a uint64. This allows atomic increments of the counter with optimal
|
||||||
|
// performance. (It is common to have an Inc call in very hot execution paths.)
|
||||||
|
// Both internal tracking values are added up in the Write method. This has to
|
||||||
|
// be taken into account when it comes to precision and overflow behavior.
|
||||||
|
func NewCounter(opts CounterOpts) Counter {
|
||||||
|
desc := NewDesc(
|
||||||
|
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||||
|
opts.Help,
|
||||||
|
nil,
|
||||||
|
opts.ConstLabels,
|
||||||
|
)
|
||||||
|
if opts.now == nil {
|
||||||
|
opts.now = time.Now
|
||||||
|
}
|
||||||
|
result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: opts.now}
|
||||||
|
result.init(result) // Init self-collection.
|
||||||
|
result.createdTs = timestamppb.New(opts.now())
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
type counter struct {
|
||||||
|
// valBits contains the bits of the represented float64 value, while
|
||||||
|
// valInt stores values that are exact integers. Both have to go first
|
||||||
|
// in the struct to guarantee alignment for atomic operations.
|
||||||
|
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
|
||||||
|
valBits uint64
|
||||||
|
valInt uint64
|
||||||
|
|
||||||
|
selfCollector
|
||||||
|
desc *Desc
|
||||||
|
|
||||||
|
createdTs *timestamppb.Timestamp
|
||||||
|
labelPairs []*dto.LabelPair
|
||||||
|
exemplar atomic.Value // Containing nil or a *dto.Exemplar.
|
||||||
|
|
||||||
|
// now is for testing purposes, by default it's time.Now.
|
||||||
|
now func() time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *counter) Desc() *Desc {
|
||||||
|
return c.desc
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *counter) Add(v float64) {
|
||||||
|
if v < 0 {
|
||||||
|
panic(errors.New("counter cannot decrease in value"))
|
||||||
|
}
|
||||||
|
|
||||||
|
ival := uint64(v)
|
||||||
|
if float64(ival) == v {
|
||||||
|
atomic.AddUint64(&c.valInt, ival)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
oldBits := atomic.LoadUint64(&c.valBits)
|
||||||
|
newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
|
||||||
|
if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *counter) AddWithExemplar(v float64, e Labels) {
|
||||||
|
c.Add(v)
|
||||||
|
c.updateExemplar(v, e)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *counter) Inc() {
|
||||||
|
atomic.AddUint64(&c.valInt, 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *counter) get() float64 {
|
||||||
|
fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
|
||||||
|
ival := atomic.LoadUint64(&c.valInt)
|
||||||
|
return fval + float64(ival)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *counter) Write(out *dto.Metric) error {
|
||||||
|
// Read the Exemplar first and the value second. This is to avoid a race condition
|
||||||
|
// where users see an exemplar for a not-yet-existing observation.
|
||||||
|
var exemplar *dto.Exemplar
|
||||||
|
if e := c.exemplar.Load(); e != nil {
|
||||||
|
exemplar = e.(*dto.Exemplar)
|
||||||
|
}
|
||||||
|
val := c.get()
|
||||||
|
return populateMetric(CounterValue, val, c.labelPairs, exemplar, out, c.createdTs)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *counter) updateExemplar(v float64, l Labels) {
|
||||||
|
if l == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
e, err := newExemplar(v, c.now(), l)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
c.exemplar.Store(e)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CounterVec is a Collector that bundles a set of Counters that all share the
|
||||||
|
// same Desc, but have different values for their variable labels. This is used
|
||||||
|
// if you want to count the same thing partitioned by various dimensions
|
||||||
|
// (e.g. number of HTTP requests, partitioned by response code and
|
||||||
|
// method). Create instances with NewCounterVec.
|
||||||
|
type CounterVec struct {
|
||||||
|
*MetricVec
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
|
||||||
|
// partitioned by the given label names.
|
||||||
|
func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
|
||||||
|
return V2.NewCounterVec(CounterVecOpts{
|
||||||
|
CounterOpts: opts,
|
||||||
|
VariableLabels: UnconstrainedLabels(labelNames),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCounterVec creates a new CounterVec based on the provided CounterVecOpts.
|
||||||
|
func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec {
|
||||||
|
desc := V2.NewDesc(
|
||||||
|
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||||
|
opts.Help,
|
||||||
|
opts.VariableLabels,
|
||||||
|
opts.ConstLabels,
|
||||||
|
)
|
||||||
|
if opts.now == nil {
|
||||||
|
opts.now = time.Now
|
||||||
|
}
|
||||||
|
return &CounterVec{
|
||||||
|
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
|
||||||
|
if len(lvs) != len(desc.variableLabels.names) {
|
||||||
|
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs))
|
||||||
|
}
|
||||||
|
result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: opts.now}
|
||||||
|
result.init(result) // Init self-collection.
|
||||||
|
result.createdTs = timestamppb.New(opts.now())
|
||||||
|
return result
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetMetricWithLabelValues returns the Counter for the given slice of label
|
||||||
|
// values (same order as the variable labels in Desc). If that combination of
|
||||||
|
// label values is accessed for the first time, a new Counter is created.
|
||||||
|
//
|
||||||
|
// It is possible to call this method without using the returned Counter to only
|
||||||
|
// create the new Counter but leave it at its starting value 0. See also the
|
||||||
|
// SummaryVec example.
|
||||||
|
//
|
||||||
|
// Keeping the Counter for later use is possible (and should be considered if
|
||||||
|
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
|
||||||
|
// Delete can be used to delete the Counter from the CounterVec. In that case,
|
||||||
|
// the Counter will still exist, but it will not be exported anymore, even if a
|
||||||
|
// Counter with the same label values is created later.
|
||||||
|
//
|
||||||
|
// An error is returned if the number of label values is not the same as the
|
||||||
|
// number of variable labels in Desc (minus any curried labels).
|
||||||
|
//
|
||||||
|
// Note that for more than one label value, this method is prone to mistakes
|
||||||
|
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
|
||||||
|
// an alternative to avoid that type of mistake. For higher label numbers, the
|
||||||
|
// latter has a much more readable (albeit more verbose) syntax, but it comes
|
||||||
|
// with a performance overhead (for creating and processing the Labels map).
|
||||||
|
// See also the GaugeVec example.
|
||||||
|
func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
|
||||||
|
metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
|
||||||
|
if metric != nil {
|
||||||
|
return metric.(Counter), err
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetMetricWith returns the Counter for the given Labels map (the label names
|
||||||
|
// must match those of the variable labels in Desc). If that label map is
|
||||||
|
// accessed for the first time, a new Counter is created. Implications of
|
||||||
|
// creating a Counter without using it and keeping the Counter for later use are
|
||||||
|
// the same as for GetMetricWithLabelValues.
|
||||||
|
//
|
||||||
|
// An error is returned if the number and names of the Labels are inconsistent
|
||||||
|
// with those of the variable labels in Desc (minus any curried labels).
|
||||||
|
//
|
||||||
|
// This method is used for the same purpose as
|
||||||
|
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
|
||||||
|
// methods.
|
||||||
|
func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
|
||||||
|
metric, err := v.MetricVec.GetMetricWith(labels)
|
||||||
|
if metric != nil {
|
||||||
|
return metric.(Counter), err
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithLabelValues works as GetMetricWithLabelValues, but panics where
|
||||||
|
// GetMetricWithLabelValues would have returned an error. Not returning an
|
||||||
|
// error allows shortcuts like
|
||||||
|
//
|
||||||
|
// myVec.WithLabelValues("404", "GET").Add(42)
|
||||||
|
func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
|
||||||
|
c, err := v.GetMetricWithLabelValues(lvs...)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
|
||||||
|
// returned an error. Not returning an error allows shortcuts like
|
||||||
|
//
|
||||||
|
// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
|
||||||
|
func (v *CounterVec) With(labels Labels) Counter {
|
||||||
|
c, err := v.GetMetricWith(labels)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// CurryWith returns a vector curried with the provided labels, i.e. the
|
||||||
|
// returned vector has those labels pre-set for all labeled operations performed
|
||||||
|
// on it. The cardinality of the curried vector is reduced accordingly. The
|
||||||
|
// order of the remaining labels stays the same (just with the curried labels
|
||||||
|
// taken out of the sequence – which is relevant for the
|
||||||
|
// (GetMetric)WithLabelValues methods). It is possible to curry a curried
|
||||||
|
// vector, but only with labels not yet used for currying before.
|
||||||
|
//
|
||||||
|
// The metrics contained in the CounterVec are shared between the curried and
|
||||||
|
// uncurried vectors. They are just accessed differently. Curried and uncurried
|
||||||
|
// vectors behave identically in terms of collection. Only one must be
|
||||||
|
// registered with a given registry (usually the uncurried version). The Reset
|
||||||
|
// method deletes all metrics, even if called on a curried vector.
|
||||||
|
func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) {
|
||||||
|
vec, err := v.MetricVec.CurryWith(labels)
|
||||||
|
if vec != nil {
|
||||||
|
return &CounterVec{vec}, err
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustCurryWith works as CurryWith but panics where CurryWith would have
|
||||||
|
// returned an error.
|
||||||
|
func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec {
|
||||||
|
vec, err := v.CurryWith(labels)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return vec
|
||||||
|
}
|
||||||
|
|
||||||
|
// CounterFunc is a Counter whose value is determined at collect time by calling a
|
||||||
|
// provided function.
|
||||||
|
//
|
||||||
|
// To create CounterFunc instances, use NewCounterFunc.
|
||||||
|
type CounterFunc interface {
|
||||||
|
Metric
|
||||||
|
Collector
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCounterFunc creates a new CounterFunc based on the provided
|
||||||
|
// CounterOpts. The value reported is determined by calling the given function
|
||||||
|
// from within the Write method. Take into account that metric collection may
|
||||||
|
// happen concurrently. If that results in concurrent calls to Write, like in
|
||||||
|
// the case where a CounterFunc is directly registered with Prometheus, the
|
||||||
|
// provided function must be concurrency-safe. The function should also honor
|
||||||
|
// the contract for a Counter (values only go up, not down), but compliance will
|
||||||
|
// not be checked.
|
||||||
|
//
|
||||||
|
// Check out the ExampleGaugeFunc examples for the similar GaugeFunc.
|
||||||
|
func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
|
||||||
|
return newValueFunc(NewDesc(
|
||||||
|
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||||
|
opts.Help,
|
||||||
|
nil,
|
||||||
|
opts.ConstLabels,
|
||||||
|
), CounterValue, function)
|
||||||
|
}
|
207
vendor/github.com/prometheus/client_golang/prometheus/desc.go
generated
vendored
Normal file
207
vendor/github.com/prometheus/client_golang/prometheus/desc.go
generated
vendored
Normal file
|
@ -0,0 +1,207 @@
|
||||||
|
// Copyright 2016 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package prometheus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/cespare/xxhash/v2"
|
||||||
|
dto "github.com/prometheus/client_model/go"
|
||||||
|
"github.com/prometheus/common/model"
|
||||||
|
"google.golang.org/protobuf/proto"
|
||||||
|
|
||||||
|
"github.com/prometheus/client_golang/prometheus/internal"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Desc is the descriptor used by every Prometheus Metric. It is essentially
|
||||||
|
// the immutable meta-data of a Metric. The normal Metric implementations
|
||||||
|
// included in this package manage their Desc under the hood. Users only have to
|
||||||
|
// deal with Desc if they use advanced features like the ExpvarCollector or
|
||||||
|
// custom Collectors and Metrics.
|
||||||
|
//
|
||||||
|
// Descriptors registered with the same registry have to fulfill certain
|
||||||
|
// consistency and uniqueness criteria if they share the same fully-qualified
|
||||||
|
// name: They must have the same help string and the same label names (aka label
|
||||||
|
// dimensions) in each, constLabels and variableLabels, but they must differ in
|
||||||
|
// the values of the constLabels.
|
||||||
|
//
|
||||||
|
// Descriptors that share the same fully-qualified names and the same label
|
||||||
|
// values of their constLabels are considered equal.
|
||||||
|
//
|
||||||
|
// Use NewDesc to create new Desc instances.
|
||||||
|
type Desc struct {
|
||||||
|
// fqName has been built from Namespace, Subsystem, and Name.
|
||||||
|
fqName string
|
||||||
|
// help provides some helpful information about this metric.
|
||||||
|
help string
|
||||||
|
// constLabelPairs contains precalculated DTO label pairs based on
|
||||||
|
// the constant labels.
|
||||||
|
constLabelPairs []*dto.LabelPair
|
||||||
|
// variableLabels contains names of labels and normalization function for
|
||||||
|
// which the metric maintains variable values.
|
||||||
|
variableLabels *compiledLabels
|
||||||
|
// id is a hash of the values of the ConstLabels and fqName. This
|
||||||
|
// must be unique among all registered descriptors and can therefore be
|
||||||
|
// used as an identifier of the descriptor.
|
||||||
|
id uint64
|
||||||
|
// dimHash is a hash of the label names (preset and variable) and the
|
||||||
|
// Help string. Each Desc with the same fqName must have the same
|
||||||
|
// dimHash.
|
||||||
|
dimHash uint64
|
||||||
|
// err is an error that occurred during construction. It is reported on
|
||||||
|
// registration time.
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
|
||||||
|
// and will be reported on registration time. variableLabels and constLabels can
|
||||||
|
// be nil if no such labels should be set. fqName must not be empty.
|
||||||
|
//
|
||||||
|
// variableLabels only contain the label names. Their label values are variable
|
||||||
|
// and therefore not part of the Desc. (They are managed within the Metric.)
|
||||||
|
//
|
||||||
|
// For constLabels, the label values are constant. Therefore, they are fully
|
||||||
|
// specified in the Desc. See the Collector example for a usage pattern.
|
||||||
|
func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
|
||||||
|
return V2.NewDesc(fqName, help, UnconstrainedLabels(variableLabels), constLabels)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
|
||||||
|
// and will be reported on registration time. variableLabels and constLabels can
|
||||||
|
// be nil if no such labels should be set. fqName must not be empty.
|
||||||
|
//
|
||||||
|
// variableLabels only contain the label names and normalization functions. Their
|
||||||
|
// label values are variable and therefore not part of the Desc. (They are managed
|
||||||
|
// within the Metric.)
|
||||||
|
//
|
||||||
|
// For constLabels, the label values are constant. Therefore, they are fully
|
||||||
|
// specified in the Desc. See the Collector example for a usage pattern.
|
||||||
|
func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, constLabels Labels) *Desc {
|
||||||
|
d := &Desc{
|
||||||
|
fqName: fqName,
|
||||||
|
help: help,
|
||||||
|
variableLabels: variableLabels.compile(),
|
||||||
|
}
|
||||||
|
if !model.IsValidMetricName(model.LabelValue(fqName)) {
|
||||||
|
d.err = fmt.Errorf("%q is not a valid metric name", fqName)
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
// labelValues contains the label values of const labels (in order of
|
||||||
|
// their sorted label names) plus the fqName (at position 0).
|
||||||
|
labelValues := make([]string, 1, len(constLabels)+1)
|
||||||
|
labelValues[0] = fqName
|
||||||
|
labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels.names))
|
||||||
|
labelNameSet := map[string]struct{}{}
|
||||||
|
// First add only the const label names and sort them...
|
||||||
|
for labelName := range constLabels {
|
||||||
|
if !checkLabelName(labelName) {
|
||||||
|
d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
labelNames = append(labelNames, labelName)
|
||||||
|
labelNameSet[labelName] = struct{}{}
|
||||||
|
}
|
||||||
|
sort.Strings(labelNames)
|
||||||
|
// ... so that we can now add const label values in the order of their names.
|
||||||
|
for _, labelName := range labelNames {
|
||||||
|
labelValues = append(labelValues, constLabels[labelName])
|
||||||
|
}
|
||||||
|
// Validate the const label values. They can't have a wrong cardinality, so
|
||||||
|
// use in len(labelValues) as expectedNumberOfValues.
|
||||||
|
if err := validateLabelValues(labelValues, len(labelValues)); err != nil {
|
||||||
|
d.err = err
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
// Now add the variable label names, but prefix them with something that
|
||||||
|
// cannot be in a regular label name. That prevents matching the label
|
||||||
|
// dimension with a different mix between preset and variable labels.
|
||||||
|
for _, label := range d.variableLabels.names {
|
||||||
|
if !checkLabelName(label) {
|
||||||
|
d.err = fmt.Errorf("%q is not a valid label name for metric %q", label, fqName)
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
labelNames = append(labelNames, "$"+label)
|
||||||
|
labelNameSet[label] = struct{}{}
|
||||||
|
}
|
||||||
|
if len(labelNames) != len(labelNameSet) {
|
||||||
|
d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName)
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
|
||||||
|
xxh := xxhash.New()
|
||||||
|
for _, val := range labelValues {
|
||||||
|
xxh.WriteString(val)
|
||||||
|
xxh.Write(separatorByteSlice)
|
||||||
|
}
|
||||||
|
d.id = xxh.Sum64()
|
||||||
|
// Sort labelNames so that order doesn't matter for the hash.
|
||||||
|
sort.Strings(labelNames)
|
||||||
|
// Now hash together (in this order) the help string and the sorted
|
||||||
|
// label names.
|
||||||
|
xxh.Reset()
|
||||||
|
xxh.WriteString(help)
|
||||||
|
xxh.Write(separatorByteSlice)
|
||||||
|
for _, labelName := range labelNames {
|
||||||
|
xxh.WriteString(labelName)
|
||||||
|
xxh.Write(separatorByteSlice)
|
||||||
|
}
|
||||||
|
d.dimHash = xxh.Sum64()
|
||||||
|
|
||||||
|
d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
|
||||||
|
for n, v := range constLabels {
|
||||||
|
d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
|
||||||
|
Name: proto.String(n),
|
||||||
|
Value: proto.String(v),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
sort.Sort(internal.LabelPairSorter(d.constLabelPairs))
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the
|
||||||
|
// provided error set. If a collector returning such a descriptor is registered,
|
||||||
|
// registration will fail with the provided error. NewInvalidDesc can be used by
|
||||||
|
// a Collector to signal inability to describe itself.
|
||||||
|
func NewInvalidDesc(err error) *Desc {
|
||||||
|
return &Desc{
|
||||||
|
err: err,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Desc) String() string {
|
||||||
|
lpStrings := make([]string, 0, len(d.constLabelPairs))
|
||||||
|
for _, lp := range d.constLabelPairs {
|
||||||
|
lpStrings = append(
|
||||||
|
lpStrings,
|
||||||
|
fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
vlStrings := make([]string, 0, len(d.variableLabels.names))
|
||||||
|
for _, vl := range d.variableLabels.names {
|
||||||
|
if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil {
|
||||||
|
vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl))
|
||||||
|
} else {
|
||||||
|
vlStrings = append(vlStrings, vl)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return fmt.Sprintf(
|
||||||
|
"Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: {%s}}",
|
||||||
|
d.fqName,
|
||||||
|
d.help,
|
||||||
|
strings.Join(lpStrings, ","),
|
||||||
|
strings.Join(vlStrings, ","),
|
||||||
|
)
|
||||||
|
}
|
210
vendor/github.com/prometheus/client_golang/prometheus/doc.go
generated
vendored
Normal file
210
vendor/github.com/prometheus/client_golang/prometheus/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,210 @@
|
||||||
|
// Copyright 2014 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package prometheus is the core instrumentation package. It provides metrics
|
||||||
|
// primitives to instrument code for monitoring. It also offers a registry for
|
||||||
|
// metrics. Sub-packages allow to expose the registered metrics via HTTP
|
||||||
|
// (package promhttp) or push them to a Pushgateway (package push). There is
|
||||||
|
// also a sub-package promauto, which provides metrics constructors with
|
||||||
|
// automatic registration.
|
||||||
|
//
|
||||||
|
// All exported functions and methods are safe to be used concurrently unless
|
||||||
|
// specified otherwise.
|
||||||
|
//
|
||||||
|
// # A Basic Example
|
||||||
|
//
|
||||||
|
// As a starting point, a very basic usage example:
|
||||||
|
//
|
||||||
|
// package main
|
||||||
|
//
|
||||||
|
// import (
|
||||||
|
// "log"
|
||||||
|
// "net/http"
|
||||||
|
//
|
||||||
|
// "github.com/prometheus/client_golang/prometheus"
|
||||||
|
// "github.com/prometheus/client_golang/prometheus/promhttp"
|
||||||
|
// )
|
||||||
|
//
|
||||||
|
// type metrics struct {
|
||||||
|
// cpuTemp prometheus.Gauge
|
||||||
|
// hdFailures *prometheus.CounterVec
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// func NewMetrics(reg prometheus.Registerer) *metrics {
|
||||||
|
// m := &metrics{
|
||||||
|
// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{
|
||||||
|
// Name: "cpu_temperature_celsius",
|
||||||
|
// Help: "Current temperature of the CPU.",
|
||||||
|
// }),
|
||||||
|
// hdFailures: prometheus.NewCounterVec(
|
||||||
|
// prometheus.CounterOpts{
|
||||||
|
// Name: "hd_errors_total",
|
||||||
|
// Help: "Number of hard-disk errors.",
|
||||||
|
// },
|
||||||
|
// []string{"device"},
|
||||||
|
// ),
|
||||||
|
// }
|
||||||
|
// reg.MustRegister(m.cpuTemp)
|
||||||
|
// reg.MustRegister(m.hdFailures)
|
||||||
|
// return m
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// func main() {
|
||||||
|
// // Create a non-global registry.
|
||||||
|
// reg := prometheus.NewRegistry()
|
||||||
|
//
|
||||||
|
// // Create new metrics and register them using the custom registry.
|
||||||
|
// m := NewMetrics(reg)
|
||||||
|
// // Set values for the new created metrics.
|
||||||
|
// m.cpuTemp.Set(65.3)
|
||||||
|
// m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
|
||||||
|
//
|
||||||
|
// // Expose metrics and custom registry via an HTTP server
|
||||||
|
// // using the HandleFor function. "/metrics" is the usual endpoint for that.
|
||||||
|
// http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}))
|
||||||
|
// log.Fatal(http.ListenAndServe(":8080", nil))
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// This is a complete program that exports two metrics, a Gauge and a Counter,
|
||||||
|
// the latter with a label attached to turn it into a (one-dimensional) vector.
|
||||||
|
// It register the metrics using a custom registry and exposes them via an HTTP server
|
||||||
|
// on the /metrics endpoint.
|
||||||
|
//
// # Metrics
//
// The number of exported identifiers in this package might appear a bit
// overwhelming. However, in addition to the basic plumbing shown in the example
// above, you only need to understand the different metric types and their
// vector versions for basic usage. Furthermore, if you are not concerned with
// fine-grained control of when and how to register metrics with the registry,
// have a look at the promauto package, which will effectively allow you to
// ignore registration altogether in simple cases.
//
// Above, you have already touched the Counter and the Gauge. There are two more
// advanced metric types: the Summary and Histogram. A more thorough description
// of those four metric types can be found in the Prometheus docs:
// https://prometheus.io/docs/concepts/metric_types/
//
// In addition to the fundamental metric types Gauge, Counter, Summary, and
// Histogram, a very important part of the Prometheus data model is the
// partitioning of samples along dimensions called labels, which results in
// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
// and HistogramVec.
//
// While only the fundamental metric types implement the Metric interface, both
// the metrics and their vector versions implement the Collector interface. A
// Collector manages the collection of a number of Metrics, but for convenience,
// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, and
// Histogram are interfaces themselves while GaugeVec, CounterVec, SummaryVec,
// and HistogramVec are not.
//
// To create instances of Metrics and their vector versions, you need a suitable
// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts.
//
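// For instance (a minimal sketch, not an exhaustive tour of the options), a
// Histogram and a labeled counter vector can be created from their respective
// Opts structs like this; the metric names and label names here are purely
// illustrative:
//
//	requestDuration := prometheus.NewHistogram(prometheus.HistogramOpts{
//		Name:    "http_request_duration_seconds",
//		Help:    "Duration of HTTP requests.",
//		Buckets: prometheus.DefBuckets,
//	})
//	requestsTotal := prometheus.NewCounterVec(
//		prometheus.CounterOpts{
//			Name: "http_requests_total",
//			Help: "Number of HTTP requests.",
//		},
//		[]string{"code", "method"},
//	)
//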
// # Custom Collectors and constant Metrics
//
// While you could create your own implementations of Metric, most likely you
// will only ever implement the Collector interface on your own. At first
// glance, a custom Collector seems handy to bundle Metrics for common
// registration (with the prime example of the different metric vectors above,
// which bundle all the metrics of the same name but with different labels).
//
// There is a more involved use case, too: If you already have metrics
// available, created outside of the Prometheus context, you don't need the
// interface of the various Metric types. You essentially want to mirror the
// existing numbers into Prometheus Metrics during collection. Implementing
// the Collector interface yourself is perfect for that. You can create
// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
// NewConstSummary (and their respective Must… versions). NewConstMetric is used
// for all metric types with just a float64 as their value: Counter, Gauge, and
// a special “type” called Untyped. Use the latter if you are not sure if the
// mirrored metric is a Counter or a Gauge. Creation of the Metric instance
// happens in the Collect method. The Describe method has to return separate
// Desc instances, representative of the “throw-away” metrics to be created
// later. NewDesc comes in handy to create those Desc instances. Alternatively,
// you could return no Desc at all, which will mark the Collector “unchecked”.
// No checks are performed at registration time, but metric consistency will
// still be ensured at scrape time, i.e. any inconsistencies will lead to scrape
// errors. Thus, with unchecked Collectors, the responsibility to not collect
// metrics that lead to inconsistencies in the total scrape result lies with the
// implementer of the Collector. While this is not a desirable state, it is
// sometimes necessary. The typical use case is a situation where the exact
// metrics to be returned by a Collector cannot be predicted at registration
// time, but the implementer has sufficient knowledge of the whole system to
// guarantee metric consistency.
//
// The Collector example illustrates the use case. You can also look at the
// source code of the processCollector (mirroring process metrics), the
// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
// metrics) as examples that are used in this package itself.
//
// If you just need to call a function to get a single float value to collect as
// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
// shortcuts.
//
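// As a rough illustration of that pattern (the queueCollector type and the
// currentQueueDepth helper are invented for this sketch and are not part of
// this package), a Collector that mirrors an externally maintained value
// could look like:
//
//	type queueCollector struct {
//		depthDesc *prometheus.Desc
//	}
//
//	func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
//		ch <- c.depthDesc
//	}
//
//	func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
//		// The Metric is created on the fly at collection time.
//		ch <- prometheus.MustNewConstMetric(c.depthDesc, prometheus.GaugeValue, currentQueueDepth())
//	}
//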
// # Advanced Uses of the Registry
//
// While MustRegister is by far the most common way of registering a Collector,
// sometimes you might want to handle the errors the registration might cause.
// As suggested by the name, MustRegister panics if an error occurs. With the
// Register function, the error is returned and can be handled.
//
// An error is returned if the registered Collector is incompatible or
// inconsistent with already registered metrics. The registry aims for
// consistency of the collected metrics according to the Prometheus data model.
// Inconsistencies are ideally detected at registration time, not at collect
// time. The former will usually be detected at start-up time of a program,
// while the latter will only happen at scrape time, possibly not even on the
// first scrape if the inconsistency only becomes relevant later. That is the
// main reason why a Collector and a Metric have to describe themselves to the
// registry.
//
// So far, everything we did operated on the so-called default registry, as it
// can be found in the global DefaultRegisterer variable. With NewRegistry, you
// can create a custom registry, or you can even implement the Registerer or
// Gatherer interfaces yourself. The methods Register and Unregister work in the
// same way on a custom registry as the global functions Register and Unregister
// on the default registry.
//
// There are a number of uses for custom registries: You can use registries with
// special properties, see NewPedanticRegistry. You can avoid global state, as
// it is imposed by the DefaultRegisterer. You can use multiple registries at
// the same time to expose different metrics in different ways. You can use
// separate registries for testing purposes.
//
// Also note that the DefaultRegisterer comes registered with a Collector for Go
// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
// NewProcessCollector). With a custom registry, you are in control and decide
// yourself about the Collectors to register.
//
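// As a brief, illustrative sketch of handling that error (myCollector is a
// placeholder for any Collector, and the standard errors and log packages are
// assumed to be imported), registration against a pedantic registry might
// look like:
//
//	reg := prometheus.NewPedanticRegistry()
//	if err := reg.Register(myCollector); err != nil {
//		var are prometheus.AlreadyRegisteredError
//		if errors.As(err, &are) {
//			// An equivalent Collector was registered before; keep using that one.
//			myCollector = are.ExistingCollector
//		} else {
//			log.Fatal(err)
//		}
//	}
//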
// # HTTP Exposition
//
// The Registry implements the Gatherer interface. The caller of the Gather
// method can then expose the gathered metrics in some way. Usually, the metrics
// are served via HTTP on the /metrics endpoint. That's happening in the example
// above. The tools to expose metrics via HTTP are in the promhttp sub-package.
//
// # Pushing to the Pushgateway
//
// Functions for pushing to the Pushgateway can be found in the push sub-package.
//
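// As a hedged sketch (the Pushgateway address and job name below are
// placeholders, and the push sub-package is assumed to be imported), pushing a
// metric with that sub-package looks roughly like:
//
//	completionTime := prometheus.NewGauge(prometheus.GaugeOpts{
//		Name: "db_backup_last_completion_timestamp_seconds",
//		Help: "The timestamp of the last successful completion of a DB backup.",
//	})
//	completionTime.SetToCurrentTime()
//	if err := push.New("http://pushgateway:9091", "db_backup").
//		Collector(completionTime).
//		Push(); err != nil {
//		log.Println("Could not push completion time to Pushgateway:", err)
//	}
//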
// # Graphite Bridge
//
// Functions and examples to push metrics from a Gatherer to Graphite can be
// found in the graphite sub-package.
//
// # Other Means of Exposition
//
// More ways of exposing metrics can easily be added by following the approaches
// of the existing implementations.
package prometheus
86 vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go generated vendored Normal file
@ -0,0 +1,86 @@
|
||||||
|
// Copyright 2014 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package prometheus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"expvar"
|
||||||
|
)
|
||||||
|
|
||||||
|
type expvarCollector struct {
|
||||||
|
exports map[string]*Desc
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewExpvarCollector is the obsolete version of collectors.NewExpvarCollector.
|
||||||
|
// See there for documentation.
|
||||||
|
//
|
||||||
|
// Deprecated: Use collectors.NewExpvarCollector instead.
|
||||||
|
func NewExpvarCollector(exports map[string]*Desc) Collector {
|
||||||
|
return &expvarCollector{
|
||||||
|
exports: exports,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Describe implements Collector.
|
||||||
|
func (e *expvarCollector) Describe(ch chan<- *Desc) {
|
||||||
|
for _, desc := range e.exports {
|
||||||
|
ch <- desc
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Collect implements Collector.
|
||||||
|
func (e *expvarCollector) Collect(ch chan<- Metric) {
|
||||||
|
for name, desc := range e.exports {
|
||||||
|
var m Metric
|
||||||
|
expVar := expvar.Get(name)
|
||||||
|
if expVar == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
var v interface{}
|
||||||
|
labels := make([]string, len(desc.variableLabels.names))
|
||||||
|
if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
|
||||||
|
ch <- NewInvalidMetric(desc, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
var processValue func(v interface{}, i int)
|
||||||
|
processValue = func(v interface{}, i int) {
|
||||||
|
if i >= len(labels) {
|
||||||
|
copiedLabels := append(make([]string, 0, len(labels)), labels...)
|
||||||
|
switch v := v.(type) {
|
||||||
|
case float64:
|
||||||
|
m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
|
||||||
|
case bool:
|
||||||
|
if v {
|
||||||
|
m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
|
||||||
|
} else {
|
||||||
|
m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ch <- m
|
||||||
|
return
|
||||||
|
}
|
||||||
|
vm, ok := v.(map[string]interface{})
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for lv, val := range vm {
|
||||||
|
labels[i] = lv
|
||||||
|
processValue(val, i+1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
processValue(v, 0)
|
||||||
|
}
|
||||||
|
}
|
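A minimal, illustrative use of the non-deprecated replacement mentioned above (the expvar name "open_connections" and the mirrored metric name are assumptions for this sketch; collectors comes from "github.com/prometheus/client_golang/prometheus/collectors" and expvar from the standard library):

	openConns := expvar.NewInt("open_connections")
	openConns.Set(42)

	reg := prometheus.NewRegistry()
	reg.MustRegister(collectors.NewExpvarCollector(map[string]*prometheus.Desc{
		"open_connections": prometheus.NewDesc(
			"open_connections",
			"Currently open connections, mirrored from expvar.",
			nil, nil,
		),
	}))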
42 vendor/github.com/prometheus/client_golang/prometheus/fnv.go generated vendored Normal file
@ -0,0 +1,42 @@
|
||||||
|
// Copyright 2018 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package prometheus
|
||||||
|
|
||||||
|
// Inline and byte-free variant of hash/fnv's fnv64a.
|
||||||
|
|
||||||
|
const (
|
||||||
|
offset64 = 14695981039346656037
|
||||||
|
prime64 = 1099511628211
|
||||||
|
)
|
||||||
|
|
||||||
|
// hashNew initializes a new fnv64a hash value.
|
||||||
|
func hashNew() uint64 {
|
||||||
|
return offset64
|
||||||
|
}
|
||||||
|
|
||||||
|
// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
|
||||||
|
func hashAdd(h uint64, s string) uint64 {
|
||||||
|
for i := 0; i < len(s); i++ {
|
||||||
|
h ^= uint64(s[i])
|
||||||
|
h *= prime64
|
||||||
|
}
|
||||||
|
return h
|
||||||
|
}
|
||||||
|
|
||||||
|
// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
|
||||||
|
func hashAddByte(h uint64, b byte) uint64 {
|
||||||
|
h ^= uint64(b)
|
||||||
|
h *= prime64
|
||||||
|
return h
|
||||||
|
}
|
311 vendor/github.com/prometheus/client_golang/prometheus/gauge.go generated vendored Normal file
@ -0,0 +1,311 @@
|
||||||
|
// Copyright 2014 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package prometheus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
dto "github.com/prometheus/client_model/go"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Gauge is a Metric that represents a single numerical value that can
|
||||||
|
// arbitrarily go up and down.
|
||||||
|
//
|
||||||
|
// A Gauge is typically used for measured values like temperatures or current
|
||||||
|
// memory usage, but also "counts" that can go up and down, like the number of
|
||||||
|
// running goroutines.
|
||||||
|
//
|
||||||
|
// To create Gauge instances, use NewGauge.
|
||||||
|
type Gauge interface {
|
||||||
|
Metric
|
||||||
|
Collector
|
||||||
|
|
||||||
|
// Set sets the Gauge to an arbitrary value.
|
||||||
|
Set(float64)
|
||||||
|
// Inc increments the Gauge by 1. Use Add to increment it by arbitrary
|
||||||
|
// values.
|
||||||
|
Inc()
|
||||||
|
// Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary
|
||||||
|
// values.
|
||||||
|
Dec()
|
||||||
|
// Add adds the given value to the Gauge. (The value can be negative,
|
||||||
|
// resulting in a decrease of the Gauge.)
|
||||||
|
Add(float64)
|
||||||
|
// Sub subtracts the given value from the Gauge. (The value can be
|
||||||
|
// negative, resulting in an increase of the Gauge.)
|
||||||
|
Sub(float64)
|
||||||
|
|
||||||
|
// SetToCurrentTime sets the Gauge to the current Unix time in seconds.
|
||||||
|
SetToCurrentTime()
|
||||||
|
}
|
||||||
|
|
||||||
|
// GaugeOpts is an alias for Opts. See there for doc comments.
|
||||||
|
type GaugeOpts Opts
|
||||||
|
|
||||||
|
// GaugeVecOpts bundles the options to create a GaugeVec metric.
|
||||||
|
// It is mandatory to set GaugeOpts, see there for mandatory fields. VariableLabels
|
||||||
|
// is optional and can safely be left to its default value.
|
||||||
|
type GaugeVecOpts struct {
|
||||||
|
GaugeOpts
|
||||||
|
|
||||||
|
// VariableLabels are used to partition the metric vector by the given set
|
||||||
|
// of labels. Each label value will be constrained with the optional Constraint
|
||||||
|
// function, if provided.
|
||||||
|
VariableLabels ConstrainableLabels
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGauge creates a new Gauge based on the provided GaugeOpts.
|
||||||
|
//
|
||||||
|
// The returned implementation is optimized for a fast Set method. If you have a
|
||||||
|
// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick
|
||||||
|
// the former. For example, the Inc method of the returned Gauge is slower than
|
||||||
|
// the Inc method of a Counter returned by NewCounter. This matches the typical
|
||||||
|
// scenarios for Gauges and Counters, where the former tends to be Set-heavy and
|
||||||
|
// the latter Inc-heavy.
|
||||||
|
func NewGauge(opts GaugeOpts) Gauge {
|
||||||
|
desc := NewDesc(
|
||||||
|
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||||
|
opts.Help,
|
||||||
|
nil,
|
||||||
|
opts.ConstLabels,
|
||||||
|
)
|
||||||
|
result := &gauge{desc: desc, labelPairs: desc.constLabelPairs}
|
||||||
|
result.init(result) // Init self-collection.
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
type gauge struct {
|
||||||
|
// valBits contains the bits of the represented float64 value. It has
|
||||||
|
// to go first in the struct to guarantee alignment for atomic
|
||||||
|
// operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
|
||||||
|
valBits uint64
|
||||||
|
|
||||||
|
selfCollector
|
||||||
|
|
||||||
|
desc *Desc
|
||||||
|
labelPairs []*dto.LabelPair
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *gauge) Desc() *Desc {
|
||||||
|
return g.desc
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *gauge) Set(val float64) {
|
||||||
|
atomic.StoreUint64(&g.valBits, math.Float64bits(val))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *gauge) SetToCurrentTime() {
|
||||||
|
g.Set(float64(time.Now().UnixNano()) / 1e9)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *gauge) Inc() {
|
||||||
|
g.Add(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *gauge) Dec() {
|
||||||
|
g.Add(-1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *gauge) Add(val float64) {
|
||||||
|
for {
|
||||||
|
oldBits := atomic.LoadUint64(&g.valBits)
|
||||||
|
newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
|
||||||
|
if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *gauge) Sub(val float64) {
|
||||||
|
g.Add(val * -1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *gauge) Write(out *dto.Metric) error {
|
||||||
|
val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
|
||||||
|
return populateMetric(GaugeValue, val, g.labelPairs, nil, out, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GaugeVec is a Collector that bundles a set of Gauges that all share the same
|
||||||
|
// Desc, but have different values for their variable labels. This is used if
|
||||||
|
// you want to count the same thing partitioned by various dimensions
|
||||||
|
// (e.g. number of operations queued, partitioned by user and operation
|
||||||
|
// type). Create instances with NewGaugeVec.
|
||||||
|
type GaugeVec struct {
|
||||||
|
*MetricVec
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
|
||||||
|
// partitioned by the given label names.
|
||||||
|
func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
|
||||||
|
return V2.NewGaugeVec(GaugeVecOpts{
|
||||||
|
GaugeOpts: opts,
|
||||||
|
VariableLabels: UnconstrainedLabels(labelNames),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGaugeVec creates a new GaugeVec based on the provided GaugeVecOpts.
|
||||||
|
func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec {
|
||||||
|
desc := V2.NewDesc(
|
||||||
|
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||||
|
opts.Help,
|
||||||
|
opts.VariableLabels,
|
||||||
|
opts.ConstLabels,
|
||||||
|
)
|
||||||
|
return &GaugeVec{
|
||||||
|
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
|
||||||
|
if len(lvs) != len(desc.variableLabels.names) {
|
||||||
|
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs))
|
||||||
|
}
|
||||||
|
result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)}
|
||||||
|
result.init(result) // Init self-collection.
|
||||||
|
return result
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetMetricWithLabelValues returns the Gauge for the given slice of label
|
||||||
|
// values (same order as the variable labels in Desc). If that combination of
|
||||||
|
// label values is accessed for the first time, a new Gauge is created.
|
||||||
|
//
|
||||||
|
// It is possible to call this method without using the returned Gauge to only
|
||||||
|
// create the new Gauge but leave it at its starting value 0. See also the
|
||||||
|
// SummaryVec example.
|
||||||
|
//
|
||||||
|
// Keeping the Gauge for later use is possible (and should be considered if
|
||||||
|
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
|
||||||
|
// Delete can be used to delete the Gauge from the GaugeVec. In that case, the
|
||||||
|
// Gauge will still exist, but it will not be exported anymore, even if a
|
||||||
|
// Gauge with the same label values is created later. See also the CounterVec
|
||||||
|
// example.
|
||||||
|
//
|
||||||
|
// An error is returned if the number of label values is not the same as the
|
||||||
|
// number of variable labels in Desc (minus any curried labels).
|
||||||
|
//
|
||||||
|
// Note that for more than one label value, this method is prone to mistakes
|
||||||
|
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
|
||||||
|
// an alternative to avoid that type of mistake. For higher label numbers, the
|
||||||
|
// latter has a much more readable (albeit more verbose) syntax, but it comes
|
||||||
|
// with a performance overhead (for creating and processing the Labels map).
|
||||||
|
func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
|
||||||
|
metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
|
||||||
|
if metric != nil {
|
||||||
|
return metric.(Gauge), err
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetMetricWith returns the Gauge for the given Labels map (the label names
|
||||||
|
// must match those of the variable labels in Desc). If that label map is
|
||||||
|
// accessed for the first time, a new Gauge is created. Implications of
|
||||||
|
// creating a Gauge without using it and keeping the Gauge for later use are
|
||||||
|
// the same as for GetMetricWithLabelValues.
|
||||||
|
//
|
||||||
|
// An error is returned if the number and names of the Labels are inconsistent
|
||||||
|
// with those of the variable labels in Desc (minus any curried labels).
|
||||||
|
//
|
||||||
|
// This method is used for the same purpose as
|
||||||
|
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
|
||||||
|
// methods.
|
||||||
|
func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
|
||||||
|
metric, err := v.MetricVec.GetMetricWith(labels)
|
||||||
|
if metric != nil {
|
||||||
|
return metric.(Gauge), err
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithLabelValues works as GetMetricWithLabelValues, but panics where
|
||||||
|
// GetMetricWithLabelValues would have returned an error. Not returning an
|
||||||
|
// error allows shortcuts like
|
||||||
|
//
|
||||||
|
// myVec.WithLabelValues("404", "GET").Add(42)
|
||||||
|
func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
|
||||||
|
g, err := v.GetMetricWithLabelValues(lvs...)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return g
|
||||||
|
}
|
||||||
|
|
||||||
|
// With works as GetMetricWith, but panics where GetMetricWith would have
|
||||||
|
// returned an error. Not returning an error allows shortcuts like
|
||||||
|
//
|
||||||
|
// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
|
||||||
|
func (v *GaugeVec) With(labels Labels) Gauge {
|
||||||
|
g, err := v.GetMetricWith(labels)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return g
|
||||||
|
}
|
||||||
|
|
||||||
|
// CurryWith returns a vector curried with the provided labels, i.e. the
|
||||||
|
// returned vector has those labels pre-set for all labeled operations performed
|
||||||
|
// on it. The cardinality of the curried vector is reduced accordingly. The
|
||||||
|
// order of the remaining labels stays the same (just with the curried labels
|
||||||
|
// taken out of the sequence – which is relevant for the
|
||||||
|
// (GetMetric)WithLabelValues methods). It is possible to curry a curried
|
||||||
|
// vector, but only with labels not yet used for currying before.
|
||||||
|
//
|
||||||
|
// The metrics contained in the GaugeVec are shared between the curried and
|
||||||
|
// uncurried vectors. They are just accessed differently. Curried and uncurried
|
||||||
|
// vectors behave identically in terms of collection. Only one must be
|
||||||
|
// registered with a given registry (usually the uncurried version). The Reset
|
||||||
|
// method deletes all metrics, even if called on a curried vector.
|
||||||
|
func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) {
|
||||||
|
vec, err := v.MetricVec.CurryWith(labels)
|
||||||
|
if vec != nil {
|
||||||
|
return &GaugeVec{vec}, err
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustCurryWith works as CurryWith but panics where CurryWith would have
|
||||||
|
// returned an error.
|
||||||
|
func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec {
|
||||||
|
vec, err := v.CurryWith(labels)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return vec
|
||||||
|
}
|
||||||
|
|
||||||
|
// GaugeFunc is a Gauge whose value is determined at collect time by calling a
|
||||||
|
// provided function.
|
||||||
|
//
|
||||||
|
// To create GaugeFunc instances, use NewGaugeFunc.
|
||||||
|
type GaugeFunc interface {
|
||||||
|
Metric
|
||||||
|
Collector
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
|
||||||
|
// value reported is determined by calling the given function from within the
|
||||||
|
// Write method. Take into account that metric collection may happen
|
||||||
|
// concurrently. Therefore, it must be safe to call the provided function
|
||||||
|
// concurrently.
|
||||||
|
//
|
||||||
|
// NewGaugeFunc is a good way to create an “info” style metric with a constant
|
||||||
|
// value of 1. Example:
|
||||||
|
// https://github.com/prometheus/common/blob/8558a5b7db3c84fa38b4766966059a7bd5bfa2ee/version/info.go#L36-L56
|
||||||
|
func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
|
||||||
|
return newValueFunc(NewDesc(
|
||||||
|
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||||
|
opts.Help,
|
||||||
|
nil,
|
||||||
|
opts.ConstLabels,
|
||||||
|
), GaugeValue, function)
|
||||||
|
}
|
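A brief usage sketch tying together the GaugeVec, currying, and GaugeFunc APIs documented above (metric names, label names, and numeric values are made up for illustration):

	queueDepth := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "worker_queue_depth",
		Help: "Current depth of each worker queue.",
	}, []string{"pool", "priority"})

	// Fix the "pool" label once, then only vary "priority".
	defaultPool := queueDepth.MustCurryWith(prometheus.Labels{"pool": "default"})
	defaultPool.WithLabelValues("high").Set(3)
	defaultPool.With(prometheus.Labels{"priority": "low"}).Set(17)

	// An "info"-style metric with a constant value of 1.
	buildInfo := prometheus.NewGaugeFunc(prometheus.GaugeOpts{
		Name:        "app_build_info",
		Help:        "Build information.",
		ConstLabels: prometheus.Labels{"version": "1.2.3"},
	}, func() float64 { return 1 })
	_ = buildInfo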
26 vendor/github.com/prometheus/client_golang/prometheus/get_pid.go generated vendored Normal file
@ -0,0 +1,26 @@
|
||||||
|
// Copyright 2015 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
//go:build !js || wasm
|
||||||
|
// +build !js wasm
|
||||||
|
|
||||||
|
package prometheus
|
||||||
|
|
||||||
|
import "os"
|
||||||
|
|
||||||
|
func getPIDFn() func() (int, error) {
|
||||||
|
pid := os.Getpid()
|
||||||
|
return func() (int, error) {
|
||||||
|
return pid, nil
|
||||||
|
}
|
||||||
|
}
|
23 vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go generated vendored Normal file
@ -0,0 +1,23 @@
|
||||||
|
// Copyright 2015 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
//go:build js && !wasm
|
||||||
|
// +build js,!wasm
|
||||||
|
|
||||||
|
package prometheus
|
||||||
|
|
||||||
|
func getPIDFn() func() (int, error) {
|
||||||
|
return func() (int, error) {
|
||||||
|
return 1, nil
|
||||||
|
}
|
||||||
|
}
|
281 vendor/github.com/prometheus/client_golang/prometheus/go_collector.go generated vendored Normal file
@ -0,0 +1,281 @@
|
||||||
|
// Copyright 2018 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package prometheus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"runtime"
|
||||||
|
"runtime/debug"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats.
|
||||||
|
// From Go 1.17 those similar (and better) statistics are provided by runtime/metrics, so
|
||||||
|
// while eval closure works on runtime.MemStats, the struct from Go 1.17+ is
|
||||||
|
// populated using runtime/metrics.
|
||||||
|
func goRuntimeMemStats() memStatsMetrics {
|
||||||
|
return memStatsMetrics{
|
||||||
|
{
|
||||||
|
desc: NewDesc(
|
||||||
|
memstatNamespace("alloc_bytes"),
|
||||||
|
"Number of bytes allocated and still in use.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
|
||||||
|
valType: GaugeValue,
|
||||||
|
}, {
|
||||||
|
desc: NewDesc(
|
||||||
|
memstatNamespace("alloc_bytes_total"),
|
||||||
|
"Total number of bytes allocated, even if freed.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
|
||||||
|
valType: CounterValue,
|
||||||
|
}, {
|
||||||
|
desc: NewDesc(
|
||||||
|
memstatNamespace("sys_bytes"),
|
||||||
|
"Number of bytes obtained from system.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
|
||||||
|
valType: GaugeValue,
|
||||||
|
}, {
|
||||||
|
desc: NewDesc(
|
||||||
|
memstatNamespace("lookups_total"),
|
||||||
|
"Total number of pointer lookups.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
|
||||||
|
valType: CounterValue,
|
||||||
|
}, {
|
||||||
|
desc: NewDesc(
|
||||||
|
memstatNamespace("mallocs_total"),
|
||||||
|
"Total number of mallocs.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
|
||||||
|
valType: CounterValue,
|
||||||
|
}, {
|
||||||
|
desc: NewDesc(
|
||||||
|
memstatNamespace("frees_total"),
|
||||||
|
"Total number of frees.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
|
||||||
|
valType: CounterValue,
|
||||||
|
}, {
|
||||||
|
desc: NewDesc(
|
||||||
|
memstatNamespace("heap_alloc_bytes"),
|
||||||
|
"Number of heap bytes allocated and still in use.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
|
||||||
|
valType: GaugeValue,
|
||||||
|
}, {
|
||||||
|
desc: NewDesc(
|
||||||
|
memstatNamespace("heap_sys_bytes"),
|
||||||
|
"Number of heap bytes obtained from system.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
|
||||||
|
valType: GaugeValue,
|
||||||
|
}, {
|
||||||
|
desc: NewDesc(
|
||||||
|
memstatNamespace("heap_idle_bytes"),
|
||||||
|
"Number of heap bytes waiting to be used.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
|
||||||
|
valType: GaugeValue,
|
||||||
|
}, {
|
||||||
|
desc: NewDesc(
|
||||||
|
memstatNamespace("heap_inuse_bytes"),
|
||||||
|
"Number of heap bytes that are in use.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
|
||||||
|
valType: GaugeValue,
|
||||||
|
}, {
|
||||||
|
desc: NewDesc(
|
||||||
|
memstatNamespace("heap_released_bytes"),
|
||||||
|
"Number of heap bytes released to OS.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
|
||||||
|
valType: GaugeValue,
|
||||||
|
}, {
|
||||||
|
desc: NewDesc(
|
||||||
|
memstatNamespace("heap_objects"),
|
||||||
|
"Number of allocated objects.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
|
||||||
|
valType: GaugeValue,
|
||||||
|
}, {
|
||||||
|
desc: NewDesc(
|
||||||
|
memstatNamespace("stack_inuse_bytes"),
|
||||||
|
"Number of bytes in use by the stack allocator.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
|
||||||
|
valType: GaugeValue,
|
||||||
|
}, {
|
||||||
|
desc: NewDesc(
|
||||||
|
memstatNamespace("stack_sys_bytes"),
|
||||||
|
"Number of bytes obtained from system for stack allocator.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
|
||||||
|
valType: GaugeValue,
|
||||||
|
}, {
|
||||||
|
desc: NewDesc(
|
||||||
|
memstatNamespace("mspan_inuse_bytes"),
|
||||||
|
"Number of bytes in use by mspan structures.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
|
||||||
|
valType: GaugeValue,
|
||||||
|
}, {
|
||||||
|
desc: NewDesc(
|
||||||
|
memstatNamespace("mspan_sys_bytes"),
|
||||||
|
"Number of bytes used for mspan structures obtained from system.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
|
||||||
|
valType: GaugeValue,
|
||||||
|
}, {
|
||||||
|
desc: NewDesc(
|
||||||
|
memstatNamespace("mcache_inuse_bytes"),
|
||||||
|
"Number of bytes in use by mcache structures.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
|
||||||
|
valType: GaugeValue,
|
||||||
|
}, {
|
||||||
|
desc: NewDesc(
|
||||||
|
memstatNamespace("mcache_sys_bytes"),
|
||||||
|
"Number of bytes used for mcache structures obtained from system.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
|
||||||
|
valType: GaugeValue,
|
||||||
|
}, {
|
||||||
|
desc: NewDesc(
|
||||||
|
memstatNamespace("buck_hash_sys_bytes"),
|
||||||
|
"Number of bytes used by the profiling bucket hash table.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
|
||||||
|
valType: GaugeValue,
|
||||||
|
}, {
|
||||||
|
desc: NewDesc(
|
||||||
|
memstatNamespace("gc_sys_bytes"),
|
||||||
|
"Number of bytes used for garbage collection system metadata.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
|
||||||
|
valType: GaugeValue,
|
||||||
|
}, {
|
||||||
|
desc: NewDesc(
|
||||||
|
memstatNamespace("other_sys_bytes"),
|
||||||
|
"Number of bytes used for other system allocations.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
|
||||||
|
valType: GaugeValue,
|
||||||
|
}, {
|
||||||
|
desc: NewDesc(
|
||||||
|
memstatNamespace("next_gc_bytes"),
|
||||||
|
"Number of heap bytes when next garbage collection will take place.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
|
||||||
|
valType: GaugeValue,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type baseGoCollector struct {
|
||||||
|
goroutinesDesc *Desc
|
||||||
|
threadsDesc *Desc
|
||||||
|
gcDesc *Desc
|
||||||
|
gcLastTimeDesc *Desc
|
||||||
|
goInfoDesc *Desc
|
||||||
|
}
|
||||||
|
|
||||||
|
func newBaseGoCollector() baseGoCollector {
|
||||||
|
return baseGoCollector{
|
||||||
|
goroutinesDesc: NewDesc(
|
||||||
|
"go_goroutines",
|
||||||
|
"Number of goroutines that currently exist.",
|
||||||
|
nil, nil),
|
||||||
|
threadsDesc: NewDesc(
|
||||||
|
"go_threads",
|
||||||
|
"Number of OS threads created.",
|
||||||
|
nil, nil),
|
||||||
|
gcDesc: NewDesc(
|
||||||
|
"go_gc_duration_seconds",
|
||||||
|
"A summary of the pause duration of garbage collection cycles.",
|
||||||
|
nil, nil),
|
||||||
|
gcLastTimeDesc: NewDesc(
|
||||||
|
"go_memstats_last_gc_time_seconds",
|
||||||
|
"Number of seconds since 1970 of last garbage collection.",
|
||||||
|
nil, nil),
|
||||||
|
goInfoDesc: NewDesc(
|
||||||
|
"go_info",
|
||||||
|
"Information about the Go environment.",
|
||||||
|
nil, Labels{"version": runtime.Version()}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Describe returns all descriptions of the collector.
|
||||||
|
func (c *baseGoCollector) Describe(ch chan<- *Desc) {
|
||||||
|
ch <- c.goroutinesDesc
|
||||||
|
ch <- c.threadsDesc
|
||||||
|
ch <- c.gcDesc
|
||||||
|
ch <- c.gcLastTimeDesc
|
||||||
|
ch <- c.goInfoDesc
|
||||||
|
}
|
||||||
|
|
||||||
|
// Collect returns the current state of all metrics of the collector.
|
||||||
|
func (c *baseGoCollector) Collect(ch chan<- Metric) {
|
||||||
|
ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
|
||||||
|
|
||||||
|
n := getRuntimeNumThreads()
|
||||||
|
ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, n)
|
||||||
|
|
||||||
|
var stats debug.GCStats
|
||||||
|
stats.PauseQuantiles = make([]time.Duration, 5)
|
||||||
|
debug.ReadGCStats(&stats)
|
||||||
|
|
||||||
|
quantiles := make(map[float64]float64)
|
||||||
|
for idx, pq := range stats.PauseQuantiles[1:] {
|
||||||
|
quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
|
||||||
|
}
|
||||||
|
quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
|
||||||
|
ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
|
||||||
|
ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9)
|
||||||
|
ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func memstatNamespace(s string) string {
|
||||||
|
return "go_memstats_" + s
|
||||||
|
}
|
||||||
|
|
||||||
|
// memStatsMetrics provides the description, evaluator, and
// value type for each memstat metric.
|
||||||
|
type memStatsMetrics []struct {
|
||||||
|
desc *Desc
|
||||||
|
eval func(*runtime.MemStats) float64
|
||||||
|
valType ValueType
|
||||||
|
}
|
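The goroutine, thread, GC, and go_info metrics implemented above are normally wired up through the collectors sub-package rather than by instantiating these types directly; a minimal sketch with an assumed custom registry (collectors and promhttp come from "github.com/prometheus/client_golang/prometheus/collectors" and ".../promhttp"):

	reg := prometheus.NewRegistry()
	reg.MustRegister(
		collectors.NewGoCollector(),
		collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
	)
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))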
122 vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go generated vendored Normal file
@ -0,0 +1,122 @@
|
||||||
|
// Copyright 2021 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
//go:build !go1.17
|
||||||
|
// +build !go1.17
|
||||||
|
|
||||||
|
package prometheus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"runtime"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type goCollector struct {
|
||||||
|
base baseGoCollector
|
||||||
|
|
||||||
|
// ms... are memstats related.
|
||||||
|
msLast *runtime.MemStats // Previously collected memstats.
|
||||||
|
msLastTimestamp time.Time
|
||||||
|
msMtx sync.Mutex // Protects msLast and msLastTimestamp.
|
||||||
|
msMetrics memStatsMetrics
|
||||||
|
msRead func(*runtime.MemStats) // For mocking in tests.
|
||||||
|
msMaxWait time.Duration // Wait time for fresh memstats.
|
||||||
|
msMaxAge time.Duration // Maximum allowed age of old memstats.
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGoCollector is the obsolete version of collectors.NewGoCollector.
|
||||||
|
// See there for documentation.
|
||||||
|
//
|
||||||
|
// Deprecated: Use collectors.NewGoCollector instead.
|
||||||
|
func NewGoCollector() Collector {
|
||||||
|
msMetrics := goRuntimeMemStats()
|
||||||
|
msMetrics = append(msMetrics, struct {
|
||||||
|
desc *Desc
|
||||||
|
eval func(*runtime.MemStats) float64
|
||||||
|
valType ValueType
|
||||||
|
}{
|
||||||
|
// This metric is omitted in Go1.17+, see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
|
||||||
|
desc: NewDesc(
|
||||||
|
memstatNamespace("gc_cpu_fraction"),
|
||||||
|
"The fraction of this program's available CPU time used by the GC since the program started.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
|
||||||
|
valType: GaugeValue,
|
||||||
|
})
|
||||||
|
return &goCollector{
|
||||||
|
base: newBaseGoCollector(),
|
||||||
|
msLast: &runtime.MemStats{},
|
||||||
|
msRead: runtime.ReadMemStats,
|
||||||
|
msMaxWait: time.Second,
|
||||||
|
msMaxAge: 5 * time.Minute,
|
||||||
|
msMetrics: msMetrics,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Describe returns all descriptions of the collector.
|
||||||
|
func (c *goCollector) Describe(ch chan<- *Desc) {
|
||||||
|
c.base.Describe(ch)
|
||||||
|
for _, i := range c.msMetrics {
|
||||||
|
ch <- i.desc
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Collect returns the current state of all metrics of the collector.
|
||||||
|
func (c *goCollector) Collect(ch chan<- Metric) {
|
||||||
|
var (
|
||||||
|
ms = &runtime.MemStats{}
|
||||||
|
done = make(chan struct{})
|
||||||
|
)
|
||||||
|
// Start reading memstats first as it might take a while.
|
||||||
|
go func() {
|
||||||
|
c.msRead(ms)
|
||||||
|
c.msMtx.Lock()
|
||||||
|
c.msLast = ms
|
||||||
|
c.msLastTimestamp = time.Now()
|
||||||
|
c.msMtx.Unlock()
|
||||||
|
close(done)
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Collect base non-memory metrics.
|
||||||
|
c.base.Collect(ch)
|
||||||
|
|
||||||
|
timer := time.NewTimer(c.msMaxWait)
|
||||||
|
select {
|
||||||
|
case <-done: // Our own ReadMemStats succeeded in time. Use it.
|
||||||
|
timer.Stop() // Important for high collection frequencies to not pile up timers.
|
||||||
|
c.msCollect(ch, ms)
|
||||||
|
return
|
||||||
|
case <-timer.C: // Time out, use last memstats if possible. Continue below.
|
||||||
|
}
|
||||||
|
c.msMtx.Lock()
|
||||||
|
if time.Since(c.msLastTimestamp) < c.msMaxAge {
|
||||||
|
// Last memstats are recent enough. Collect from them under the lock.
|
||||||
|
c.msCollect(ch, c.msLast)
|
||||||
|
c.msMtx.Unlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// If we are here, the last memstats are too old or don't exist. We have
|
||||||
|
// to wait until our own ReadMemStats finally completes. For that to
|
||||||
|
// happen, we have to release the lock.
|
||||||
|
c.msMtx.Unlock()
|
||||||
|
<-done
|
||||||
|
c.msCollect(ch, ms)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
|
||||||
|
for _, i := range c.msMetrics {
|
||||||
|
ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
|
||||||
|
}
|
||||||
|
}
|
567 vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go generated vendored Normal file
@ -0,0 +1,567 @@
|
||||||
|
// Copyright 2021 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
//go:build go1.17
|
||||||
|
// +build go1.17
|
||||||
|
|
||||||
|
package prometheus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"runtime"
|
||||||
|
"runtime/metrics"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/prometheus/client_golang/prometheus/internal"
|
||||||
|
|
||||||
|
dto "github.com/prometheus/client_model/go"
|
||||||
|
"google.golang.org/protobuf/proto"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// constants for strings referenced more than once.
|
||||||
|
goGCHeapTinyAllocsObjects = "/gc/heap/tiny/allocs:objects"
|
||||||
|
goGCHeapAllocsObjects = "/gc/heap/allocs:objects"
|
||||||
|
goGCHeapFreesObjects = "/gc/heap/frees:objects"
|
||||||
|
goGCHeapFreesBytes = "/gc/heap/frees:bytes"
|
||||||
|
goGCHeapAllocsBytes = "/gc/heap/allocs:bytes"
|
||||||
|
goGCHeapObjects = "/gc/heap/objects:objects"
|
||||||
|
goGCHeapGoalBytes = "/gc/heap/goal:bytes"
|
||||||
|
goMemoryClassesTotalBytes = "/memory/classes/total:bytes"
|
||||||
|
goMemoryClassesHeapObjectsBytes = "/memory/classes/heap/objects:bytes"
|
||||||
|
goMemoryClassesHeapUnusedBytes = "/memory/classes/heap/unused:bytes"
|
||||||
|
goMemoryClassesHeapReleasedBytes = "/memory/classes/heap/released:bytes"
|
||||||
|
goMemoryClassesHeapFreeBytes = "/memory/classes/heap/free:bytes"
|
||||||
|
goMemoryClassesHeapStacksBytes = "/memory/classes/heap/stacks:bytes"
|
||||||
|
goMemoryClassesOSStacksBytes = "/memory/classes/os-stacks:bytes"
|
||||||
|
goMemoryClassesMetadataMSpanInuseBytes = "/memory/classes/metadata/mspan/inuse:bytes"
|
||||||
|
goMemoryClassesMetadataMSPanFreeBytes = "/memory/classes/metadata/mspan/free:bytes"
|
||||||
|
goMemoryClassesMetadataMCacheInuseBytes = "/memory/classes/metadata/mcache/inuse:bytes"
|
||||||
|
goMemoryClassesMetadataMCacheFreeBytes = "/memory/classes/metadata/mcache/free:bytes"
|
||||||
|
goMemoryClassesProfilingBucketsBytes = "/memory/classes/profiling/buckets:bytes"
|
||||||
|
goMemoryClassesMetadataOtherBytes = "/memory/classes/metadata/other:bytes"
|
||||||
|
goMemoryClassesOtherBytes = "/memory/classes/other:bytes"
|
||||||
|
)
|
||||||
|
|
||||||
|
// rmNamesForMemStatsMetrics lists the runtime/metrics names required to populate the goRuntimeMemStats-like metrics.
|
||||||
|
var rmNamesForMemStatsMetrics = []string{
|
||||||
|
goGCHeapTinyAllocsObjects,
|
||||||
|
goGCHeapAllocsObjects,
|
||||||
|
goGCHeapFreesObjects,
|
||||||
|
goGCHeapAllocsBytes,
|
||||||
|
goGCHeapObjects,
|
||||||
|
goGCHeapGoalBytes,
|
||||||
|
goMemoryClassesTotalBytes,
|
||||||
|
goMemoryClassesHeapObjectsBytes,
|
||||||
|
goMemoryClassesHeapUnusedBytes,
|
||||||
|
goMemoryClassesHeapReleasedBytes,
|
||||||
|
goMemoryClassesHeapFreeBytes,
|
||||||
|
goMemoryClassesHeapStacksBytes,
|
||||||
|
goMemoryClassesOSStacksBytes,
|
||||||
|
goMemoryClassesMetadataMSpanInuseBytes,
|
||||||
|
goMemoryClassesMetadataMSPanFreeBytes,
|
||||||
|
goMemoryClassesMetadataMCacheInuseBytes,
|
||||||
|
goMemoryClassesMetadataMCacheFreeBytes,
|
||||||
|
goMemoryClassesProfilingBucketsBytes,
|
||||||
|
goMemoryClassesMetadataOtherBytes,
|
||||||
|
goMemoryClassesOtherBytes,
|
||||||
|
}
|
||||||
|
|
||||||
|
func bestEffortLookupRM(lookup []string) []metrics.Description {
|
||||||
|
ret := make([]metrics.Description, 0, len(lookup))
|
||||||
|
for _, rm := range metrics.All() {
|
||||||
|
for _, m := range lookup {
|
||||||
|
if m == rm.Name {
|
||||||
|
ret = append(ret, rm)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
type goCollector struct {
|
||||||
|
base baseGoCollector
|
||||||
|
|
||||||
|
// mu protects updates to all fields ensuring a consistent
|
||||||
|
// snapshot is always produced by Collect.
|
||||||
|
mu sync.Mutex
|
||||||
|
|
||||||
|
// Contains all samples that has to retrieved from runtime/metrics (not all of them will be exposed).
|
||||||
|
sampleBuf []metrics.Sample
|
||||||
|
// sampleMap allows lookup for MemStats metrics and runtime/metrics histograms for exact sums.
|
||||||
|
sampleMap map[string]*metrics.Sample
|
||||||
|
|
||||||
|
// rmExposedMetrics represents all runtime/metrics package metrics
|
||||||
|
// that were configured to be exposed.
|
||||||
|
rmExposedMetrics []collectorMetric
|
||||||
|
rmExactSumMapForHist map[string]string
|
||||||
|
|
||||||
|
// With Go 1.17, the runtime/metrics package was introduced.
|
||||||
|
// From that point on, metric names produced by the runtime/metrics
|
||||||
|
// package could be generated from runtime/metrics names. However,
|
||||||
|
// these differ from the old names for the same values.
|
||||||
|
//
|
||||||
|
// This field exists to export the same values under the old names
|
||||||
|
// as well.
|
||||||
|
msMetrics memStatsMetrics
|
||||||
|
msMetricsEnabled bool
|
||||||
|
}
|
||||||
|
|
||||||
|
type rmMetricDesc struct {
|
||||||
|
metrics.Description
|
||||||
|
}
|
||||||
|
|
||||||
|
func matchRuntimeMetricsRules(rules []internal.GoCollectorRule) []rmMetricDesc {
|
||||||
|
var descs []rmMetricDesc
|
||||||
|
for _, d := range metrics.All() {
|
||||||
|
var (
|
||||||
|
deny = true
|
||||||
|
desc rmMetricDesc
|
||||||
|
)
|
||||||
|
|
||||||
|
for _, r := range rules {
|
||||||
|
if !r.Matcher.MatchString(d.Name) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
deny = r.Deny
|
||||||
|
}
|
||||||
|
if deny {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
desc.Description = d
|
||||||
|
descs = append(descs, desc)
|
||||||
|
}
|
||||||
|
return descs
|
||||||
|
}
|
||||||
|
|
||||||
|
func defaultGoCollectorOptions() internal.GoCollectorOptions {
|
||||||
|
return internal.GoCollectorOptions{
|
||||||
|
RuntimeMetricSumForHist: map[string]string{
|
||||||
|
"/gc/heap/allocs-by-size:bytes": goGCHeapAllocsBytes,
|
||||||
|
"/gc/heap/frees-by-size:bytes": goGCHeapFreesBytes,
|
||||||
|
},
|
||||||
|
RuntimeMetricRules: []internal.GoCollectorRule{
|
||||||
|
//{Matcher: regexp.MustCompile("")},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGoCollector is the obsolete version of collectors.NewGoCollector.
|
||||||
|
// See there for documentation.
|
||||||
|
//
|
||||||
|
// Deprecated: Use collectors.NewGoCollector instead.
|
||||||
|
func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
|
||||||
|
opt := defaultGoCollectorOptions()
|
||||||
|
for _, o := range opts {
|
||||||
|
o(&opt)
|
||||||
|
}
|
||||||
|
|
||||||
|
exposedDescriptions := matchRuntimeMetricsRules(opt.RuntimeMetricRules)
|
||||||
|
|
||||||
|
// Collect all histogram samples so that we can get their buckets.
|
||||||
|
// The API guarantees that the buckets are always fixed for the lifetime
|
||||||
|
// of the process.
|
||||||
|
var histograms []metrics.Sample
|
||||||
|
for _, d := range exposedDescriptions {
|
||||||
|
if d.Kind == metrics.KindFloat64Histogram {
|
||||||
|
histograms = append(histograms, metrics.Sample{Name: d.Name})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(histograms) > 0 {
|
||||||
|
metrics.Read(histograms)
|
||||||
|
}
|
||||||
|
|
||||||
|
bucketsMap := make(map[string][]float64)
|
||||||
|
for i := range histograms {
|
||||||
|
bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate a collector for each exposed runtime/metrics metric.
|
||||||
|
metricSet := make([]collectorMetric, 0, len(exposedDescriptions))
|
||||||
|
// SampleBuf is used for reading from runtime/metrics.
|
||||||
|
// We are assuming the largest case to have stable pointers for sampleMap purposes.
|
||||||
|
sampleBuf := make([]metrics.Sample, 0, len(exposedDescriptions)+len(opt.RuntimeMetricSumForHist)+len(rmNamesForMemStatsMetrics))
|
||||||
|
sampleMap := make(map[string]*metrics.Sample, len(exposedDescriptions))
|
||||||
|
for _, d := range exposedDescriptions {
|
||||||
|
namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(&d.Description)
|
||||||
|
if !ok {
|
||||||
|
// Just ignore this metric; we can't do anything with it here.
|
||||||
|
// If a user decides to use the latest version of Go, we don't want
|
||||||
|
// to fail here. This condition is tested in TestExpectedRuntimeMetrics.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name})
|
||||||
|
sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1]
|
||||||
|
|
||||||
|
var m collectorMetric
|
||||||
|
if d.Kind == metrics.KindFloat64Histogram {
|
||||||
|
_, hasSum := opt.RuntimeMetricSumForHist[d.Name]
|
||||||
|
unit := d.Name[strings.IndexRune(d.Name, ':')+1:]
|
||||||
|
m = newBatchHistogram(
|
||||||
|
NewDesc(
|
||||||
|
BuildFQName(namespace, subsystem, name),
|
||||||
|
d.Description.Description,
|
||||||
|
nil,
|
||||||
|
nil,
|
||||||
|
),
|
||||||
|
internal.RuntimeMetricsBucketsForUnit(bucketsMap[d.Name], unit),
|
||||||
|
hasSum,
|
||||||
|
)
|
||||||
|
} else if d.Cumulative {
|
||||||
|
m = NewCounter(CounterOpts{
|
||||||
|
Namespace: namespace,
|
||||||
|
Subsystem: subsystem,
|
||||||
|
Name: name,
|
||||||
|
Help: d.Description.Description,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
m = NewGauge(GaugeOpts{
|
||||||
|
Namespace: namespace,
|
||||||
|
Subsystem: subsystem,
|
||||||
|
Name: name,
|
||||||
|
Help: d.Description.Description,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
metricSet = append(metricSet, m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add exact sum metrics to sampleBuf if not added before.
|
||||||
|
for _, h := range histograms {
|
||||||
|
sumMetric, ok := opt.RuntimeMetricSumForHist[h.Name]
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := sampleMap[sumMetric]; ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
sampleBuf = append(sampleBuf, metrics.Sample{Name: sumMetric})
|
||||||
|
sampleMap[sumMetric] = &sampleBuf[len(sampleBuf)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
msMetrics memStatsMetrics
|
||||||
|
msDescriptions []metrics.Description
|
||||||
|
)
|
||||||
|
|
||||||
|
if !opt.DisableMemStatsLikeMetrics {
|
||||||
|
msMetrics = goRuntimeMemStats()
|
||||||
|
msDescriptions = bestEffortLookupRM(rmNamesForMemStatsMetrics)
|
||||||
|
|
||||||
|
// Check if metric was not exposed before and if not, add to sampleBuf.
|
||||||
|
for _, mdDesc := range msDescriptions {
|
||||||
|
if _, ok := sampleMap[mdDesc.Name]; ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
sampleBuf = append(sampleBuf, metrics.Sample{Name: mdDesc.Name})
|
||||||
|
sampleMap[mdDesc.Name] = &sampleBuf[len(sampleBuf)-1]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &goCollector{
|
||||||
|
base: newBaseGoCollector(),
|
||||||
|
sampleBuf: sampleBuf,
|
||||||
|
sampleMap: sampleMap,
|
||||||
|
rmExposedMetrics: metricSet,
|
||||||
|
rmExactSumMapForHist: opt.RuntimeMetricSumForHist,
|
||||||
|
msMetrics: msMetrics,
|
||||||
|
msMetricsEnabled: !opt.DisableMemStatsLikeMetrics,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Describe returns all descriptions of the collector.
|
||||||
|
func (c *goCollector) Describe(ch chan<- *Desc) {
|
||||||
|
c.base.Describe(ch)
|
||||||
|
for _, i := range c.msMetrics {
|
||||||
|
ch <- i.desc
|
||||||
|
}
|
||||||
|
for _, m := range c.rmExposedMetrics {
|
||||||
|
ch <- m.Desc()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Collect returns the current state of all metrics of the collector.
|
||||||
|
func (c *goCollector) Collect(ch chan<- Metric) {
|
||||||
|
// Collect base non-memory metrics.
|
||||||
|
c.base.Collect(ch)
|
||||||
|
|
||||||
|
if len(c.sampleBuf) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Collect must be thread-safe, so prevent concurrent use of
|
||||||
|
// sampleBuf elements. Just read into sampleBuf but write all the data
|
||||||
|
// we get into our Metrics or MemStats.
|
||||||
|
//
|
||||||
|
// This lock also ensures that the Metrics we send out are all from
|
||||||
|
// the same updates, ensuring their mutual consistency insofar as
|
||||||
|
// is guaranteed by the runtime/metrics package.
|
||||||
|
//
|
||||||
|
// N.B. This locking is heavy-handed, but Collect is expected to be called
|
||||||
|
// relatively infrequently. Also the core operation here, metrics.Read,
|
||||||
|
// is fast (O(tens of microseconds)) so contention should certainly be
|
||||||
|
// low, though channel operations and any allocations may add to that.
|
||||||
|
c.mu.Lock()
|
||||||
|
defer c.mu.Unlock()
|
||||||
|
|
||||||
|
// Populate runtime/metrics sample buffer.
|
||||||
|
metrics.Read(c.sampleBuf)
|
||||||
|
|
||||||
|
// Collect all our runtime/metrics user chose to expose from sampleBuf (if any).
|
||||||
|
for i, metric := range c.rmExposedMetrics {
|
||||||
|
// We created samples for exposed metrics first in order, so indexes match.
|
||||||
|
sample := c.sampleBuf[i]
|
||||||
|
|
||||||
|
// N.B. switch on concrete type because it's significantly more efficient
|
||||||
|
// than checking for the Counter and Gauge interface implementations. In
|
||||||
|
// this case, we control all the types here.
|
||||||
|
switch m := metric.(type) {
|
||||||
|
case *counter:
|
||||||
|
// Guard against decreases. This should never happen, but a failure
|
||||||
|
// to do so will result in a panic, which is a harsh consequence for
|
||||||
|
// a metrics collection bug.
|
||||||
|
v0, v1 := m.get(), unwrapScalarRMValue(sample.Value)
|
||||||
|
if v1 > v0 {
|
||||||
|
m.Add(unwrapScalarRMValue(sample.Value) - m.get())
|
||||||
|
}
|
||||||
|
m.Collect(ch)
|
||||||
|
case *gauge:
|
||||||
|
m.Set(unwrapScalarRMValue(sample.Value))
|
||||||
|
m.Collect(ch)
|
||||||
|
case *batchHistogram:
|
||||||
|
m.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name))
|
||||||
|
m.Collect(ch)
|
||||||
|
default:
|
||||||
|
panic("unexpected metric type")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.msMetricsEnabled {
|
||||||
|
// ms is a dummy MemStats that we populate ourselves so that we can
|
||||||
|
// populate the old metrics from it if goMemStatsCollection is enabled.
|
||||||
|
var ms runtime.MemStats
|
||||||
|
memStatsFromRM(&ms, c.sampleMap)
|
||||||
|
for _, i := range c.msMetrics {
|
||||||
|
ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
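// Illustrative sketch (not part of the vendored source): Collect above boils down
// to a single metrics.Read over a preassembled sample buffer, followed by unwrapping
// each value by kind. A standalone version of that read/unwrap step, with example
// metric names:

package main

import (
	"fmt"
	"runtime/metrics"
)

func main() {
	samples := []metrics.Sample{
		{Name: "/memory/classes/total:bytes"},
		{Name: "/sched/goroutines:goroutines"},
	}
	metrics.Read(samples) // one cheap call fills every sample in place

	for _, s := range samples {
		switch s.Value.Kind() {
		case metrics.KindUint64:
			fmt.Printf("%s = %d\n", s.Name, s.Value.Uint64())
		case metrics.KindFloat64:
			fmt.Printf("%s = %g\n", s.Name, s.Value.Float64())
		default:
			fmt.Printf("%s: unsupported kind\n", s.Name)
		}
	}
}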
|
||||||
|
|
||||||
|
// unwrapScalarRMValue unwraps a runtime/metrics value that is assumed
|
||||||
|
// to be scalar and returns the equivalent float64 value. Panics if the
|
||||||
|
// value is not scalar.
|
||||||
|
func unwrapScalarRMValue(v metrics.Value) float64 {
|
||||||
|
switch v.Kind() {
|
||||||
|
case metrics.KindUint64:
|
||||||
|
return float64(v.Uint64())
|
||||||
|
case metrics.KindFloat64:
|
||||||
|
return v.Float64()
|
||||||
|
case metrics.KindBad:
|
||||||
|
// Unsupported metric.
|
||||||
|
//
|
||||||
|
// This should never happen because we always populate our metric
|
||||||
|
// set from the runtime/metrics package.
|
||||||
|
panic("unexpected unsupported metric")
|
||||||
|
default:
|
||||||
|
// Unsupported metric kind.
|
||||||
|
//
|
||||||
|
// This should never happen because we check for this during initialization
|
||||||
|
// and flag and filter metrics whose kinds we don't understand.
|
||||||
|
panic("unexpected unsupported metric kind")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// exactSumFor takes a runtime/metrics metric name (that is assumed to
|
||||||
|
// be of kind KindFloat64Histogram) and returns its exact sum and whether
|
||||||
|
// its exact sum exists.
|
||||||
|
//
|
||||||
|
// The runtime/metrics API for histograms doesn't currently expose exact
|
||||||
|
// sums, but some of the other metrics are in fact exact sums of histograms.
|
||||||
|
func (c *goCollector) exactSumFor(rmName string) float64 {
|
||||||
|
sumName, ok := c.rmExactSumMapForHist[rmName]
|
||||||
|
if !ok {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
s, ok := c.sampleMap[sumName]
|
||||||
|
if !ok {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return unwrapScalarRMValue(s.Value)
|
||||||
|
}
|
||||||
|
|
||||||
|
func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) {
|
||||||
|
lookupOrZero := func(name string) uint64 {
|
||||||
|
if s, ok := rm[name]; ok {
|
||||||
|
return s.Value.Uint64()
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Currently, MemStats adds tiny alloc count to both Mallocs AND Frees.
|
||||||
|
// The reason for this is that MemStats couldn't be extended at the time
|
||||||
|
// but there was a desire to have Mallocs at least be a little more representative,
|
||||||
|
// while having Mallocs - Frees still represent a live object count.
|
||||||
|
// Unfortunately, MemStats doesn't actually export a large allocation count,
|
||||||
|
// so it's impossible to pull this number out directly.
|
||||||
|
tinyAllocs := lookupOrZero(goGCHeapTinyAllocsObjects)
|
||||||
|
ms.Mallocs = lookupOrZero(goGCHeapAllocsObjects) + tinyAllocs
|
||||||
|
ms.Frees = lookupOrZero(goGCHeapFreesObjects) + tinyAllocs
|
||||||
|
|
||||||
|
ms.TotalAlloc = lookupOrZero(goGCHeapAllocsBytes)
|
||||||
|
ms.Sys = lookupOrZero(goMemoryClassesTotalBytes)
|
||||||
|
ms.Lookups = 0 // Already always zero.
|
||||||
|
ms.HeapAlloc = lookupOrZero(goMemoryClassesHeapObjectsBytes)
|
||||||
|
ms.Alloc = ms.HeapAlloc
|
||||||
|
ms.HeapInuse = ms.HeapAlloc + lookupOrZero(goMemoryClassesHeapUnusedBytes)
|
||||||
|
ms.HeapReleased = lookupOrZero(goMemoryClassesHeapReleasedBytes)
|
||||||
|
ms.HeapIdle = ms.HeapReleased + lookupOrZero(goMemoryClassesHeapFreeBytes)
|
||||||
|
ms.HeapSys = ms.HeapInuse + ms.HeapIdle
|
||||||
|
ms.HeapObjects = lookupOrZero(goGCHeapObjects)
|
||||||
|
ms.StackInuse = lookupOrZero(goMemoryClassesHeapStacksBytes)
|
||||||
|
ms.StackSys = ms.StackInuse + lookupOrZero(goMemoryClassesOSStacksBytes)
|
||||||
|
ms.MSpanInuse = lookupOrZero(goMemoryClassesMetadataMSpanInuseBytes)
|
||||||
|
ms.MSpanSys = ms.MSpanInuse + lookupOrZero(goMemoryClassesMetadataMSPanFreeBytes)
|
||||||
|
ms.MCacheInuse = lookupOrZero(goMemoryClassesMetadataMCacheInuseBytes)
|
||||||
|
ms.MCacheSys = ms.MCacheInuse + lookupOrZero(goMemoryClassesMetadataMCacheFreeBytes)
|
||||||
|
ms.BuckHashSys = lookupOrZero(goMemoryClassesProfilingBucketsBytes)
|
||||||
|
ms.GCSys = lookupOrZero(goMemoryClassesMetadataOtherBytes)
|
||||||
|
ms.OtherSys = lookupOrZero(goMemoryClassesOtherBytes)
|
||||||
|
ms.NextGC = lookupOrZero(goGCHeapGoalBytes)
|
||||||
|
|
||||||
|
// N.B. GCCPUFraction is intentionally omitted. This metric is not useful,
|
||||||
|
// and often misleading due to the fact that it's an average over the lifetime
|
||||||
|
// of the process.
|
||||||
|
// See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
|
||||||
|
// for more details.
|
||||||
|
ms.GCCPUFraction = 0
|
||||||
|
}
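// Illustrative sketch (not part of the vendored source): the mapping above can be
// sanity-checked by comparing runtime.ReadMemStats against the corresponding
// runtime/metrics samples. The two snapshots are taken at slightly different
// moments, so the values are close rather than identical.

package main

import (
	"fmt"
	"runtime"
	"runtime/metrics"
)

func main() {
	samples := []metrics.Sample{
		{Name: "/memory/classes/total:bytes"}, // feeds MemStats.Sys above
		{Name: "/gc/heap/allocs:bytes"},       // feeds MemStats.TotalAlloc above
	}
	metrics.Read(samples)

	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)

	fmt.Println("Sys:       ", ms.Sys, "vs", samples[0].Value.Uint64())
	fmt.Println("TotalAlloc:", ms.TotalAlloc, "vs", samples[1].Value.Uint64())
}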
|
||||||
|
|
||||||
|
// batchHistogram is a mutable histogram that is updated
|
||||||
|
// in batches.
|
||||||
|
type batchHistogram struct {
|
||||||
|
selfCollector
|
||||||
|
|
||||||
|
// Static fields updated only once.
|
||||||
|
desc *Desc
|
||||||
|
hasSum bool
|
||||||
|
|
||||||
|
// Because this histogram operates in batches, it just uses a
|
||||||
|
// single mutex for everything. updates are always serialized
|
||||||
|
// but Write calls may operate concurrently with updates.
|
||||||
|
// Contention between these two sources should be rare.
|
||||||
|
mu sync.Mutex
|
||||||
|
buckets []float64 // Inclusive lower bounds, like runtime/metrics.
|
||||||
|
counts []uint64
|
||||||
|
sum float64 // Used if hasSum is true.
|
||||||
|
}
|
||||||
|
|
||||||
|
// newBatchHistogram creates a new batch histogram value with the given
|
||||||
|
// Desc, buckets, and whether or not it has an exact sum available.
|
||||||
|
//
|
||||||
|
// buckets must always be from the runtime/metrics package, following
|
||||||
|
// the same conventions.
|
||||||
|
func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram {
|
||||||
|
// We need to remove -Inf values. runtime/metrics keeps them around.
|
||||||
|
// But -Inf bucket should not be allowed for prometheus histograms.
|
||||||
|
if buckets[0] == math.Inf(-1) {
|
||||||
|
buckets = buckets[1:]
|
||||||
|
}
|
||||||
|
h := &batchHistogram{
|
||||||
|
desc: desc,
|
||||||
|
buckets: buckets,
|
||||||
|
// Because buckets follows runtime/metrics conventions, there's
|
||||||
|
// 1 more value in the buckets list than there are buckets represented,
|
||||||
|
// because in runtime/metrics, the bucket values represent *boundaries*,
|
||||||
|
// and non-Inf boundaries are inclusive lower bounds for that bucket.
|
||||||
|
counts: make([]uint64, len(buckets)-1),
|
||||||
|
hasSum: hasSum,
|
||||||
|
}
|
||||||
|
h.init(h)
|
||||||
|
return h
|
||||||
|
}
|
||||||
|
|
||||||
|
// update updates the batchHistogram from a runtime/metrics histogram.
|
||||||
|
//
|
||||||
|
// sum must be provided if the batchHistogram was created to have an exact sum.
|
||||||
|
// h.buckets must be a strict subset of his.Buckets.
|
||||||
|
func (h *batchHistogram) update(his *metrics.Float64Histogram, sum float64) {
|
||||||
|
counts, buckets := his.Counts, his.Buckets
|
||||||
|
|
||||||
|
h.mu.Lock()
|
||||||
|
defer h.mu.Unlock()
|
||||||
|
|
||||||
|
// Clear buckets.
|
||||||
|
for i := range h.counts {
|
||||||
|
h.counts[i] = 0
|
||||||
|
}
|
||||||
|
// Copy and reduce buckets.
|
||||||
|
var j int
|
||||||
|
for i, count := range counts {
|
||||||
|
h.counts[j] += count
|
||||||
|
if buckets[i+1] == h.buckets[j+1] {
|
||||||
|
j++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if h.hasSum {
|
||||||
|
h.sum = sum
|
||||||
|
}
|
||||||
|
}
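// Illustrative sketch (not part of the vendored source): the copy-and-reduce loop
// above folds fine-grained runtime/metrics counts into the coarser bucket layout
// chosen at construction time. A standalone version of that fold, on made-up data:

package main

import "fmt"

// mergeCounts mirrors the loop in batchHistogram.update: coarseBounds must be a
// subset of fineBounds, and each fine count is added to the coarse bucket that
// contains it.
func mergeCounts(fineCounts []uint64, fineBounds, coarseBounds []float64) []uint64 {
	out := make([]uint64, len(coarseBounds)-1)
	j := 0
	for i, c := range fineCounts {
		out[j] += c
		if fineBounds[i+1] == coarseBounds[j+1] {
			j++
		}
	}
	return out
}

func main() {
	fineBounds := []float64{0, 1, 2, 4, 8} // four fine buckets
	coarseBounds := []float64{0, 2, 8}     // two coarse buckets, boundaries are a subset
	fineCounts := []uint64{3, 1, 5, 2}
	fmt.Println(mergeCounts(fineCounts, fineBounds, coarseBounds)) // [4 7]
}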
|
||||||
|
|
||||||
|
func (h *batchHistogram) Desc() *Desc {
|
||||||
|
return h.desc
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *batchHistogram) Write(out *dto.Metric) error {
|
||||||
|
h.mu.Lock()
|
||||||
|
defer h.mu.Unlock()
|
||||||
|
|
||||||
|
sum := float64(0)
|
||||||
|
if h.hasSum {
|
||||||
|
sum = h.sum
|
||||||
|
}
|
||||||
|
dtoBuckets := make([]*dto.Bucket, 0, len(h.counts))
|
||||||
|
totalCount := uint64(0)
|
||||||
|
for i, count := range h.counts {
|
||||||
|
totalCount += count
|
||||||
|
if !h.hasSum {
|
||||||
|
if count != 0 {
|
||||||
|
// N.B. This computed sum is an underestimate.
|
||||||
|
sum += h.buckets[i] * float64(count)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Skip the +Inf bucket, but only for the bucket list.
|
||||||
|
// It must still count for sum and totalCount.
|
||||||
|
if math.IsInf(h.buckets[i+1], 1) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// Float64Histogram's upper bound is exclusive, so make it inclusive
|
||||||
|
// by obtaining the next float64 value down, in order.
|
||||||
|
upperBound := math.Nextafter(h.buckets[i+1], h.buckets[i])
|
||||||
|
dtoBuckets = append(dtoBuckets, &dto.Bucket{
|
||||||
|
CumulativeCount: proto.Uint64(totalCount),
|
||||||
|
UpperBound: proto.Float64(upperBound),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
out.Histogram = &dto.Histogram{
|
||||||
|
Bucket: dtoBuckets,
|
||||||
|
SampleCount: proto.Uint64(totalCount),
|
||||||
|
SampleSum: proto.Float64(sum),
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
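// Illustrative sketch (not part of the vendored source): Write converts the
// exclusive upper bound of a runtime/metrics bucket into the inclusive upper bound
// Prometheus expects by stepping down to the previous representable float64.

package main

import (
	"fmt"
	"math"
)

func main() {
	lower, upper := 1.0, 2.0
	promUpper := math.Nextafter(upper, lower) // largest float64 strictly below 2.0
	fmt.Println(promUpper < upper)            // true
}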
vendor/github.com/prometheus/client_golang/prometheus/histogram.go (generated, vendored, new file, 1531 lines): diff suppressed because it is too large.
vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go (generated, vendored, new file, 60 lines)
@@ -0,0 +1,60 @@
|
||||||
|
// Copyright (c) 2015 Björn Rabenstein
|
||||||
|
//
|
||||||
|
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
// of this software and associated documentation files (the "Software"), to deal
|
||||||
|
// in the Software without restriction, including without limitation the rights
|
||||||
|
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
// copies of the Software, and to permit persons to whom the Software is
|
||||||
|
// furnished to do so, subject to the following conditions:
|
||||||
|
//
|
||||||
|
// The above copyright notice and this permission notice shall be included in all
|
||||||
|
// copies or substantial portions of the Software.
|
||||||
|
//
|
||||||
|
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
// SOFTWARE.
|
||||||
|
//
|
||||||
|
// The code in this package is copy/paste to avoid a dependency. Hence this file
|
||||||
|
// carries the copyright of the original repo.
|
||||||
|
// https://github.com/beorn7/floats
|
||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
)
|
||||||
|
|
||||||
|
// minNormalFloat64 is the smallest positive normal value of type float64.
|
||||||
|
var minNormalFloat64 = math.Float64frombits(0x0010000000000000)
|
||||||
|
|
||||||
|
// AlmostEqualFloat64 returns true if a and b are equal within a relative error
|
||||||
|
// of epsilon. See http://floating-point-gui.de/errors/comparison/ for the
|
||||||
|
// details of the applied method.
|
||||||
|
func AlmostEqualFloat64(a, b, epsilon float64) bool {
|
||||||
|
if a == b {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
absA := math.Abs(a)
|
||||||
|
absB := math.Abs(b)
|
||||||
|
diff := math.Abs(a - b)
|
||||||
|
if a == 0 || b == 0 || absA+absB < minNormalFloat64 {
|
||||||
|
return diff < epsilon*minNormalFloat64
|
||||||
|
}
|
||||||
|
return diff/math.Min(absA+absB, math.MaxFloat64) < epsilon
|
||||||
|
}
|
||||||
|
|
||||||
|
// AlmostEqualFloat64s is the slice form of AlmostEqualFloat64.
|
||||||
|
func AlmostEqualFloat64s(a, b []float64, epsilon float64) bool {
|
||||||
|
if len(a) != len(b) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for i := range a {
|
||||||
|
if !AlmostEqualFloat64(a[i], b[i], epsilon) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
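// Illustrative sketch (not part of the vendored source): a test-style usage of the
// helpers above, showing a case where exact float64 equality fails but the relative
// comparison succeeds.

package internal

import "testing"

func TestAlmostEqualSketch(t *testing.T) {
	a := 0.1 + 0.2 // 0.30000000000000004 due to rounding
	b := 0.3
	if a == b {
		t.Fatal("exact equality is not expected here")
	}
	if !AlmostEqualFloat64(a, b, 1e-12) {
		t.Fatal("values should compare as almost equal")
	}
	if !AlmostEqualFloat64s([]float64{a, 1}, []float64{b, 1}, 1e-12) {
		t.Fatal("slices should compare as almost equal")
	}
}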
vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go (generated, vendored, new file, 654 lines)
@@ -0,0 +1,654 @@
|
||||||
|
// Copyright 2022 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
//
|
||||||
|
// This file provides tools to compare sequences of strings and generate textual diffs.
|
||||||
|
//
|
||||||
|
// Maintaining `GetUnifiedDiffString` here because the original repository
|
||||||
|
// (https://github.com/pmezard/go-difflib) is no longer maintained.
|
||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
func min(a, b int) int {
|
||||||
|
if a < b {
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
func max(a, b int) int {
|
||||||
|
if a > b {
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
func calculateRatio(matches, length int) float64 {
|
||||||
|
if length > 0 {
|
||||||
|
return 2.0 * float64(matches) / float64(length)
|
||||||
|
}
|
||||||
|
return 1.0
|
||||||
|
}
|
||||||
|
|
||||||
|
type Match struct {
|
||||||
|
A int
|
||||||
|
B int
|
||||||
|
Size int
|
||||||
|
}
|
||||||
|
|
||||||
|
type OpCode struct {
|
||||||
|
Tag byte
|
||||||
|
I1 int
|
||||||
|
I2 int
|
||||||
|
J1 int
|
||||||
|
J2 int
|
||||||
|
}
|
||||||
|
|
||||||
|
// SequenceMatcher compares sequence of strings. The basic
|
||||||
|
// algorithm predates, and is a little fancier than, an algorithm
|
||||||
|
// published in the late 1980's by Ratcliff and Obershelp under the
|
||||||
|
// hyperbolic name "gestalt pattern matching". The basic idea is to find
|
||||||
|
// the longest contiguous matching subsequence that contains no "junk"
|
||||||
|
// elements (R-O doesn't address junk). The same idea is then applied
|
||||||
|
// recursively to the pieces of the sequences to the left and to the right
|
||||||
|
// of the matching subsequence. This does not yield minimal edit
|
||||||
|
// sequences, but does tend to yield matches that "look right" to people.
|
||||||
|
//
|
||||||
|
// SequenceMatcher tries to compute a "human-friendly diff" between two
|
||||||
|
// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
|
||||||
|
// longest *contiguous* & junk-free matching subsequence. That's what
|
||||||
|
// catches peoples' eyes. The Windows(tm) windiff has another interesting
|
||||||
|
// notion, pairing up elements that appear uniquely in each sequence.
|
||||||
|
// That, and the method here, appear to yield more intuitive difference
|
||||||
|
// reports than does diff. This method appears to be the least vulnerable
|
||||||
|
// to synching up on blocks of "junk lines", though (like blank lines in
|
||||||
|
// ordinary text files, or maybe "<P>" lines in HTML files). That may be
|
||||||
|
// because this is the only method of the 3 that has a *concept* of
|
||||||
|
// "junk" <wink>.
|
||||||
|
//
|
||||||
|
// Timing: Basic R-O is cubic time worst case and quadratic time expected
|
||||||
|
// case. SequenceMatcher is quadratic time for the worst case and has
|
||||||
|
// expected-case behavior dependent in a complicated way on how many
|
||||||
|
// elements the sequences have in common; best case time is linear.
|
||||||
|
type SequenceMatcher struct {
|
||||||
|
a []string
|
||||||
|
b []string
|
||||||
|
b2j map[string][]int
|
||||||
|
IsJunk func(string) bool
|
||||||
|
autoJunk bool
|
||||||
|
bJunk map[string]struct{}
|
||||||
|
matchingBlocks []Match
|
||||||
|
fullBCount map[string]int
|
||||||
|
bPopular map[string]struct{}
|
||||||
|
opCodes []OpCode
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewMatcher(a, b []string) *SequenceMatcher {
|
||||||
|
m := SequenceMatcher{autoJunk: true}
|
||||||
|
m.SetSeqs(a, b)
|
||||||
|
return &m
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewMatcherWithJunk(a, b []string, autoJunk bool,
|
||||||
|
isJunk func(string) bool,
|
||||||
|
) *SequenceMatcher {
|
||||||
|
m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
|
||||||
|
m.SetSeqs(a, b)
|
||||||
|
return &m
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set two sequences to be compared.
|
||||||
|
func (m *SequenceMatcher) SetSeqs(a, b []string) {
|
||||||
|
m.SetSeq1(a)
|
||||||
|
m.SetSeq2(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set the first sequence to be compared. The second sequence to be compared is
|
||||||
|
// not changed.
|
||||||
|
//
|
||||||
|
// SequenceMatcher computes and caches detailed information about the second
|
||||||
|
// sequence, so if you want to compare one sequence S against many sequences,
|
||||||
|
// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
|
||||||
|
// sequences.
|
||||||
|
//
|
||||||
|
// See also SetSeqs() and SetSeq2().
|
||||||
|
func (m *SequenceMatcher) SetSeq1(a []string) {
|
||||||
|
if &a == &m.a {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
m.a = a
|
||||||
|
m.matchingBlocks = nil
|
||||||
|
m.opCodes = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set the second sequence to be compared. The first sequence to be compared is
|
||||||
|
// not changed.
|
||||||
|
func (m *SequenceMatcher) SetSeq2(b []string) {
|
||||||
|
if &b == &m.b {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
m.b = b
|
||||||
|
m.matchingBlocks = nil
|
||||||
|
m.opCodes = nil
|
||||||
|
m.fullBCount = nil
|
||||||
|
m.chainB()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SequenceMatcher) chainB() {
|
||||||
|
// Populate line -> index mapping
|
||||||
|
b2j := map[string][]int{}
|
||||||
|
for i, s := range m.b {
|
||||||
|
indices := b2j[s]
|
||||||
|
indices = append(indices, i)
|
||||||
|
b2j[s] = indices
|
||||||
|
}
|
||||||
|
|
||||||
|
// Purge junk elements
|
||||||
|
m.bJunk = map[string]struct{}{}
|
||||||
|
if m.IsJunk != nil {
|
||||||
|
junk := m.bJunk
|
||||||
|
for s := range b2j {
|
||||||
|
if m.IsJunk(s) {
|
||||||
|
junk[s] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for s := range junk {
|
||||||
|
delete(b2j, s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Purge remaining popular elements
|
||||||
|
popular := map[string]struct{}{}
|
||||||
|
n := len(m.b)
|
||||||
|
if m.autoJunk && n >= 200 {
|
||||||
|
ntest := n/100 + 1
|
||||||
|
for s, indices := range b2j {
|
||||||
|
if len(indices) > ntest {
|
||||||
|
popular[s] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for s := range popular {
|
||||||
|
delete(b2j, s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
m.bPopular = popular
|
||||||
|
m.b2j = b2j
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SequenceMatcher) isBJunk(s string) bool {
|
||||||
|
_, ok := m.bJunk[s]
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find longest matching block in a[alo:ahi] and b[blo:bhi].
|
||||||
|
//
|
||||||
|
// If IsJunk is not defined:
|
||||||
|
//
|
||||||
|
// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
|
||||||
|
//
|
||||||
|
// alo <= i <= i+k <= ahi
|
||||||
|
// blo <= j <= j+k <= bhi
|
||||||
|
//
|
||||||
|
// and for all (i',j',k') meeting those conditions,
|
||||||
|
//
|
||||||
|
// k >= k'
|
||||||
|
// i <= i'
|
||||||
|
// and if i == i', j <= j'
|
||||||
|
//
|
||||||
|
// In other words, of all maximal matching blocks, return one that
|
||||||
|
// starts earliest in a, and of all those maximal matching blocks that
|
||||||
|
// start earliest in a, return the one that starts earliest in b.
|
||||||
|
//
|
||||||
|
// If IsJunk is defined, first the longest matching block is
|
||||||
|
// determined as above, but with the additional restriction that no
|
||||||
|
// junk element appears in the block. Then that block is extended as
|
||||||
|
// far as possible by matching (only) junk elements on both sides. So
|
||||||
|
// the resulting block never matches on junk except as identical junk
|
||||||
|
// happens to be adjacent to an "interesting" match.
|
||||||
|
//
|
||||||
|
// If no blocks match, return (alo, blo, 0).
|
||||||
|
func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
|
||||||
|
// CAUTION: stripping common prefix or suffix would be incorrect.
|
||||||
|
// E.g.,
|
||||||
|
// ab
|
||||||
|
// acab
|
||||||
|
// Longest matching block is "ab", but if common prefix is
|
||||||
|
// stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
|
||||||
|
// strip, so ends up claiming that ab is changed to acab by
|
||||||
|
// inserting "ca" in the middle. That's minimal but unintuitive:
|
||||||
|
// "it's obvious" that someone inserted "ac" at the front.
|
||||||
|
// Windiff ends up at the same place as diff, but by pairing up
|
||||||
|
// the unique 'b's and then matching the first two 'a's.
|
||||||
|
besti, bestj, bestsize := alo, blo, 0
|
||||||
|
|
||||||
|
// find longest junk-free match
|
||||||
|
// during an iteration of the loop, j2len[j] = length of longest
|
||||||
|
// junk-free match ending with a[i-1] and b[j]
|
||||||
|
j2len := map[int]int{}
|
||||||
|
for i := alo; i != ahi; i++ {
|
||||||
|
// look at all instances of a[i] in b; note that because
|
||||||
|
// b2j has no junk keys, the loop is skipped if a[i] is junk
|
||||||
|
newj2len := map[int]int{}
|
||||||
|
for _, j := range m.b2j[m.a[i]] {
|
||||||
|
// a[i] matches b[j]
|
||||||
|
if j < blo {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if j >= bhi {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
k := j2len[j-1] + 1
|
||||||
|
newj2len[j] = k
|
||||||
|
if k > bestsize {
|
||||||
|
besti, bestj, bestsize = i-k+1, j-k+1, k
|
||||||
|
}
|
||||||
|
}
|
||||||
|
j2len = newj2len
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extend the best by non-junk elements on each end. In particular,
|
||||||
|
// "popular" non-junk elements aren't in b2j, which greatly speeds
|
||||||
|
// the inner loop above, but also means "the best" match so far
|
||||||
|
// doesn't contain any junk *or* popular non-junk elements.
|
||||||
|
for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
|
||||||
|
m.a[besti-1] == m.b[bestj-1] {
|
||||||
|
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
|
||||||
|
}
|
||||||
|
for besti+bestsize < ahi && bestj+bestsize < bhi &&
|
||||||
|
!m.isBJunk(m.b[bestj+bestsize]) &&
|
||||||
|
m.a[besti+bestsize] == m.b[bestj+bestsize] {
|
||||||
|
bestsize++
|
||||||
|
}
|
||||||
|
|
||||||
|
// Now that we have a wholly interesting match (albeit possibly
|
||||||
|
// empty!), we may as well suck up the matching junk on each
|
||||||
|
// side of it too. Can't think of a good reason not to, and it
|
||||||
|
// saves post-processing the (possibly considerable) expense of
|
||||||
|
// figuring out what to do with it. In the case of an empty
|
||||||
|
// interesting match, this is clearly the right thing to do,
|
||||||
|
// because no other kind of match is possible in the regions.
|
||||||
|
for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
|
||||||
|
m.a[besti-1] == m.b[bestj-1] {
|
||||||
|
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
|
||||||
|
}
|
||||||
|
for besti+bestsize < ahi && bestj+bestsize < bhi &&
|
||||||
|
m.isBJunk(m.b[bestj+bestsize]) &&
|
||||||
|
m.a[besti+bestsize] == m.b[bestj+bestsize] {
|
||||||
|
bestsize++
|
||||||
|
}
|
||||||
|
|
||||||
|
return Match{A: besti, B: bestj, Size: bestsize}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return list of triples describing matching subsequences.
|
||||||
|
//
|
||||||
|
// Each triple is of the form (i, j, n), and means that
|
||||||
|
// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
|
||||||
|
// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
|
||||||
|
// adjacent triples in the list, and the second is not the last triple in the
|
||||||
|
// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
|
||||||
|
// adjacent equal blocks.
|
||||||
|
//
|
||||||
|
// The last triple is a dummy, (len(a), len(b), 0), and is the only
|
||||||
|
// triple with n==0.
|
||||||
|
func (m *SequenceMatcher) GetMatchingBlocks() []Match {
|
||||||
|
if m.matchingBlocks != nil {
|
||||||
|
return m.matchingBlocks
|
||||||
|
}
|
||||||
|
|
||||||
|
var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
|
||||||
|
matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
|
||||||
|
match := m.findLongestMatch(alo, ahi, blo, bhi)
|
||||||
|
i, j, k := match.A, match.B, match.Size
|
||||||
|
if match.Size > 0 {
|
||||||
|
if alo < i && blo < j {
|
||||||
|
matched = matchBlocks(alo, i, blo, j, matched)
|
||||||
|
}
|
||||||
|
matched = append(matched, match)
|
||||||
|
if i+k < ahi && j+k < bhi {
|
||||||
|
matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return matched
|
||||||
|
}
|
||||||
|
matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
|
||||||
|
|
||||||
|
// It's possible that we have adjacent equal blocks in the
|
||||||
|
// matching_blocks list now.
|
||||||
|
nonAdjacent := []Match{}
|
||||||
|
i1, j1, k1 := 0, 0, 0
|
||||||
|
for _, b := range matched {
|
||||||
|
// Is this block adjacent to i1, j1, k1?
|
||||||
|
i2, j2, k2 := b.A, b.B, b.Size
|
||||||
|
if i1+k1 == i2 && j1+k1 == j2 {
|
||||||
|
// Yes, so collapse them -- this just increases the length of
|
||||||
|
// the first block by the length of the second, and the first
|
||||||
|
// block so lengthened remains the block to compare against.
|
||||||
|
k1 += k2
|
||||||
|
} else {
|
||||||
|
// Not adjacent. Remember the first block (k1==0 means it's
|
||||||
|
// the dummy we started with), and make the second block the
|
||||||
|
// new block to compare against.
|
||||||
|
if k1 > 0 {
|
||||||
|
nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
|
||||||
|
}
|
||||||
|
i1, j1, k1 = i2, j2, k2
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if k1 > 0 {
|
||||||
|
nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
|
||||||
|
}
|
||||||
|
|
||||||
|
nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
|
||||||
|
m.matchingBlocks = nonAdjacent
|
||||||
|
return m.matchingBlocks
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return list of 5-tuples describing how to turn a into b.
|
||||||
|
//
|
||||||
|
// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
|
||||||
|
// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
|
||||||
|
// tuple preceding it, and likewise for j1 == the previous j2.
|
||||||
|
//
|
||||||
|
// The tags are characters, with these meanings:
|
||||||
|
//
|
||||||
|
// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2]
|
||||||
|
//
|
||||||
|
// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case.
|
||||||
|
//
|
||||||
|
// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
|
||||||
|
//
|
||||||
|
// 'e' (equal): a[i1:i2] == b[j1:j2]
|
||||||
|
func (m *SequenceMatcher) GetOpCodes() []OpCode {
|
||||||
|
if m.opCodes != nil {
|
||||||
|
return m.opCodes
|
||||||
|
}
|
||||||
|
i, j := 0, 0
|
||||||
|
matching := m.GetMatchingBlocks()
|
||||||
|
opCodes := make([]OpCode, 0, len(matching))
|
||||||
|
for _, m := range matching {
|
||||||
|
// invariant: we've pumped out correct diffs to change
|
||||||
|
// a[:i] into b[:j], and the next matching block is
|
||||||
|
// a[ai:ai+size] == b[bj:bj+size]. So we need to pump
|
||||||
|
// out a diff to change a[i:ai] into b[j:bj], pump out
|
||||||
|
// the matching block, and move (i,j) beyond the match
|
||||||
|
ai, bj, size := m.A, m.B, m.Size
|
||||||
|
tag := byte(0)
|
||||||
|
if i < ai && j < bj {
|
||||||
|
tag = 'r'
|
||||||
|
} else if i < ai {
|
||||||
|
tag = 'd'
|
||||||
|
} else if j < bj {
|
||||||
|
tag = 'i'
|
||||||
|
}
|
||||||
|
if tag > 0 {
|
||||||
|
opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
|
||||||
|
}
|
||||||
|
i, j = ai+size, bj+size
|
||||||
|
// the list of matching blocks is terminated by a
|
||||||
|
// sentinel with size 0
|
||||||
|
if size > 0 {
|
||||||
|
opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
m.opCodes = opCodes
|
||||||
|
return m.opCodes
|
||||||
|
}
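// Illustrative sketch (not part of the vendored source): the opcodes GetOpCodes
// produces for two small sequences that differ in a single element.

package internal

import (
	"fmt"
	"testing"
)

func TestOpCodesSketch(t *testing.T) {
	m := NewMatcher([]string{"a", "b", "c"}, []string{"a", "x", "c"})
	ops := m.GetOpCodes()
	for _, op := range ops {
		fmt.Printf("%c a[%d:%d] b[%d:%d]\n", op.Tag, op.I1, op.I2, op.J1, op.J2)
	}
	// Expected: e a[0:1] b[0:1], r a[1:2] b[1:2], e a[2:3] b[2:3]
	if len(ops) != 3 || ops[1].Tag != 'r' {
		t.Fatalf("unexpected opcodes: %+v", ops)
	}
}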
|
||||||
|
|
||||||
|
// Isolate change clusters by eliminating ranges with no changes.
|
||||||
|
//
|
||||||
|
// Return a generator of groups with up to n lines of context.
|
||||||
|
// Each group is in the same format as returned by GetOpCodes().
|
||||||
|
func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
|
||||||
|
if n < 0 {
|
||||||
|
n = 3
|
||||||
|
}
|
||||||
|
codes := m.GetOpCodes()
|
||||||
|
if len(codes) == 0 {
|
||||||
|
codes = []OpCode{{'e', 0, 1, 0, 1}}
|
||||||
|
}
|
||||||
|
// Fixup leading and trailing groups if they show no changes.
|
||||||
|
if codes[0].Tag == 'e' {
|
||||||
|
c := codes[0]
|
||||||
|
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
|
||||||
|
codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
|
||||||
|
}
|
||||||
|
if codes[len(codes)-1].Tag == 'e' {
|
||||||
|
c := codes[len(codes)-1]
|
||||||
|
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
|
||||||
|
codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
|
||||||
|
}
|
||||||
|
nn := n + n
|
||||||
|
groups := [][]OpCode{}
|
||||||
|
group := []OpCode{}
|
||||||
|
for _, c := range codes {
|
||||||
|
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
|
||||||
|
// End the current group and start a new one whenever
|
||||||
|
// there is a large range with no changes.
|
||||||
|
if c.Tag == 'e' && i2-i1 > nn {
|
||||||
|
group = append(group, OpCode{
|
||||||
|
c.Tag, i1, min(i2, i1+n),
|
||||||
|
j1, min(j2, j1+n),
|
||||||
|
})
|
||||||
|
groups = append(groups, group)
|
||||||
|
group = []OpCode{}
|
||||||
|
i1, j1 = max(i1, i2-n), max(j1, j2-n)
|
||||||
|
}
|
||||||
|
group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
|
||||||
|
}
|
||||||
|
if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
|
||||||
|
groups = append(groups, group)
|
||||||
|
}
|
||||||
|
return groups
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return a measure of the sequences' similarity (float in [0,1]).
|
||||||
|
//
|
||||||
|
// Where T is the total number of elements in both sequences, and
|
||||||
|
// M is the number of matches, this is 2.0*M / T.
|
||||||
|
// Note that this is 1 if the sequences are identical, and 0 if
|
||||||
|
// they have nothing in common.
|
||||||
|
//
|
||||||
|
// .Ratio() is expensive to compute if you haven't already computed
|
||||||
|
// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
|
||||||
|
// want to try .QuickRatio() or .RealQuickRatio() first to get an
|
||||||
|
// upper bound.
|
||||||
|
func (m *SequenceMatcher) Ratio() float64 {
|
||||||
|
matches := 0
|
||||||
|
for _, m := range m.GetMatchingBlocks() {
|
||||||
|
matches += m.Size
|
||||||
|
}
|
||||||
|
return calculateRatio(matches, len(m.a)+len(m.b))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return an upper bound on ratio() relatively quickly.
|
||||||
|
//
|
||||||
|
// This isn't defined beyond that it is an upper bound on .Ratio(), and
|
||||||
|
// is faster to compute.
|
||||||
|
func (m *SequenceMatcher) QuickRatio() float64 {
|
||||||
|
// viewing a and b as multisets, set matches to the cardinality
|
||||||
|
// of their intersection; this counts the number of matches
|
||||||
|
// without regard to order, so is clearly an upper bound
|
||||||
|
if m.fullBCount == nil {
|
||||||
|
m.fullBCount = map[string]int{}
|
||||||
|
for _, s := range m.b {
|
||||||
|
m.fullBCount[s]++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// avail[x] is the number of times x appears in 'b' less the
|
||||||
|
// number of times we've seen it in 'a' so far ... kinda
|
||||||
|
avail := map[string]int{}
|
||||||
|
matches := 0
|
||||||
|
for _, s := range m.a {
|
||||||
|
n, ok := avail[s]
|
||||||
|
if !ok {
|
||||||
|
n = m.fullBCount[s]
|
||||||
|
}
|
||||||
|
avail[s] = n - 1
|
||||||
|
if n > 0 {
|
||||||
|
matches++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return calculateRatio(matches, len(m.a)+len(m.b))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return an upper bound on ratio() very quickly.
|
||||||
|
//
|
||||||
|
// This isn't defined beyond that it is an upper bound on .Ratio(), and
|
||||||
|
// is faster to compute than either .Ratio() or .QuickRatio().
|
||||||
|
func (m *SequenceMatcher) RealQuickRatio() float64 {
|
||||||
|
la, lb := len(m.a), len(m.b)
|
||||||
|
return calculateRatio(min(la, lb), la+lb)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert range to the "ed" format
|
||||||
|
func formatRangeUnified(start, stop int) string {
|
||||||
|
// Per the diff spec at http://www.unix.org/single_unix_specification/
|
||||||
|
beginning := start + 1 // lines start numbering with one
|
||||||
|
length := stop - start
|
||||||
|
if length == 1 {
|
||||||
|
return fmt.Sprintf("%d", beginning)
|
||||||
|
}
|
||||||
|
if length == 0 {
|
||||||
|
beginning-- // empty ranges begin at line just before the range
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%d,%d", beginning, length)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unified diff parameters
|
||||||
|
type UnifiedDiff struct {
|
||||||
|
A []string // First sequence lines
|
||||||
|
FromFile string // First file name
|
||||||
|
FromDate string // First file time
|
||||||
|
B []string // Second sequence lines
|
||||||
|
ToFile string // Second file name
|
||||||
|
ToDate string // Second file time
|
||||||
|
Eol string // Headers end of line, defaults to LF
|
||||||
|
Context int // Number of context lines
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compare two sequences of lines; generate the delta as a unified diff.
|
||||||
|
//
|
||||||
|
// Unified diffs are a compact way of showing line changes and a few
|
||||||
|
// lines of context. The number of context lines is set by 'n' which
|
||||||
|
// defaults to three.
|
||||||
|
//
|
||||||
|
// By default, the diff control lines (those with ---, +++, or @@) are
|
||||||
|
// created with a trailing newline. This is helpful so that inputs
|
||||||
|
// created from file.readlines() result in diffs that are suitable for
|
||||||
|
// file.writelines() since both the inputs and outputs have trailing
|
||||||
|
// newlines.
|
||||||
|
//
|
||||||
|
// For inputs that do not have trailing newlines, set the lineterm
|
||||||
|
// argument to "" so that the output will be uniformly newline free.
|
||||||
|
//
|
||||||
|
// The unidiff format normally has a header for filenames and modification
|
||||||
|
// times. Any or all of these may be specified using strings for
|
||||||
|
// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
|
||||||
|
// The modification times are normally expressed in the ISO 8601 format.
|
||||||
|
func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
|
||||||
|
buf := bufio.NewWriter(writer)
|
||||||
|
defer buf.Flush()
|
||||||
|
wf := func(format string, args ...interface{}) error {
|
||||||
|
_, err := buf.WriteString(fmt.Sprintf(format, args...))
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
ws := func(s string) error {
|
||||||
|
_, err := buf.WriteString(s)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(diff.Eol) == 0 {
|
||||||
|
diff.Eol = "\n"
|
||||||
|
}
|
||||||
|
|
||||||
|
started := false
|
||||||
|
m := NewMatcher(diff.A, diff.B)
|
||||||
|
for _, g := range m.GetGroupedOpCodes(diff.Context) {
|
||||||
|
if !started {
|
||||||
|
started = true
|
||||||
|
fromDate := ""
|
||||||
|
if len(diff.FromDate) > 0 {
|
||||||
|
fromDate = "\t" + diff.FromDate
|
||||||
|
}
|
||||||
|
toDate := ""
|
||||||
|
if len(diff.ToDate) > 0 {
|
||||||
|
toDate = "\t" + diff.ToDate
|
||||||
|
}
|
||||||
|
if diff.FromFile != "" || diff.ToFile != "" {
|
||||||
|
err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
first, last := g[0], g[len(g)-1]
|
||||||
|
range1 := formatRangeUnified(first.I1, last.I2)
|
||||||
|
range2 := formatRangeUnified(first.J1, last.J2)
|
||||||
|
if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, c := range g {
|
||||||
|
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
|
||||||
|
if c.Tag == 'e' {
|
||||||
|
for _, line := range diff.A[i1:i2] {
|
||||||
|
if err := ws(" " + line); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if c.Tag == 'r' || c.Tag == 'd' {
|
||||||
|
for _, line := range diff.A[i1:i2] {
|
||||||
|
if err := ws("-" + line); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if c.Tag == 'r' || c.Tag == 'i' {
|
||||||
|
for _, line := range diff.B[j1:j2] {
|
||||||
|
if err := ws("+" + line); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Like WriteUnifiedDiff but returns the diff as a string.
|
||||||
|
func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
|
||||||
|
w := &bytes.Buffer{}
|
||||||
|
err := WriteUnifiedDiff(w, diff)
|
||||||
|
return w.String(), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Split a string on "\n" while preserving them. The output can be used
|
||||||
|
// as input for UnifiedDiff and ContextDiff structures.
|
||||||
|
func SplitLines(s string) []string {
|
||||||
|
lines := strings.SplitAfter(s, "\n")
|
||||||
|
lines[len(lines)-1] += "\n"
|
||||||
|
return lines
|
||||||
|
}
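// Illustrative sketch (not part of the vendored source): putting SplitLines,
// UnifiedDiff and GetUnifiedDiffString together to diff two small documents.

package internal

import (
	"fmt"
	"testing"
)

func TestUnifiedDiffSketch(t *testing.T) {
	diff := UnifiedDiff{
		A:        SplitLines("one\ntwo\nthree\n"),
		B:        SplitLines("one\n2\nthree\n"),
		FromFile: "a.txt",
		ToFile:   "b.txt",
		Context:  1,
	}
	text, err := GetUnifiedDiffString(diff)
	if err != nil {
		t.Fatal(err)
	}
	fmt.Println(text)
	// --- a.txt
	// +++ b.txt
	// @@ -1,3 +1,3 @@
	//  one
	// -two
	// +2
	//  three
}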
vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go (generated, vendored, new file, 32 lines)
@@ -0,0 +1,32 @@
|
||||||
|
// Copyright 2021 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package internal
|
||||||
|
|
||||||
|
import "regexp"
|
||||||
|
|
||||||
|
type GoCollectorRule struct {
|
||||||
|
Matcher *regexp.Regexp
|
||||||
|
Deny bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// GoCollectorOptions should not be used directly by anything except the `collectors` package.
|
||||||
|
// Use it via collectors package instead. See issue
|
||||||
|
// https://github.com/prometheus/client_golang/issues/1030.
|
||||||
|
//
|
||||||
|
// This is internal, so external users can only use it via the `collectors.WithGoCollector*` methods
|
||||||
|
type GoCollectorOptions struct {
|
||||||
|
DisableMemStatsLikeMetrics bool
|
||||||
|
RuntimeMetricSumForHist map[string]string
|
||||||
|
RuntimeMetricRules []GoCollectorRule
|
||||||
|
}
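// Illustrative sketch (not part of the vendored source): RuntimeMetricRules are
// regexp allow/deny rules over runtime/metrics names; the public
// collectors.WithGoCollector* options compile down to values of this shape. The
// metric name below is only an example string being matched.

package internal

import (
	"regexp"
	"testing"
)

func TestGoCollectorRuleSketch(t *testing.T) {
	rule := GoCollectorRule{
		Matcher: regexp.MustCompile(`^/godebug/.*`),
		Deny:    true,
	}
	name := "/godebug/non-default-behavior/execerrdot:events"
	if !rule.Matcher.MatchString(name) || !rule.Deny {
		t.Fatalf("expected %q to be matched by the deny rule", name)
	}
}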
vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go (generated, vendored, new file, 142 lines)
@@ -0,0 +1,142 @@
|
||||||
|
// Copyright 2021 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
//go:build go1.17
|
||||||
|
// +build go1.17
|
||||||
|
|
||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"path"
|
||||||
|
"runtime/metrics"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/prometheus/common/model"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RuntimeMetricsToProm produces a Prometheus metric name from a runtime/metrics
|
||||||
|
// metric description and validates whether the metric is suitable for integration
|
||||||
|
// with Prometheus.
|
||||||
|
//
|
||||||
|
// Returns false if a name could not be produced, or if Prometheus does not understand
|
||||||
|
// the runtime/metrics Kind.
|
||||||
|
//
|
||||||
|
// Note that the main reason a name couldn't be produced is if the runtime/metrics
|
||||||
|
// package exports a name with characters outside the valid Prometheus metric name
|
||||||
|
// character set. This is theoretically possible, but should never happen in practice.
|
||||||
|
// Still, don't rely on it.
|
||||||
|
func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) {
|
||||||
|
namespace := "go"
|
||||||
|
|
||||||
|
comp := strings.SplitN(d.Name, ":", 2)
|
||||||
|
key := comp[0]
|
||||||
|
unit := comp[1]
|
||||||
|
|
||||||
|
// The last path element in the key is the name,
|
||||||
|
// the rest is the subsystem.
|
||||||
|
subsystem := path.Dir(key[1:] /* remove leading / */)
|
||||||
|
name := path.Base(key)
|
||||||
|
|
||||||
|
// subsystem is translated by replacing all / and - with _.
|
||||||
|
subsystem = strings.ReplaceAll(subsystem, "/", "_")
|
||||||
|
subsystem = strings.ReplaceAll(subsystem, "-", "_")
|
||||||
|
|
||||||
|
// unit is translated assuming that the unit contains no
|
||||||
|
// non-ASCII characters.
|
||||||
|
unit = strings.ReplaceAll(unit, "-", "_")
|
||||||
|
unit = strings.ReplaceAll(unit, "*", "_")
|
||||||
|
unit = strings.ReplaceAll(unit, "/", "_per_")
|
||||||
|
|
||||||
|
// name has - replaced with _ and is concatenated with the unit and
|
||||||
|
// other data.
|
||||||
|
name = strings.ReplaceAll(name, "-", "_")
|
||||||
|
name += "_" + unit
|
||||||
|
if d.Cumulative && d.Kind != metrics.KindFloat64Histogram {
|
||||||
|
name += "_total"
|
||||||
|
}
|
||||||
|
|
||||||
|
valid := model.IsValidMetricName(model.LabelValue(namespace + "_" + subsystem + "_" + name))
|
||||||
|
switch d.Kind {
|
||||||
|
case metrics.KindUint64:
|
||||||
|
case metrics.KindFloat64:
|
||||||
|
case metrics.KindFloat64Histogram:
|
||||||
|
default:
|
||||||
|
valid = false
|
||||||
|
}
|
||||||
|
return namespace, subsystem, name, valid
|
||||||
|
}
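// Illustrative sketch (not part of the vendored source): a worked example of the
// translation above. Assuming the running Go version exposes /gc/heap/allocs:bytes
// (cumulative, scalar), it becomes go_gc_heap_allocs_bytes_total.

package internal

import (
	"runtime/metrics"
	"testing"
)

func TestRuntimeMetricsToPromSketch(t *testing.T) {
	for _, d := range metrics.All() {
		if d.Name != "/gc/heap/allocs:bytes" {
			continue
		}
		ns, sub, name, ok := RuntimeMetricsToProm(&d)
		if !ok {
			t.Fatal("expected a valid translation")
		}
		if got := ns + "_" + sub + "_" + name; got != "go_gc_heap_allocs_bytes_total" {
			t.Fatalf("unexpected name: %s", got)
		}
	}
}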
|
||||||
|
|
||||||
|
// RuntimeMetricsBucketsForUnit takes a set of buckets obtained for a runtime/metrics histogram
|
||||||
|
// type (so, lower-bound inclusive) and a unit from a runtime/metrics name, and produces
|
||||||
|
// a reduced set of buckets. This function always removes any -Inf bucket as it's represented
|
||||||
|
// as the bottom-most upper-bound inclusive bucket in Prometheus.
|
||||||
|
func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 {
|
||||||
|
switch unit {
|
||||||
|
case "bytes":
|
||||||
|
// Re-bucket as powers of 2.
|
||||||
|
return reBucketExp(buckets, 2)
|
||||||
|
case "seconds":
|
||||||
|
// Re-bucket as powers of 10 and then merge all buckets greater
|
||||||
|
// than 1 second into the +Inf bucket.
|
||||||
|
b := reBucketExp(buckets, 10)
|
||||||
|
for i := range b {
|
||||||
|
if b[i] <= 1 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
b[i] = math.Inf(1)
|
||||||
|
b = b[:i+1]
|
||||||
|
break
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
return buckets
|
||||||
|
}
|
||||||
|
|
||||||
|
// reBucketExp takes a list of bucket boundaries (lower bound inclusive) and
|
||||||
|
// downsamples the buckets to those a multiple of base apart. The end result
|
||||||
|
// is a roughly exponential (in many cases, perfectly exponential) bucketing
|
||||||
|
// scheme.
|
||||||
|
func reBucketExp(buckets []float64, base float64) []float64 {
|
||||||
|
bucket := buckets[0]
|
||||||
|
var newBuckets []float64
|
||||||
|
// We may see a -Inf here, in which case, add it and skip it
|
||||||
|
// since we risk producing NaNs otherwise.
|
||||||
|
//
|
||||||
|
// We need to preserve -Inf values to maintain runtime/metrics
|
||||||
|
// conventions. We'll strip it out later.
|
||||||
|
if bucket == math.Inf(-1) {
|
||||||
|
newBuckets = append(newBuckets, bucket)
|
||||||
|
buckets = buckets[1:]
|
||||||
|
bucket = buckets[0]
|
||||||
|
}
|
||||||
|
// From now on, bucket should always have a non-Inf value because
|
||||||
|
// Infs are only ever at the ends of the bucket lists, so
|
||||||
|
// arithmetic operations on it are non-NaN.
|
||||||
|
for i := 1; i < len(buckets); i++ {
|
||||||
|
if bucket >= 0 && buckets[i] < bucket*base {
|
||||||
|
// The next bucket we want to include is at least bucket*base.
|
||||||
|
continue
|
||||||
|
} else if bucket < 0 && buckets[i] < bucket/base {
|
||||||
|
// In this case the bucket we're targeting is negative, and since
|
||||||
|
// we're ascending through buckets here, we need to divide to get
|
||||||
|
// closer to zero exponentially.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// The +Inf bucket will always be the last one, and we'll always
|
||||||
|
// end up including it here because +Inf is never less than bucket*base, so the final append keeps it.
|
||||||
|
newBuckets = append(newBuckets, bucket)
|
||||||
|
bucket = buckets[i]
|
||||||
|
}
|
||||||
|
return append(newBuckets, bucket)
|
||||||
|
}
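// Illustrative sketch (not part of the vendored source): downsampling a dense
// bucket list to (roughly) powers of two with reBucketExp.

package internal

import (
	"reflect"
	"testing"
)

func TestReBucketExpSketch(t *testing.T) {
	got := reBucketExp([]float64{1, 2, 3, 4, 5, 6, 7, 8, 16}, 2)
	want := []float64{1, 2, 4, 8, 16}
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("got %v, want %v", got, want)
	}
}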
vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go (generated, vendored, new file, 101 lines)
@@ -0,0 +1,101 @@
|
||||||
|
// Copyright 2018 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sort"
|
||||||
|
|
||||||
|
dto "github.com/prometheus/client_model/go"
|
||||||
|
)
|
||||||
|
|
||||||
|
// LabelPairSorter implements sort.Interface. It is used to sort a slice of
|
||||||
|
// dto.LabelPair pointers.
|
||||||
|
type LabelPairSorter []*dto.LabelPair
|
||||||
|
|
||||||
|
func (s LabelPairSorter) Len() int {
|
||||||
|
return len(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s LabelPairSorter) Swap(i, j int) {
|
||||||
|
s[i], s[j] = s[j], s[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s LabelPairSorter) Less(i, j int) bool {
|
||||||
|
return s[i].GetName() < s[j].GetName()
|
||||||
|
}
|
||||||
|
|
||||||
|
// MetricSorter is a sortable slice of *dto.Metric.
|
||||||
|
type MetricSorter []*dto.Metric
|
||||||
|
|
||||||
|
func (s MetricSorter) Len() int {
|
||||||
|
return len(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s MetricSorter) Swap(i, j int) {
|
||||||
|
s[i], s[j] = s[j], s[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s MetricSorter) Less(i, j int) bool {
|
||||||
|
if len(s[i].Label) != len(s[j].Label) {
|
||||||
|
// This should not happen. The metrics are
|
||||||
|
// inconsistent. However, we have to deal with the fact, as
|
||||||
|
// people might use custom collectors or metric family injection
|
||||||
|
// to create inconsistent metrics. So let's simply compare the
|
||||||
|
// number of labels in this case. That will still yield
|
||||||
|
// reproducible sorting.
|
||||||
|
return len(s[i].Label) < len(s[j].Label)
|
||||||
|
}
|
||||||
|
for n, lp := range s[i].Label {
|
||||||
|
vi := lp.GetValue()
|
||||||
|
vj := s[j].Label[n].GetValue()
|
||||||
|
if vi != vj {
|
||||||
|
return vi < vj
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// We should never arrive here. Multiple metrics with the same
|
||||||
|
// label set in the same scrape will lead to undefined ingestion
|
||||||
|
// behavior. However, as above, we have to provide stable sorting
|
||||||
|
// here, even for inconsistent metrics. So sort equal metrics
|
||||||
|
// by their timestamp, with missing timestamps (implying "now")
|
||||||
|
// coming last.
|
||||||
|
if s[i].TimestampMs == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if s[j].TimestampMs == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return s[i].GetTimestampMs() < s[j].GetTimestampMs()
|
||||||
|
}
|
||||||
|
|
||||||
|
// NormalizeMetricFamilies returns a MetricFamily slice with empty
|
||||||
|
// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
|
||||||
|
// the slice, with the contained Metrics sorted within each MetricFamily.
|
||||||
|
func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
|
||||||
|
for _, mf := range metricFamiliesByName {
|
||||||
|
sort.Sort(MetricSorter(mf.Metric))
|
||||||
|
}
|
||||||
|
names := make([]string, 0, len(metricFamiliesByName))
|
||||||
|
for name, mf := range metricFamiliesByName {
|
||||||
|
if len(mf.Metric) > 0 {
|
||||||
|
names = append(names, name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
sort.Strings(names)
|
||||||
|
result := make([]*dto.MetricFamily, 0, len(names))
|
||||||
|
for _, name := range names {
|
||||||
|
result = append(result, metricFamiliesByName[name])
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
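// Illustrative sketch (not part of the vendored source): empty families are pruned
// and the rest come back sorted by name. The proto.String helper is assumed to come
// from google.golang.org/protobuf/proto.

package internal

import (
	"testing"

	dto "github.com/prometheus/client_model/go"
	"google.golang.org/protobuf/proto"
)

func TestNormalizeSketch(t *testing.T) {
	byName := map[string]*dto.MetricFamily{
		"b_total": {Name: proto.String("b_total"), Metric: []*dto.Metric{{}}},
		"a_total": {Name: proto.String("a_total"), Metric: []*dto.Metric{{}}},
		"empty":   {Name: proto.String("empty")}, // no metrics, so it is dropped
	}
	out := NormalizeMetricFamilies(byName)
	if len(out) != 2 || out[0].GetName() != "a_total" || out[1].GetName() != "b_total" {
		t.Fatalf("unexpected result: %v", out)
	}
}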
vendor/github.com/prometheus/client_golang/prometheus/labels.go (generated, vendored, new file, 186 lines)
@@ -0,0 +1,186 @@
|
||||||
|
// Copyright 2018 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package prometheus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"github.com/prometheus/common/model"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Labels represents a collection of label name -> value mappings. This type is
|
||||||
|
// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
|
||||||
|
// metric vector Collectors, e.g.:
|
||||||
|
//
|
||||||
|
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
|
||||||
|
//
|
||||||
|
// The other use-case is the specification of constant label pairs in Opts or to
|
||||||
|
// create a Desc.
|
||||||
|
type Labels map[string]string
|
||||||
|
|
||||||
|
// LabelConstraint normalizes label values.
|
||||||
|
type LabelConstraint func(string) string
|
||||||
|
|
||||||
|
// ConstrainedLabel represents a label name and its constraint function
|
||||||
|
// to normalize label values. This type is commonly used when constructing
|
||||||
|
// metric vector Collectors.
|
||||||
|
type ConstrainedLabel struct {
|
||||||
|
Name string
|
||||||
|
Constraint LabelConstraint
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConstrainableLabels is an interface that allows creating labels that can
|
||||||
|
// be optionally constrained.
|
||||||
|
//
|
||||||
|
// prometheus.V2().NewCounterVec(CounterVecOpts{
|
||||||
|
// CounterOpts: {...}, // Usual CounterOpts fields
|
||||||
|
// VariableLabels: []ConstrainedLabels{
|
||||||
|
// {Name: "A"},
|
||||||
|
// {Name: "B", Constraint: func(v string) string { ... }},
|
||||||
|
// },
|
||||||
|
// })
|
||||||
|
type ConstrainableLabels interface {
|
||||||
|
compile() *compiledLabels
|
||||||
|
labelNames() []string
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConstrainedLabels represents a collection of label name -> constrain function
|
||||||
|
// to normalize label values. This type is commonly used when constructing
|
||||||
|
// metric vector Collectors.
|
||||||
|
type ConstrainedLabels []ConstrainedLabel
|
||||||
|
|
||||||
|
func (cls ConstrainedLabels) compile() *compiledLabels {
|
||||||
|
compiled := &compiledLabels{
|
||||||
|
names: make([]string, len(cls)),
|
||||||
|
labelConstraints: map[string]LabelConstraint{},
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, label := range cls {
|
||||||
|
compiled.names[i] = label.Name
|
||||||
|
if label.Constraint != nil {
|
||||||
|
compiled.labelConstraints[label.Name] = label.Constraint
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return compiled
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cls ConstrainedLabels) labelNames() []string {
|
||||||
|
names := make([]string, len(cls))
|
||||||
|
for i, label := range cls {
|
||||||
|
names[i] = label.Name
|
||||||
|
}
|
||||||
|
return names
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnconstrainedLabels represents collection of label without any constraint on
|
||||||
|
// their value. Thus, it is simply a collection of label names.
|
||||||
|
//
|
||||||
|
// UnconstrainedLabels([]string{ "A", "B" })
|
||||||
|
//
|
||||||
|
// is equivalent to
|
||||||
|
//
|
||||||
|
// ConstrainedLabels {
|
||||||
|
// { Name: "A" },
|
||||||
|
// { Name: "B" },
|
||||||
|
// }
|
||||||
|
type UnconstrainedLabels []string
|
||||||
|
|
||||||
|
func (uls UnconstrainedLabels) compile() *compiledLabels {
|
||||||
|
return &compiledLabels{
|
||||||
|
names: uls,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (uls UnconstrainedLabels) labelNames() []string {
|
||||||
|
return uls
|
||||||
|
}
|
||||||
|
|
||||||
|
type compiledLabels struct {
|
||||||
|
names []string
|
||||||
|
labelConstraints map[string]LabelConstraint
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cls *compiledLabels) compile() *compiledLabels {
|
||||||
|
return cls
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cls *compiledLabels) labelNames() []string {
|
||||||
|
return cls.names
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cls *compiledLabels) constrain(labelName, value string) string {
|
||||||
|
if fn, ok := cls.labelConstraints[labelName]; ok && fn != nil {
|
||||||
|
return fn(value)
|
||||||
|
}
|
||||||
|
return value
|
||||||
|
}
|
||||||
|
|
||||||
|
// reservedLabelPrefix is a prefix which is not legal in user-supplied
|
||||||
|
// label names.
|
||||||
|
const reservedLabelPrefix = "__"
|
||||||
|
|
||||||
|
var errInconsistentCardinality = errors.New("inconsistent label cardinality")
|
||||||
|
|
||||||
|
func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"%w: %q has %d variable labels named %q but %d values %q were provided",
|
||||||
|
errInconsistentCardinality, fqName,
|
||||||
|
len(labels), labels,
|
||||||
|
len(labelValues), labelValues,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
|
||||||
|
if len(labels) != expectedNumberOfValues {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"%w: expected %d label values but got %d in %#v",
|
||||||
|
errInconsistentCardinality, expectedNumberOfValues,
|
||||||
|
len(labels), labels,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
for name, val := range labels {
|
||||||
|
if !utf8.ValidString(val) {
|
||||||
|
return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func validateLabelValues(vals []string, expectedNumberOfValues int) error {
|
||||||
|
if len(vals) != expectedNumberOfValues {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"%w: expected %d label values but got %d in %#v",
|
||||||
|
errInconsistentCardinality, expectedNumberOfValues,
|
||||||
|
len(vals), vals,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, val := range vals {
|
||||||
|
if !utf8.ValidString(val) {
|
||||||
|
return fmt.Errorf("label value %q is not valid UTF-8", val)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkLabelName(l string) bool {
|
||||||
|
return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix)
|
||||||
|
}
|
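To tie the Labels type and the validation helpers above to everyday use, a small sketch (metric and label names are illustrative only):

httpReqs := prometheus.NewCounterVec(
	prometheus.CounterOpts{Name: "http_requests_total", Help: "Handled HTTP requests."},
	[]string{"code", "method"},
)
prometheus.MustRegister(httpReqs)

// With panics on unknown or missing label names; the cardinality and
// UTF-8 checks above are what ultimately reject bad inputs.
httpReqs.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)

// GetMetricWith reports the same problems as an error instead of panicking.
if c, err := httpReqs.GetMetricWith(prometheus.Labels{"code": "200", "method": "GET"}); err == nil {
	c.Inc()
}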
257 vendor/github.com/prometheus/client_golang/prometheus/metric.go generated vendored Normal file
@ -0,0 +1,257 @@
// Copyright 2014 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package prometheus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"math"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
dto "github.com/prometheus/client_model/go"
|
||||||
|
"github.com/prometheus/common/model"
|
||||||
|
"google.golang.org/protobuf/proto"
|
||||||
|
)
|
||||||
|
|
||||||
|
var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash.
|
||||||
|
|
||||||
|
// A Metric models a single sample value with its meta data being exported to
|
||||||
|
// Prometheus. Implementations of Metric in this package are Gauge, Counter,
|
||||||
|
// Histogram, Summary, and Untyped.
|
||||||
|
type Metric interface {
|
||||||
|
// Desc returns the descriptor for the Metric. This method idempotently
|
||||||
|
// returns the same descriptor throughout the lifetime of the
|
||||||
|
// Metric. The returned descriptor is immutable by contract. A Metric
|
||||||
|
// unable to describe itself must return an invalid descriptor (created
|
||||||
|
// with NewInvalidDesc).
|
||||||
|
Desc() *Desc
|
||||||
|
// Write encodes the Metric into a "Metric" Protocol Buffer data
|
||||||
|
// transmission object.
|
||||||
|
//
|
||||||
|
// Metric implementations must observe concurrency safety as reads of
|
||||||
|
// this metric may occur at any time, and any blocking occurs at the
|
||||||
|
// expense of total performance of rendering all registered
|
||||||
|
// metrics. Ideally, Metric implementations should support concurrent
|
||||||
|
// readers.
|
||||||
|
//
|
||||||
|
// While populating dto.Metric, it is the responsibility of the
|
||||||
|
// implementation to ensure validity of the Metric protobuf (like valid
|
||||||
|
// UTF-8 strings or syntactically valid metric and label names). It is
|
||||||
|
// recommended to sort labels lexicographically. Callers of Write should
|
||||||
|
// still make sure of sorting if they depend on it.
|
||||||
|
Write(*dto.Metric) error
|
||||||
|
// TODO(beorn7): The original rationale of passing in a pre-allocated
|
||||||
|
// dto.Metric protobuf to save allocations has disappeared. The
|
||||||
|
// signature of this method should be changed to "Write() (*dto.Metric,
|
||||||
|
// error)".
|
||||||
|
}
|
||||||
|
|
||||||
|
// Opts bundles the options for creating most Metric types. Each metric
|
||||||
|
// implementation XXX has its own XXXOpts type, but in most cases, it is just
|
||||||
|
// an alias of this type (which might change when the requirement arises.)
|
||||||
|
//
|
||||||
|
// It is mandatory to set Name to a non-empty string. All other fields are
|
||||||
|
// optional and can safely be left at their zero value, although it is strongly
|
||||||
|
// encouraged to set a Help string.
|
||||||
|
type Opts struct {
|
||||||
|
// Namespace, Subsystem, and Name are components of the fully-qualified
|
||||||
|
// name of the Metric (created by joining these components with
|
||||||
|
// "_"). Only Name is mandatory, the others merely help structuring the
|
||||||
|
// name. Note that the fully-qualified name of the metric must be a
|
||||||
|
// valid Prometheus metric name.
|
||||||
|
Namespace string
|
||||||
|
Subsystem string
|
||||||
|
Name string
|
||||||
|
|
||||||
|
// Help provides information about this metric.
|
||||||
|
//
|
||||||
|
// Metrics with the same fully-qualified name must have the same Help
|
||||||
|
// string.
|
||||||
|
Help string
|
||||||
|
|
||||||
|
// ConstLabels are used to attach fixed labels to this metric. Metrics
|
||||||
|
// with the same fully-qualified name must have the same label names in
|
||||||
|
// their ConstLabels.
|
||||||
|
//
|
||||||
|
// ConstLabels are only used rarely. In particular, do not use them to
|
||||||
|
// attach the same labels to all your metrics. Those use cases are
|
||||||
|
// better covered by target labels set by the scraping Prometheus
|
||||||
|
// server, or by one specific metric (e.g. a build_info or a
|
||||||
|
// machine_role metric). See also
|
||||||
|
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
|
||||||
|
ConstLabels Labels
|
||||||
|
|
||||||
|
// now is for testing purposes, by default it's time.Now.
|
||||||
|
now func() time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// BuildFQName joins the given three name components by "_". Empty name
|
||||||
|
// components are ignored. If the name parameter itself is empty, an empty
|
||||||
|
// string is returned, no matter what. Metric implementations included in this
|
||||||
|
// library use this function internally to generate the fully-qualified metric
|
||||||
|
// name from the name component in their Opts. Users of the library will only
|
||||||
|
// need this function if they implement their own Metric or instantiate a Desc
|
||||||
|
// (with NewDesc) directly.
|
||||||
|
func BuildFQName(namespace, subsystem, name string) string {
|
||||||
|
if name == "" {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
switch {
|
||||||
|
case namespace != "" && subsystem != "":
|
||||||
|
return strings.Join([]string{namespace, subsystem, name}, "_")
|
||||||
|
case namespace != "":
|
||||||
|
return strings.Join([]string{namespace, name}, "_")
|
||||||
|
case subsystem != "":
|
||||||
|
return strings.Join([]string{subsystem, name}, "_")
|
||||||
|
}
|
||||||
|
return name
|
||||||
|
}
|
||||||
|
|
||||||
|
type invalidMetric struct {
|
||||||
|
desc *Desc
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewInvalidMetric returns a metric whose Write method always returns the
|
||||||
|
// provided error. It is useful if a Collector finds itself unable to collect
|
||||||
|
// a metric and wishes to report an error to the registry.
|
||||||
|
func NewInvalidMetric(desc *Desc, err error) Metric {
|
||||||
|
return &invalidMetric{desc, err}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *invalidMetric) Desc() *Desc { return m.desc }
|
||||||
|
|
||||||
|
func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
|
||||||
|
|
||||||
|
type timestampedMetric struct {
|
||||||
|
Metric
|
||||||
|
t time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m timestampedMetric) Write(pb *dto.Metric) error {
|
||||||
|
e := m.Metric.Write(pb)
|
||||||
|
pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000))
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a
|
||||||
|
// way that it has an explicit timestamp set to the provided Time. This is only
|
||||||
|
// useful in rare cases as the timestamp of a Prometheus metric should usually
|
||||||
|
// be set by the Prometheus server during scraping. Exceptions include mirroring
|
||||||
|
// metrics with given timestamps from other metric
|
||||||
|
// sources.
|
||||||
|
//
|
||||||
|
// NewMetricWithTimestamp works best with MustNewConstMetric,
|
||||||
|
// MustNewConstHistogram, and MustNewConstSummary, see example.
|
||||||
|
//
|
||||||
|
// Currently, the exposition formats used by Prometheus are limited to
|
||||||
|
// millisecond resolution. Thus, the provided time will be rounded down to the
|
||||||
|
// next full millisecond value.
|
||||||
|
func NewMetricWithTimestamp(t time.Time, m Metric) Metric {
|
||||||
|
return timestampedMetric{Metric: m, t: t}
|
||||||
|
}
|
||||||
|
|
||||||
|
type withExemplarsMetric struct {
|
||||||
|
Metric
|
||||||
|
|
||||||
|
exemplars []*dto.Exemplar
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *withExemplarsMetric) Write(pb *dto.Metric) error {
|
||||||
|
if err := m.Metric.Write(pb); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case pb.Counter != nil:
|
||||||
|
pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1]
|
||||||
|
case pb.Histogram != nil:
|
||||||
|
for _, e := range m.exemplars {
|
||||||
|
// pb.Histogram.Bucket are sorted by UpperBound.
|
||||||
|
i := sort.Search(len(pb.Histogram.Bucket), func(i int) bool {
|
||||||
|
return pb.Histogram.Bucket[i].GetUpperBound() >= e.GetValue()
|
||||||
|
})
|
||||||
|
if i < len(pb.Histogram.Bucket) {
|
||||||
|
pb.Histogram.Bucket[i].Exemplar = e
|
||||||
|
} else {
|
||||||
|
// The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365.
|
||||||
|
b := &dto.Bucket{
|
||||||
|
CumulativeCount: proto.Uint64(pb.Histogram.GetSampleCount()),
|
||||||
|
UpperBound: proto.Float64(math.Inf(1)),
|
||||||
|
Exemplar: e,
|
||||||
|
}
|
||||||
|
pb.Histogram.Bucket = append(pb.Histogram.Bucket, b)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
// TODO(bwplotka): Implement Gauge?
|
||||||
|
return errors.New("cannot inject exemplar into Gauge, Summary or Untyped")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exemplar is easier to use, user-facing representation of *dto.Exemplar.
|
||||||
|
type Exemplar struct {
|
||||||
|
Value float64
|
||||||
|
Labels Labels
|
||||||
|
// Optional.
|
||||||
|
// Default value (time.Time{}) indicates its empty, which should be
|
||||||
|
// understood as time.Now() time at the moment of creation of metric.
|
||||||
|
Timestamp time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMetricWithExemplars returns a new Metric wrapping the provided Metric with given
|
||||||
|
// exemplars. Exemplars are validated.
|
||||||
|
//
|
||||||
|
// Only last applicable exemplar is injected from the list.
|
||||||
|
// For example for Counter it means last exemplar is injected.
|
||||||
|
// For Histogram, it means last applicable exemplar for each bucket is injected.
|
||||||
|
//
|
||||||
|
// NewMetricWithExemplars works best with MustNewConstMetric and
|
||||||
|
// MustNewConstHistogram, see example.
|
||||||
|
func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) {
|
||||||
|
if len(exemplars) == 0 {
|
||||||
|
return nil, errors.New("no exemplar was passed for NewMetricWithExemplars")
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
now = time.Now()
|
||||||
|
exs = make([]*dto.Exemplar, len(exemplars))
|
||||||
|
err error
|
||||||
|
)
|
||||||
|
for i, e := range exemplars {
|
||||||
|
ts := e.Timestamp
|
||||||
|
if ts == (time.Time{}) {
|
||||||
|
ts = now
|
||||||
|
}
|
||||||
|
exs[i], err = newExemplar(e.Value, ts, e.Labels)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &withExemplarsMetric{Metric: m, exemplars: exs}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustNewMetricWithExemplars is a version of NewMetricWithExemplars that panics where
|
||||||
|
// NewMetricWithExemplars would have returned an error.
|
||||||
|
func MustNewMetricWithExemplars(m Metric, exemplars ...Exemplar) Metric {
|
||||||
|
ret, err := NewMetricWithExemplars(m, exemplars...)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
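As a rough illustration of how BuildFQName, MustNewConstMetric and NewMetricWithTimestamp combine when mirroring values from another system, a sketch (the collector below is hypothetical, not part of this diff):

type mirrorCollector struct {
	desc *prometheus.Desc
}

func newMirrorCollector() *mirrorCollector {
	return &mirrorCollector{
		desc: prometheus.NewDesc(
			prometheus.BuildFQName("mirror", "source", "value"), // "mirror_source_value"
			"Value copied from an upstream system.",
			nil, nil,
		),
	}
}

func (c *mirrorCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.desc }

func (c *mirrorCollector) Collect(ch chan<- prometheus.Metric) {
	// value and ts would come from the mirrored source; constants stand in here.
	value, ts := 42.0, time.Now().Add(-time.Minute)
	m := prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, value)
	ch <- prometheus.NewMetricWithTimestamp(ts, m)
}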
25 vendor/github.com/prometheus/client_golang/prometheus/num_threads.go generated vendored Normal file
@ -0,0 +1,25 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !js || wasm
// +build !js wasm

package prometheus

import "runtime"

// getRuntimeNumThreads returns the number of open OS threads.
func getRuntimeNumThreads() float64 {
	n, _ := runtime.ThreadCreateProfile(nil)
	return float64(n)
}
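getRuntimeNumThreads is unexported and consumed elsewhere in the library (e.g. for the go_threads gauge). If an application wanted the same number under its own name, a GaugeFunc is the idiomatic route; a sketch with an assumed metric name:

threads := prometheus.NewGaugeFunc(
	prometheus.GaugeOpts{
		Name: "myapp_os_threads",
		Help: "OS threads created, as reported by runtime.ThreadCreateProfile.",
	},
	func() float64 {
		// Same expression as getRuntimeNumThreads above.
		n, _ := runtime.ThreadCreateProfile(nil)
		return float64(n)
	},
)
prometheus.MustRegister(threads)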
22 vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go generated vendored Normal file
@ -0,0 +1,22 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build js && !wasm
// +build js,!wasm

package prometheus

// getRuntimeNumThreads returns the number of open OS threads.
func getRuntimeNumThreads() float64 {
	return 1
}
64 vendor/github.com/prometheus/client_golang/prometheus/observer.go generated vendored Normal file
@ -0,0 +1,64 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prometheus

// Observer is the interface that wraps the Observe method, which is used by
// Histogram and Summary to add observations.
type Observer interface {
	Observe(float64)
}

// The ObserverFunc type is an adapter to allow the use of ordinary
// functions as Observers. If f is a function with the appropriate
// signature, ObserverFunc(f) is an Observer that calls f.
//
// This adapter is usually used in connection with the Timer type, and there are
// two general use cases:
//
// The most common one is to use a Gauge as the Observer for a Timer.
// See the "Gauge" Timer example.
//
// The more advanced use case is to create a function that dynamically decides
// which Observer to use for observing the duration. See the "Complex" Timer
// example.
type ObserverFunc func(float64)

// Observe calls f(value). It implements Observer.
func (f ObserverFunc) Observe(value float64) {
	f(value)
}

// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`.
type ObserverVec interface {
	GetMetricWith(Labels) (Observer, error)
	GetMetricWithLabelValues(lvs ...string) (Observer, error)
	With(Labels) Observer
	WithLabelValues(...string) Observer
	CurryWith(Labels) (ObserverVec, error)
	MustCurryWith(Labels) ObserverVec

	Collector
}

// ExemplarObserver is implemented by Observers that offer the option of
// observing a value together with an exemplar. Its ObserveWithExemplar method
// works like the Observe method of an Observer but also replaces the currently
// saved exemplar (if any) with a new one, created from the provided value, the
// current time as timestamp, and the provided Labels. Empty Labels will lead to
// a valid (label-less) exemplar. But if Labels is nil, the current exemplar is
// left in place. ObserveWithExemplar panics if any of the provided labels are
// invalid or if the provided labels contain more than 128 runes in total.
type ExemplarObserver interface {
	ObserveWithExemplar(value float64, exemplar Labels)
}
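The "Gauge" Timer pattern referenced in the ObserverFunc comment looks roughly like this (the gauge name is illustrative):

var lastRun = prometheus.NewGauge(prometheus.GaugeOpts{
	Name: "myapp_last_batch_duration_seconds",
	Help: "Duration of the most recent batch run.",
})

func init() {
	prometheus.MustRegister(lastRun)
}

func runBatch() {
	// ObserverFunc adapts Gauge.Set, so NewTimer records into a gauge
	// rather than a histogram or summary.
	timer := prometheus.NewTimer(prometheus.ObserverFunc(lastRun.Set))
	defer timer.ObserveDuration()

	// ... batch work ...
}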
164 vendor/github.com/prometheus/client_golang/prometheus/process_collector.go generated vendored Normal file
@ -0,0 +1,164 @@
// Copyright 2015 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package prometheus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
type processCollector struct {
|
||||||
|
collectFn func(chan<- Metric)
|
||||||
|
pidFn func() (int, error)
|
||||||
|
reportErrors bool
|
||||||
|
cpuTotal *Desc
|
||||||
|
openFDs, maxFDs *Desc
|
||||||
|
vsize, maxVsize *Desc
|
||||||
|
rss *Desc
|
||||||
|
startTime *Desc
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProcessCollectorOpts defines the behavior of a process metrics collector
|
||||||
|
// created with NewProcessCollector.
|
||||||
|
type ProcessCollectorOpts struct {
|
||||||
|
// PidFn returns the PID of the process the collector collects metrics
|
||||||
|
// for. It is called upon each collection. By default, the PID of the
|
||||||
|
// current process is used, as determined on construction time by
|
||||||
|
// calling os.Getpid().
|
||||||
|
PidFn func() (int, error)
|
||||||
|
// If non-empty, each of the collected metrics is prefixed by the
|
||||||
|
// provided string and an underscore ("_").
|
||||||
|
Namespace string
|
||||||
|
// If true, any error encountered during collection is reported as an
|
||||||
|
// invalid metric (see NewInvalidMetric). Otherwise, errors are ignored
|
||||||
|
// and the collected metrics will be incomplete. (Possibly, no metrics
|
||||||
|
// will be collected at all.) While that's usually not desired, it is
|
||||||
|
// appropriate for the common "mix-in" of process metrics, where process
|
||||||
|
// metrics are nice to have, but failing to collect them should not
|
||||||
|
// disrupt the collection of the remaining metrics.
|
||||||
|
ReportErrors bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewProcessCollector is the obsolete version of collectors.NewProcessCollector.
|
||||||
|
// See there for documentation.
|
||||||
|
//
|
||||||
|
// Deprecated: Use collectors.NewProcessCollector instead.
|
||||||
|
func NewProcessCollector(opts ProcessCollectorOpts) Collector {
|
||||||
|
ns := ""
|
||||||
|
if len(opts.Namespace) > 0 {
|
||||||
|
ns = opts.Namespace + "_"
|
||||||
|
}
|
||||||
|
|
||||||
|
c := &processCollector{
|
||||||
|
reportErrors: opts.ReportErrors,
|
||||||
|
cpuTotal: NewDesc(
|
||||||
|
ns+"process_cpu_seconds_total",
|
||||||
|
"Total user and system CPU time spent in seconds.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
openFDs: NewDesc(
|
||||||
|
ns+"process_open_fds",
|
||||||
|
"Number of open file descriptors.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
maxFDs: NewDesc(
|
||||||
|
ns+"process_max_fds",
|
||||||
|
"Maximum number of open file descriptors.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
vsize: NewDesc(
|
||||||
|
ns+"process_virtual_memory_bytes",
|
||||||
|
"Virtual memory size in bytes.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
maxVsize: NewDesc(
|
||||||
|
ns+"process_virtual_memory_max_bytes",
|
||||||
|
"Maximum amount of virtual memory available in bytes.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
rss: NewDesc(
|
||||||
|
ns+"process_resident_memory_bytes",
|
||||||
|
"Resident memory size in bytes.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
startTime: NewDesc(
|
||||||
|
ns+"process_start_time_seconds",
|
||||||
|
"Start time of the process since unix epoch in seconds.",
|
||||||
|
nil, nil,
|
||||||
|
),
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.PidFn == nil {
|
||||||
|
c.pidFn = getPIDFn()
|
||||||
|
} else {
|
||||||
|
c.pidFn = opts.PidFn
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set up process metric collection if supported by the runtime.
|
||||||
|
if canCollectProcess() {
|
||||||
|
c.collectFn = c.processCollect
|
||||||
|
} else {
|
||||||
|
c.collectFn = func(ch chan<- Metric) {
|
||||||
|
c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// Describe returns all descriptions of the collector.
|
||||||
|
func (c *processCollector) Describe(ch chan<- *Desc) {
|
||||||
|
ch <- c.cpuTotal
|
||||||
|
ch <- c.openFDs
|
||||||
|
ch <- c.maxFDs
|
||||||
|
ch <- c.vsize
|
||||||
|
ch <- c.maxVsize
|
||||||
|
ch <- c.rss
|
||||||
|
ch <- c.startTime
|
||||||
|
}
|
||||||
|
|
||||||
|
// Collect returns the current state of all metrics of the collector.
|
||||||
|
func (c *processCollector) Collect(ch chan<- Metric) {
|
||||||
|
c.collectFn(ch)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
|
||||||
|
if !c.reportErrors {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if desc == nil {
|
||||||
|
desc = NewInvalidDesc(err)
|
||||||
|
}
|
||||||
|
ch <- NewInvalidMetric(desc, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewPidFileFn returns a function that retrieves a pid from the specified file.
|
||||||
|
// It is meant to be used for the PidFn field in ProcessCollectorOpts.
|
||||||
|
func NewPidFileFn(pidFilePath string) func() (int, error) {
|
||||||
|
return func() (int, error) {
|
||||||
|
content, err := os.ReadFile(pidFilePath)
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("can't read pid file %q: %w", pidFilePath, err)
|
||||||
|
}
|
||||||
|
pid, err := strconv.Atoi(strings.TrimSpace(string(content)))
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("can't parse pid file %q: %w", pidFilePath, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return pid, nil
|
||||||
|
}
|
||||||
|
}
|
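Since NewProcessCollector here is marked deprecated, new code would normally go through the collectors package instead; a sketch of wiring it to a custom registry, with a hypothetical pid file path (assumes the github.com/prometheus/client_golang/prometheus/collectors import):

reg := prometheus.NewRegistry()
reg.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{
	// NewPidFileFn (defined above) reads the PID on every scrape.
	PidFn:        prometheus.NewPidFileFn("/run/myapp.pid"), // hypothetical path
	ReportErrors: true,
}))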
26 vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go generated vendored Normal file
@ -0,0 +1,26 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build js
// +build js

package prometheus

func canCollectProcess() bool {
	return false
}

func (c *processCollector) processCollect(ch chan<- Metric) {
	// noop on this platform
	return
}
66 vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go generated vendored Normal file
@ -0,0 +1,66 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !windows && !js
// +build !windows,!js

package prometheus

import (
	"github.com/prometheus/procfs"
)

func canCollectProcess() bool {
	_, err := procfs.NewDefaultFS()
	return err == nil
}

func (c *processCollector) processCollect(ch chan<- Metric) {
	pid, err := c.pidFn()
	if err != nil {
		c.reportError(ch, nil, err)
		return
	}

	p, err := procfs.NewProc(pid)
	if err != nil {
		c.reportError(ch, nil, err)
		return
	}

	if stat, err := p.Stat(); err == nil {
		ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
		ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
		ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
		if startTime, err := stat.StartTime(); err == nil {
			ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
		} else {
			c.reportError(ch, c.startTime, err)
		}
	} else {
		c.reportError(ch, nil, err)
	}

	if fds, err := p.FileDescriptorsLen(); err == nil {
		ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
	} else {
		c.reportError(ch, c.openFDs, err)
	}

	if limits, err := p.Limits(); err == nil {
		ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
		ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
	} else {
		c.reportError(ch, nil, err)
	}
}
116 vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go generated vendored Normal file
@ -0,0 +1,116 @@
// Copyright 2019 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package prometheus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"golang.org/x/sys/windows"
|
||||||
|
)
|
||||||
|
|
||||||
|
func canCollectProcess() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
modpsapi = syscall.NewLazyDLL("psapi.dll")
|
||||||
|
modkernel32 = syscall.NewLazyDLL("kernel32.dll")
|
||||||
|
|
||||||
|
procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo")
|
||||||
|
procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount")
|
||||||
|
)
|
||||||
|
|
||||||
|
type processMemoryCounters struct {
|
||||||
|
// System interface description
|
||||||
|
// https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-process_memory_counters_ex
|
||||||
|
|
||||||
|
// Refer to the Golang internal implementation
|
||||||
|
// https://golang.org/src/internal/syscall/windows/psapi_windows.go
|
||||||
|
_ uint32
|
||||||
|
PageFaultCount uint32
|
||||||
|
PeakWorkingSetSize uintptr
|
||||||
|
WorkingSetSize uintptr
|
||||||
|
QuotaPeakPagedPoolUsage uintptr
|
||||||
|
QuotaPagedPoolUsage uintptr
|
||||||
|
QuotaPeakNonPagedPoolUsage uintptr
|
||||||
|
QuotaNonPagedPoolUsage uintptr
|
||||||
|
PagefileUsage uintptr
|
||||||
|
PeakPagefileUsage uintptr
|
||||||
|
PrivateUsage uintptr
|
||||||
|
}
|
||||||
|
|
||||||
|
func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) {
|
||||||
|
mem := processMemoryCounters{}
|
||||||
|
r1, _, err := procGetProcessMemoryInfo.Call(
|
||||||
|
uintptr(handle),
|
||||||
|
uintptr(unsafe.Pointer(&mem)),
|
||||||
|
uintptr(unsafe.Sizeof(mem)),
|
||||||
|
)
|
||||||
|
if r1 != 1 {
|
||||||
|
return mem, err
|
||||||
|
} else {
|
||||||
|
return mem, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func getProcessHandleCount(handle windows.Handle) (uint32, error) {
|
||||||
|
var count uint32
|
||||||
|
r1, _, err := procGetProcessHandleCount.Call(
|
||||||
|
uintptr(handle),
|
||||||
|
uintptr(unsafe.Pointer(&count)),
|
||||||
|
)
|
||||||
|
if r1 != 1 {
|
||||||
|
return 0, err
|
||||||
|
} else {
|
||||||
|
return count, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *processCollector) processCollect(ch chan<- Metric) {
|
||||||
|
h, err := windows.GetCurrentProcess()
|
||||||
|
if err != nil {
|
||||||
|
c.reportError(ch, nil, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var startTime, exitTime, kernelTime, userTime windows.Filetime
|
||||||
|
err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime)
|
||||||
|
if err != nil {
|
||||||
|
c.reportError(ch, nil, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9))
|
||||||
|
ch <- MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime))
|
||||||
|
|
||||||
|
mem, err := getProcessMemoryInfo(h)
|
||||||
|
if err != nil {
|
||||||
|
c.reportError(ch, nil, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage))
|
||||||
|
ch <- MustNewConstMetric(c.rss, GaugeValue, float64(mem.WorkingSetSize))
|
||||||
|
|
||||||
|
handles, err := getProcessHandleCount(h)
|
||||||
|
if err != nil {
|
||||||
|
c.reportError(ch, nil, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles))
|
||||||
|
ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process.
|
||||||
|
}
|
||||||
|
|
||||||
|
func fileTimeToSeconds(ft windows.Filetime) float64 {
|
||||||
|
return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7
|
||||||
|
}
|
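For the conversion at the end: a Windows FILETIME counts 100-nanosecond ticks split across two 32-bit halves, which is why fileTimeToSeconds computes (high<<32 + low) / 1e7. A quick worked example, inlining the same expression (only builds on Windows because of the golang.org/x/sys/windows dependency):

ft := windows.Filetime{HighDateTime: 0, LowDateTime: 10_000_000}
// 10,000,000 ticks × 100 ns = exactly one second.
seconds := float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7
fmt.Println(seconds) // 1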
374 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go generated vendored Normal file
@ -0,0 +1,374 @@
// Copyright 2017 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package promhttp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
closeNotifier = 1 << iota
|
||||||
|
flusher
|
||||||
|
hijacker
|
||||||
|
readerFrom
|
||||||
|
pusher
|
||||||
|
)
|
||||||
|
|
||||||
|
type delegator interface {
|
||||||
|
http.ResponseWriter
|
||||||
|
|
||||||
|
Status() int
|
||||||
|
Written() int64
|
||||||
|
}
|
||||||
|
|
||||||
|
type responseWriterDelegator struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
|
||||||
|
status int
|
||||||
|
written int64
|
||||||
|
wroteHeader bool
|
||||||
|
observeWriteHeader func(int)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *responseWriterDelegator) Status() int {
|
||||||
|
return r.status
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *responseWriterDelegator) Written() int64 {
|
||||||
|
return r.written
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *responseWriterDelegator) WriteHeader(code int) {
|
||||||
|
if r.observeWriteHeader != nil && !r.wroteHeader {
|
||||||
|
// Only call observeWriteHeader for the 1st time. It's a bug if
|
||||||
|
// WriteHeader is called more than once, but we want to protect
|
||||||
|
// against it here. Note that we still delegate the WriteHeader
|
||||||
|
// to the original ResponseWriter to not mask the bug from it.
|
||||||
|
r.observeWriteHeader(code)
|
||||||
|
}
|
||||||
|
r.status = code
|
||||||
|
r.wroteHeader = true
|
||||||
|
r.ResponseWriter.WriteHeader(code)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *responseWriterDelegator) Write(b []byte) (int, error) {
|
||||||
|
// If applicable, call WriteHeader here so that observeWriteHeader is
|
||||||
|
// handled appropriately.
|
||||||
|
if !r.wroteHeader {
|
||||||
|
r.WriteHeader(http.StatusOK)
|
||||||
|
}
|
||||||
|
n, err := r.ResponseWriter.Write(b)
|
||||||
|
r.written += int64(n)
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
|
||||||
|
type (
|
||||||
|
closeNotifierDelegator struct{ *responseWriterDelegator }
|
||||||
|
flusherDelegator struct{ *responseWriterDelegator }
|
||||||
|
hijackerDelegator struct{ *responseWriterDelegator }
|
||||||
|
readerFromDelegator struct{ *responseWriterDelegator }
|
||||||
|
pusherDelegator struct{ *responseWriterDelegator }
|
||||||
|
)
|
||||||
|
|
||||||
|
func (d closeNotifierDelegator) CloseNotify() <-chan bool {
|
||||||
|
//nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users.
|
||||||
|
return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d flusherDelegator) Flush() {
|
||||||
|
// If applicable, call WriteHeader here so that observeWriteHeader is
|
||||||
|
// handled appropriately.
|
||||||
|
if !d.wroteHeader {
|
||||||
|
d.WriteHeader(http.StatusOK)
|
||||||
|
}
|
||||||
|
d.ResponseWriter.(http.Flusher).Flush()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
|
||||||
|
return d.ResponseWriter.(http.Hijacker).Hijack()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
|
||||||
|
// If applicable, call WriteHeader here so that observeWriteHeader is
|
||||||
|
// handled appropriately.
|
||||||
|
if !d.wroteHeader {
|
||||||
|
d.WriteHeader(http.StatusOK)
|
||||||
|
}
|
||||||
|
n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re)
|
||||||
|
d.written += n
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
|
||||||
|
return d.ResponseWriter.(http.Pusher).Push(target, opts)
|
||||||
|
}
|
||||||
|
|
||||||
|
var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// TODO(beorn7): Code generation would help here.
|
||||||
|
pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1
|
||||||
|
return closeNotifierDelegator{d}
|
||||||
|
}
|
||||||
|
pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2
|
||||||
|
return flusherDelegator{d}
|
||||||
|
}
|
||||||
|
pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Flusher
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4
|
||||||
|
return hijackerDelegator{d}
|
||||||
|
}
|
||||||
|
pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Hijacker
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, hijackerDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Hijacker
|
||||||
|
http.Flusher
|
||||||
|
}{d, hijackerDelegator{d}, flusherDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Hijacker
|
||||||
|
http.Flusher
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8
|
||||||
|
return readerFromDelegator{d}
|
||||||
|
}
|
||||||
|
pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
io.ReaderFrom
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, readerFromDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
io.ReaderFrom
|
||||||
|
http.Flusher
|
||||||
|
}{d, readerFromDelegator{d}, flusherDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
io.ReaderFrom
|
||||||
|
http.Flusher
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
io.ReaderFrom
|
||||||
|
http.Hijacker
|
||||||
|
}{d, readerFromDelegator{d}, hijackerDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
io.ReaderFrom
|
||||||
|
http.Hijacker
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
io.ReaderFrom
|
||||||
|
http.Hijacker
|
||||||
|
http.Flusher
|
||||||
|
}{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
io.ReaderFrom
|
||||||
|
http.Hijacker
|
||||||
|
http.Flusher
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
|
||||||
|
return pusherDelegator{d}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, pusherDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
http.Flusher
|
||||||
|
}{d, pusherDelegator{d}, flusherDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
http.Flusher
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
http.Hijacker
|
||||||
|
}{d, pusherDelegator{d}, hijackerDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
http.Hijacker
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
http.Hijacker
|
||||||
|
http.Flusher
|
||||||
|
}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 23
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
http.Hijacker
|
||||||
|
http.Flusher
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
io.ReaderFrom
|
||||||
|
}{d, pusherDelegator{d}, readerFromDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
io.ReaderFrom
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
io.ReaderFrom
|
||||||
|
http.Flusher
|
||||||
|
}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
io.ReaderFrom
|
||||||
|
http.Flusher
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
io.ReaderFrom
|
||||||
|
http.Hijacker
|
||||||
|
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
io.ReaderFrom
|
||||||
|
http.Hijacker
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
io.ReaderFrom
|
||||||
|
http.Hijacker
|
||||||
|
http.Flusher
|
||||||
|
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
io.ReaderFrom
|
||||||
|
http.Hijacker
|
||||||
|
http.Flusher
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
|
||||||
|
d := &responseWriterDelegator{
|
||||||
|
ResponseWriter: w,
|
||||||
|
observeWriteHeader: observeWriteHeaderFunc,
|
||||||
|
}
|
||||||
|
|
||||||
|
id := 0
|
||||||
|
//nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users.
|
||||||
|
if _, ok := w.(http.CloseNotifier); ok {
|
||||||
|
id += closeNotifier
|
||||||
|
}
|
||||||
|
if _, ok := w.(http.Flusher); ok {
|
||||||
|
id += flusher
|
||||||
|
}
|
||||||
|
if _, ok := w.(http.Hijacker); ok {
|
||||||
|
id += hijacker
|
||||||
|
}
|
||||||
|
if _, ok := w.(io.ReaderFrom); ok {
|
||||||
|
id += readerFrom
|
||||||
|
}
|
||||||
|
if _, ok := w.(http.Pusher); ok {
|
||||||
|
id += pusher
|
||||||
|
}
|
||||||
|
|
||||||
|
return pickDelegator[id](d)
|
||||||
|
}
|
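The delegator above is what lets promhttp's middleware observe status codes and byte counts without hiding optional interfaces such as http.Flusher or http.Hijacker from the wrapped handler. A sketch of the typical wiring (handler path and metric name are illustrative):

reqDur := prometheus.NewHistogramVec(prometheus.HistogramOpts{
	Name:    "http_request_duration_seconds",
	Help:    "Latency of handled requests.",
	Buckets: prometheus.DefBuckets,
}, []string{"code", "method"})

reg := prometheus.NewRegistry()
reg.MustRegister(reqDur)

mux := http.NewServeMux()
mux.Handle("/hello", promhttp.InstrumentHandlerDuration(reqDur,
	http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	}),
))
mux.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))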
408 vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go generated vendored Normal file
@ -0,0 +1,408 @@
// Copyright 2016 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package promhttp provides tooling around HTTP servers and clients.
|
||||||
|
//
|
||||||
|
// First, the package allows the creation of http.Handler instances to expose
|
||||||
|
// Prometheus metrics via HTTP. promhttp.Handler acts on the
|
||||||
|
// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a
|
||||||
|
// custom registry or anything that implements the Gatherer interface. It also
|
||||||
|
// allows the creation of handlers that act differently on errors or allow to
|
||||||
|
// log errors.
|
||||||
|
//
|
||||||
|
// Second, the package provides tooling to instrument instances of http.Handler
|
||||||
|
// via middleware. Middleware wrappers follow the naming scheme
|
||||||
|
// InstrumentHandlerX, where X describes the intended use of the middleware.
|
||||||
|
// See each function's doc comment for specific details.
|
||||||
|
//
|
||||||
|
// Finally, the package allows for an http.RoundTripper to be instrumented via
|
||||||
|
// middleware. Middleware wrappers follow the naming scheme
|
||||||
|
// InstrumentRoundTripperX, where X describes the intended use of the
|
||||||
|
// middleware. See each function's doc comment for specific details.
|
||||||
|
package promhttp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"compress/gzip"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/prometheus/common/expfmt"
|
||||||
|
|
||||||
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
contentTypeHeader = "Content-Type"
|
||||||
|
contentEncodingHeader = "Content-Encoding"
|
||||||
|
acceptEncodingHeader = "Accept-Encoding"
|
||||||
|
processStartTimeHeader = "Process-Start-Time-Unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
var gzipPool = sync.Pool{
|
||||||
|
New: func() interface{} {
|
||||||
|
return gzip.NewWriter(nil)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handler returns an http.Handler for the prometheus.DefaultGatherer, using
|
||||||
|
// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has
|
||||||
|
// no error logging, and it applies compression if requested by the client.
|
||||||
|
//
|
||||||
|
// The returned http.Handler is already instrumented using the
|
||||||
|
// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you
|
||||||
|
// create multiple http.Handlers by separate calls of the Handler function, the
|
||||||
|
// metrics used for instrumentation will be shared between them, providing
|
||||||
|
// global scrape counts.
|
||||||
|
//
|
||||||
|
// This function is meant to cover the bulk of basic use cases. If you are doing
|
||||||
|
// anything that requires more customization (including using a non-default
|
||||||
|
// Gatherer, different instrumentation, and non-default HandlerOpts), use the
|
||||||
|
// HandlerFor function. See there for details.
|
||||||
|
func Handler() http.Handler {
|
||||||
|
return InstrumentMetricHandler(
|
||||||
|
prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// HandlerFor returns an uninstrumented http.Handler for the provided
|
||||||
|
// Gatherer. The behavior of the Handler is defined by the provided
|
||||||
|
// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom
|
||||||
|
// Gatherers, with non-default HandlerOpts, and/or with custom (or no)
|
||||||
|
// instrumentation. Use the InstrumentMetricHandler function to apply the same
|
||||||
|
// kind of instrumentation as it is used by the Handler function.
|
||||||
|
func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
|
||||||
|
return HandlerForTransactional(prometheus.ToTransactionalGatherer(reg), opts)
|
||||||
|
}
|
||||||
|
|
||||||
|
// HandlerForTransactional is like HandlerFor, but it uses transactional gather, which
|
||||||
|
// can safely change in-place returned *dto.MetricFamily before call to `Gather` and after
|
||||||
|
// call to `done` of that `Gather`.
|
||||||
|
func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerOpts) http.Handler {
|
||||||
|
var (
|
||||||
|
inFlightSem chan struct{}
|
||||||
|
errCnt = prometheus.NewCounterVec(
|
||||||
|
prometheus.CounterOpts{
|
||||||
|
Name: "promhttp_metric_handler_errors_total",
|
||||||
|
Help: "Total number of internal errors encountered by the promhttp metric handler.",
|
||||||
|
},
|
||||||
|
[]string{"cause"},
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
if opts.MaxRequestsInFlight > 0 {
|
||||||
|
inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
|
||||||
|
}
|
||||||
|
if opts.Registry != nil {
|
||||||
|
// Initialize all possibilities that can occur below.
|
||||||
|
errCnt.WithLabelValues("gathering")
|
||||||
|
errCnt.WithLabelValues("encoding")
|
||||||
|
if err := opts.Registry.Register(errCnt); err != nil {
|
||||||
|
are := &prometheus.AlreadyRegisteredError{}
|
||||||
|
if errors.As(err, are) {
|
||||||
|
errCnt = are.ExistingCollector.(*prometheus.CounterVec)
|
||||||
|
} else {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
|
||||||
|
if !opts.ProcessStartTime.IsZero() {
|
||||||
|
rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10))
|
||||||
|
}
|
||||||
|
if inFlightSem != nil {
|
||||||
|
select {
|
||||||
|
case inFlightSem <- struct{}{}: // All good, carry on.
|
||||||
|
defer func() { <-inFlightSem }()
|
||||||
|
default:
|
||||||
|
http.Error(rsp, fmt.Sprintf(
|
||||||
|
"Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight,
|
||||||
|
), http.StatusServiceUnavailable)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
mfs, done, err := reg.Gather()
|
||||||
|
defer done()
|
||||||
|
if err != nil {
|
||||||
|
if opts.ErrorLog != nil {
|
||||||
|
opts.ErrorLog.Println("error gathering metrics:", err)
|
||||||
|
}
|
||||||
|
errCnt.WithLabelValues("gathering").Inc()
|
||||||
|
switch opts.ErrorHandling {
|
||||||
|
case PanicOnError:
|
||||||
|
panic(err)
|
||||||
|
case ContinueOnError:
|
||||||
|
if len(mfs) == 0 {
|
||||||
|
// Still report the error if no metrics have been gathered.
|
||||||
|
httpError(rsp, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
case HTTPErrorOnError:
|
||||||
|
httpError(rsp, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var contentType expfmt.Format
|
||||||
|
if opts.EnableOpenMetrics {
|
||||||
|
contentType = expfmt.NegotiateIncludingOpenMetrics(req.Header)
|
||||||
|
} else {
|
||||||
|
contentType = expfmt.Negotiate(req.Header)
|
||||||
|
}
|
||||||
|
header := rsp.Header()
|
||||||
|
header.Set(contentTypeHeader, string(contentType))
|
||||||
|
|
||||||
|
w := io.Writer(rsp)
|
||||||
|
if !opts.DisableCompression && gzipAccepted(req.Header) {
|
||||||
|
header.Set(contentEncodingHeader, "gzip")
|
||||||
|
gz := gzipPool.Get().(*gzip.Writer)
|
||||||
|
defer gzipPool.Put(gz)
|
||||||
|
|
||||||
|
gz.Reset(w)
|
||||||
|
defer gz.Close()
|
||||||
|
|
||||||
|
w = gz
|
||||||
|
}
|
||||||
|
|
||||||
|
enc := expfmt.NewEncoder(w, contentType)
|
||||||
|
|
||||||
|
// handleError handles the error according to opts.ErrorHandling
|
||||||
|
// and returns true if we have to abort after the handling.
|
||||||
|
handleError := func(err error) bool {
|
||||||
|
if err == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if opts.ErrorLog != nil {
|
||||||
|
opts.ErrorLog.Println("error encoding and sending metric family:", err)
|
||||||
|
}
|
||||||
|
errCnt.WithLabelValues("encoding").Inc()
|
||||||
|
switch opts.ErrorHandling {
|
||||||
|
case PanicOnError:
|
||||||
|
panic(err)
|
||||||
|
case HTTPErrorOnError:
|
||||||
|
// We cannot really send an HTTP error at this
|
||||||
|
// point because we most likely have written
|
||||||
|
// something to rsp already. But at least we can
|
||||||
|
// stop sending.
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
// Do nothing in all other cases, including ContinueOnError.
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, mf := range mfs {
|
||||||
|
if handleError(enc.Encode(mf)) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if closer, ok := enc.(expfmt.Closer); ok {
|
||||||
|
// This in particular takes care of the final "# EOF\n" line for OpenMetrics.
|
||||||
|
if handleError(closer.Close()) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
if opts.Timeout <= 0 {
|
||||||
|
return h
|
||||||
|
}
|
||||||
|
return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf(
|
||||||
|
"Exceeded configured timeout of %v.\n",
|
||||||
|
opts.Timeout,
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstrumentMetricHandler is usually used with an http.Handler returned by the
|
||||||
|
// HandlerFor function. It instruments the provided http.Handler with two
|
||||||
|
// metrics: A counter vector "promhttp_metric_handler_requests_total" to count
|
||||||
|
// scrapes partitioned by HTTP status code, and a gauge
|
||||||
|
// "promhttp_metric_handler_requests_in_flight" to track the number of
|
||||||
|
// simultaneous scrapes. This function idempotently registers collectors for
|
||||||
|
// both metrics with the provided Registerer. It panics if the registration
|
||||||
|
// fails. The provided metrics are useful to see how many scrapes hit the
|
||||||
|
// monitored target (which could be from different Prometheus servers or other
|
||||||
|
// scrapers), and how often they overlap (which would result in more than one
|
||||||
|
// scrape in flight at the same time). Note that the scrapes-in-flight gauge
|
||||||
|
// will contain the scrape by which it is exposed, while the scrape counter will
|
||||||
|
// only get incremented after the scrape is complete (as only then the status
|
||||||
|
// code is known). For tracking scrape durations, use the
|
||||||
|
// "scrape_duration_seconds" gauge created by the Prometheus server upon each
|
||||||
|
// scrape.
|
||||||
|
func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler {
|
||||||
|
cnt := prometheus.NewCounterVec(
|
||||||
|
prometheus.CounterOpts{
|
||||||
|
Name: "promhttp_metric_handler_requests_total",
|
||||||
|
Help: "Total number of scrapes by HTTP status code.",
|
||||||
|
},
|
||||||
|
[]string{"code"},
|
||||||
|
)
|
||||||
|
// Initialize the most likely HTTP status codes.
|
||||||
|
cnt.WithLabelValues("200")
|
||||||
|
cnt.WithLabelValues("500")
|
||||||
|
cnt.WithLabelValues("503")
|
||||||
|
if err := reg.Register(cnt); err != nil {
|
||||||
|
are := &prometheus.AlreadyRegisteredError{}
|
||||||
|
if errors.As(err, are) {
|
||||||
|
cnt = are.ExistingCollector.(*prometheus.CounterVec)
|
||||||
|
} else {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
gge := prometheus.NewGauge(prometheus.GaugeOpts{
|
||||||
|
Name: "promhttp_metric_handler_requests_in_flight",
|
||||||
|
Help: "Current number of scrapes being served.",
|
||||||
|
})
|
||||||
|
if err := reg.Register(gge); err != nil {
|
||||||
|
are := &prometheus.AlreadyRegisteredError{}
|
||||||
|
if errors.As(err, are) {
|
||||||
|
gge = are.ExistingCollector.(prometheus.Gauge)
|
||||||
|
} else {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler))
|
||||||
|
}
|
||||||
|
|
||||||
|
// HandlerErrorHandling defines how a Handler serving metrics will handle
|
||||||
|
// errors.
|
||||||
|
type HandlerErrorHandling int
|
||||||
|
|
||||||
|
// These constants cause handlers serving metrics to behave as described if
|
||||||
|
// errors are encountered.
|
||||||
|
const (
|
||||||
|
// Serve an HTTP status code 500 upon the first error
|
||||||
|
// encountered. Report the error message in the body. Note that HTTP
|
||||||
|
// errors cannot be served anymore once the beginning of a regular
|
||||||
|
// payload has been sent. Thus, in the (unlikely) case that encoding the
|
||||||
|
// payload into the negotiated wire format fails, serving the response
|
||||||
|
// will simply be aborted. Set an ErrorLog in HandlerOpts to detect
|
||||||
|
// those errors.
|
||||||
|
HTTPErrorOnError HandlerErrorHandling = iota
|
||||||
|
// Ignore errors and try to serve as many metrics as possible. However,
|
||||||
|
// if no metrics can be served, serve an HTTP status code 500 and the
|
||||||
|
// last error message in the body. Only use this in deliberate "best
|
||||||
|
// effort" metrics collection scenarios. In this case, it is highly
|
||||||
|
// recommended to provide other means of detecting errors: By setting an
|
||||||
|
// ErrorLog in HandlerOpts, the errors are logged. By providing a
|
||||||
|
// Registry in HandlerOpts, the exposed metrics include an error counter
|
||||||
|
// "promhttp_metric_handler_errors_total", which can be used for
|
||||||
|
// alerts.
|
||||||
|
ContinueOnError
|
||||||
|
// Panic upon the first error encountered (useful for "crash only" apps).
|
||||||
|
PanicOnError
|
||||||
|
)
|
||||||
|
|
||||||
|
// Logger is the minimal interface HandlerOpts needs for logging. Note that
|
||||||
|
// log.Logger from the standard library implements this interface, and it is
|
||||||
|
// easy to implement by custom loggers, if they don't do so already anyway.
|
||||||
|
type Logger interface {
|
||||||
|
Println(v ...interface{})
|
||||||
|
}
|
||||||
|
|
||||||
|
// HandlerOpts specifies options how to serve metrics via an http.Handler. The
|
||||||
|
// zero value of HandlerOpts is a reasonable default.
|
||||||
|
type HandlerOpts struct {
|
||||||
|
// ErrorLog specifies an optional Logger for errors collecting and
|
||||||
|
// serving metrics. If nil, errors are not logged at all. Note that the
|
||||||
|
// type of a reported error is often prometheus.MultiError, which
|
||||||
|
// formats into a multi-line error string. If you want to avoid the
|
||||||
|
// latter, create a Logger implementation that detects a
|
||||||
|
// prometheus.MultiError and formats the contained errors into one line.
|
||||||
|
ErrorLog Logger
|
||||||
|
// ErrorHandling defines how errors are handled. Note that errors are
|
||||||
|
// logged regardless of the configured ErrorHandling provided ErrorLog
|
||||||
|
// is not nil.
|
||||||
|
ErrorHandling HandlerErrorHandling
|
||||||
|
// If Registry is not nil, it is used to register a metric
|
||||||
|
// "promhttp_metric_handler_errors_total", partitioned by "cause". A
|
||||||
|
// failed registration causes a panic. Note that this error counter is
|
||||||
|
// different from the instrumentation you get from the various
|
||||||
|
// InstrumentHandler... helpers. It counts errors that don't necessarily
|
||||||
|
// result in a non-2xx HTTP status code. There are two typical cases:
|
||||||
|
// (1) Encoding errors that only happen after streaming of the HTTP body
|
||||||
|
// has already started (and the status code 200 has been sent). This
|
||||||
|
// should only happen with custom collectors. (2) Collection errors with
|
||||||
|
// no effect on the HTTP status code because ErrorHandling is set to
|
||||||
|
// ContinueOnError.
|
||||||
|
Registry prometheus.Registerer
|
||||||
|
// If DisableCompression is true, the handler will never compress the
|
||||||
|
// response, even if requested by the client.
|
||||||
|
DisableCompression bool
|
||||||
|
// The number of concurrent HTTP requests is limited to
|
||||||
|
// MaxRequestsInFlight. Additional requests are responded to with 503
|
||||||
|
// Service Unavailable and a suitable message in the body. If
|
||||||
|
// MaxRequestsInFlight is 0 or negative, no limit is applied.
|
||||||
|
MaxRequestsInFlight int
|
||||||
|
// If handling a request takes longer than Timeout, it is responded to
|
||||||
|
// with 503 ServiceUnavailable and a suitable Message. No timeout is
|
||||||
|
// applied if Timeout is 0 or negative. Note that with the current
|
||||||
|
// implementation, reaching the timeout simply ends the HTTP requests as
|
||||||
|
// described above (and even that only if sending of the body hasn't
|
||||||
|
// started yet), while the bulk work of gathering all the metrics keeps
|
||||||
|
// running in the background (with the eventual result to be thrown
|
||||||
|
// away). Until the implementation is improved, it is recommended to
|
||||||
|
// implement a separate timeout in potentially slow Collectors.
|
||||||
|
Timeout time.Duration
|
||||||
|
// If true, the experimental OpenMetrics encoding is added to the
|
||||||
|
// possible options during content negotiation. Note that Prometheus
|
||||||
|
// 2.5.0+ will negotiate OpenMetrics as first priority. OpenMetrics is
|
||||||
|
// the only way to transmit exemplars. However, the move to OpenMetrics
|
||||||
|
// is not completely transparent. Most notably, the values of "quantile"
|
||||||
|
// labels of Summaries and "le" labels of Histograms are formatted with
|
||||||
|
// a trailing ".0" if they would otherwise look like integer numbers
|
||||||
|
// (which changes the identity of the resulting series on the Prometheus
|
||||||
|
// server).
|
||||||
|
EnableOpenMetrics bool
|
||||||
|
// ProcessStartTime allows setting process start timevalue that will be exposed
|
||||||
|
// with "Process-Start-Time-Unix" response header along with the metrics
|
||||||
|
// payload. This allow callers to have efficient transformations to cumulative
|
||||||
|
// counters (e.g. OpenTelemetry) or generally _created timestamp estimation per
|
||||||
|
// scrape target.
|
||||||
|
// NOTE: This feature is experimental and not covered by OpenMetrics or Prometheus
|
||||||
|
// exposition format.
|
||||||
|
ProcessStartTime time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// gzipAccepted returns whether the client will accept gzip-encoded content.
|
||||||
|
func gzipAccepted(header http.Header) bool {
|
||||||
|
a := header.Get(acceptEncodingHeader)
|
||||||
|
parts := strings.Split(a, ",")
|
||||||
|
for _, part := range parts {
|
||||||
|
part = strings.TrimSpace(part)
|
||||||
|
if part == "gzip" || strings.HasPrefix(part, "gzip;") {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// httpError removes any content-encoding header and then calls http.Error with
|
||||||
|
// the provided error and http.StatusInternalServerError. Error contents is
|
||||||
|
// supposed to be uncompressed plain text. Same as with a plain http.Error, this
|
||||||
|
// must not be called if the header or any payload has already been sent.
|
||||||
|
func httpError(rsp http.ResponseWriter, err error) {
|
||||||
|
rsp.Header().Del(contentEncodingHeader)
|
||||||
|
http.Error(
|
||||||
|
rsp,
|
||||||
|
"An error has occurred while serving metrics:\n\n"+err.Error(),
|
||||||
|
http.StatusInternalServerError,
|
||||||
|
)
|
||||||
|
}
|
249
vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
generated
vendored
Normal file
249
vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
generated
vendored
Normal file
|
@ -0,0 +1,249 @@
|
||||||
|
// Copyright 2017 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package promhttp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptrace"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
|
)
|
||||||
|
|
||||||
|
// The RoundTripperFunc type is an adapter to allow the use of ordinary
|
||||||
|
// functions as RoundTrippers. If f is a function with the appropriate
|
||||||
|
// signature, RountTripperFunc(f) is a RoundTripper that calls f.
|
||||||
|
type RoundTripperFunc func(req *http.Request) (*http.Response, error)
|
||||||
|
|
||||||
|
// RoundTrip implements the RoundTripper interface.
|
||||||
|
func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
|
||||||
|
return rt(r)
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstrumentRoundTripperInFlight is a middleware that wraps the provided
|
||||||
|
// http.RoundTripper. It sets the provided prometheus.Gauge to the number of
|
||||||
|
// requests currently handled by the wrapped http.RoundTripper.
|
||||||
|
//
|
||||||
|
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
|
||||||
|
func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
|
||||||
|
return func(r *http.Request) (*http.Response, error) {
|
||||||
|
gauge.Inc()
|
||||||
|
defer gauge.Dec()
|
||||||
|
return next.RoundTrip(r)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstrumentRoundTripperCounter is a middleware that wraps the provided
|
||||||
|
// http.RoundTripper to observe the request result with the provided CounterVec.
|
||||||
|
// The CounterVec must have zero, one, or two non-const non-curried labels. For
|
||||||
|
// those, the only allowed label names are "code" and "method". The function
|
||||||
|
// panics otherwise. For the "method" label a predefined default label value set
|
||||||
|
// is used to filter given values. Values besides predefined values will count
|
||||||
|
// as `unknown` method.`WithExtraMethods` can be used to add more
|
||||||
|
// methods to the set. Partitioning of the CounterVec happens by HTTP status code
|
||||||
|
// and/or HTTP method if the respective instance label names are present in the
|
||||||
|
// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
|
||||||
|
//
|
||||||
|
// If the wrapped RoundTripper panics or returns a non-nil error, the Counter
|
||||||
|
// is not incremented.
|
||||||
|
//
|
||||||
|
// Use with WithExemplarFromContext to instrument the exemplars on the counter of requests.
|
||||||
|
//
|
||||||
|
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
|
||||||
|
func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper, opts ...Option) RoundTripperFunc {
|
||||||
|
rtOpts := defaultOptions()
|
||||||
|
for _, o := range opts {
|
||||||
|
o.apply(rtOpts)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Curry the counter with dynamic labels before checking the remaining labels.
|
||||||
|
code, method := checkLabels(counter.MustCurryWith(rtOpts.emptyDynamicLabels()))
|
||||||
|
|
||||||
|
return func(r *http.Request) (*http.Response, error) {
|
||||||
|
resp, err := next.RoundTrip(r)
|
||||||
|
if err == nil {
|
||||||
|
l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)
|
||||||
|
for label, resolve := range rtOpts.extraLabelsFromCtx {
|
||||||
|
l[label] = resolve(resp.Request.Context())
|
||||||
|
}
|
||||||
|
addWithExemplar(counter.With(l), 1, rtOpts.getExemplarFn(r.Context()))
|
||||||
|
}
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstrumentRoundTripperDuration is a middleware that wraps the provided
|
||||||
|
// http.RoundTripper to observe the request duration with the provided
|
||||||
|
// ObserverVec. The ObserverVec must have zero, one, or two non-const
|
||||||
|
// non-curried labels. For those, the only allowed label names are "code" and
|
||||||
|
// "method". The function panics otherwise. For the "method" label a predefined
|
||||||
|
// default label value set is used to filter given values. Values besides
|
||||||
|
// predefined values will count as `unknown` method. `WithExtraMethods`
|
||||||
|
// can be used to add more methods to the set. The Observe method of the Observer
|
||||||
|
// in the ObserverVec is called with the request duration in
|
||||||
|
// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
|
||||||
|
// respective instance label names are present in the ObserverVec. For
|
||||||
|
// unpartitioned observations, use an ObserverVec with zero labels. Note that
|
||||||
|
// partitioning of Histograms is expensive and should be used judiciously.
|
||||||
|
//
|
||||||
|
// If the wrapped RoundTripper panics or returns a non-nil error, no values are
|
||||||
|
// reported.
|
||||||
|
//
|
||||||
|
// Use with WithExemplarFromContext to instrument the exemplars on the duration histograms.
|
||||||
|
//
|
||||||
|
// Note that this method is only guaranteed to never observe negative durations
|
||||||
|
// if used with Go1.9+.
|
||||||
|
func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper, opts ...Option) RoundTripperFunc {
|
||||||
|
rtOpts := defaultOptions()
|
||||||
|
for _, o := range opts {
|
||||||
|
o.apply(rtOpts)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Curry the observer with dynamic labels before checking the remaining labels.
|
||||||
|
code, method := checkLabels(obs.MustCurryWith(rtOpts.emptyDynamicLabels()))
|
||||||
|
|
||||||
|
return func(r *http.Request) (*http.Response, error) {
|
||||||
|
start := time.Now()
|
||||||
|
resp, err := next.RoundTrip(r)
|
||||||
|
if err == nil {
|
||||||
|
l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)
|
||||||
|
for label, resolve := range rtOpts.extraLabelsFromCtx {
|
||||||
|
l[label] = resolve(resp.Request.Context())
|
||||||
|
}
|
||||||
|
observeWithExemplar(obs.With(l), time.Since(start).Seconds(), rtOpts.getExemplarFn(r.Context()))
|
||||||
|
}
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstrumentTrace is used to offer flexibility in instrumenting the available
|
||||||
|
// httptrace.ClientTrace hook functions. Each function is passed a float64
|
||||||
|
// representing the time in seconds since the start of the http request. A user
|
||||||
|
// may choose to use separately buckets Histograms, or implement custom
|
||||||
|
// instance labels on a per function basis.
|
||||||
|
type InstrumentTrace struct {
|
||||||
|
GotConn func(float64)
|
||||||
|
PutIdleConn func(float64)
|
||||||
|
GotFirstResponseByte func(float64)
|
||||||
|
Got100Continue func(float64)
|
||||||
|
DNSStart func(float64)
|
||||||
|
DNSDone func(float64)
|
||||||
|
ConnectStart func(float64)
|
||||||
|
ConnectDone func(float64)
|
||||||
|
TLSHandshakeStart func(float64)
|
||||||
|
TLSHandshakeDone func(float64)
|
||||||
|
WroteHeaders func(float64)
|
||||||
|
Wait100Continue func(float64)
|
||||||
|
WroteRequest func(float64)
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstrumentRoundTripperTrace is a middleware that wraps the provided
|
||||||
|
// RoundTripper and reports times to hook functions provided in the
|
||||||
|
// InstrumentTrace struct. Hook functions that are not present in the provided
|
||||||
|
// InstrumentTrace struct are ignored. Times reported to the hook functions are
|
||||||
|
// time since the start of the request. Only with Go1.9+, those times are
|
||||||
|
// guaranteed to never be negative. (Earlier Go versions are not using a
|
||||||
|
// monotonic clock.) Note that partitioning of Histograms is expensive and
|
||||||
|
// should be used judiciously.
|
||||||
|
//
|
||||||
|
// For hook functions that receive an error as an argument, no observations are
|
||||||
|
// made in the event of a non-nil error value.
|
||||||
|
//
|
||||||
|
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
|
||||||
|
func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
|
||||||
|
return func(r *http.Request) (*http.Response, error) {
|
||||||
|
start := time.Now()
|
||||||
|
|
||||||
|
trace := &httptrace.ClientTrace{
|
||||||
|
GotConn: func(_ httptrace.GotConnInfo) {
|
||||||
|
if it.GotConn != nil {
|
||||||
|
it.GotConn(time.Since(start).Seconds())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
PutIdleConn: func(err error) {
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if it.PutIdleConn != nil {
|
||||||
|
it.PutIdleConn(time.Since(start).Seconds())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
DNSStart: func(_ httptrace.DNSStartInfo) {
|
||||||
|
if it.DNSStart != nil {
|
||||||
|
it.DNSStart(time.Since(start).Seconds())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
DNSDone: func(_ httptrace.DNSDoneInfo) {
|
||||||
|
if it.DNSDone != nil {
|
||||||
|
it.DNSDone(time.Since(start).Seconds())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
ConnectStart: func(_, _ string) {
|
||||||
|
if it.ConnectStart != nil {
|
||||||
|
it.ConnectStart(time.Since(start).Seconds())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
ConnectDone: func(_, _ string, err error) {
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if it.ConnectDone != nil {
|
||||||
|
it.ConnectDone(time.Since(start).Seconds())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
GotFirstResponseByte: func() {
|
||||||
|
if it.GotFirstResponseByte != nil {
|
||||||
|
it.GotFirstResponseByte(time.Since(start).Seconds())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
Got100Continue: func() {
|
||||||
|
if it.Got100Continue != nil {
|
||||||
|
it.Got100Continue(time.Since(start).Seconds())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
TLSHandshakeStart: func() {
|
||||||
|
if it.TLSHandshakeStart != nil {
|
||||||
|
it.TLSHandshakeStart(time.Since(start).Seconds())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if it.TLSHandshakeDone != nil {
|
||||||
|
it.TLSHandshakeDone(time.Since(start).Seconds())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
WroteHeaders: func() {
|
||||||
|
if it.WroteHeaders != nil {
|
||||||
|
it.WroteHeaders(time.Since(start).Seconds())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
Wait100Continue: func() {
|
||||||
|
if it.Wait100Continue != nil {
|
||||||
|
it.Wait100Continue(time.Since(start).Seconds())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
WroteRequest: func(_ httptrace.WroteRequestInfo) {
|
||||||
|
if it.WroteRequest != nil {
|
||||||
|
it.WroteRequest(time.Since(start).Seconds())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace))
|
||||||
|
|
||||||
|
return next.RoundTrip(r)
|
||||||
|
}
|
||||||
|
}
|
576
vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
generated
vendored
Normal file
576
vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
generated
vendored
Normal file
|
@ -0,0 +1,576 @@
|
||||||
|
// Copyright 2017 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package promhttp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
dto "github.com/prometheus/client_model/go"
|
||||||
|
|
||||||
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
|
)
|
||||||
|
|
||||||
|
// magicString is used for the hacky label test in checkLabels. Remove once fixed.
|
||||||
|
const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"
|
||||||
|
|
||||||
|
// observeWithExemplar is a wrapper for [prometheus.ExemplarAdder.ExemplarObserver],
|
||||||
|
// which falls back to [prometheus.Observer.Observe] if no labels are provided.
|
||||||
|
func observeWithExemplar(obs prometheus.Observer, val float64, labels map[string]string) {
|
||||||
|
if labels == nil {
|
||||||
|
obs.Observe(val)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
obs.(prometheus.ExemplarObserver).ObserveWithExemplar(val, labels)
|
||||||
|
}
|
||||||
|
|
||||||
|
// addWithExemplar is a wrapper for [prometheus.ExemplarAdder.AddWithExemplar],
|
||||||
|
// which falls back to [prometheus.Counter.Add] if no labels are provided.
|
||||||
|
func addWithExemplar(obs prometheus.Counter, val float64, labels map[string]string) {
|
||||||
|
if labels == nil {
|
||||||
|
obs.Add(val)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
obs.(prometheus.ExemplarAdder).AddWithExemplar(val, labels)
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstrumentHandlerInFlight is a middleware that wraps the provided
|
||||||
|
// http.Handler. It sets the provided prometheus.Gauge to the number of
|
||||||
|
// requests currently handled by the wrapped http.Handler.
|
||||||
|
//
|
||||||
|
// See the example for InstrumentHandlerDuration for example usage.
|
||||||
|
func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler {
|
||||||
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
g.Inc()
|
||||||
|
defer g.Dec()
|
||||||
|
next.ServeHTTP(w, r)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstrumentHandlerDuration is a middleware that wraps the provided
|
||||||
|
// http.Handler to observe the request duration with the provided ObserverVec.
|
||||||
|
// The ObserverVec must have valid metric and label names and must have zero,
|
||||||
|
// one, or two non-const non-curried labels. For those, the only allowed label
|
||||||
|
// names are "code" and "method". The function panics otherwise. For the "method"
|
||||||
|
// label a predefined default label value set is used to filter given values.
|
||||||
|
// Values besides predefined values will count as `unknown` method.
|
||||||
|
// `WithExtraMethods` can be used to add more methods to the set. The Observe
|
||||||
|
// method of the Observer in the ObserverVec is called with the request duration
|
||||||
|
// in seconds. Partitioning happens by HTTP status code and/or HTTP method if
|
||||||
|
// the respective instance label names are present in the ObserverVec. For
|
||||||
|
// unpartitioned observations, use an ObserverVec with zero labels. Note that
|
||||||
|
// partitioning of Histograms is expensive and should be used judiciously.
|
||||||
|
//
|
||||||
|
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
|
||||||
|
//
|
||||||
|
// If the wrapped Handler panics, no values are reported.
|
||||||
|
//
|
||||||
|
// Note that this method is only guaranteed to never observe negative durations
|
||||||
|
// if used with Go1.9+.
|
||||||
|
func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
|
||||||
|
hOpts := defaultOptions()
|
||||||
|
for _, o := range opts {
|
||||||
|
o.apply(hOpts)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Curry the observer with dynamic labels before checking the remaining labels.
|
||||||
|
code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
|
||||||
|
|
||||||
|
if code {
|
||||||
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
now := time.Now()
|
||||||
|
d := newDelegator(w, nil)
|
||||||
|
next.ServeHTTP(d, r)
|
||||||
|
|
||||||
|
l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
|
||||||
|
for label, resolve := range hOpts.extraLabelsFromCtx {
|
||||||
|
l[label] = resolve(r.Context())
|
||||||
|
}
|
||||||
|
observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
now := time.Now()
|
||||||
|
next.ServeHTTP(w, r)
|
||||||
|
l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
|
||||||
|
for label, resolve := range hOpts.extraLabelsFromCtx {
|
||||||
|
l[label] = resolve(r.Context())
|
||||||
|
}
|
||||||
|
observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler
|
||||||
|
// to observe the request result with the provided CounterVec. The CounterVec
|
||||||
|
// must have valid metric and label names and must have zero, one, or two
|
||||||
|
// non-const non-curried labels. For those, the only allowed label names are
|
||||||
|
// "code" and "method". The function panics otherwise. For the "method"
|
||||||
|
// label a predefined default label value set is used to filter given values.
|
||||||
|
// Values besides predefined values will count as `unknown` method.
|
||||||
|
// `WithExtraMethods` can be used to add more methods to the set. Partitioning of the
|
||||||
|
// CounterVec happens by HTTP status code and/or HTTP method if the respective
|
||||||
|
// instance label names are present in the CounterVec. For unpartitioned
|
||||||
|
// counting, use a CounterVec with zero labels.
|
||||||
|
//
|
||||||
|
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
|
||||||
|
//
|
||||||
|
// If the wrapped Handler panics, the Counter is not incremented.
|
||||||
|
//
|
||||||
|
// See the example for InstrumentHandlerDuration for example usage.
|
||||||
|
func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, opts ...Option) http.HandlerFunc {
|
||||||
|
hOpts := defaultOptions()
|
||||||
|
for _, o := range opts {
|
||||||
|
o.apply(hOpts)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Curry the counter with dynamic labels before checking the remaining labels.
|
||||||
|
code, method := checkLabels(counter.MustCurryWith(hOpts.emptyDynamicLabels()))
|
||||||
|
|
||||||
|
if code {
|
||||||
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
d := newDelegator(w, nil)
|
||||||
|
next.ServeHTTP(d, r)
|
||||||
|
|
||||||
|
l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
|
||||||
|
for label, resolve := range hOpts.extraLabelsFromCtx {
|
||||||
|
l[label] = resolve(r.Context())
|
||||||
|
}
|
||||||
|
addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
next.ServeHTTP(w, r)
|
||||||
|
|
||||||
|
l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
|
||||||
|
for label, resolve := range hOpts.extraLabelsFromCtx {
|
||||||
|
l[label] = resolve(r.Context())
|
||||||
|
}
|
||||||
|
addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided
|
||||||
|
// http.Handler to observe with the provided ObserverVec the request duration
|
||||||
|
// until the response headers are written. The ObserverVec must have valid
|
||||||
|
// metric and label names and must have zero, one, or two non-const non-curried
|
||||||
|
// labels. For those, the only allowed label names are "code" and "method". The
|
||||||
|
// function panics otherwise. For the "method" label a predefined default label
|
||||||
|
// value set is used to filter given values. Values besides predefined values
|
||||||
|
// will count as `unknown` method.`WithExtraMethods` can be used to add more
|
||||||
|
// methods to the set. The Observe method of the Observer in the
|
||||||
|
// ObserverVec is called with the request duration in seconds. Partitioning
|
||||||
|
// happens by HTTP status code and/or HTTP method if the respective instance
|
||||||
|
// label names are present in the ObserverVec. For unpartitioned observations,
|
||||||
|
// use an ObserverVec with zero labels. Note that partitioning of Histograms is
|
||||||
|
// expensive and should be used judiciously.
|
||||||
|
//
|
||||||
|
// If the wrapped Handler panics before calling WriteHeader, no value is
|
||||||
|
// reported.
|
||||||
|
//
|
||||||
|
// Note that this method is only guaranteed to never observe negative durations
|
||||||
|
// if used with Go1.9+.
|
||||||
|
//
|
||||||
|
// See the example for InstrumentHandlerDuration for example usage.
|
||||||
|
func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
|
||||||
|
hOpts := defaultOptions()
|
||||||
|
for _, o := range opts {
|
||||||
|
o.apply(hOpts)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Curry the observer with dynamic labels before checking the remaining labels.
|
||||||
|
code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
|
||||||
|
|
||||||
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
now := time.Now()
|
||||||
|
d := newDelegator(w, func(status int) {
|
||||||
|
l := labels(code, method, r.Method, status, hOpts.extraMethods...)
|
||||||
|
for label, resolve := range hOpts.extraLabelsFromCtx {
|
||||||
|
l[label] = resolve(r.Context())
|
||||||
|
}
|
||||||
|
observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
|
||||||
|
})
|
||||||
|
next.ServeHTTP(d, r)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstrumentHandlerRequestSize is a middleware that wraps the provided
|
||||||
|
// http.Handler to observe the request size with the provided ObserverVec. The
|
||||||
|
// ObserverVec must have valid metric and label names and must have zero, one,
|
||||||
|
// or two non-const non-curried labels. For those, the only allowed label names
|
||||||
|
// are "code" and "method". The function panics otherwise. For the "method"
|
||||||
|
// label a predefined default label value set is used to filter given values.
|
||||||
|
// Values besides predefined values will count as `unknown` method.
|
||||||
|
// `WithExtraMethods` can be used to add more methods to the set. The Observe
|
||||||
|
// method of the Observer in the ObserverVec is called with the request size in
|
||||||
|
// bytes. Partitioning happens by HTTP status code and/or HTTP method if the
|
||||||
|
// respective instance label names are present in the ObserverVec. For
|
||||||
|
// unpartitioned observations, use an ObserverVec with zero labels. Note that
|
||||||
|
// partitioning of Histograms is expensive and should be used judiciously.
|
||||||
|
//
|
||||||
|
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
|
||||||
|
//
|
||||||
|
// If the wrapped Handler panics, no values are reported.
|
||||||
|
//
|
||||||
|
// See the example for InstrumentHandlerDuration for example usage.
|
||||||
|
func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
|
||||||
|
hOpts := defaultOptions()
|
||||||
|
for _, o := range opts {
|
||||||
|
o.apply(hOpts)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Curry the observer with dynamic labels before checking the remaining labels.
|
||||||
|
code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
|
||||||
|
|
||||||
|
if code {
|
||||||
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
d := newDelegator(w, nil)
|
||||||
|
next.ServeHTTP(d, r)
|
||||||
|
size := computeApproximateRequestSize(r)
|
||||||
|
|
||||||
|
l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
|
||||||
|
for label, resolve := range hOpts.extraLabelsFromCtx {
|
||||||
|
l[label] = resolve(r.Context())
|
||||||
|
}
|
||||||
|
observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
next.ServeHTTP(w, r)
|
||||||
|
size := computeApproximateRequestSize(r)
|
||||||
|
|
||||||
|
l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
|
||||||
|
for label, resolve := range hOpts.extraLabelsFromCtx {
|
||||||
|
l[label] = resolve(r.Context())
|
||||||
|
}
|
||||||
|
observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstrumentHandlerResponseSize is a middleware that wraps the provided
|
||||||
|
// http.Handler to observe the response size with the provided ObserverVec. The
|
||||||
|
// ObserverVec must have valid metric and label names and must have zero, one,
|
||||||
|
// or two non-const non-curried labels. For those, the only allowed label names
|
||||||
|
// are "code" and "method". The function panics otherwise. For the "method"
|
||||||
|
// label a predefined default label value set is used to filter given values.
|
||||||
|
// Values besides predefined values will count as `unknown` method.
|
||||||
|
// `WithExtraMethods` can be used to add more methods to the set. The Observe
|
||||||
|
// method of the Observer in the ObserverVec is called with the response size in
|
||||||
|
// bytes. Partitioning happens by HTTP status code and/or HTTP method if the
|
||||||
|
// respective instance label names are present in the ObserverVec. For
|
||||||
|
// unpartitioned observations, use an ObserverVec with zero labels. Note that
|
||||||
|
// partitioning of Histograms is expensive and should be used judiciously.
|
||||||
|
//
|
||||||
|
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
|
||||||
|
//
|
||||||
|
// If the wrapped Handler panics, no values are reported.
|
||||||
|
//
|
||||||
|
// See the example for InstrumentHandlerDuration for example usage.
|
||||||
|
func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.Handler {
|
||||||
|
hOpts := defaultOptions()
|
||||||
|
for _, o := range opts {
|
||||||
|
o.apply(hOpts)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Curry the observer with dynamic labels before checking the remaining labels.
|
||||||
|
code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
|
||||||
|
|
||||||
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
d := newDelegator(w, nil)
|
||||||
|
next.ServeHTTP(d, r)
|
||||||
|
|
||||||
|
l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
|
||||||
|
for label, resolve := range hOpts.extraLabelsFromCtx {
|
||||||
|
l[label] = resolve(r.Context())
|
||||||
|
}
|
||||||
|
observeWithExemplar(obs.With(l), float64(d.Written()), hOpts.getExemplarFn(r.Context()))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkLabels returns whether the provided Collector has a non-const,
|
||||||
|
// non-curried label named "code" and/or "method". It panics if the provided
|
||||||
|
// Collector does not have a Desc or has more than one Desc or its Desc is
|
||||||
|
// invalid. It also panics if the Collector has any non-const, non-curried
|
||||||
|
// labels that are not named "code" or "method".
|
||||||
|
func checkLabels(c prometheus.Collector) (code, method bool) {
|
||||||
|
// TODO(beorn7): Remove this hacky way to check for instance labels
|
||||||
|
// once Descriptors can have their dimensionality queried.
|
||||||
|
var (
|
||||||
|
desc *prometheus.Desc
|
||||||
|
m prometheus.Metric
|
||||||
|
pm dto.Metric
|
||||||
|
lvs []string
|
||||||
|
)
|
||||||
|
|
||||||
|
// Get the Desc from the Collector.
|
||||||
|
descc := make(chan *prometheus.Desc, 1)
|
||||||
|
c.Describe(descc)
|
||||||
|
|
||||||
|
select {
|
||||||
|
case desc = <-descc:
|
||||||
|
default:
|
||||||
|
panic("no description provided by collector")
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case <-descc:
|
||||||
|
panic("more than one description provided by collector")
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
|
||||||
|
close(descc)
|
||||||
|
|
||||||
|
// Make sure the Collector has a valid Desc by registering it with a
|
||||||
|
// temporary registry.
|
||||||
|
prometheus.NewRegistry().MustRegister(c)
|
||||||
|
|
||||||
|
// Create a ConstMetric with the Desc. Since we don't know how many
|
||||||
|
// variable labels there are, try for as long as it needs.
|
||||||
|
for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) {
|
||||||
|
m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write out the metric into a proto message and look at the labels.
|
||||||
|
// If the value is not the magicString, it is a constLabel, which doesn't interest us.
|
||||||
|
// If the label is curried, it doesn't interest us.
|
||||||
|
// In all other cases, only "code" or "method" is allowed.
|
||||||
|
if err := m.Write(&pm); err != nil {
|
||||||
|
panic("error checking metric for labels")
|
||||||
|
}
|
||||||
|
for _, label := range pm.Label {
|
||||||
|
name, value := label.GetName(), label.GetValue()
|
||||||
|
if value != magicString || isLabelCurried(c, name) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
switch name {
|
||||||
|
case "code":
|
||||||
|
code = true
|
||||||
|
case "method":
|
||||||
|
method = true
|
||||||
|
default:
|
||||||
|
panic("metric partitioned with non-supported labels")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func isLabelCurried(c prometheus.Collector, label string) bool {
|
||||||
|
// This is even hackier than the label test above.
|
||||||
|
// We essentially try to curry again and see if it works.
|
||||||
|
// But for that, we need to type-convert to the two
|
||||||
|
// types we use here, ObserverVec or *CounterVec.
|
||||||
|
switch v := c.(type) {
|
||||||
|
case *prometheus.CounterVec:
|
||||||
|
if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
case prometheus.ObserverVec:
|
||||||
|
if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
panic("unsupported metric vec type")
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels {
|
||||||
|
labels := prometheus.Labels{}
|
||||||
|
|
||||||
|
if !(code || method) {
|
||||||
|
return labels
|
||||||
|
}
|
||||||
|
|
||||||
|
if code {
|
||||||
|
labels["code"] = sanitizeCode(status)
|
||||||
|
}
|
||||||
|
if method {
|
||||||
|
labels["method"] = sanitizeMethod(reqMethod, extraMethods...)
|
||||||
|
}
|
||||||
|
|
||||||
|
return labels
|
||||||
|
}
|
||||||
|
|
||||||
|
func computeApproximateRequestSize(r *http.Request) int {
|
||||||
|
s := 0
|
||||||
|
if r.URL != nil {
|
||||||
|
s += len(r.URL.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
s += len(r.Method)
|
||||||
|
s += len(r.Proto)
|
||||||
|
for name, values := range r.Header {
|
||||||
|
s += len(name)
|
||||||
|
for _, value := range values {
|
||||||
|
s += len(value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
s += len(r.Host)
|
||||||
|
|
||||||
|
// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
|
||||||
|
|
||||||
|
if r.ContentLength != -1 {
|
||||||
|
s += int(r.ContentLength)
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the wrapped http.Handler has a known method, it will be sanitized and returned.
|
||||||
|
// Otherwise, "unknown" will be returned. The known method list can be extended
|
||||||
|
// as needed by using extraMethods parameter.
|
||||||
|
func sanitizeMethod(m string, extraMethods ...string) string {
|
||||||
|
// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for
|
||||||
|
// the methods chosen as default.
|
||||||
|
switch m {
|
||||||
|
case "GET", "get":
|
||||||
|
return "get"
|
||||||
|
case "PUT", "put":
|
||||||
|
return "put"
|
||||||
|
case "HEAD", "head":
|
||||||
|
return "head"
|
||||||
|
case "POST", "post":
|
||||||
|
return "post"
|
||||||
|
case "DELETE", "delete":
|
||||||
|
return "delete"
|
||||||
|
case "CONNECT", "connect":
|
||||||
|
return "connect"
|
||||||
|
case "OPTIONS", "options":
|
||||||
|
return "options"
|
||||||
|
case "NOTIFY", "notify":
|
||||||
|
return "notify"
|
||||||
|
case "TRACE", "trace":
|
||||||
|
return "trace"
|
||||||
|
case "PATCH", "patch":
|
||||||
|
return "patch"
|
||||||
|
default:
|
||||||
|
for _, method := range extraMethods {
|
||||||
|
if strings.EqualFold(m, method) {
|
||||||
|
return strings.ToLower(m)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return "unknown"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the wrapped http.Handler has not set a status code, i.e. the value is
|
||||||
|
// currently 0, sanitizeCode will return 200, for consistency with behavior in
|
||||||
|
// the stdlib.
|
||||||
|
func sanitizeCode(s int) string {
|
||||||
|
// See for accepted codes https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml
|
||||||
|
switch s {
|
||||||
|
case 100:
|
||||||
|
return "100"
|
||||||
|
case 101:
|
||||||
|
return "101"
|
||||||
|
|
||||||
|
case 200, 0:
|
||||||
|
return "200"
|
||||||
|
case 201:
|
||||||
|
return "201"
|
||||||
|
case 202:
|
||||||
|
return "202"
|
||||||
|
case 203:
|
||||||
|
return "203"
|
||||||
|
case 204:
|
||||||
|
return "204"
|
||||||
|
case 205:
|
||||||
|
return "205"
|
||||||
|
case 206:
|
||||||
|
return "206"
|
||||||
|
|
||||||
|
case 300:
|
||||||
|
return "300"
|
||||||
|
case 301:
|
||||||
|
return "301"
|
||||||
|
case 302:
|
||||||
|
return "302"
|
||||||
|
case 304:
|
||||||
|
return "304"
|
||||||
|
case 305:
|
||||||
|
return "305"
|
||||||
|
case 307:
|
||||||
|
return "307"
|
||||||
|
|
||||||
|
case 400:
|
||||||
|
return "400"
|
||||||
|
case 401:
|
||||||
|
return "401"
|
||||||
|
case 402:
|
||||||
|
return "402"
|
||||||
|
case 403:
|
||||||
|
return "403"
|
||||||
|
case 404:
|
||||||
|
return "404"
|
||||||
|
case 405:
|
||||||
|
return "405"
|
||||||
|
case 406:
|
||||||
|
return "406"
|
||||||
|
case 407:
|
||||||
|
return "407"
|
||||||
|
case 408:
|
||||||
|
return "408"
|
||||||
|
case 409:
|
||||||
|
return "409"
|
||||||
|
case 410:
|
||||||
|
return "410"
|
||||||
|
case 411:
|
||||||
|
return "411"
|
||||||
|
case 412:
|
||||||
|
return "412"
|
||||||
|
case 413:
|
||||||
|
return "413"
|
||||||
|
case 414:
|
||||||
|
return "414"
|
||||||
|
case 415:
|
||||||
|
return "415"
|
||||||
|
case 416:
|
||||||
|
return "416"
|
||||||
|
case 417:
|
||||||
|
return "417"
|
||||||
|
case 418:
|
||||||
|
return "418"
|
||||||
|
|
||||||
|
case 500:
|
||||||
|
return "500"
|
||||||
|
case 501:
|
||||||
|
return "501"
|
||||||
|
case 502:
|
||||||
|
return "502"
|
||||||
|
case 503:
|
||||||
|
return "503"
|
||||||
|
case 504:
|
||||||
|
return "504"
|
||||||
|
case 505:
|
||||||
|
return "505"
|
||||||
|
|
||||||
|
case 428:
|
||||||
|
return "428"
|
||||||
|
case 429:
|
||||||
|
return "429"
|
||||||
|
case 431:
|
||||||
|
return "431"
|
||||||
|
case 511:
|
||||||
|
return "511"
|
||||||
|
|
||||||
|
default:
|
||||||
|
if s >= 100 && s <= 599 {
|
||||||
|
return strconv.Itoa(s)
|
||||||
|
}
|
||||||
|
return "unknown"
|
||||||
|
}
|
||||||
|
}
|
84
vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
generated
vendored
Normal file
84
vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
generated
vendored
Normal file
|
@ -0,0 +1,84 @@
|
||||||
|
// Copyright 2022 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package promhttp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Option are used to configure both handler (middleware) or round tripper.
|
||||||
|
type Option interface {
|
||||||
|
apply(*options)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LabelValueFromCtx are used to compute the label value from request context.
|
||||||
|
// Context can be filled with values from request through middleware.
|
||||||
|
type LabelValueFromCtx func(ctx context.Context) string
|
||||||
|
|
||||||
|
// options store options for both a handler or round tripper.
|
||||||
|
type options struct {
|
||||||
|
extraMethods []string
|
||||||
|
getExemplarFn func(requestCtx context.Context) prometheus.Labels
|
||||||
|
extraLabelsFromCtx map[string]LabelValueFromCtx
|
||||||
|
}
|
||||||
|
|
||||||
|
func defaultOptions() *options {
|
||||||
|
return &options{
|
||||||
|
getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil },
|
||||||
|
extraLabelsFromCtx: map[string]LabelValueFromCtx{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *options) emptyDynamicLabels() prometheus.Labels {
|
||||||
|
labels := prometheus.Labels{}
|
||||||
|
|
||||||
|
for label := range o.extraLabelsFromCtx {
|
||||||
|
labels[label] = ""
|
||||||
|
}
|
||||||
|
|
||||||
|
return labels
|
||||||
|
}
|
||||||
|
|
||||||
|
type optionApplyFunc func(*options)
|
||||||
|
|
||||||
|
func (o optionApplyFunc) apply(opt *options) { o(opt) }
|
||||||
|
|
||||||
|
// WithExtraMethods adds additional HTTP methods to the list of allowed methods.
|
||||||
|
// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for the default list.
|
||||||
|
//
|
||||||
|
// See the example for ExampleInstrumentHandlerWithExtraMethods for example usage.
|
||||||
|
func WithExtraMethods(methods ...string) Option {
|
||||||
|
return optionApplyFunc(func(o *options) {
|
||||||
|
o.extraMethods = methods
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithExemplarFromContext allows to inject function that will get exemplar from context that will be put to counter and histogram metrics.
|
||||||
|
// If the function returns nil labels or the metric does not support exemplars, no exemplar will be added (noop), but
|
||||||
|
// metric will continue to observe/increment.
|
||||||
|
func WithExemplarFromContext(getExemplarFn func(requestCtx context.Context) prometheus.Labels) Option {
|
||||||
|
return optionApplyFunc(func(o *options) {
|
||||||
|
o.getExemplarFn = getExemplarFn
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithLabelFromCtx registers a label for dynamic resolution with access to context.
|
||||||
|
// See the example for ExampleInstrumentHandlerWithLabelResolver for example usage
|
||||||
|
func WithLabelFromCtx(name string, valueFn LabelValueFromCtx) Option {
|
||||||
|
return optionApplyFunc(func(o *options) {
|
||||||
|
o.extraLabelsFromCtx[name] = valueFn
|
||||||
|
})
|
||||||
|
}
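
For illustration, a rough sketch of how these options are typically consumed by the promhttp instrumentation helpers. The metric name, the extra HTTP method, and the tenant context key below are made up for the example, not taken from this commit.

package main

import (
    "context"
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

// tenantKey is a hypothetical context key; some upstream middleware is
// assumed to store the tenant name under it.
type tenantKey struct{}

func main() {
    // Counter partitioned by status code, method, and a label resolved
    // from the request context via WithLabelFromCtx.
    requests := prometheus.NewCounterVec(prometheus.CounterOpts{
        Name: "myapp_http_requests_total",
        Help: "HTTP requests handled, by code, method and tenant.",
    }, []string{"code", "method", "tenant"})
    prometheus.MustRegister(requests)

    next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        w.WriteHeader(http.StatusOK)
    })

    handler := promhttp.InstrumentHandlerCounter(requests, next,
        promhttp.WithExtraMethods("PROPFIND"), // also count a non-default HTTP method
        promhttp.WithLabelFromCtx("tenant", func(ctx context.Context) string {
            if t, ok := ctx.Value(tenantKey{}).(string); ok {
                return t
            }
            return "unknown"
        }),
    )

    http.Handle("/", handler)
    http.Handle("/metrics", promhttp.Handler())
    _ = http.ListenAndServe(":8080", nil)
}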
1075 vendor/github.com/prometheus/client_golang/prometheus/registry.go generated vendored Normal file
File diff suppressed because it is too large
785 vendor/github.com/prometheus/client_golang/prometheus/summary.go generated vendored Normal file
@@ -0,0 +1,785 @@
// Copyright 2014 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package prometheus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"runtime"
|
||||||
|
"sort"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
dto "github.com/prometheus/client_model/go"
|
||||||
|
|
||||||
|
"github.com/beorn7/perks/quantile"
|
||||||
|
"google.golang.org/protobuf/proto"
|
||||||
|
"google.golang.org/protobuf/types/known/timestamppb"
|
||||||
|
)
|
||||||
|
|
||||||
|
// quantileLabel is used for the label that defines the quantile in a
|
||||||
|
// summary.
|
||||||
|
const quantileLabel = "quantile"
|
||||||
|
|
||||||
|
// A Summary captures individual observations from an event or sample stream and
|
||||||
|
// summarizes them in a manner similar to traditional summary statistics: 1. sum
|
||||||
|
// of observations, 2. observation count, 3. rank estimations.
|
||||||
|
//
|
||||||
|
// A typical use-case is the observation of request latencies. By default, a
|
||||||
|
// Summary provides the median, the 90th and the 99th percentile of the latency
|
||||||
|
// as rank estimations. However, the default behavior will change in the
|
||||||
|
// upcoming v1.0.0 of the library. There will be no rank estimations at all by
|
||||||
|
// default. For a sane transition, it is recommended to set the desired rank
|
||||||
|
// estimations explicitly.
|
||||||
|
//
|
||||||
|
// Note that the rank estimations cannot be aggregated in a meaningful way with
|
||||||
|
// the Prometheus query language (i.e. you cannot average or add them). If you
|
||||||
|
// need aggregatable quantiles (e.g. you want the 99th percentile latency of all
|
||||||
|
// queries served across all instances of a service), consider the Histogram
|
||||||
|
// metric type. See the Prometheus documentation for more details.
|
||||||
|
//
|
||||||
|
// To create Summary instances, use NewSummary.
|
||||||
|
type Summary interface {
|
||||||
|
Metric
|
||||||
|
Collector
|
||||||
|
|
||||||
|
// Observe adds a single observation to the summary. Observations are
|
||||||
|
// usually positive or zero. Negative observations are accepted but
|
||||||
|
// prevent current versions of Prometheus from properly detecting
|
||||||
|
// counter resets in the sum of observations. See
|
||||||
|
// https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations
|
||||||
|
// for details.
|
||||||
|
Observe(float64)
|
||||||
|
}
|
||||||
|
|
||||||
|
var errQuantileLabelNotAllowed = fmt.Errorf(
|
||||||
|
"%q is not allowed as label name in summaries", quantileLabel,
|
||||||
|
)
|
||||||
|
|
||||||
|
// Default values for SummaryOpts.
|
||||||
|
const (
|
||||||
|
// DefMaxAge is the default duration for which observations stay
|
||||||
|
// relevant.
|
||||||
|
DefMaxAge time.Duration = 10 * time.Minute
|
||||||
|
// DefAgeBuckets is the default number of buckets used to calculate the
|
||||||
|
// age of observations.
|
||||||
|
DefAgeBuckets = 5
|
||||||
|
// DefBufCap is the standard buffer size for collecting Summary observations.
|
||||||
|
DefBufCap = 500
|
||||||
|
)
|
||||||
|
|
||||||
|
// SummaryOpts bundles the options for creating a Summary metric. It is
|
||||||
|
// mandatory to set Name to a non-empty string. While all other fields are
|
||||||
|
// optional and can safely be left at their zero value, it is recommended to set
|
||||||
|
// a help string and to explicitly set the Objectives field to the desired value
|
||||||
|
// as the default value will change in the upcoming v1.0.0 of the library.
|
||||||
|
type SummaryOpts struct {
|
||||||
|
// Namespace, Subsystem, and Name are components of the fully-qualified
|
||||||
|
// name of the Summary (created by joining these components with
|
||||||
|
// "_"). Only Name is mandatory, the others merely help structuring the
|
||||||
|
// name. Note that the fully-qualified name of the Summary must be a
|
||||||
|
// valid Prometheus metric name.
|
||||||
|
Namespace string
|
||||||
|
Subsystem string
|
||||||
|
Name string
|
||||||
|
|
||||||
|
// Help provides information about this Summary.
|
||||||
|
//
|
||||||
|
// Metrics with the same fully-qualified name must have the same Help
|
||||||
|
// string.
|
||||||
|
Help string
|
||||||
|
|
||||||
|
// ConstLabels are used to attach fixed labels to this metric. Metrics
|
||||||
|
// with the same fully-qualified name must have the same label names in
|
||||||
|
// their ConstLabels.
|
||||||
|
//
|
||||||
|
// Due to the way a Summary is represented in the Prometheus text format
|
||||||
|
// and how it is handled by the Prometheus server internally, “quantile”
|
||||||
|
// is an illegal label name. Construction of a Summary or SummaryVec
|
||||||
|
// will panic if this label name is used in ConstLabels.
|
||||||
|
//
|
||||||
|
// ConstLabels are only used rarely. In particular, do not use them to
|
||||||
|
// attach the same labels to all your metrics. Those use cases are
|
||||||
|
// better covered by target labels set by the scraping Prometheus
|
||||||
|
// server, or by one specific metric (e.g. a build_info or a
|
||||||
|
// machine_role metric). See also
|
||||||
|
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
|
||||||
|
ConstLabels Labels
|
||||||
|
|
||||||
|
// Objectives defines the quantile rank estimates with their respective
|
||||||
|
// absolute error. If Objectives[q] = e, then the value reported for q
|
||||||
|
// will be the φ-quantile value for some φ between q-e and q+e. The
|
||||||
|
// default value is an empty map, resulting in a summary without
|
||||||
|
// quantiles.
|
||||||
|
Objectives map[float64]float64
|
||||||
|
|
||||||
|
// MaxAge defines the duration for which an observation stays relevant
|
||||||
|
// for the summary. Only applies to pre-calculated quantiles, does not
|
||||||
|
// apply to _sum and _count. Must be positive. The default value is
|
||||||
|
// DefMaxAge.
|
||||||
|
MaxAge time.Duration
|
||||||
|
|
||||||
|
// AgeBuckets is the number of buckets used to exclude observations that
|
||||||
|
// are older than MaxAge from the summary. A higher number has a
|
||||||
|
// resource penalty, so only increase it if the higher resolution is
|
||||||
|
// really required. For very high observation rates, you might want to
|
||||||
|
// reduce the number of age buckets. With only one age bucket, you will
|
||||||
|
// effectively see a complete reset of the summary each time MaxAge has
|
||||||
|
// passed. The default value is DefAgeBuckets.
|
||||||
|
AgeBuckets uint32
|
||||||
|
|
||||||
|
// BufCap defines the default sample stream buffer size. The default
|
||||||
|
// value of DefBufCap should suffice for most uses. If there is a need
|
||||||
|
// to increase the value, a multiple of 500 is recommended (because that
|
||||||
|
// is the internal buffer size of the underlying package
|
||||||
|
// "github.com/bmizerany/perks/quantile").
|
||||||
|
BufCap uint32
|
||||||
|
|
||||||
|
// now is for testing purposes, by default it's time.Now.
|
||||||
|
now func() time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// SummaryVecOpts bundles the options to create a SummaryVec metric.
|
||||||
|
// It is mandatory to set SummaryOpts, see there for mandatory fields. VariableLabels
|
||||||
|
// is optional and can safely be left to its default value.
|
||||||
|
type SummaryVecOpts struct {
|
||||||
|
SummaryOpts
|
||||||
|
|
||||||
|
// VariableLabels are used to partition the metric vector by the given set
|
||||||
|
// of labels. Each label value will be constrained with the optional Constraint
|
||||||
|
// function, if provided.
|
||||||
|
VariableLabels ConstrainableLabels
|
||||||
|
}
|
||||||
|
|
||||||
|
// Problem with the sliding-window decay algorithm... The Merge method of
|
||||||
|
// perk/quantile is actually not working as advertised - and it might be
|
||||||
|
// unfixable, as the underlying algorithm is apparently not capable of merging
|
||||||
|
// summaries in the first place. To avoid using Merge, we are currently adding
|
||||||
|
// observations to _each_ age bucket, i.e. the effort to add a sample is
|
||||||
|
// essentially multiplied by the number of age buckets. When rotating age
|
||||||
|
// buckets, we empty the previous head stream. On scrape time, we simply take
|
||||||
|
// the quantiles from the head stream (no merging required). Result: More effort
|
||||||
|
// on observation time, less effort on scrape time, which is exactly the
|
||||||
|
// opposite of what we try to accomplish, but at least the results are correct.
|
||||||
|
//
|
||||||
|
// The quite elegant previous contraption to merge the age buckets efficiently
|
||||||
|
// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
|
||||||
|
// can't be used anymore.
|
||||||
|
|
||||||
|
// NewSummary creates a new Summary based on the provided SummaryOpts.
|
||||||
|
func NewSummary(opts SummaryOpts) Summary {
|
||||||
|
return newSummary(
|
||||||
|
NewDesc(
|
||||||
|
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||||
|
opts.Help,
|
||||||
|
nil,
|
||||||
|
opts.ConstLabels,
|
||||||
|
),
|
||||||
|
opts,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
|
||||||
|
if len(desc.variableLabels.names) != len(labelValues) {
|
||||||
|
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues))
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, n := range desc.variableLabels.names {
|
||||||
|
if n == quantileLabel {
|
||||||
|
panic(errQuantileLabelNotAllowed)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, lp := range desc.constLabelPairs {
|
||||||
|
if lp.GetName() == quantileLabel {
|
||||||
|
panic(errQuantileLabelNotAllowed)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.Objectives == nil {
|
||||||
|
opts.Objectives = map[float64]float64{}
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.MaxAge < 0 {
|
||||||
|
panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
|
||||||
|
}
|
||||||
|
if opts.MaxAge == 0 {
|
||||||
|
opts.MaxAge = DefMaxAge
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.AgeBuckets == 0 {
|
||||||
|
opts.AgeBuckets = DefAgeBuckets
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.BufCap == 0 {
|
||||||
|
opts.BufCap = DefBufCap
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.now == nil {
|
||||||
|
opts.now = time.Now
|
||||||
|
}
|
||||||
|
if len(opts.Objectives) == 0 {
|
||||||
|
// Use the lock-free implementation of a Summary without objectives.
|
||||||
|
s := &noObjectivesSummary{
|
||||||
|
desc: desc,
|
||||||
|
labelPairs: MakeLabelPairs(desc, labelValues),
|
||||||
|
counts: [2]*summaryCounts{{}, {}},
|
||||||
|
}
|
||||||
|
s.init(s) // Init self-collection.
|
||||||
|
s.createdTs = timestamppb.New(opts.now())
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
s := &summary{
|
||||||
|
desc: desc,
|
||||||
|
|
||||||
|
objectives: opts.Objectives,
|
||||||
|
sortedObjectives: make([]float64, 0, len(opts.Objectives)),
|
||||||
|
|
||||||
|
labelPairs: MakeLabelPairs(desc, labelValues),
|
||||||
|
|
||||||
|
hotBuf: make([]float64, 0, opts.BufCap),
|
||||||
|
coldBuf: make([]float64, 0, opts.BufCap),
|
||||||
|
streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
|
||||||
|
}
|
||||||
|
s.headStreamExpTime = opts.now().Add(s.streamDuration)
|
||||||
|
s.hotBufExpTime = s.headStreamExpTime
|
||||||
|
|
||||||
|
for i := uint32(0); i < opts.AgeBuckets; i++ {
|
||||||
|
s.streams = append(s.streams, s.newStream())
|
||||||
|
}
|
||||||
|
s.headStream = s.streams[0]
|
||||||
|
|
||||||
|
for qu := range s.objectives {
|
||||||
|
s.sortedObjectives = append(s.sortedObjectives, qu)
|
||||||
|
}
|
||||||
|
sort.Float64s(s.sortedObjectives)
|
||||||
|
|
||||||
|
s.init(s) // Init self-collection.
|
||||||
|
s.createdTs = timestamppb.New(opts.now())
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
type summary struct {
|
||||||
|
selfCollector
|
||||||
|
|
||||||
|
bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
|
||||||
|
mtx sync.Mutex // Protects every other moving part.
|
||||||
|
// Lock bufMtx before mtx if both are needed.
|
||||||
|
|
||||||
|
desc *Desc
|
||||||
|
|
||||||
|
objectives map[float64]float64
|
||||||
|
sortedObjectives []float64
|
||||||
|
|
||||||
|
labelPairs []*dto.LabelPair
|
||||||
|
|
||||||
|
sum float64
|
||||||
|
cnt uint64
|
||||||
|
|
||||||
|
hotBuf, coldBuf []float64
|
||||||
|
|
||||||
|
streams []*quantile.Stream
|
||||||
|
streamDuration time.Duration
|
||||||
|
headStream *quantile.Stream
|
||||||
|
headStreamIdx int
|
||||||
|
headStreamExpTime, hotBufExpTime time.Time
|
||||||
|
|
||||||
|
createdTs *timestamppb.Timestamp
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *summary) Desc() *Desc {
|
||||||
|
return s.desc
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *summary) Observe(v float64) {
|
||||||
|
s.bufMtx.Lock()
|
||||||
|
defer s.bufMtx.Unlock()
|
||||||
|
|
||||||
|
now := time.Now()
|
||||||
|
if now.After(s.hotBufExpTime) {
|
||||||
|
s.asyncFlush(now)
|
||||||
|
}
|
||||||
|
s.hotBuf = append(s.hotBuf, v)
|
||||||
|
if len(s.hotBuf) == cap(s.hotBuf) {
|
||||||
|
s.asyncFlush(now)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *summary) Write(out *dto.Metric) error {
|
||||||
|
sum := &dto.Summary{
|
||||||
|
CreatedTimestamp: s.createdTs,
|
||||||
|
}
|
||||||
|
qs := make([]*dto.Quantile, 0, len(s.objectives))
|
||||||
|
|
||||||
|
s.bufMtx.Lock()
|
||||||
|
s.mtx.Lock()
|
||||||
|
// Swap bufs even if hotBuf is empty to set new hotBufExpTime.
|
||||||
|
s.swapBufs(time.Now())
|
||||||
|
s.bufMtx.Unlock()
|
||||||
|
|
||||||
|
s.flushColdBuf()
|
||||||
|
sum.SampleCount = proto.Uint64(s.cnt)
|
||||||
|
sum.SampleSum = proto.Float64(s.sum)
|
||||||
|
|
||||||
|
for _, rank := range s.sortedObjectives {
|
||||||
|
var q float64
|
||||||
|
if s.headStream.Count() == 0 {
|
||||||
|
q = math.NaN()
|
||||||
|
} else {
|
||||||
|
q = s.headStream.Query(rank)
|
||||||
|
}
|
||||||
|
qs = append(qs, &dto.Quantile{
|
||||||
|
Quantile: proto.Float64(rank),
|
||||||
|
Value: proto.Float64(q),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
s.mtx.Unlock()
|
||||||
|
|
||||||
|
if len(qs) > 0 {
|
||||||
|
sort.Sort(quantSort(qs))
|
||||||
|
}
|
||||||
|
sum.Quantile = qs
|
||||||
|
|
||||||
|
out.Summary = sum
|
||||||
|
out.Label = s.labelPairs
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *summary) newStream() *quantile.Stream {
|
||||||
|
return quantile.NewTargeted(s.objectives)
|
||||||
|
}
|
||||||
|
|
||||||
|
// asyncFlush needs bufMtx locked.
|
||||||
|
func (s *summary) asyncFlush(now time.Time) {
|
||||||
|
s.mtx.Lock()
|
||||||
|
s.swapBufs(now)
|
||||||
|
|
||||||
|
// Unblock the original goroutine that was responsible for the mutation
|
||||||
|
// that triggered the compaction. But hold onto the global non-buffer
|
||||||
|
// state mutex until the operation finishes.
|
||||||
|
go func() {
|
||||||
|
s.flushColdBuf()
|
||||||
|
s.mtx.Unlock()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
// rotateStreams needs mtx AND bufMtx locked.
|
||||||
|
func (s *summary) maybeRotateStreams() {
|
||||||
|
for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
|
||||||
|
s.headStream.Reset()
|
||||||
|
s.headStreamIdx++
|
||||||
|
if s.headStreamIdx >= len(s.streams) {
|
||||||
|
s.headStreamIdx = 0
|
||||||
|
}
|
||||||
|
s.headStream = s.streams[s.headStreamIdx]
|
||||||
|
s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// flushColdBuf needs mtx locked.
|
||||||
|
func (s *summary) flushColdBuf() {
|
||||||
|
for _, v := range s.coldBuf {
|
||||||
|
for _, stream := range s.streams {
|
||||||
|
stream.Insert(v)
|
||||||
|
}
|
||||||
|
s.cnt++
|
||||||
|
s.sum += v
|
||||||
|
}
|
||||||
|
s.coldBuf = s.coldBuf[0:0]
|
||||||
|
s.maybeRotateStreams()
|
||||||
|
}
|
||||||
|
|
||||||
|
// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
|
||||||
|
func (s *summary) swapBufs(now time.Time) {
|
||||||
|
if len(s.coldBuf) != 0 {
|
||||||
|
panic("coldBuf is not empty")
|
||||||
|
}
|
||||||
|
s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
|
||||||
|
// hotBuf is now empty and gets new expiration set.
|
||||||
|
for now.After(s.hotBufExpTime) {
|
||||||
|
s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type summaryCounts struct {
|
||||||
|
// sumBits contains the bits of the float64 representing the sum of all
|
||||||
|
// observations. sumBits and count have to go first in the struct to
|
||||||
|
// guarantee alignment for atomic operations.
|
||||||
|
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
|
||||||
|
sumBits uint64
|
||||||
|
count uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
type noObjectivesSummary struct {
|
||||||
|
// countAndHotIdx enables lock-free writes with use of atomic updates.
|
||||||
|
// The most significant bit is the hot index [0 or 1] of the count field
|
||||||
|
// below. Observe calls update the hot one. All remaining bits count the
|
||||||
|
// number of Observe calls. Observe starts by incrementing this counter,
|
||||||
|
// and finish by incrementing the count field in the respective
|
||||||
|
// summaryCounts, as a marker for completion.
|
||||||
|
//
|
||||||
|
// Calls of the Write method (which are non-mutating reads from the
|
||||||
|
// perspective of the summary) swap the hot–cold under the writeMtx
|
||||||
|
// lock. A cooldown is awaited (while locked) by comparing the number of
|
||||||
|
// observations with the initiation count. Once they match, then the
|
||||||
|
// last observation on the now cool one has completed. All cool fields must
|
||||||
|
// be merged into the new hot before releasing writeMtx.
|
||||||
|
|
||||||
|
// Fields with atomic access first! See alignment constraint:
|
||||||
|
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
|
||||||
|
countAndHotIdx uint64
|
||||||
|
|
||||||
|
selfCollector
|
||||||
|
desc *Desc
|
||||||
|
writeMtx sync.Mutex // Only used in the Write method.
|
||||||
|
|
||||||
|
// Two counts, one is "hot" for lock-free observations, the other is
|
||||||
|
// "cold" for writing out a dto.Metric. It has to be an array of
|
||||||
|
// pointers to guarantee 64bit alignment of the histogramCounts, see
|
||||||
|
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
|
||||||
|
counts [2]*summaryCounts
|
||||||
|
|
||||||
|
labelPairs []*dto.LabelPair
|
||||||
|
|
||||||
|
createdTs *timestamppb.Timestamp
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *noObjectivesSummary) Desc() *Desc {
|
||||||
|
return s.desc
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *noObjectivesSummary) Observe(v float64) {
|
||||||
|
// We increment h.countAndHotIdx so that the counter in the lower
|
||||||
|
// 63 bits gets incremented. At the same time, we get the new value
|
||||||
|
// back, which we can use to find the currently-hot counts.
|
||||||
|
n := atomic.AddUint64(&s.countAndHotIdx, 1)
|
||||||
|
hotCounts := s.counts[n>>63]
|
||||||
|
|
||||||
|
for {
|
||||||
|
oldBits := atomic.LoadUint64(&hotCounts.sumBits)
|
||||||
|
newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
|
||||||
|
if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Increment count last as we take it as a signal that the observation
|
||||||
|
// is complete.
|
||||||
|
atomic.AddUint64(&hotCounts.count, 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *noObjectivesSummary) Write(out *dto.Metric) error {
|
||||||
|
// For simplicity, we protect this whole method by a mutex. It is not in
|
||||||
|
// the hot path, i.e. Observe is called much more often than Write. The
|
||||||
|
// complication of making Write lock-free isn't worth it, if possible at
|
||||||
|
// all.
|
||||||
|
s.writeMtx.Lock()
|
||||||
|
defer s.writeMtx.Unlock()
|
||||||
|
|
||||||
|
// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
|
||||||
|
// without touching the count bits. See the struct comments for a full
|
||||||
|
// description of the algorithm.
|
||||||
|
n := atomic.AddUint64(&s.countAndHotIdx, 1<<63)
|
||||||
|
// count is contained unchanged in the lower 63 bits.
|
||||||
|
count := n & ((1 << 63) - 1)
|
||||||
|
// The most significant bit tells us which counts is hot. The complement
|
||||||
|
// is thus the cold one.
|
||||||
|
hotCounts := s.counts[n>>63]
|
||||||
|
coldCounts := s.counts[(^n)>>63]
|
||||||
|
|
||||||
|
// Await cooldown.
|
||||||
|
for count != atomic.LoadUint64(&coldCounts.count) {
|
||||||
|
runtime.Gosched() // Let observations get work done.
|
||||||
|
}
|
||||||
|
|
||||||
|
sum := &dto.Summary{
|
||||||
|
SampleCount: proto.Uint64(count),
|
||||||
|
SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
|
||||||
|
CreatedTimestamp: s.createdTs,
|
||||||
|
}
|
||||||
|
|
||||||
|
out.Summary = sum
|
||||||
|
out.Label = s.labelPairs
|
||||||
|
|
||||||
|
// Finally add all the cold counts to the new hot counts and reset the cold counts.
|
||||||
|
atomic.AddUint64(&hotCounts.count, count)
|
||||||
|
atomic.StoreUint64(&coldCounts.count, 0)
|
||||||
|
for {
|
||||||
|
oldBits := atomic.LoadUint64(&hotCounts.sumBits)
|
||||||
|
newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum())
|
||||||
|
if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
|
||||||
|
atomic.StoreUint64(&coldCounts.sumBits, 0)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type quantSort []*dto.Quantile
|
||||||
|
|
||||||
|
func (s quantSort) Len() int {
|
||||||
|
return len(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s quantSort) Swap(i, j int) {
|
||||||
|
s[i], s[j] = s[j], s[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s quantSort) Less(i, j int) bool {
|
||||||
|
return s[i].GetQuantile() < s[j].GetQuantile()
|
||||||
|
}
|
||||||
|
|
||||||
|
// SummaryVec is a Collector that bundles a set of Summaries that all share the
|
||||||
|
// same Desc, but have different values for their variable labels. This is used
|
||||||
|
// if you want to count the same thing partitioned by various dimensions
|
||||||
|
// (e.g. HTTP request latencies, partitioned by status code and method). Create
|
||||||
|
// instances with NewSummaryVec.
|
||||||
|
type SummaryVec struct {
|
||||||
|
*MetricVec
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
|
||||||
|
// partitioned by the given label names.
|
||||||
|
//
|
||||||
|
// Due to the way a Summary is represented in the Prometheus text format and how
|
||||||
|
// it is handled by the Prometheus server internally, “quantile” is an illegal
|
||||||
|
// label name. NewSummaryVec will panic if this label name is used.
|
||||||
|
func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
|
||||||
|
return V2.NewSummaryVec(SummaryVecOpts{
|
||||||
|
SummaryOpts: opts,
|
||||||
|
VariableLabels: UnconstrainedLabels(labelNames),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSummaryVec creates a new SummaryVec based on the provided SummaryVecOpts.
|
||||||
|
func (v2) NewSummaryVec(opts SummaryVecOpts) *SummaryVec {
|
||||||
|
for _, ln := range opts.VariableLabels.labelNames() {
|
||||||
|
if ln == quantileLabel {
|
||||||
|
panic(errQuantileLabelNotAllowed)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
desc := V2.NewDesc(
|
||||||
|
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||||
|
opts.Help,
|
||||||
|
opts.VariableLabels,
|
||||||
|
opts.ConstLabels,
|
||||||
|
)
|
||||||
|
return &SummaryVec{
|
||||||
|
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
|
||||||
|
return newSummary(desc, opts.SummaryOpts, lvs...)
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetMetricWithLabelValues returns the Summary for the given slice of label
|
||||||
|
// values (same order as the variable labels in Desc). If that combination of
|
||||||
|
// label values is accessed for the first time, a new Summary is created.
|
||||||
|
//
|
||||||
|
// It is possible to call this method without using the returned Summary to only
|
||||||
|
// create the new Summary but leave it at its starting value, a Summary without
|
||||||
|
// any observations.
|
||||||
|
//
|
||||||
|
// Keeping the Summary for later use is possible (and should be considered if
|
||||||
|
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
|
||||||
|
// Delete can be used to delete the Summary from the SummaryVec. In that case,
|
||||||
|
// the Summary will still exist, but it will not be exported anymore, even if a
|
||||||
|
// Summary with the same label values is created later. See also the CounterVec
|
||||||
|
// example.
|
||||||
|
//
|
||||||
|
// An error is returned if the number of label values is not the same as the
|
||||||
|
// number of variable labels in Desc (minus any curried labels).
|
||||||
|
//
|
||||||
|
// Note that for more than one label value, this method is prone to mistakes
|
||||||
|
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
|
||||||
|
// an alternative to avoid that type of mistake. For higher label numbers, the
|
||||||
|
// latter has a much more readable (albeit more verbose) syntax, but it comes
|
||||||
|
// with a performance overhead (for creating and processing the Labels map).
|
||||||
|
// See also the GaugeVec example.
|
||||||
|
func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
|
||||||
|
metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
|
||||||
|
if metric != nil {
|
||||||
|
return metric.(Observer), err
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetMetricWith returns the Summary for the given Labels map (the label names
|
||||||
|
// must match those of the variable labels in Desc). If that label map is
|
||||||
|
// accessed for the first time, a new Summary is created. Implications of
|
||||||
|
// creating a Summary without using it and keeping the Summary for later use are
|
||||||
|
// the same as for GetMetricWithLabelValues.
|
||||||
|
//
|
||||||
|
// An error is returned if the number and names of the Labels are inconsistent
|
||||||
|
// with those of the variable labels in Desc (minus any curried labels).
|
||||||
|
//
|
||||||
|
// This method is used for the same purpose as
|
||||||
|
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
|
||||||
|
// methods.
|
||||||
|
func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
|
||||||
|
metric, err := v.MetricVec.GetMetricWith(labels)
|
||||||
|
if metric != nil {
|
||||||
|
return metric.(Observer), err
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithLabelValues works as GetMetricWithLabelValues, but panics where
|
||||||
|
// GetMetricWithLabelValues would have returned an error. Not returning an
|
||||||
|
// error allows shortcuts like
|
||||||
|
//
|
||||||
|
// myVec.WithLabelValues("404", "GET").Observe(42.21)
|
||||||
|
func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
|
||||||
|
s, err := v.GetMetricWithLabelValues(lvs...)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
|
||||||
|
// returned an error. Not returning an error allows shortcuts like
|
||||||
|
//
|
||||||
|
// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
|
||||||
|
func (v *SummaryVec) With(labels Labels) Observer {
|
||||||
|
s, err := v.GetMetricWith(labels)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// CurryWith returns a vector curried with the provided labels, i.e. the
|
||||||
|
// returned vector has those labels pre-set for all labeled operations performed
|
||||||
|
// on it. The cardinality of the curried vector is reduced accordingly. The
|
||||||
|
// order of the remaining labels stays the same (just with the curried labels
|
||||||
|
// taken out of the sequence – which is relevant for the
|
||||||
|
// (GetMetric)WithLabelValues methods). It is possible to curry a curried
|
||||||
|
// vector, but only with labels not yet used for currying before.
|
||||||
|
//
|
||||||
|
// The metrics contained in the SummaryVec are shared between the curried and
|
||||||
|
// uncurried vectors. They are just accessed differently. Curried and uncurried
|
||||||
|
// vectors behave identically in terms of collection. Only one must be
|
||||||
|
// registered with a given registry (usually the uncurried version). The Reset
|
||||||
|
// method deletes all metrics, even if called on a curried vector.
|
||||||
|
func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) {
|
||||||
|
vec, err := v.MetricVec.CurryWith(labels)
|
||||||
|
if vec != nil {
|
||||||
|
return &SummaryVec{vec}, err
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustCurryWith works as CurryWith but panics where CurryWith would have
|
||||||
|
// returned an error.
|
||||||
|
func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec {
|
||||||
|
vec, err := v.CurryWith(labels)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return vec
|
||||||
|
}
|
||||||
|
|
||||||
|
type constSummary struct {
|
||||||
|
desc *Desc
|
||||||
|
count uint64
|
||||||
|
sum float64
|
||||||
|
quantiles map[float64]float64
|
||||||
|
labelPairs []*dto.LabelPair
|
||||||
|
createdTs *timestamppb.Timestamp
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *constSummary) Desc() *Desc {
|
||||||
|
return s.desc
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *constSummary) Write(out *dto.Metric) error {
|
||||||
|
sum := &dto.Summary{
|
||||||
|
CreatedTimestamp: s.createdTs,
|
||||||
|
}
|
||||||
|
qs := make([]*dto.Quantile, 0, len(s.quantiles))
|
||||||
|
|
||||||
|
sum.SampleCount = proto.Uint64(s.count)
|
||||||
|
sum.SampleSum = proto.Float64(s.sum)
|
||||||
|
|
||||||
|
for rank, q := range s.quantiles {
|
||||||
|
qs = append(qs, &dto.Quantile{
|
||||||
|
Quantile: proto.Float64(rank),
|
||||||
|
Value: proto.Float64(q),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(qs) > 0 {
|
||||||
|
sort.Sort(quantSort(qs))
|
||||||
|
}
|
||||||
|
sum.Quantile = qs
|
||||||
|
|
||||||
|
out.Summary = sum
|
||||||
|
out.Label = s.labelPairs
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewConstSummary returns a metric representing a Prometheus summary with fixed
|
||||||
|
// values for the count, sum, and quantiles. As those parameters cannot be
|
||||||
|
// changed, the returned value does not implement the Summary interface (but
|
||||||
|
// only the Metric interface). Users of this package will not have much use for
|
||||||
|
// it in regular operations. However, when implementing custom Collectors, it is
|
||||||
|
// useful as a throw-away metric that is generated on the fly to send it to
|
||||||
|
// Prometheus in the Collect method.
|
||||||
|
//
|
||||||
|
// quantiles maps ranks to quantile values. For example, a median latency of
|
||||||
|
// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
|
||||||
|
//
|
||||||
|
// map[float64]float64{0.5: 0.23, 0.99: 0.56}
|
||||||
|
//
|
||||||
|
// NewConstSummary returns an error if the length of labelValues is not
|
||||||
|
// consistent with the variable labels in Desc or if Desc is invalid.
|
||||||
|
func NewConstSummary(
|
||||||
|
desc *Desc,
|
||||||
|
count uint64,
|
||||||
|
sum float64,
|
||||||
|
quantiles map[float64]float64,
|
||||||
|
labelValues ...string,
|
||||||
|
) (Metric, error) {
|
||||||
|
if desc.err != nil {
|
||||||
|
return nil, desc.err
|
||||||
|
}
|
||||||
|
if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &constSummary{
|
||||||
|
desc: desc,
|
||||||
|
count: count,
|
||||||
|
sum: sum,
|
||||||
|
quantiles: quantiles,
|
||||||
|
labelPairs: MakeLabelPairs(desc, labelValues),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustNewConstSummary is a version of NewConstSummary that panics where
|
||||||
|
// NewConstMetric would have returned an error.
|
||||||
|
func MustNewConstSummary(
|
||||||
|
desc *Desc,
|
||||||
|
count uint64,
|
||||||
|
sum float64,
|
||||||
|
quantiles map[float64]float64,
|
||||||
|
labelValues ...string,
|
||||||
|
) Metric {
|
||||||
|
m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return m
|
||||||
|
}
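
As the doc comments above recommend, quantiles have to be requested explicitly via Objectives. A minimal usage sketch of NewSummaryVec together with currying; the metric name and label values are illustrative only.

package main

import (
    "time"

    "github.com/prometheus/client_golang/prometheus"
)

func main() {
    // The default Objectives value is an empty map, i.e. a summary with
    // only _sum and _count, so the quantile ranks are set explicitly here.
    latency := prometheus.NewSummaryVec(prometheus.SummaryOpts{
        Name:       "myapp_request_duration_seconds",
        Help:       "Request latency in seconds.",
        Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
        MaxAge:     5 * time.Minute, // sliding window for the quantile estimates
    }, []string{"handler", "method"})
    prometheus.MustRegister(latency)

    latency.WithLabelValues("/users", "GET").Observe(0.042)

    // MustCurryWith pre-sets "handler"; only "method" remains variable.
    userLatency := latency.MustCurryWith(prometheus.Labels{"handler": "/users"})
    userLatency.WithLabelValues("POST").Observe(0.131)
}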
81 vendor/github.com/prometheus/client_golang/prometheus/timer.go generated vendored Normal file
@@ -0,0 +1,81 @@
// Copyright 2016 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package prometheus
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
// Timer is a helper type to time functions. Use NewTimer to create new
|
||||||
|
// instances.
|
||||||
|
type Timer struct {
|
||||||
|
begin time.Time
|
||||||
|
observer Observer
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTimer creates a new Timer. The provided Observer is used to observe a
|
||||||
|
// duration in seconds. If the Observer implements ExemplarObserver, passing exemplar
|
||||||
|
// later on will be also supported.
|
||||||
|
// Timer is usually used to time a function call in the
|
||||||
|
// following way:
|
||||||
|
//
|
||||||
|
// func TimeMe() {
|
||||||
|
// timer := NewTimer(myHistogram)
|
||||||
|
// defer timer.ObserveDuration()
|
||||||
|
// // Do actual work.
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// or
|
||||||
|
//
|
||||||
|
// func TimeMeWithExemplar() {
|
||||||
|
// timer := NewTimer(myHistogram)
|
||||||
|
// defer timer.ObserveDurationWithExemplar(exemplar)
|
||||||
|
// // Do actual work.
|
||||||
|
// }
|
||||||
|
func NewTimer(o Observer) *Timer {
|
||||||
|
return &Timer{
|
||||||
|
begin: time.Now(),
|
||||||
|
observer: o,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ObserveDuration records the duration passed since the Timer was created with
|
||||||
|
// NewTimer. It calls the Observe method of the Observer provided during
|
||||||
|
// construction with the duration in seconds as an argument. The observed
|
||||||
|
// duration is also returned. ObserveDuration is usually called with a defer
|
||||||
|
// statement.
|
||||||
|
//
|
||||||
|
// Note that this method is only guaranteed to never observe negative durations
|
||||||
|
// if used with Go1.9+.
|
||||||
|
func (t *Timer) ObserveDuration() time.Duration {
|
||||||
|
d := time.Since(t.begin)
|
||||||
|
if t.observer != nil {
|
||||||
|
t.observer.Observe(d.Seconds())
|
||||||
|
}
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
|
||||||
|
// ObserveDurationWithExemplar is like ObserveDuration, but it will also
|
||||||
|
// observe exemplar with the duration unless exemplar is nil or provided Observer can't
|
||||||
|
// be casted to ExemplarObserver.
|
||||||
|
func (t *Timer) ObserveDurationWithExemplar(exemplar Labels) time.Duration {
|
||||||
|
d := time.Since(t.begin)
|
||||||
|
eo, ok := t.observer.(ExemplarObserver)
|
||||||
|
if ok && exemplar != nil {
|
||||||
|
eo.ObserveWithExemplar(d.Seconds(), exemplar)
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
if t.observer != nil {
|
||||||
|
t.observer.Observe(d.Seconds())
|
||||||
|
}
|
||||||
|
return d
|
||||||
|
}
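
A small sketch of the usual Timer pattern against a Summary observer; the names are illustrative.

package main

import (
    "time"

    "github.com/prometheus/client_golang/prometheus"
)

// opLatency is a hypothetical summary acting as the Observer behind the timer.
var opLatency = prometheus.NewSummary(prometheus.SummaryOpts{
    Name:       "myapp_operation_duration_seconds",
    Help:       "Duration of the operation in seconds.",
    Objectives: map[float64]float64{0.5: 0.05, 0.99: 0.001},
})

func timedOperation() {
    // ObserveDuration runs when the function returns and records the
    // elapsed time in seconds on opLatency.
    timer := prometheus.NewTimer(opLatency)
    defer timer.ObserveDuration()

    time.Sleep(25 * time.Millisecond) // stand-in for real work
}

func main() {
    prometheus.MustRegister(opLatency)
    timedOperation()
}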
42 vendor/github.com/prometheus/client_golang/prometheus/untyped.go generated vendored Normal file
@@ -0,0 +1,42 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prometheus

// UntypedOpts is an alias for Opts. See there for doc comments.
type UntypedOpts Opts

// UntypedFunc works like GaugeFunc but the collected metric is of type
// "Untyped". UntypedFunc is useful to mirror an external metric of unknown
// type.
//
// To create UntypedFunc instances, use NewUntypedFunc.
type UntypedFunc interface {
    Metric
    Collector
}

// NewUntypedFunc creates a new UntypedFunc based on the provided
// UntypedOpts. The value reported is determined by calling the given function
// from within the Write method. Take into account that metric collection may
// happen concurrently. If that results in concurrent calls to Write, like in
// the case where an UntypedFunc is directly registered with Prometheus, the
// provided function must be concurrency-safe.
func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc {
    return newValueFunc(NewDesc(
        BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
        opts.Help,
        nil,
        opts.ConstLabels,
    ), UntypedValue, function)
}
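
A short sketch of mirroring an external value with NewUntypedFunc; readExternalValue and the metric name are hypothetical.

package main

import (
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

// readExternalValue is a stand-in for whatever reads the external metric.
// It must be safe for concurrent calls, since collection may be concurrent.
func readExternalValue() float64 {
    return 42
}

func main() {
    // Mirror a value of unknown type from some external system.
    external := prometheus.NewUntypedFunc(prometheus.UntypedOpts{
        Name: "myapp_external_value",
        Help: "Value mirrored from an external system; type unknown.",
    }, readExternalValue)

    prometheus.MustRegister(external)
    http.Handle("/metrics", promhttp.Handler())
    _ = http.ListenAndServe(":8080", nil)
}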
274 vendor/github.com/prometheus/client_golang/prometheus/value.go generated vendored Normal file
@@ -0,0 +1,274 @@
// Copyright 2014 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package prometheus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"sort"
|
||||||
|
"time"
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"github.com/prometheus/client_golang/prometheus/internal"
|
||||||
|
|
||||||
|
dto "github.com/prometheus/client_model/go"
|
||||||
|
"google.golang.org/protobuf/proto"
|
||||||
|
"google.golang.org/protobuf/types/known/timestamppb"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ValueType is an enumeration of metric types that represent a simple value.
|
||||||
|
type ValueType int
|
||||||
|
|
||||||
|
// Possible values for the ValueType enum. Use UntypedValue to mark a metric
|
||||||
|
// with an unknown type.
|
||||||
|
const (
|
||||||
|
_ ValueType = iota
|
||||||
|
CounterValue
|
||||||
|
GaugeValue
|
||||||
|
UntypedValue
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
CounterMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_COUNTER; return &d }()
|
||||||
|
GaugeMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_GAUGE; return &d }()
|
||||||
|
UntypedMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_UNTYPED; return &d }()
|
||||||
|
)
|
||||||
|
|
||||||
|
func (v ValueType) ToDTO() *dto.MetricType {
|
||||||
|
switch v {
|
||||||
|
case CounterValue:
|
||||||
|
return CounterMetricTypePtr
|
||||||
|
case GaugeValue:
|
||||||
|
return GaugeMetricTypePtr
|
||||||
|
default:
|
||||||
|
return UntypedMetricTypePtr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// valueFunc is a generic metric for simple values retrieved on collect time
|
||||||
|
// from a function. It implements Metric and Collector. Its effective type is
|
||||||
|
// determined by ValueType. This is a low-level building block used by the
|
||||||
|
// library to back the implementations of CounterFunc, GaugeFunc, and
|
||||||
|
// UntypedFunc.
|
||||||
|
type valueFunc struct {
|
||||||
|
selfCollector
|
||||||
|
|
||||||
|
desc *Desc
|
||||||
|
valType ValueType
|
||||||
|
function func() float64
|
||||||
|
labelPairs []*dto.LabelPair
|
||||||
|
}
|
||||||
|
|
||||||
|
// newValueFunc returns a newly allocated valueFunc with the given Desc and
|
||||||
|
// ValueType. The value reported is determined by calling the given function
|
||||||
|
// from within the Write method. Take into account that metric collection may
|
||||||
|
// happen concurrently. If that results in concurrent calls to Write, like in
|
||||||
|
// the case where a valueFunc is directly registered with Prometheus, the
|
||||||
|
// provided function must be concurrency-safe.
|
||||||
|
func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc {
|
||||||
|
result := &valueFunc{
|
||||||
|
desc: desc,
|
||||||
|
valType: valueType,
|
||||||
|
function: function,
|
||||||
|
labelPairs: MakeLabelPairs(desc, nil),
|
||||||
|
}
|
||||||
|
result.init(result)
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
func (v *valueFunc) Desc() *Desc {
|
||||||
|
return v.desc
|
||||||
|
}
|
||||||
|
|
||||||
|
func (v *valueFunc) Write(out *dto.Metric) error {
|
||||||
|
return populateMetric(v.valType, v.function(), v.labelPairs, nil, out, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewConstMetric returns a metric with one fixed value that cannot be
|
||||||
|
// changed. Users of this package will not have much use for it in regular
|
||||||
|
// operations. However, when implementing custom Collectors, it is useful as a
|
||||||
|
// throw-away metric that is generated on the fly to send it to Prometheus in
|
||||||
|
// the Collect method. NewConstMetric returns an error if the length of
|
||||||
|
// labelValues is not consistent with the variable labels in Desc or if Desc is
|
||||||
|
// invalid.
|
||||||
|
func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
|
||||||
|
if desc.err != nil {
|
||||||
|
return nil, desc.err
|
||||||
|
}
|
||||||
|
if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
metric := &dto.Metric{}
|
||||||
|
if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, nil); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &constMetric{
|
||||||
|
desc: desc,
|
||||||
|
metric: metric,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustNewConstMetric is a version of NewConstMetric that panics where
|
||||||
|
// NewConstMetric would have returned an error.
|
||||||
|
func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric {
|
||||||
|
m, err := NewConstMetric(desc, valueType, value, labelValues...)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewConstMetricWithCreatedTimestamp does the same thing as NewConstMetric, but generates Counters
|
||||||
|
// with created timestamp set and returns an error for other metric types.
|
||||||
|
func NewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) (Metric, error) {
|
||||||
|
if desc.err != nil {
|
||||||
|
return nil, desc.err
|
||||||
|
}
|
||||||
|
if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
switch valueType {
|
||||||
|
case CounterValue:
|
||||||
|
break
|
||||||
|
default:
|
||||||
|
return nil, errors.New("created timestamps are only supported for counters")
|
||||||
|
}
|
||||||
|
|
||||||
|
metric := &dto.Metric{}
|
||||||
|
if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, timestamppb.New(ct)); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &constMetric{
|
||||||
|
desc: desc,
|
||||||
|
metric: metric,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustNewConstMetricWithCreatedTimestamp is a version of NewConstMetricWithCreatedTimestamp that panics where
|
||||||
|
// NewConstMetricWithCreatedTimestamp would have returned an error.
|
||||||
|
func MustNewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) Metric {
|
||||||
|
m, err := NewConstMetricWithCreatedTimestamp(desc, valueType, value, ct, labelValues...)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
type constMetric struct {
|
||||||
|
desc *Desc
|
||||||
|
metric *dto.Metric
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *constMetric) Desc() *Desc {
|
||||||
|
return m.desc
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *constMetric) Write(out *dto.Metric) error {
|
||||||
|
out.Label = m.metric.Label
|
||||||
|
out.Counter = m.metric.Counter
|
||||||
|
out.Gauge = m.metric.Gauge
|
||||||
|
out.Untyped = m.metric.Untyped
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func populateMetric(
|
||||||
|
t ValueType,
|
||||||
|
v float64,
|
||||||
|
labelPairs []*dto.LabelPair,
|
||||||
|
e *dto.Exemplar,
|
||||||
|
m *dto.Metric,
|
||||||
|
ct *timestamppb.Timestamp,
|
||||||
|
) error {
|
||||||
|
m.Label = labelPairs
|
||||||
|
switch t {
|
||||||
|
case CounterValue:
|
||||||
|
m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e, CreatedTimestamp: ct}
|
||||||
|
case GaugeValue:
|
||||||
|
m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
|
||||||
|
case UntypedValue:
|
||||||
|
m.Untyped = &dto.Untyped{Value: proto.Float64(v)}
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("encountered unknown type %v", t)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MakeLabelPairs is a helper function to create protobuf LabelPairs from the
|
||||||
|
// variable and constant labels in the provided Desc. The values for the
|
||||||
|
// variable labels are defined by the labelValues slice, which must be in the
|
||||||
|
// same order as the corresponding variable labels in the Desc.
|
||||||
|
//
|
||||||
|
// This function is only needed for custom Metric implementations. See MetricVec
|
||||||
|
// example.
|
||||||
|
func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
|
||||||
|
totalLen := len(desc.variableLabels.names) + len(desc.constLabelPairs)
|
||||||
|
if totalLen == 0 {
|
||||||
|
// Super fast path.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if len(desc.variableLabels.names) == 0 {
|
||||||
|
// Moderately fast path.
|
||||||
|
return desc.constLabelPairs
|
||||||
|
}
|
||||||
|
labelPairs := make([]*dto.LabelPair, 0, totalLen)
|
||||||
|
for i, l := range desc.variableLabels.names {
|
||||||
|
labelPairs = append(labelPairs, &dto.LabelPair{
|
||||||
|
Name: proto.String(l),
|
||||||
|
Value: proto.String(labelValues[i]),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
labelPairs = append(labelPairs, desc.constLabelPairs...)
|
||||||
|
sort.Sort(internal.LabelPairSorter(labelPairs))
|
||||||
|
return labelPairs
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExemplarMaxRunes is the max total number of runes allowed in exemplar labels.
|
||||||
|
const ExemplarMaxRunes = 128
|
||||||
|
|
||||||
|
// newExemplar creates a new dto.Exemplar from the provided values. An error is
|
||||||
|
// returned if any of the label names or values are invalid or if the total
|
||||||
|
// number of runes in the label names and values exceeds ExemplarMaxRunes.
|
||||||
|
func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) {
|
||||||
|
e := &dto.Exemplar{}
|
||||||
|
e.Value = proto.Float64(value)
|
||||||
|
tsProto := timestamppb.New(ts)
|
||||||
|
if err := tsProto.CheckValid(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
e.Timestamp = tsProto
|
||||||
|
labelPairs := make([]*dto.LabelPair, 0, len(l))
|
||||||
|
var runes int
|
||||||
|
for name, value := range l {
|
||||||
|
if !checkLabelName(name) {
|
||||||
|
return nil, fmt.Errorf("exemplar label name %q is invalid", name)
|
||||||
|
}
|
||||||
|
runes += utf8.RuneCountInString(name)
|
||||||
|
if !utf8.ValidString(value) {
|
||||||
|
return nil, fmt.Errorf("exemplar label value %q is not valid UTF-8", value)
|
||||||
|
}
|
||||||
|
runes += utf8.RuneCountInString(value)
|
||||||
|
labelPairs = append(labelPairs, &dto.LabelPair{
|
||||||
|
Name: proto.String(name),
|
||||||
|
Value: proto.String(value),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if runes > ExemplarMaxRunes {
|
||||||
|
return nil, fmt.Errorf("exemplar labels have %d runes, exceeding the limit of %d", runes, ExemplarMaxRunes)
|
||||||
|
}
|
||||||
|
e.Label = labelPairs
|
||||||
|
return e, nil
|
||||||
|
}
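
For context, a rough sketch of the custom-Collector pattern the NewConstMetric doc comment describes: a throw-away metric built on each scrape. The collector, metric name, and readDepth helper are made up for the example.

package main

import (
    "github.com/prometheus/client_golang/prometheus"
)

// queueCollector is a hypothetical custom Collector that reports the current
// depth of some external queue as a const gauge on every scrape.
type queueCollector struct {
    depthDesc *prometheus.Desc
    readDepth func() float64 // stand-in for whatever fetches the value
}

func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
    ch <- c.depthDesc
}

func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
    ch <- prometheus.MustNewConstMetric(
        c.depthDesc,
        prometheus.GaugeValue,
        c.readDepth(),
        "default", // value for the "queue" variable label
    )
}

func main() {
    c := &queueCollector{
        depthDesc: prometheus.NewDesc(
            "myapp_queue_depth",
            "Current number of items in the queue.",
            []string{"queue"},
            nil,
        ),
        readDepth: func() float64 { return 17 },
    }
    prometheus.MustRegister(c)
}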
709 vendor/github.com/prometheus/client_golang/prometheus/vec.go generated vendored Normal file
@@ -0,0 +1,709 @@
// Copyright 2014 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package prometheus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/prometheus/common/model"
|
||||||
|
)
|
||||||
|
|
// MetricVec is a Collector to bundle metrics of the same name that differ in
// their label values. MetricVec is not used directly but as a building block
// for implementations of vectors of a given metric type, like GaugeVec,
// CounterVec, SummaryVec, and HistogramVec. It is exported so that it can be
// used for custom Metric implementations.
//
// To create a FooVec for custom Metric Foo, embed a pointer to MetricVec in
// FooVec and initialize it with NewMetricVec. Implement wrappers for
// GetMetricWithLabelValues and GetMetricWith that return (Foo, error) rather
// than (Metric, error). Similarly, create a wrapper for CurryWith that returns
// (*FooVec, error) rather than (*MetricVec, error). It is recommended to also
// add the convenience methods WithLabelValues, With, and MustCurryWith, which
// panic instead of returning errors. See also the MetricVec example.
type MetricVec struct {
	*metricMap

	curry []curriedLabelValue

	// hashAdd and hashAddByte can be replaced for testing collision handling.
	hashAdd     func(h uint64, s string) uint64
	hashAddByte func(h uint64, b byte) uint64
}

// NewMetricVec returns an initialized metricVec.
func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
	return &MetricVec{
		metricMap: &metricMap{
			metrics:   map[uint64][]metricWithLabelValues{},
			desc:      desc,
			newMetric: newMetric,
		},
		hashAdd:     hashAdd,
		hashAddByte: hashAddByte,
	}
}

// DeleteLabelValues removes the metric where the variable labels are the same
// as those passed in as labels (same order as the VariableLabels in Desc). It
// returns true if a metric was deleted.
//
// It is not an error if the number of label values is not the same as the
// number of VariableLabels in Desc. However, such inconsistent label count can
// never match an actual metric, so the method will always return false in that
// case.
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider Delete(Labels) as an
// alternative to avoid that type of mistake. For higher label numbers, the
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
// See also the CounterVec example.
func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
	lvs = constrainLabelValues(m.desc, lvs, m.curry)

	h, err := m.hashLabelValues(lvs)
	if err != nil {
		return false
	}

	return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry)
}

// Delete deletes the metric where the variable labels are the same as those
// passed in as labels. It returns true if a metric was deleted.
//
// It is not an error if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc. However, such inconsistent Labels
// can never match an actual metric, so the method will always return false in
// that case.
//
// This method is used for the same purpose as DeleteLabelValues(...string). See
// there for pros and cons of the two methods.
func (m *MetricVec) Delete(labels Labels) bool {
	labels, closer := constrainLabels(m.desc, labels)
	defer closer()

	h, err := m.hashLabels(labels)
	if err != nil {
		return false
	}

	return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
}

// DeletePartialMatch deletes all metrics where the variable labels contain all of those
// passed in as labels. The order of the labels does not matter.
// It returns the number of metrics deleted.
//
// Note that curried labels will never be matched if deleting from the curried vector.
// To match curried labels with DeletePartialMatch, it must be called on the base vector.
func (m *MetricVec) DeletePartialMatch(labels Labels) int {
	labels, closer := constrainLabels(m.desc, labels)
	defer closer()

	return m.metricMap.deleteByLabels(labels, m.curry)
}
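
The three deletion methods above are normally reached through a concrete vector type rather than through MetricVec directly. A minimal sketch, assuming a CounterVec with hypothetical "method" and "path" labels (not taken from the vendored code):

	reqs := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "http_requests_total", Help: "HTTP requests."},
		[]string{"method", "path"},
	)
	reqs.WithLabelValues("GET", "/users").Inc()

	reqs.DeleteLabelValues("GET", "/users")                           // exact match, by value order
	reqs.Delete(prometheus.Labels{"method": "GET", "path": "/users"}) // exact match, by label name
	reqs.DeletePartialMatch(prometheus.Labels{"method": "GET"})       // every series with method="GET"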

// Without explicit forwarding of Describe, Collect, Reset, those methods won't
// show up in GoDoc.

// Describe implements Collector.
func (m *MetricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) }

// Collect implements Collector.
func (m *MetricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) }

// Reset deletes all metrics in this vector.
func (m *MetricVec) Reset() { m.metricMap.Reset() }

// CurryWith returns a vector curried with the provided labels, i.e. the
// returned vector has those labels pre-set for all labeled operations performed
// on it. The cardinality of the curried vector is reduced accordingly. The
// order of the remaining labels stays the same (just with the curried labels
// taken out of the sequence – which is relevant for the
// (GetMetric)WithLabelValues methods). It is possible to curry a curried
// vector, but only with labels not yet used for currying before.
//
// The metrics contained in the MetricVec are shared between the curried and
// uncurried vectors. They are just accessed differently. Curried and uncurried
// vectors behave identically in terms of collection. Only one must be
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
//
// Note that CurryWith is usually not called directly but through a wrapper
// around MetricVec, implementing a vector for a specific Metric
// implementation, for example GaugeVec.
func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
	var (
		newCurry []curriedLabelValue
		oldCurry = m.curry
		iCurry   int
	)
	for i, labelName := range m.desc.variableLabels.names {
		val, ok := labels[labelName]
		if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
			if ok {
				return nil, fmt.Errorf("label name %q is already curried", labelName)
			}
			newCurry = append(newCurry, oldCurry[iCurry])
			iCurry++
		} else {
			if !ok {
				continue // Label stays uncurried.
			}
			newCurry = append(newCurry, curriedLabelValue{
				i,
				m.desc.variableLabels.constrain(labelName, val),
			})
		}
	}
	if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 {
		return nil, fmt.Errorf("%d unknown label(s) found during currying", l)
	}

	return &MetricVec{
		metricMap:   m.metricMap,
		curry:       newCurry,
		hashAdd:     m.hashAdd,
		hashAddByte: m.hashAddByte,
	}, nil
}
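
As the comment above notes, CurryWith is usually exercised through a wrapper such as GaugeVec. A rough sketch of what currying looks like from the caller's side (label and metric names are illustrative, not from this codebase):

	jobs := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{Name: "jobs_in_flight", Help: "Jobs currently running."},
		[]string{"worker", "queue"},
	)
	// Pre-set the "queue" label; the curried vector only needs "worker" values.
	mailQueue := jobs.MustCurryWith(prometheus.Labels{"queue": "mail"})
	mailQueue.WithLabelValues("worker-1").Set(3) // same series as jobs.WithLabelValues("worker-1", "mail")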

// GetMetricWithLabelValues returns the Metric for the given slice of label
// values (same order as the variable labels in Desc). If that combination of
// label values is accessed for the first time, a new Metric is created (by
// calling the newMetric function provided during construction of the
// MetricVec).
//
// It is possible to call this method without using the returned Metric to only
// create the new Metric but leave it in its initial state.
//
// Keeping the Metric for later use is possible (and should be considered if
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
// Delete can be used to delete the Metric from the MetricVec. In that case, the
// Metric will still exist, but it will not be exported anymore, even if a
// Metric with the same label values is created later.
//
// An error is returned if the number of label values is not the same as the
// number of variable labels in Desc (minus any curried labels).
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
// an alternative to avoid that type of mistake. For higher label numbers, the
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
//
// Note that GetMetricWithLabelValues is usually not called directly but through
// a wrapper around MetricVec, implementing a vector for a specific Metric
// implementation, for example GaugeVec.
func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
	lvs = constrainLabelValues(m.desc, lvs, m.curry)
	h, err := m.hashLabelValues(lvs)
	if err != nil {
		return nil, err
	}

	return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
}

// GetMetricWith returns the Metric for the given Labels map (the label names
// must match those of the variable labels in Desc). If that label map is
// accessed for the first time, a new Metric is created. Implications of
// creating a Metric without using it and keeping the Metric for later use
// are the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the variable labels in Desc (minus any curried labels).
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
//
// Note that GetMetricWith is usually not called directly but through a wrapper
// around MetricVec, implementing a vector for a specific Metric implementation,
// for example GaugeVec.
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
	labels, closer := constrainLabels(m.desc, labels)
	defer closer()

	h, err := m.hashLabels(labels)
	if err != nil {
		return nil, err
	}

	return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil
}

func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
	if err := validateLabelValues(vals, len(m.desc.variableLabels.names)-len(m.curry)); err != nil {
		return 0, err
	}

	var (
		h             = hashNew()
		curry         = m.curry
		iVals, iCurry int
	)
	for i := 0; i < len(m.desc.variableLabels.names); i++ {
		if iCurry < len(curry) && curry[iCurry].index == i {
			h = m.hashAdd(h, curry[iCurry].value)
			iCurry++
		} else {
			h = m.hashAdd(h, vals[iVals])
			iVals++
		}
		h = m.hashAddByte(h, model.SeparatorByte)
	}
	return h, nil
}

func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
	if err := validateValuesInLabels(labels, len(m.desc.variableLabels.names)-len(m.curry)); err != nil {
		return 0, err
	}

	var (
		h      = hashNew()
		curry  = m.curry
		iCurry int
	)
	for i, labelName := range m.desc.variableLabels.names {
		val, ok := labels[labelName]
		if iCurry < len(curry) && curry[iCurry].index == i {
			if ok {
				return 0, fmt.Errorf("label name %q is already curried", labelName)
			}
			h = m.hashAdd(h, curry[iCurry].value)
			iCurry++
		} else {
			if !ok {
				return 0, fmt.Errorf("label name %q missing in label map", labelName)
			}
			h = m.hashAdd(h, val)
		}
		h = m.hashAddByte(h, model.SeparatorByte)
	}
	return h, nil
}
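
The remark above about keeping a Metric for later use translates, at the level of a concrete vector, into resolving the child once instead of hashing the labels on every update; a small sketch with an invented counter name:

	opsDone := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "ops_done_total", Help: "Completed operations."},
		[]string{"op"},
	)
	syncOps := opsDone.WithLabelValues("sync") // resolve the child once, outside the hot loop
	for i := 0; i < 1000; i++ {
		syncOps.Inc() // no label hashing per iteration
	}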

// metricWithLabelValues provides the metric and its label values for
// disambiguation on hash collision.
type metricWithLabelValues struct {
	values []string
	metric Metric
}

// curriedLabelValue sets the curried value for a label at the given index.
type curriedLabelValue struct {
	index int
	value string
}

// metricMap is a helper for metricVec and shared between differently curried
// metricVecs.
type metricMap struct {
	mtx       sync.RWMutex // Protects metrics.
	metrics   map[uint64][]metricWithLabelValues
	desc      *Desc
	newMetric func(labelValues ...string) Metric
}

// Describe implements Collector. It will send exactly one Desc to the provided
// channel.
func (m *metricMap) Describe(ch chan<- *Desc) {
	ch <- m.desc
}

// Collect implements Collector.
func (m *metricMap) Collect(ch chan<- Metric) {
	m.mtx.RLock()
	defer m.mtx.RUnlock()

	for _, metrics := range m.metrics {
		for _, metric := range metrics {
			ch <- metric.metric
		}
	}
}

// Reset deletes all metrics in this vector.
func (m *metricMap) Reset() {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	for h := range m.metrics {
		delete(m.metrics, h)
	}
}

// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
// there are multiple matches in the bucket, use lvs to select a metric and
// remove only that metric.
func (m *metricMap) deleteByHashWithLabelValues(
	h uint64, lvs []string, curry []curriedLabelValue,
) bool {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	metrics, ok := m.metrics[h]
	if !ok {
		return false
	}

	i := findMetricWithLabelValues(metrics, lvs, curry)
	if i >= len(metrics) {
		return false
	}

	if len(metrics) > 1 {
		old := metrics
		m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
		old[len(old)-1] = metricWithLabelValues{}
	} else {
		delete(m.metrics, h)
	}
	return true
}

// deleteByHashWithLabels removes the metric from the hash bucket h. If there
// are multiple matches in the bucket, use lvs to select a metric and remove
// only that metric.
func (m *metricMap) deleteByHashWithLabels(
	h uint64, labels Labels, curry []curriedLabelValue,
) bool {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	metrics, ok := m.metrics[h]
	if !ok {
		return false
	}
	i := findMetricWithLabels(m.desc, metrics, labels, curry)
	if i >= len(metrics) {
		return false
	}

	if len(metrics) > 1 {
		old := metrics
		m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
		old[len(old)-1] = metricWithLabelValues{}
	} else {
		delete(m.metrics, h)
	}
	return true
}

// deleteByLabels deletes a metric if the given labels are present in the metric.
func (m *metricMap) deleteByLabels(labels Labels, curry []curriedLabelValue) int {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	var numDeleted int

	for h, metrics := range m.metrics {
		i := findMetricWithPartialLabels(m.desc, metrics, labels, curry)
		if i >= len(metrics) {
			// Didn't find matching labels in this metric slice.
			continue
		}
		delete(m.metrics, h)
		numDeleted++
	}

	return numDeleted
}

// findMetricWithPartialLabel returns the index of the matching metric or
// len(metrics) if not found.
func findMetricWithPartialLabels(
	desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue,
) int {
	for i, metric := range metrics {
		if matchPartialLabels(desc, metric.values, labels, curry) {
			return i
		}
	}
	return len(metrics)
}

// indexOf searches the given slice of strings for the target string and returns
// the index or len(items) as well as a boolean whether the search succeeded.
func indexOf(target string, items []string) (int, bool) {
	for i, l := range items {
		if l == target {
			return i, true
		}
	}
	return len(items), false
}

// valueMatchesVariableOrCurriedValue determines if a value was previously curried,
// and returns whether it matches either the "base" value or the curried value accordingly.
// It also indicates whether the match is against a curried or uncurried value.
func valueMatchesVariableOrCurriedValue(targetValue string, index int, values []string, curry []curriedLabelValue) (bool, bool) {
	for _, curriedValue := range curry {
		if curriedValue.index == index {
			// This label was curried. See if the curried value matches our target.
			return curriedValue.value == targetValue, true
		}
	}
	// This label was not curried. See if the current value matches our target label.
	return values[index] == targetValue, false
}

// matchPartialLabels searches the current metric and returns whether all of the target label:value pairs are present.
func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
	for l, v := range labels {
		// Check if the target label exists in our metrics and get the index.
		varLabelIndex, validLabel := indexOf(l, desc.variableLabels.names)
		if validLabel {
			// Check the value of that label against the target value.
			// We don't consider curried values in partial matches.
			matches, curried := valueMatchesVariableOrCurriedValue(v, varLabelIndex, values, curry)
			if matches && !curried {
				continue
			}
		}
		return false
	}
	return true
}

// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
// or creates it and returns the new one.
//
// This function holds the mutex.
func (m *metricMap) getOrCreateMetricWithLabelValues(
	hash uint64, lvs []string, curry []curriedLabelValue,
) Metric {
	m.mtx.RLock()
	metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry)
	m.mtx.RUnlock()
	if ok {
		return metric
	}

	m.mtx.Lock()
	defer m.mtx.Unlock()
	metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry)
	if !ok {
		inlinedLVs := inlineLabelValues(lvs, curry)
		metric = m.newMetric(inlinedLVs...)
		m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric})
	}
	return metric
}

// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
// or creates it and returns the new one.
//
// This function holds the mutex.
func (m *metricMap) getOrCreateMetricWithLabels(
	hash uint64, labels Labels, curry []curriedLabelValue,
) Metric {
	m.mtx.RLock()
	metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry)
	m.mtx.RUnlock()
	if ok {
		return metric
	}

	m.mtx.Lock()
	defer m.mtx.Unlock()
	metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry)
	if !ok {
		lvs := extractLabelValues(m.desc, labels, curry)
		metric = m.newMetric(lvs...)
		m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric})
	}
	return metric
}

// getMetricWithHashAndLabelValues gets a metric while handling possible
// collisions in the hash space. Must be called while holding the read mutex.
func (m *metricMap) getMetricWithHashAndLabelValues(
	h uint64, lvs []string, curry []curriedLabelValue,
) (Metric, bool) {
	metrics, ok := m.metrics[h]
	if ok {
		if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) {
			return metrics[i].metric, true
		}
	}
	return nil, false
}

// getMetricWithHashAndLabels gets a metric while handling possible collisions in
// the hash space. Must be called while holding read mutex.
func (m *metricMap) getMetricWithHashAndLabels(
	h uint64, labels Labels, curry []curriedLabelValue,
) (Metric, bool) {
	metrics, ok := m.metrics[h]
	if ok {
		if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) {
			return metrics[i].metric, true
		}
	}
	return nil, false
}

// findMetricWithLabelValues returns the index of the matching metric or
// len(metrics) if not found.
func findMetricWithLabelValues(
	metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue,
) int {
	for i, metric := range metrics {
		if matchLabelValues(metric.values, lvs, curry) {
			return i
		}
	}
	return len(metrics)
}

// findMetricWithLabels returns the index of the matching metric or len(metrics)
// if not found.
func findMetricWithLabels(
	desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue,
) int {
	for i, metric := range metrics {
		if matchLabels(desc, metric.values, labels, curry) {
			return i
		}
	}
	return len(metrics)
}

func matchLabelValues(values, lvs []string, curry []curriedLabelValue) bool {
	if len(values) != len(lvs)+len(curry) {
		return false
	}
	var iLVs, iCurry int
	for i, v := range values {
		if iCurry < len(curry) && curry[iCurry].index == i {
			if v != curry[iCurry].value {
				return false
			}
			iCurry++
			continue
		}
		if v != lvs[iLVs] {
			return false
		}
		iLVs++
	}
	return true
}

func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
	if len(values) != len(labels)+len(curry) {
		return false
	}
	iCurry := 0
	for i, k := range desc.variableLabels.names {
		if iCurry < len(curry) && curry[iCurry].index == i {
			if values[i] != curry[iCurry].value {
				return false
			}
			iCurry++
			continue
		}
		if values[i] != labels[k] {
			return false
		}
	}
	return true
}

func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string {
	labelValues := make([]string, len(labels)+len(curry))
	iCurry := 0
	for i, k := range desc.variableLabels.names {
		if iCurry < len(curry) && curry[iCurry].index == i {
			labelValues[i] = curry[iCurry].value
			iCurry++
			continue
		}
		labelValues[i] = labels[k]
	}
	return labelValues
}

func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string {
	labelValues := make([]string, len(lvs)+len(curry))
	var iCurry, iLVs int
	for i := range labelValues {
		if iCurry < len(curry) && curry[iCurry].index == i {
			labelValues[i] = curry[iCurry].value
			iCurry++
			continue
		}
		labelValues[i] = lvs[iLVs]
		iLVs++
	}
	return labelValues
}

var labelsPool = &sync.Pool{
	New: func() interface{} {
		return make(Labels)
	},
}

func constrainLabels(desc *Desc, labels Labels) (Labels, func()) {
	if len(desc.variableLabels.labelConstraints) == 0 {
		// Fast path when there's no constraints
		return labels, func() {}
	}

	constrainedLabels := labelsPool.Get().(Labels)
	for l, v := range labels {
		constrainedLabels[l] = desc.variableLabels.constrain(l, v)
	}

	return constrainedLabels, func() {
		for k := range constrainedLabels {
			delete(constrainedLabels, k)
		}
		labelsPool.Put(constrainedLabels)
	}
}

func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string {
	if len(desc.variableLabels.labelConstraints) == 0 {
		// Fast path when there's no constraints
		return lvs
	}

	constrainedValues := make([]string, len(lvs))
	var iCurry, iLVs int
	for i := 0; i < len(lvs)+len(curry); i++ {
		if iCurry < len(curry) && curry[iCurry].index == i {
			iCurry++
			continue
		}

		if i < len(desc.variableLabels.names) {
			constrainedValues[iLVs] = desc.variableLabels.constrain(
				desc.variableLabels.names[i],
				lvs[iLVs],
			)
		} else {
			constrainedValues[iLVs] = lvs[iLVs]
		}
		iLVs++
	}
	return constrainedValues
}
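
To make the FooVec pattern from the MetricVec doc comment concrete, here is a hedged, self-contained sketch of a custom vector built on the exported NewMetricVec and NewConstMetric; the type name, metric name, and labels are invented for illustration and are not part of the vendored code.

package main

import "github.com/prometheus/client_golang/prometheus"

// InfoVec is a toy custom vector: every child is a gauge fixed at 1 whose
// label values carry the interesting information.
type InfoVec struct {
	*prometheus.MetricVec
}

func NewInfoVec(name, help string, labelNames []string) *InfoVec {
	desc := prometheus.NewDesc(name, help, labelNames, nil)
	return &InfoVec{
		MetricVec: prometheus.NewMetricVec(desc, func(lvs ...string) prometheus.Metric {
			// Each child is created once per label-value combination.
			m, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, 1, lvs...)
			if err != nil {
				panic(err)
			}
			return m
		}),
	}
}

// WithLabelValues narrows the (Metric, error) return of the embedded
// GetMetricWithLabelValues, as the doc comment recommends.
func (v *InfoVec) WithLabelValues(lvs ...string) prometheus.Metric {
	m, err := v.GetMetricWithLabelValues(lvs...)
	if err != nil {
		panic(err)
	}
	return m
}

func main() {
	build := NewInfoVec("build_info", "Build information.", []string{"version", "goversion"})
	prometheus.MustRegister(build) // Describe/Collect come from the embedded *MetricVec.
	build.WithLabelValues("1.2.3", "go1.21")
}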

23 vendor/github.com/prometheus/client_golang/prometheus/vnext.go generated vendored Normal file
@ -0,0 +1,23 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prometheus

type v2 struct{}

// V2 is a struct that can be referenced to access experimental API that might
// be present in v2 of client golang someday. It offers extended functionality
// of v1 with slightly changed API. It is acceptable to use some pieces from v1
// and e.g `prometheus.NewGauge` and some from v2 e.g. `prometheus.V2.NewDesc`
// in the same codebase.
var V2 = v2{}

214 vendor/github.com/prometheus/client_golang/prometheus/wrap.go generated vendored Normal file
@ -0,0 +1,214 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prometheus

import (
	"fmt"
	"sort"

	"github.com/prometheus/client_golang/prometheus/internal"

	dto "github.com/prometheus/client_model/go"
	"google.golang.org/protobuf/proto"
)

// WrapRegistererWith returns a Registerer wrapping the provided
// Registerer. Collectors registered with the returned Registerer will be
// registered with the wrapped Registerer in a modified way. The modified
// Collector adds the provided Labels to all Metrics it collects (as
// ConstLabels). The Metrics collected by the unmodified Collector must not
// duplicate any of those labels. Wrapping a nil value is valid, resulting
// in a no-op Registerer.
//
// WrapRegistererWith provides a way to add fixed labels to a subset of
// Collectors. It should not be used to add fixed labels to all metrics
// exposed. See also
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
//
// Conflicts between Collectors registered through the original Registerer with
// Collectors registered through the wrapping Registerer will still be
// detected. Any AlreadyRegisteredError returned by the Register method of
// either Registerer will contain the ExistingCollector in the form it was
// provided to the respective registry.
//
// The Collector example demonstrates a use of WrapRegistererWith.
func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
	return &wrappingRegisterer{
		wrappedRegisterer: reg,
		labels:            labels,
	}
}

// WrapRegistererWithPrefix returns a Registerer wrapping the provided
// Registerer. Collectors registered with the returned Registerer will be
// registered with the wrapped Registerer in a modified way. The modified
// Collector adds the provided prefix to the name of all Metrics it collects.
// Wrapping a nil value is valid, resulting in a no-op Registerer.
//
// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of
// a sub-system. To make this work, register metrics of the sub-system with the
// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful
// to use the same prefix for all metrics exposed. In particular, do not prefix
// metric names that are standardized across applications, as that would break
// horizontal monitoring, for example the metrics provided by the Go collector
// (see NewGoCollector) and the process collector (see NewProcessCollector). (In
// fact, those metrics are already prefixed with “go_” or “process_”,
// respectively.)
//
// Conflicts between Collectors registered through the original Registerer with
// Collectors registered through the wrapping Registerer will still be
// detected. Any AlreadyRegisteredError returned by the Register method of
// either Registerer will contain the ExistingCollector in the form it was
// provided to the respective registry.
func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer {
	return &wrappingRegisterer{
		wrappedRegisterer: reg,
		prefix:            prefix,
	}
}

type wrappingRegisterer struct {
	wrappedRegisterer Registerer
	prefix            string
	labels            Labels
}

func (r *wrappingRegisterer) Register(c Collector) error {
	if r.wrappedRegisterer == nil {
		return nil
	}
	return r.wrappedRegisterer.Register(&wrappingCollector{
		wrappedCollector: c,
		prefix:           r.prefix,
		labels:           r.labels,
	})
}

func (r *wrappingRegisterer) MustRegister(cs ...Collector) {
	if r.wrappedRegisterer == nil {
		return
	}
	for _, c := range cs {
		if err := r.Register(c); err != nil {
			panic(err)
		}
	}
}

func (r *wrappingRegisterer) Unregister(c Collector) bool {
	if r.wrappedRegisterer == nil {
		return false
	}
	return r.wrappedRegisterer.Unregister(&wrappingCollector{
		wrappedCollector: c,
		prefix:           r.prefix,
		labels:           r.labels,
	})
}

type wrappingCollector struct {
	wrappedCollector Collector
	prefix           string
	labels           Labels
}

func (c *wrappingCollector) Collect(ch chan<- Metric) {
	wrappedCh := make(chan Metric)
	go func() {
		c.wrappedCollector.Collect(wrappedCh)
		close(wrappedCh)
	}()
	for m := range wrappedCh {
		ch <- &wrappingMetric{
			wrappedMetric: m,
			prefix:        c.prefix,
			labels:        c.labels,
		}
	}
}

func (c *wrappingCollector) Describe(ch chan<- *Desc) {
	wrappedCh := make(chan *Desc)
	go func() {
		c.wrappedCollector.Describe(wrappedCh)
		close(wrappedCh)
	}()
	for desc := range wrappedCh {
		ch <- wrapDesc(desc, c.prefix, c.labels)
	}
}

func (c *wrappingCollector) unwrapRecursively() Collector {
	switch wc := c.wrappedCollector.(type) {
	case *wrappingCollector:
		return wc.unwrapRecursively()
	default:
		return wc
	}
}

type wrappingMetric struct {
	wrappedMetric Metric
	prefix        string
	labels        Labels
}

func (m *wrappingMetric) Desc() *Desc {
	return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels)
}

func (m *wrappingMetric) Write(out *dto.Metric) error {
	if err := m.wrappedMetric.Write(out); err != nil {
		return err
	}
	if len(m.labels) == 0 {
		// No wrapping labels.
		return nil
	}
	for ln, lv := range m.labels {
		out.Label = append(out.Label, &dto.LabelPair{
			Name:  proto.String(ln),
			Value: proto.String(lv),
		})
	}
	sort.Sort(internal.LabelPairSorter(out.Label))
	return nil
}

func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
	constLabels := Labels{}
	for _, lp := range desc.constLabelPairs {
		constLabels[*lp.Name] = *lp.Value
	}
	for ln, lv := range labels {
		if _, alreadyUsed := constLabels[ln]; alreadyUsed {
			return &Desc{
				fqName:          desc.fqName,
				help:            desc.help,
				variableLabels:  desc.variableLabels,
				constLabelPairs: desc.constLabelPairs,
				err:             fmt.Errorf("attempted wrapping with already existing label name %q", ln),
			}
		}
		constLabels[ln] = lv
	}
	// NewDesc will do remaining validations.
	newDesc := V2.NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
	// Propagate errors if there was any. This will override any errer
	// created by NewDesc above, i.e. earlier errors get precedence.
	if desc.err != nil {
		newDesc.err = desc.err
	}
	return newDesc
}
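
A brief, hedged sketch of how the two public wrappers above are typically used with a custom registry; the registry, label, and metric names are illustrative only.

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	reg := prometheus.NewRegistry()

	// Every metric registered through "sub" gets a fixed component label.
	sub := prometheus.WrapRegistererWith(prometheus.Labels{"component": "cache"}, reg)
	hits := prometheus.NewCounter(prometheus.CounterOpts{Name: "hits_total", Help: "Cache hits."})
	sub.MustRegister(hits)

	// Every metric registered through "prefixed" gets its name prefixed.
	prefixed := prometheus.WrapRegistererWithPrefix("myapp_", reg)
	jobs := prometheus.NewGauge(prometheus.GaugeOpts{Name: "jobs_in_flight", Help: "Running jobs."})
	prefixed.MustRegister(jobs) // exposed as myapp_jobs_in_flight
}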

201 vendor/github.com/prometheus/client_model/LICENSE generated vendored Normal file
@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

5 vendor/github.com/prometheus/client_model/NOTICE generated vendored Normal file
@ -0,0 +1,5 @@
Data model artifacts for Prometheus.
Copyright 2012-2015 The Prometheus Authors

This product includes software developed at
SoundCloud Ltd. (http://soundcloud.com/).

1376 vendor/github.com/prometheus/client_model/go/metrics.pb.go generated vendored Normal file
File diff suppressed because it is too large

201 vendor/github.com/prometheus/common/LICENSE generated vendored Normal file
@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
5 vendor/github.com/prometheus/common/NOTICE generated vendored Normal file
@@ -0,0 +1,5 @@
Common libraries shared by Prometheus Go components.
Copyright 2015 The Prometheus Authors

This product includes software developed at
SoundCloud Ltd. (http://soundcloud.com/).
428 vendor/github.com/prometheus/common/expfmt/decode.go generated vendored Normal file
@@ -0,0 +1,428 @@
|
||||||
|
// Copyright 2015 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package expfmt
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"math"
|
||||||
|
"mime"
|
||||||
|
"net/http"
|
||||||
|
|
||||||
|
dto "github.com/prometheus/client_model/go"
|
||||||
|
|
||||||
|
"github.com/matttproud/golang_protobuf_extensions/pbutil"
|
||||||
|
"github.com/prometheus/common/model"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Decoder types decode an input stream into metric families.
|
||||||
|
type Decoder interface {
|
||||||
|
Decode(*dto.MetricFamily) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeOptions contains options used by the Decoder and in sample extraction.
|
||||||
|
type DecodeOptions struct {
|
||||||
|
// Timestamp is added to each value from the stream that has no explicit timestamp set.
|
||||||
|
Timestamp model.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResponseFormat extracts the correct format from a HTTP response header.
|
||||||
|
// If no matching format can be found FormatUnknown is returned.
|
||||||
|
func ResponseFormat(h http.Header) Format {
|
||||||
|
ct := h.Get(hdrContentType)
|
||||||
|
|
||||||
|
mediatype, params, err := mime.ParseMediaType(ct)
|
||||||
|
if err != nil {
|
||||||
|
return FmtUnknown
|
||||||
|
}
|
||||||
|
|
||||||
|
const textType = "text/plain"
|
||||||
|
|
||||||
|
switch mediatype {
|
||||||
|
case ProtoType:
|
||||||
|
if p, ok := params["proto"]; ok && p != ProtoProtocol {
|
||||||
|
return FmtUnknown
|
||||||
|
}
|
||||||
|
if e, ok := params["encoding"]; ok && e != "delimited" {
|
||||||
|
return FmtUnknown
|
||||||
|
}
|
||||||
|
return FmtProtoDelim
|
||||||
|
|
||||||
|
case textType:
|
||||||
|
if v, ok := params["version"]; ok && v != TextVersion {
|
||||||
|
return FmtUnknown
|
||||||
|
}
|
||||||
|
return FmtText
|
||||||
|
}
|
||||||
|
|
||||||
|
return FmtUnknown
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDecoder returns a new decoder based on the given input format.
|
||||||
|
// If the input format does not imply otherwise, a text format decoder is returned.
|
||||||
|
func NewDecoder(r io.Reader, format Format) Decoder {
|
||||||
|
switch format {
|
||||||
|
case FmtProtoDelim:
|
||||||
|
return &protoDecoder{r: r}
|
||||||
|
}
|
||||||
|
return &textDecoder{r: r}
|
||||||
|
}
|
||||||
|
|
||||||
|
// protoDecoder implements the Decoder interface for protocol buffers.
|
||||||
|
type protoDecoder struct {
|
||||||
|
r io.Reader
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode implements the Decoder interface.
|
||||||
|
func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
|
||||||
|
_, err := pbutil.ReadDelimited(d.r, v)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
|
||||||
|
return fmt.Errorf("invalid metric name %q", v.GetName())
|
||||||
|
}
|
||||||
|
for _, m := range v.GetMetric() {
|
||||||
|
if m == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for _, l := range m.GetLabel() {
|
||||||
|
if l == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if !model.LabelValue(l.GetValue()).IsValid() {
|
||||||
|
return fmt.Errorf("invalid label value %q", l.GetValue())
|
||||||
|
}
|
||||||
|
if !model.LabelName(l.GetName()).IsValid() {
|
||||||
|
return fmt.Errorf("invalid label name %q", l.GetName())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// textDecoder implements the Decoder interface for the text protocol.
|
||||||
|
type textDecoder struct {
|
||||||
|
r io.Reader
|
||||||
|
fams map[string]*dto.MetricFamily
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode implements the Decoder interface.
|
||||||
|
func (d *textDecoder) Decode(v *dto.MetricFamily) error {
|
||||||
|
if d.err == nil {
|
||||||
|
// Read all metrics in one shot.
|
||||||
|
var p TextParser
|
||||||
|
d.fams, d.err = p.TextToMetricFamilies(d.r)
|
||||||
|
// If we don't get an error, store io.EOF for the end.
|
||||||
|
if d.err == nil {
|
||||||
|
d.err = io.EOF
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Pick off one MetricFamily per Decode until there's nothing left.
|
||||||
|
for key, fam := range d.fams {
|
||||||
|
v.Name = fam.Name
|
||||||
|
v.Help = fam.Help
|
||||||
|
v.Type = fam.Type
|
||||||
|
v.Metric = fam.Metric
|
||||||
|
delete(d.fams, key)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return d.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// SampleDecoder wraps a Decoder to extract samples from the metric families
|
||||||
|
// decoded by the wrapped Decoder.
|
||||||
|
type SampleDecoder struct {
|
||||||
|
Dec Decoder
|
||||||
|
Opts *DecodeOptions
|
||||||
|
|
||||||
|
f dto.MetricFamily
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode calls the Decode method of the wrapped Decoder and then extracts the
|
||||||
|
// samples from the decoded MetricFamily into the provided model.Vector.
|
||||||
|
func (sd *SampleDecoder) Decode(s *model.Vector) error {
|
||||||
|
err := sd.Dec.Decode(&sd.f)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*s, err = extractSamples(&sd.f, sd.Opts)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExtractSamples builds a slice of samples from the provided metric
|
||||||
|
// families. If an error occurs during sample extraction, it continues to
|
||||||
|
// extract from the remaining metric families. The returned error is the last
|
||||||
|
// error that has occurred.
|
||||||
|
func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {
|
||||||
|
var (
|
||||||
|
all model.Vector
|
||||||
|
lastErr error
|
||||||
|
)
|
||||||
|
for _, f := range fams {
|
||||||
|
some, err := extractSamples(f, o)
|
||||||
|
if err != nil {
|
||||||
|
lastErr = err
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
all = append(all, some...)
|
||||||
|
}
|
||||||
|
return all, lastErr
|
||||||
|
}
|
||||||
|
|
||||||
|
func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) {
|
||||||
|
switch f.GetType() {
|
||||||
|
case dto.MetricType_COUNTER:
|
||||||
|
return extractCounter(o, f), nil
|
||||||
|
case dto.MetricType_GAUGE:
|
||||||
|
return extractGauge(o, f), nil
|
||||||
|
case dto.MetricType_SUMMARY:
|
||||||
|
return extractSummary(o, f), nil
|
||||||
|
case dto.MetricType_UNTYPED:
|
||||||
|
return extractUntyped(o, f), nil
|
||||||
|
case dto.MetricType_HISTOGRAM:
|
||||||
|
return extractHistogram(o, f), nil
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType())
|
||||||
|
}
|
||||||
|
|
||||||
|
func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
|
||||||
|
samples := make(model.Vector, 0, len(f.Metric))
|
||||||
|
|
||||||
|
for _, m := range f.Metric {
|
||||||
|
if m.Counter == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
lset := make(model.LabelSet, len(m.Label)+1)
|
||||||
|
for _, p := range m.Label {
|
||||||
|
lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
|
||||||
|
}
|
||||||
|
lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
|
||||||
|
|
||||||
|
smpl := &model.Sample{
|
||||||
|
Metric: model.Metric(lset),
|
||||||
|
Value: model.SampleValue(m.Counter.GetValue()),
|
||||||
|
}
|
||||||
|
|
||||||
|
if m.TimestampMs != nil {
|
||||||
|
smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
|
||||||
|
} else {
|
||||||
|
smpl.Timestamp = o.Timestamp
|
||||||
|
}
|
||||||
|
|
||||||
|
samples = append(samples, smpl)
|
||||||
|
}
|
||||||
|
|
||||||
|
return samples
|
||||||
|
}
|
||||||
|
|
||||||
|
func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
|
||||||
|
samples := make(model.Vector, 0, len(f.Metric))
|
||||||
|
|
||||||
|
for _, m := range f.Metric {
|
||||||
|
if m.Gauge == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
lset := make(model.LabelSet, len(m.Label)+1)
|
||||||
|
for _, p := range m.Label {
|
||||||
|
lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
|
||||||
|
}
|
||||||
|
lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
|
||||||
|
|
||||||
|
smpl := &model.Sample{
|
||||||
|
Metric: model.Metric(lset),
|
||||||
|
Value: model.SampleValue(m.Gauge.GetValue()),
|
||||||
|
}
|
||||||
|
|
||||||
|
if m.TimestampMs != nil {
|
||||||
|
smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
|
||||||
|
} else {
|
||||||
|
smpl.Timestamp = o.Timestamp
|
||||||
|
}
|
||||||
|
|
||||||
|
samples = append(samples, smpl)
|
||||||
|
}
|
||||||
|
|
||||||
|
return samples
|
||||||
|
}
|
||||||
|
|
||||||
|
func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
|
||||||
|
samples := make(model.Vector, 0, len(f.Metric))
|
||||||
|
|
||||||
|
for _, m := range f.Metric {
|
||||||
|
if m.Untyped == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
lset := make(model.LabelSet, len(m.Label)+1)
|
||||||
|
for _, p := range m.Label {
|
||||||
|
lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
|
||||||
|
}
|
||||||
|
lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
|
||||||
|
|
||||||
|
smpl := &model.Sample{
|
||||||
|
Metric: model.Metric(lset),
|
||||||
|
Value: model.SampleValue(m.Untyped.GetValue()),
|
||||||
|
}
|
||||||
|
|
||||||
|
if m.TimestampMs != nil {
|
||||||
|
smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
|
||||||
|
} else {
|
||||||
|
smpl.Timestamp = o.Timestamp
|
||||||
|
}
|
||||||
|
|
||||||
|
samples = append(samples, smpl)
|
||||||
|
}
|
||||||
|
|
||||||
|
return samples
|
||||||
|
}
|
||||||
|
|
||||||
|
func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
|
||||||
|
samples := make(model.Vector, 0, len(f.Metric))
|
||||||
|
|
||||||
|
for _, m := range f.Metric {
|
||||||
|
if m.Summary == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
timestamp := o.Timestamp
|
||||||
|
if m.TimestampMs != nil {
|
||||||
|
timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, q := range m.Summary.Quantile {
|
||||||
|
lset := make(model.LabelSet, len(m.Label)+2)
|
||||||
|
for _, p := range m.Label {
|
||||||
|
lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
|
||||||
|
}
|
||||||
|
// BUG(matt): Update other names to "quantile".
|
||||||
|
lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile()))
|
||||||
|
lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
|
||||||
|
|
||||||
|
samples = append(samples, &model.Sample{
|
||||||
|
Metric: model.Metric(lset),
|
||||||
|
Value: model.SampleValue(q.GetValue()),
|
||||||
|
Timestamp: timestamp,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
lset := make(model.LabelSet, len(m.Label)+1)
|
||||||
|
for _, p := range m.Label {
|
||||||
|
lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
|
||||||
|
}
|
||||||
|
lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
|
||||||
|
|
||||||
|
samples = append(samples, &model.Sample{
|
||||||
|
Metric: model.Metric(lset),
|
||||||
|
Value: model.SampleValue(m.Summary.GetSampleSum()),
|
||||||
|
Timestamp: timestamp,
|
||||||
|
})
|
||||||
|
|
||||||
|
lset = make(model.LabelSet, len(m.Label)+1)
|
||||||
|
for _, p := range m.Label {
|
||||||
|
lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
|
||||||
|
}
|
||||||
|
lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
|
||||||
|
|
||||||
|
samples = append(samples, &model.Sample{
|
||||||
|
Metric: model.Metric(lset),
|
||||||
|
Value: model.SampleValue(m.Summary.GetSampleCount()),
|
||||||
|
Timestamp: timestamp,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return samples
|
||||||
|
}
|
||||||
|
|
||||||
|
func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
|
||||||
|
samples := make(model.Vector, 0, len(f.Metric))
|
||||||
|
|
||||||
|
for _, m := range f.Metric {
|
||||||
|
if m.Histogram == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
timestamp := o.Timestamp
|
||||||
|
if m.TimestampMs != nil {
|
||||||
|
timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
|
||||||
|
}
|
||||||
|
|
||||||
|
infSeen := false
|
||||||
|
|
||||||
|
for _, q := range m.Histogram.Bucket {
|
||||||
|
lset := make(model.LabelSet, len(m.Label)+2)
|
||||||
|
for _, p := range m.Label {
|
||||||
|
lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
|
||||||
|
}
|
||||||
|
lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound()))
|
||||||
|
lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
|
||||||
|
|
||||||
|
if math.IsInf(q.GetUpperBound(), +1) {
|
||||||
|
infSeen = true
|
||||||
|
}
|
||||||
|
|
||||||
|
samples = append(samples, &model.Sample{
|
||||||
|
Metric: model.Metric(lset),
|
||||||
|
Value: model.SampleValue(q.GetCumulativeCount()),
|
||||||
|
Timestamp: timestamp,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
lset := make(model.LabelSet, len(m.Label)+1)
|
||||||
|
for _, p := range m.Label {
|
||||||
|
lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
|
||||||
|
}
|
||||||
|
lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
|
||||||
|
|
||||||
|
samples = append(samples, &model.Sample{
|
||||||
|
Metric: model.Metric(lset),
|
||||||
|
Value: model.SampleValue(m.Histogram.GetSampleSum()),
|
||||||
|
Timestamp: timestamp,
|
||||||
|
})
|
||||||
|
|
||||||
|
lset = make(model.LabelSet, len(m.Label)+1)
|
||||||
|
for _, p := range m.Label {
|
||||||
|
lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
|
||||||
|
}
|
||||||
|
lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
|
||||||
|
|
||||||
|
count := &model.Sample{
|
||||||
|
Metric: model.Metric(lset),
|
||||||
|
Value: model.SampleValue(m.Histogram.GetSampleCount()),
|
||||||
|
Timestamp: timestamp,
|
||||||
|
}
|
||||||
|
samples = append(samples, count)
|
||||||
|
|
||||||
|
if !infSeen {
|
||||||
|
// Append an infinity bucket sample.
|
||||||
|
lset := make(model.LabelSet, len(m.Label)+2)
|
||||||
|
for _, p := range m.Label {
|
||||||
|
lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
|
||||||
|
}
|
||||||
|
lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf")
|
||||||
|
lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
|
||||||
|
|
||||||
|
samples = append(samples, &model.Sample{
|
||||||
|
Metric: model.Metric(lset),
|
||||||
|
Value: count.Value,
|
||||||
|
Timestamp: timestamp,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return samples
|
||||||
|
}
|
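The decoder above reads the Prometheus text or delimited-protobuf exposition formats into MetricFamily messages, and SampleDecoder flattens those into model.Vector samples. A minimal usage sketch follows; it is not part of the vendored file, and the metric name and input are made up for illustration.

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	// Hypothetical scrape output in the Prometheus text format.
	input := "# TYPE http_requests_total counter\n" +
		"http_requests_total{code=\"200\"} 42\n"

	dec := expfmt.NewDecoder(strings.NewReader(input), expfmt.FmtText)
	sd := expfmt.SampleDecoder{
		Dec: dec,
		// Samples without an explicit timestamp get this one.
		Opts: &expfmt.DecodeOptions{Timestamp: model.Now()},
	}

	var samples model.Vector
	if err := sd.Decode(&samples); err != nil {
		log.Fatal(err)
	}
	for _, s := range samples {
		fmt.Printf("%v => %v\n", s.Metric, s.Value)
	}
}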
165 vendor/github.com/prometheus/common/expfmt/encode.go generated vendored Normal file
@@ -0,0 +1,165 @@
|
||||||
|
// Copyright 2015 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package expfmt
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
|
||||||
|
"github.com/matttproud/golang_protobuf_extensions/pbutil"
|
||||||
|
"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
|
||||||
|
"google.golang.org/protobuf/encoding/prototext"
|
||||||
|
|
||||||
|
dto "github.com/prometheus/client_model/go"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Encoder types encode metric families into an underlying wire protocol.
|
||||||
|
type Encoder interface {
|
||||||
|
Encode(*dto.MetricFamily) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// Closer is implemented by Encoders that need to be closed to finalize
|
||||||
|
// encoding. (For example, OpenMetrics needs a final `# EOF` line.)
|
||||||
|
//
|
||||||
|
// Note that all Encoder implementations returned from this package implement
|
||||||
|
// Closer, too, even if the Close call is a no-op. This happens in preparation
|
||||||
|
// for adding a Close method to the Encoder interface directly in a (mildly
|
||||||
|
// breaking) release in the future.
|
||||||
|
type Closer interface {
|
||||||
|
Close() error
|
||||||
|
}
|
||||||
|
|
||||||
|
type encoderCloser struct {
|
||||||
|
encode func(*dto.MetricFamily) error
|
||||||
|
close func() error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ec encoderCloser) Encode(v *dto.MetricFamily) error {
|
||||||
|
return ec.encode(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ec encoderCloser) Close() error {
|
||||||
|
return ec.close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Negotiate returns the Content-Type based on the given Accept header. If no
|
||||||
|
// appropriate accepted type is found, FmtText is returned (which is the
|
||||||
|
// Prometheus text format). This function will never negotiate FmtOpenMetrics,
|
||||||
|
// as the support is still experimental. To include the option to negotiate
|
||||||
|
// FmtOpenMetrics, use NegotiateOpenMetrics.
|
||||||
|
func Negotiate(h http.Header) Format {
|
||||||
|
for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
|
||||||
|
ver := ac.Params["version"]
|
||||||
|
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
|
||||||
|
switch ac.Params["encoding"] {
|
||||||
|
case "delimited":
|
||||||
|
return FmtProtoDelim
|
||||||
|
case "text":
|
||||||
|
return FmtProtoText
|
||||||
|
case "compact-text":
|
||||||
|
return FmtProtoCompact
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
|
||||||
|
return FmtText
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return FmtText
|
||||||
|
}
|
||||||
|
|
||||||
|
// NegotiateIncludingOpenMetrics works like Negotiate but includes
|
||||||
|
// FmtOpenMetrics as an option for the result. Note that this function is
|
||||||
|
// temporary and will disappear once FmtOpenMetrics is fully supported and as
|
||||||
|
// such may be negotiated by the normal Negotiate function.
|
||||||
|
func NegotiateIncludingOpenMetrics(h http.Header) Format {
|
||||||
|
for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
|
||||||
|
ver := ac.Params["version"]
|
||||||
|
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
|
||||||
|
switch ac.Params["encoding"] {
|
||||||
|
case "delimited":
|
||||||
|
return FmtProtoDelim
|
||||||
|
case "text":
|
||||||
|
return FmtProtoText
|
||||||
|
case "compact-text":
|
||||||
|
return FmtProtoCompact
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
|
||||||
|
return FmtText
|
||||||
|
}
|
||||||
|
if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") {
|
||||||
|
if ver == OpenMetricsVersion_1_0_0 {
|
||||||
|
return FmtOpenMetrics_1_0_0
|
||||||
|
}
|
||||||
|
return FmtOpenMetrics_0_0_1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return FmtText
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewEncoder returns a new encoder based on content type negotiation. All
|
||||||
|
// Encoder implementations returned by NewEncoder also implement Closer, and
|
||||||
|
// callers should always call the Close method. It is currently only required
|
||||||
|
// for FmtOpenMetrics, but a future (breaking) release will add the Close method
|
||||||
|
// to the Encoder interface directly. The current version of the Encoder
|
||||||
|
// interface is kept for backwards compatibility.
|
||||||
|
func NewEncoder(w io.Writer, format Format) Encoder {
|
||||||
|
switch format {
|
||||||
|
case FmtProtoDelim:
|
||||||
|
return encoderCloser{
|
||||||
|
encode: func(v *dto.MetricFamily) error {
|
||||||
|
_, err := pbutil.WriteDelimited(w, v)
|
||||||
|
return err
|
||||||
|
},
|
||||||
|
close: func() error { return nil },
|
||||||
|
}
|
||||||
|
case FmtProtoCompact:
|
||||||
|
return encoderCloser{
|
||||||
|
encode: func(v *dto.MetricFamily) error {
|
||||||
|
_, err := fmt.Fprintln(w, v.String())
|
||||||
|
return err
|
||||||
|
},
|
||||||
|
close: func() error { return nil },
|
||||||
|
}
|
||||||
|
case FmtProtoText:
|
||||||
|
return encoderCloser{
|
||||||
|
encode: func(v *dto.MetricFamily) error {
|
||||||
|
_, err := fmt.Fprintln(w, prototext.Format(v))
|
||||||
|
return err
|
||||||
|
},
|
||||||
|
close: func() error { return nil },
|
||||||
|
}
|
||||||
|
case FmtText:
|
||||||
|
return encoderCloser{
|
||||||
|
encode: func(v *dto.MetricFamily) error {
|
||||||
|
_, err := MetricFamilyToText(w, v)
|
||||||
|
return err
|
||||||
|
},
|
||||||
|
close: func() error { return nil },
|
||||||
|
}
|
||||||
|
case FmtOpenMetrics_0_0_1, FmtOpenMetrics_1_0_0:
|
||||||
|
return encoderCloser{
|
||||||
|
encode: func(v *dto.MetricFamily) error {
|
||||||
|
_, err := MetricFamilyToOpenMetrics(w, v)
|
||||||
|
return err
|
||||||
|
},
|
||||||
|
close: func() error {
|
||||||
|
_, err := FinalizeOpenMetrics(w)
|
||||||
|
return err
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
panic(fmt.Errorf("expfmt.NewEncoder: unknown format %q", format))
|
||||||
|
}
|
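Negotiate plus NewEncoder is the usual way a metrics handler picks an exposition format from the Accept header and writes it out. A hedged sketch, not from this repository, assuming prometheus.DefaultGatherer from client_golang is available and an arbitrary listen address:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/expfmt"
)

func main() {
	http.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
		// Pick a format from the client's Accept header.
		format := expfmt.Negotiate(r.Header)
		w.Header().Set("Content-Type", string(format))

		enc := expfmt.NewEncoder(w, format)
		mfs, err := prometheus.DefaultGatherer.Gather()
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		for _, mf := range mfs {
			if err := enc.Encode(mf); err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
		}
		// All encoders returned by NewEncoder also implement Closer.
		if closer, ok := enc.(expfmt.Closer); ok {
			_ = closer.Close()
		}
	})
	log.Fatal(http.ListenAndServe(":9100", nil))
}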
43 vendor/github.com/prometheus/common/expfmt/expfmt.go generated vendored Normal file
@@ -0,0 +1,43 @@
|
||||||
|
// Copyright 2015 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package expfmt contains tools for reading and writing Prometheus metrics.
|
||||||
|
package expfmt
|
||||||
|
|
||||||
|
// Format specifies the HTTP content type of the different wire protocols.
|
||||||
|
type Format string
|
||||||
|
|
||||||
|
// Constants to assemble the Content-Type values for the different wire protocols.
|
||||||
|
const (
|
||||||
|
TextVersion = "0.0.4"
|
||||||
|
ProtoType = `application/vnd.google.protobuf`
|
||||||
|
ProtoProtocol = `io.prometheus.client.MetricFamily`
|
||||||
|
ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
|
||||||
|
OpenMetricsType = `application/openmetrics-text`
|
||||||
|
OpenMetricsVersion_0_0_1 = "0.0.1"
|
||||||
|
OpenMetricsVersion_1_0_0 = "1.0.0"
|
||||||
|
|
||||||
|
// The Content-Type values for the different wire protocols.
|
||||||
|
FmtUnknown Format = `<unknown>`
|
||||||
|
FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
|
||||||
|
FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
|
||||||
|
FmtProtoText Format = ProtoFmt + ` encoding=text`
|
||||||
|
FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
|
||||||
|
FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
|
||||||
|
FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
hdrContentType = "Content-Type"
|
||||||
|
hdrAccept = "Accept"
|
||||||
|
)
|
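The Format constants above are plain Content-Type strings, so they can be set on headers directly and round-tripped through ResponseFormat in decode.go. A small illustrative check, not part of the vendored code:

package main

import (
	"fmt"
	"net/http"

	"github.com/prometheus/common/expfmt"
)

func main() {
	// The text format Content-Type assembled from the constants above.
	fmt.Println(string(expfmt.FmtText))
	// text/plain; version=0.0.4; charset=utf-8

	// A response carrying that header is recognised as FmtText again.
	h := http.Header{}
	h.Set("Content-Type", string(expfmt.FmtText))
	fmt.Println(expfmt.ResponseFormat(h) == expfmt.FmtText) // true
}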
37 vendor/github.com/prometheus/common/expfmt/fuzz.go generated vendored Normal file
@@ -0,0 +1,37 @@
|
||||||
|
// Copyright 2014 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Build only when actually fuzzing
|
||||||
|
//go:build gofuzz
|
||||||
|
// +build gofuzz
|
||||||
|
|
||||||
|
package expfmt
|
||||||
|
|
||||||
|
import "bytes"
|
||||||
|
|
||||||
|
// Fuzz text metric parser with with github.com/dvyukov/go-fuzz:
|
||||||
|
//
|
||||||
|
// go-fuzz-build github.com/prometheus/common/expfmt
|
||||||
|
// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
|
||||||
|
//
|
||||||
|
// Further input samples should go in the folder fuzz/corpus.
|
||||||
|
func Fuzz(in []byte) int {
|
||||||
|
parser := TextParser{}
|
||||||
|
_, err := parser.TextToMetricFamilies(bytes.NewReader(in))
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
return 1
|
||||||
|
}
|
527 vendor/github.com/prometheus/common/expfmt/openmetrics_create.go generated vendored Normal file
@@ -0,0 +1,527 @@
|
||||||
|
// Copyright 2020 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package expfmt
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"math"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/prometheus/common/model"
|
||||||
|
|
||||||
|
dto "github.com/prometheus/client_model/go"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the
|
||||||
|
// OpenMetrics text format and writes the resulting lines to 'out'. It returns
|
||||||
|
// the number of bytes written and any error encountered. The output will have
|
||||||
|
// the same order as the input, no further sorting is performed. Furthermore,
|
||||||
|
// this function assumes the input is already sanitized and does not perform any
|
||||||
|
// sanity checks. If the input contains duplicate metrics or invalid metric or
|
||||||
|
// label names, the conversion will result in invalid text format output.
|
||||||
|
//
|
||||||
|
// This function fulfills the type 'expfmt.encoder'.
|
||||||
|
//
|
||||||
|
// Note that OpenMetrics requires a final `# EOF` line. Since this function acts
|
||||||
|
// on individual metric families, it is the responsibility of the caller to
|
||||||
|
// append this line to 'out' once all metric families have been written.
|
||||||
|
// Conveniently, this can be done by calling FinalizeOpenMetrics.
|
||||||
|
//
|
||||||
|
// The output should be fully OpenMetrics compliant. However, there are a few
|
||||||
|
// missing features and peculiarities to avoid complications when switching from
|
||||||
|
// Prometheus to OpenMetrics or vice versa:
|
||||||
|
//
|
||||||
|
// - Counters are expected to have the `_total` suffix in their metric name. In
|
||||||
|
// the output, the suffix will be truncated from the `# TYPE` and `# HELP`
|
||||||
|
// line. A counter with a missing `_total` suffix is not an error. However,
|
||||||
|
// its type will be set to `unknown` in that case to avoid invalid OpenMetrics
|
||||||
|
// output.
|
||||||
|
//
|
||||||
|
// - No support for the following (optional) features: `# UNIT` line, `_created`
|
||||||
|
// line, info type, stateset type, gaugehistogram type.
|
||||||
|
//
|
||||||
|
// - The size of exemplar labels is not checked (i.e. it's possible to create
|
||||||
|
// exemplars that are larger than allowed by the OpenMetrics specification).
|
||||||
|
//
|
||||||
|
// - The value of Counters is not checked. (OpenMetrics doesn't allow counters
|
||||||
|
// with a `NaN` value.)
|
||||||
|
func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) {
|
||||||
|
name := in.GetName()
|
||||||
|
if name == "" {
|
||||||
|
return 0, fmt.Errorf("MetricFamily has no name: %s", in)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try the interface upgrade. If it doesn't work, we'll use a
|
||||||
|
// bufio.Writer from the sync.Pool.
|
||||||
|
w, ok := out.(enhancedWriter)
|
||||||
|
if !ok {
|
||||||
|
b := bufPool.Get().(*bufio.Writer)
|
||||||
|
b.Reset(out)
|
||||||
|
w = b
|
||||||
|
defer func() {
|
||||||
|
bErr := b.Flush()
|
||||||
|
if err == nil {
|
||||||
|
err = bErr
|
||||||
|
}
|
||||||
|
bufPool.Put(b)
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
n int
|
||||||
|
metricType = in.GetType()
|
||||||
|
shortName = name
|
||||||
|
)
|
||||||
|
if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") {
|
||||||
|
shortName = name[:len(name)-6]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Comments, first HELP, then TYPE.
|
||||||
|
if in.Help != nil {
|
||||||
|
n, err = w.WriteString("# HELP ")
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
n, err = w.WriteString(shortName)
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
err = w.WriteByte(' ')
|
||||||
|
written++
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
n, err = writeEscapedString(w, *in.Help, true)
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
err = w.WriteByte('\n')
|
||||||
|
written++
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
n, err = w.WriteString("# TYPE ")
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
n, err = w.WriteString(shortName)
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
switch metricType {
|
||||||
|
case dto.MetricType_COUNTER:
|
||||||
|
if strings.HasSuffix(name, "_total") {
|
||||||
|
n, err = w.WriteString(" counter\n")
|
||||||
|
} else {
|
||||||
|
n, err = w.WriteString(" unknown\n")
|
||||||
|
}
|
||||||
|
case dto.MetricType_GAUGE:
|
||||||
|
n, err = w.WriteString(" gauge\n")
|
||||||
|
case dto.MetricType_SUMMARY:
|
||||||
|
n, err = w.WriteString(" summary\n")
|
||||||
|
case dto.MetricType_UNTYPED:
|
||||||
|
n, err = w.WriteString(" unknown\n")
|
||||||
|
case dto.MetricType_HISTOGRAM:
|
||||||
|
n, err = w.WriteString(" histogram\n")
|
||||||
|
default:
|
||||||
|
return written, fmt.Errorf("unknown metric type %s", metricType.String())
|
||||||
|
}
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Finally the samples, one line for each.
|
||||||
|
for _, metric := range in.Metric {
|
||||||
|
switch metricType {
|
||||||
|
case dto.MetricType_COUNTER:
|
||||||
|
if metric.Counter == nil {
|
||||||
|
return written, fmt.Errorf(
|
||||||
|
"expected counter in metric %s %s", name, metric,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
// Note that we have ensured above that either the name
|
||||||
|
// ends on `_total` or that the rendered type is
|
||||||
|
// `unknown`. Therefore, no `_total` must be added here.
|
||||||
|
n, err = writeOpenMetricsSample(
|
||||||
|
w, name, "", metric, "", 0,
|
||||||
|
metric.Counter.GetValue(), 0, false,
|
||||||
|
metric.Counter.Exemplar,
|
||||||
|
)
|
||||||
|
case dto.MetricType_GAUGE:
|
||||||
|
if metric.Gauge == nil {
|
||||||
|
return written, fmt.Errorf(
|
||||||
|
"expected gauge in metric %s %s", name, metric,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
n, err = writeOpenMetricsSample(
|
||||||
|
w, name, "", metric, "", 0,
|
||||||
|
metric.Gauge.GetValue(), 0, false,
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
case dto.MetricType_UNTYPED:
|
||||||
|
if metric.Untyped == nil {
|
||||||
|
return written, fmt.Errorf(
|
||||||
|
"expected untyped in metric %s %s", name, metric,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
n, err = writeOpenMetricsSample(
|
||||||
|
w, name, "", metric, "", 0,
|
||||||
|
metric.Untyped.GetValue(), 0, false,
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
case dto.MetricType_SUMMARY:
|
||||||
|
if metric.Summary == nil {
|
||||||
|
return written, fmt.Errorf(
|
||||||
|
"expected summary in metric %s %s", name, metric,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
for _, q := range metric.Summary.Quantile {
|
||||||
|
n, err = writeOpenMetricsSample(
|
||||||
|
w, name, "", metric,
|
||||||
|
model.QuantileLabel, q.GetQuantile(),
|
||||||
|
q.GetValue(), 0, false,
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
n, err = writeOpenMetricsSample(
|
||||||
|
w, name, "_sum", metric, "", 0,
|
||||||
|
metric.Summary.GetSampleSum(), 0, false,
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
n, err = writeOpenMetricsSample(
|
||||||
|
w, name, "_count", metric, "", 0,
|
||||||
|
0, metric.Summary.GetSampleCount(), true,
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
case dto.MetricType_HISTOGRAM:
|
||||||
|
if metric.Histogram == nil {
|
||||||
|
return written, fmt.Errorf(
|
||||||
|
"expected histogram in metric %s %s", name, metric,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
infSeen := false
|
||||||
|
for _, b := range metric.Histogram.Bucket {
|
||||||
|
n, err = writeOpenMetricsSample(
|
||||||
|
w, name, "_bucket", metric,
|
||||||
|
model.BucketLabel, b.GetUpperBound(),
|
||||||
|
0, b.GetCumulativeCount(), true,
|
||||||
|
b.Exemplar,
|
||||||
|
)
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if math.IsInf(b.GetUpperBound(), +1) {
|
||||||
|
infSeen = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !infSeen {
|
||||||
|
n, err = writeOpenMetricsSample(
|
||||||
|
w, name, "_bucket", metric,
|
||||||
|
model.BucketLabel, math.Inf(+1),
|
||||||
|
0, metric.Histogram.GetSampleCount(), true,
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
n, err = writeOpenMetricsSample(
|
||||||
|
w, name, "_sum", metric, "", 0,
|
||||||
|
metric.Histogram.GetSampleSum(), 0, false,
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
n, err = writeOpenMetricsSample(
|
||||||
|
w, name, "_count", metric, "", 0,
|
||||||
|
0, metric.Histogram.GetSampleCount(), true,
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
default:
|
||||||
|
return written, fmt.Errorf(
|
||||||
|
"unexpected type in metric %s %s", name, metric,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// FinalizeOpenMetrics writes the final `# EOF\n` line required by OpenMetrics.
|
||||||
|
func FinalizeOpenMetrics(w io.Writer) (written int, err error) {
|
||||||
|
return w.Write([]byte("# EOF\n"))
|
||||||
|
}
|
||||||
|
|
||||||
|
// writeOpenMetricsSample writes a single sample in OpenMetrics text format to
|
||||||
|
// w, given the metric name, the metric proto message itself, optionally an
|
||||||
|
// additional label name with a float64 value (use empty string as label name if
|
||||||
|
// not required), the value (optionally as float64 or uint64, determined by
|
||||||
|
// useIntValue), and optionally an exemplar (use nil if not required). The
|
||||||
|
// function returns the number of bytes written and any error encountered.
|
||||||
|
func writeOpenMetricsSample(
|
||||||
|
w enhancedWriter,
|
||||||
|
name, suffix string,
|
||||||
|
metric *dto.Metric,
|
||||||
|
additionalLabelName string, additionalLabelValue float64,
|
||||||
|
floatValue float64, intValue uint64, useIntValue bool,
|
||||||
|
exemplar *dto.Exemplar,
|
||||||
|
) (int, error) {
|
||||||
|
var written int
|
||||||
|
n, err := w.WriteString(name)
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
if suffix != "" {
|
||||||
|
n, err = w.WriteString(suffix)
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
n, err = writeOpenMetricsLabelPairs(
|
||||||
|
w, metric.Label, additionalLabelName, additionalLabelValue,
|
||||||
|
)
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
err = w.WriteByte(' ')
|
||||||
|
written++
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
if useIntValue {
|
||||||
|
n, err = writeUint(w, intValue)
|
||||||
|
} else {
|
||||||
|
n, err = writeOpenMetricsFloat(w, floatValue)
|
||||||
|
}
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
if metric.TimestampMs != nil {
|
||||||
|
err = w.WriteByte(' ')
|
||||||
|
written++
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
// TODO(beorn7): Format this directly without converting to a float first.
|
||||||
|
n, err = writeOpenMetricsFloat(w, float64(*metric.TimestampMs)/1000)
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if exemplar != nil {
|
||||||
|
n, err = writeExemplar(w, exemplar)
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
err = w.WriteByte('\n')
|
||||||
|
written++
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
return written, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// writeOpenMetricsLabelPairs works like writeOpenMetrics but formats the float
|
||||||
|
// in OpenMetrics style.
|
||||||
|
func writeOpenMetricsLabelPairs(
|
||||||
|
w enhancedWriter,
|
||||||
|
in []*dto.LabelPair,
|
||||||
|
additionalLabelName string, additionalLabelValue float64,
|
||||||
|
) (int, error) {
|
||||||
|
if len(in) == 0 && additionalLabelName == "" {
|
||||||
|
return 0, nil
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
written int
|
||||||
|
separator byte = '{'
|
||||||
|
)
|
||||||
|
for _, lp := range in {
|
||||||
|
err := w.WriteByte(separator)
|
||||||
|
written++
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
n, err := w.WriteString(lp.GetName())
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
n, err = w.WriteString(`="`)
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
n, err = writeEscapedString(w, lp.GetValue(), true)
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
err = w.WriteByte('"')
|
||||||
|
written++
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
separator = ','
|
||||||
|
}
|
||||||
|
if additionalLabelName != "" {
|
||||||
|
err := w.WriteByte(separator)
|
||||||
|
written++
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
n, err := w.WriteString(additionalLabelName)
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
n, err = w.WriteString(`="`)
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
n, err = writeOpenMetricsFloat(w, additionalLabelValue)
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
err = w.WriteByte('"')
|
||||||
|
written++
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
err := w.WriteByte('}')
|
||||||
|
written++
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
return written, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// writeExemplar writes the provided exemplar in OpenMetrics format to w. The
|
||||||
|
// function returns the number of bytes written and any error encountered.
|
||||||
|
func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
|
||||||
|
written := 0
|
||||||
|
n, err := w.WriteString(" # ")
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0)
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
err = w.WriteByte(' ')
|
||||||
|
written++
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
n, err = writeOpenMetricsFloat(w, e.GetValue())
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
if e.Timestamp != nil {
|
||||||
|
err = w.WriteByte(' ')
|
||||||
|
written++
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
err = (*e).Timestamp.CheckValid()
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
ts := (*e).Timestamp.AsTime()
|
||||||
|
// TODO(beorn7): Format this directly from components of ts to
|
||||||
|
// avoid overflow/underflow and precision issues of the float
|
||||||
|
// conversion.
|
||||||
|
n, err = writeOpenMetricsFloat(w, float64(ts.UnixNano())/1e9)
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return written, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// writeOpenMetricsFloat works like writeFloat but appends ".0" if the resulting
|
||||||
|
// number would otherwise contain neither a "." nor an "e".
|
||||||
|
func writeOpenMetricsFloat(w enhancedWriter, f float64) (int, error) {
|
||||||
|
switch {
|
||||||
|
case f == 1:
|
||||||
|
return w.WriteString("1.0")
|
||||||
|
case f == 0:
|
||||||
|
return w.WriteString("0.0")
|
||||||
|
case f == -1:
|
||||||
|
return w.WriteString("-1.0")
|
||||||
|
case math.IsNaN(f):
|
||||||
|
return w.WriteString("NaN")
|
||||||
|
case math.IsInf(f, +1):
|
||||||
|
return w.WriteString("+Inf")
|
||||||
|
case math.IsInf(f, -1):
|
||||||
|
return w.WriteString("-Inf")
|
||||||
|
default:
|
||||||
|
bp := numBufPool.Get().(*[]byte)
|
||||||
|
*bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
|
||||||
|
if !bytes.ContainsAny(*bp, "e.") {
|
||||||
|
*bp = append(*bp, '.', '0')
|
||||||
|
}
|
||||||
|
written, err := w.Write(*bp)
|
||||||
|
numBufPool.Put(bp)
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// writeUint is like writeInt just for uint64.
|
||||||
|
func writeUint(w enhancedWriter, u uint64) (int, error) {
|
||||||
|
bp := numBufPool.Get().(*[]byte)
|
||||||
|
*bp = strconv.AppendUint((*bp)[:0], u, 10)
|
||||||
|
written, err := w.Write(*bp)
|
||||||
|
numBufPool.Put(bp)
|
||||||
|
return written, err
|
||||||
|
}
|
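MetricFamilyToOpenMetrics writes one family at a time and leaves the required trailing `# EOF` line to the caller, which is what FinalizeOpenMetrics provides. A minimal sketch, not part of the vendored code, assuming metric families come from client_golang's default gatherer:

package main

import (
	"log"
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/expfmt"
)

func main() {
	mfs, err := prometheus.DefaultGatherer.Gather()
	if err != nil {
		log.Fatal(err)
	}
	for _, mf := range mfs {
		// Write each family in OpenMetrics text format.
		if _, err := expfmt.MetricFamilyToOpenMetrics(os.Stdout, mf); err != nil {
			log.Fatal(err)
		}
	}
	// OpenMetrics requires a final "# EOF" line; the caller appends it.
	if _, err := expfmt.FinalizeOpenMetrics(os.Stdout); err != nil {
		log.Fatal(err)
	}
}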
464 vendor/github.com/prometheus/common/expfmt/text_create.go generated vendored Normal file
@@ -0,0 +1,464 @@
|
||||||
|
// Copyright 2014 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package expfmt
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"math"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/prometheus/common/model"
|
||||||
|
|
||||||
|
dto "github.com/prometheus/client_model/go"
|
||||||
|
)
|
||||||
|
|
||||||
|
// enhancedWriter has all the enhanced write functions needed here. bufio.Writer
|
||||||
|
// implements it.
|
||||||
|
type enhancedWriter interface {
|
||||||
|
io.Writer
|
||||||
|
WriteRune(r rune) (n int, err error)
|
||||||
|
WriteString(s string) (n int, err error)
|
||||||
|
WriteByte(c byte) error
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
initialNumBufSize = 24
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
bufPool = sync.Pool{
|
||||||
|
New: func() interface{} {
|
||||||
|
return bufio.NewWriter(io.Discard)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
numBufPool = sync.Pool{
|
||||||
|
New: func() interface{} {
|
||||||
|
b := make([]byte, 0, initialNumBufSize)
|
||||||
|
return &b
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// MetricFamilyToText converts a MetricFamily proto message into text format and
|
||||||
|
// writes the resulting lines to 'out'. It returns the number of bytes written
|
||||||
|
// and any error encountered. The output will have the same order as the input,
|
||||||
|
// no further sorting is performed. Furthermore, this function assumes the input
|
||||||
|
// is already sanitized and does not perform any sanity checks. If the input
|
||||||
|
// contains duplicate metrics or invalid metric or label names, the conversion
|
||||||
|
// will result in invalid text format output.
|
||||||
|
//
|
||||||
|
// This method fulfills the type 'prometheus.encoder'.
|
||||||
|
func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) {
|
||||||
|
// Fail-fast checks.
|
||||||
|
if len(in.Metric) == 0 {
|
||||||
|
return 0, fmt.Errorf("MetricFamily has no metrics: %s", in)
|
||||||
|
}
|
||||||
|
name := in.GetName()
|
||||||
|
if name == "" {
|
||||||
|
return 0, fmt.Errorf("MetricFamily has no name: %s", in)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try the interface upgrade. If it doesn't work, we'll use a
|
||||||
|
// bufio.Writer from the sync.Pool.
|
||||||
|
w, ok := out.(enhancedWriter)
|
||||||
|
if !ok {
|
||||||
|
b := bufPool.Get().(*bufio.Writer)
|
||||||
|
b.Reset(out)
|
||||||
|
w = b
|
||||||
|
defer func() {
|
||||||
|
bErr := b.Flush()
|
||||||
|
if err == nil {
|
||||||
|
err = bErr
|
||||||
|
}
|
||||||
|
bufPool.Put(b)
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
var n int
|
||||||
|
|
||||||
|
	// Comments, first HELP, then TYPE.
	if in.Help != nil {
		n, err = w.WriteString("# HELP ")
		written += n
		if err != nil {
			return
		}
		n, err = w.WriteString(name)
		written += n
		if err != nil {
			return
		}
		err = w.WriteByte(' ')
		written++
		if err != nil {
			return
		}
		n, err = writeEscapedString(w, *in.Help, false)
		written += n
		if err != nil {
			return
		}
		err = w.WriteByte('\n')
		written++
		if err != nil {
			return
		}
	}
	n, err = w.WriteString("# TYPE ")
	written += n
	if err != nil {
		return
	}
	n, err = w.WriteString(name)
	written += n
	if err != nil {
		return
	}
	metricType := in.GetType()
	switch metricType {
	case dto.MetricType_COUNTER:
		n, err = w.WriteString(" counter\n")
	case dto.MetricType_GAUGE:
		n, err = w.WriteString(" gauge\n")
	case dto.MetricType_SUMMARY:
		n, err = w.WriteString(" summary\n")
	case dto.MetricType_UNTYPED:
		n, err = w.WriteString(" untyped\n")
	case dto.MetricType_HISTOGRAM:
		n, err = w.WriteString(" histogram\n")
	default:
		return written, fmt.Errorf("unknown metric type %s", metricType.String())
	}
	written += n
	if err != nil {
		return
	}

	// Finally the samples, one line for each.
	for _, metric := range in.Metric {
		switch metricType {
		case dto.MetricType_COUNTER:
			if metric.Counter == nil {
				return written, fmt.Errorf(
					"expected counter in metric %s %s", name, metric,
				)
			}
			n, err = writeSample(
				w, name, "", metric, "", 0,
				metric.Counter.GetValue(),
			)
		case dto.MetricType_GAUGE:
			if metric.Gauge == nil {
				return written, fmt.Errorf(
					"expected gauge in metric %s %s", name, metric,
				)
			}
			n, err = writeSample(
				w, name, "", metric, "", 0,
				metric.Gauge.GetValue(),
			)
		case dto.MetricType_UNTYPED:
			if metric.Untyped == nil {
				return written, fmt.Errorf(
					"expected untyped in metric %s %s", name, metric,
				)
			}
			n, err = writeSample(
				w, name, "", metric, "", 0,
				metric.Untyped.GetValue(),
			)
		case dto.MetricType_SUMMARY:
			if metric.Summary == nil {
				return written, fmt.Errorf(
					"expected summary in metric %s %s", name, metric,
				)
			}
			for _, q := range metric.Summary.Quantile {
				n, err = writeSample(
					w, name, "", metric,
					model.QuantileLabel, q.GetQuantile(),
					q.GetValue(),
				)
				written += n
				if err != nil {
					return
				}
			}
			n, err = writeSample(
				w, name, "_sum", metric, "", 0,
				metric.Summary.GetSampleSum(),
			)
			written += n
			if err != nil {
				return
			}
			n, err = writeSample(
				w, name, "_count", metric, "", 0,
				float64(metric.Summary.GetSampleCount()),
			)
		case dto.MetricType_HISTOGRAM:
			if metric.Histogram == nil {
				return written, fmt.Errorf(
					"expected histogram in metric %s %s", name, metric,
				)
			}
			infSeen := false
			for _, b := range metric.Histogram.Bucket {
				n, err = writeSample(
					w, name, "_bucket", metric,
					model.BucketLabel, b.GetUpperBound(),
					float64(b.GetCumulativeCount()),
				)
				written += n
				if err != nil {
					return
				}
				if math.IsInf(b.GetUpperBound(), +1) {
					infSeen = true
				}
			}
			if !infSeen {
				n, err = writeSample(
					w, name, "_bucket", metric,
					model.BucketLabel, math.Inf(+1),
					float64(metric.Histogram.GetSampleCount()),
				)
				written += n
				if err != nil {
					return
				}
			}
			n, err = writeSample(
				w, name, "_sum", metric, "", 0,
				metric.Histogram.GetSampleSum(),
			)
			written += n
			if err != nil {
				return
			}
			n, err = writeSample(
				w, name, "_count", metric, "", 0,
				float64(metric.Histogram.GetSampleCount()),
			)
		default:
			return written, fmt.Errorf(
				"unexpected type in metric %s %s", name, metric,
			)
		}
		written += n
		if err != nil {
			return
		}
	}
	return
}

// writeSample writes a single sample in text format to w, given the metric
// name, the metric proto message itself, optionally an additional label name
// with a float64 value (use empty string as label name if not required), and
// the value. The function returns the number of bytes written and any error
// encountered.
func writeSample(
	w enhancedWriter,
	name, suffix string,
	metric *dto.Metric,
	additionalLabelName string, additionalLabelValue float64,
	value float64,
) (int, error) {
	var written int
	n, err := w.WriteString(name)
	written += n
	if err != nil {
		return written, err
	}
	if suffix != "" {
		n, err = w.WriteString(suffix)
		written += n
		if err != nil {
			return written, err
		}
	}
	n, err = writeLabelPairs(
		w, metric.Label, additionalLabelName, additionalLabelValue,
	)
	written += n
	if err != nil {
		return written, err
	}
	err = w.WriteByte(' ')
	written++
	if err != nil {
		return written, err
	}
	n, err = writeFloat(w, value)
	written += n
	if err != nil {
		return written, err
	}
	if metric.TimestampMs != nil {
		err = w.WriteByte(' ')
		written++
		if err != nil {
			return written, err
		}
		n, err = writeInt(w, *metric.TimestampMs)
		written += n
		if err != nil {
			return written, err
		}
	}
	err = w.WriteByte('\n')
	written++
	if err != nil {
		return written, err
	}
	return written, nil
}

// writeLabelPairs converts a slice of LabelPair proto messages plus the
// explicitly given additional label pair into text formatted as required by the
// text format and writes it to 'w'. An empty slice in combination with an empty
// string 'additionalLabelName' results in nothing being written. Otherwise, the
// label pairs are written, escaped as required by the text format, and enclosed
// in '{...}'. The function returns the number of bytes written and any error
// encountered.
func writeLabelPairs(
	w enhancedWriter,
	in []*dto.LabelPair,
	additionalLabelName string, additionalLabelValue float64,
) (int, error) {
	if len(in) == 0 && additionalLabelName == "" {
		return 0, nil
	}
	var (
		written   int
		separator byte = '{'
	)
	for _, lp := range in {
		err := w.WriteByte(separator)
		written++
		if err != nil {
			return written, err
		}
		n, err := w.WriteString(lp.GetName())
		written += n
		if err != nil {
			return written, err
		}
		n, err = w.WriteString(`="`)
		written += n
		if err != nil {
			return written, err
		}
		n, err = writeEscapedString(w, lp.GetValue(), true)
		written += n
		if err != nil {
			return written, err
		}
		err = w.WriteByte('"')
		written++
		if err != nil {
			return written, err
		}
		separator = ','
	}
	if additionalLabelName != "" {
		err := w.WriteByte(separator)
		written++
		if err != nil {
			return written, err
		}
		n, err := w.WriteString(additionalLabelName)
		written += n
		if err != nil {
			return written, err
		}
		n, err = w.WriteString(`="`)
		written += n
		if err != nil {
			return written, err
		}
		n, err = writeFloat(w, additionalLabelValue)
		written += n
		if err != nil {
			return written, err
		}
		err = w.WriteByte('"')
		written++
		if err != nil {
			return written, err
		}
	}
	err := w.WriteByte('}')
	written++
	if err != nil {
		return written, err
	}
	return written, nil
}

// writeEscapedString replaces '\' by '\\', new line character by '\n', and - if
// includeDoubleQuote is true - '"' by '\"'.
var (
	escaper       = strings.NewReplacer("\\", `\\`, "\n", `\n`)
	quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
)

func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) {
	if includeDoubleQuote {
		return quotedEscaper.WriteString(w, v)
	}
	return escaper.WriteString(w, v)
}

// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes
// a few common cases for increased efficiency. For non-hardcoded cases, it uses
// strconv.AppendFloat to avoid allocations, similar to writeInt.
func writeFloat(w enhancedWriter, f float64) (int, error) {
	switch {
	case f == 1:
		return 1, w.WriteByte('1')
	case f == 0:
		return 1, w.WriteByte('0')
	case f == -1:
		return w.WriteString("-1")
	case math.IsNaN(f):
		return w.WriteString("NaN")
	case math.IsInf(f, +1):
		return w.WriteString("+Inf")
	case math.IsInf(f, -1):
		return w.WriteString("-Inf")
	default:
		bp := numBufPool.Get().(*[]byte)
		*bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
		written, err := w.Write(*bp)
		numBufPool.Put(bp)
		return written, err
	}
}

// writeInt is equivalent to fmt.Fprint with an int64 argument but uses
// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid
// allocations.
func writeInt(w enhancedWriter, i int64) (int, error) {
	bp := numBufPool.Get().(*[]byte)
	*bp = strconv.AppendInt((*bp)[:0], i, 10)
	written, err := w.Write(*bp)
	numBufPool.Put(bp)
	return written, err
}
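For orientation, here is a minimal sketch of how the text writer above is normally driven from application code, via the exported expfmt.MetricFamilyToText entry point that lives earlier in this same file. The metric family literal and the output shown in the comment are illustrative assumptions, not part of this diff.

package main

import (
	"os"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
)

func main() {
	// A single counter family with one labelled sample.
	mf := &dto.MetricFamily{
		Name: proto.String("http_requests_total"),
		Help: proto.String("Total number of HTTP requests."),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{{
			Label: []*dto.LabelPair{{
				Name:  proto.String("code"),
				Value: proto.String("200"),
			}},
			Counter: &dto.Counter{Value: proto.Float64(1027)},
		}},
	}

	// Renders roughly:
	//   # HELP http_requests_total Total number of HTTP requests.
	//   # TYPE http_requests_total counter
	//   http_requests_total{code="200"} 1027
	if _, err := expfmt.MetricFamilyToText(os.Stdout, mf); err != nil {
		panic(err)
	}
}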
779 vendor/github.com/prometheus/common/expfmt/text_parse.go generated vendored Normal file
@@ -0,0 +1,779 @@
|
||||||
|
// Copyright 2014 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package expfmt
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"math"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
dto "github.com/prometheus/client_model/go"
|
||||||
|
|
||||||
|
"github.com/prometheus/common/model"
|
||||||
|
"google.golang.org/protobuf/proto"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A stateFn is a function that represents a state in a state machine. By
|
||||||
|
// executing it, the state is progressed to the next state. The stateFn returns
|
||||||
|
// another stateFn, which represents the new state. The end state is represented
|
||||||
|
// by nil.
|
||||||
|
type stateFn func() stateFn
|
||||||
|
|
||||||
|
// ParseError signals errors while parsing the simple and flat text-based
|
||||||
|
// exchange format.
|
||||||
|
type ParseError struct {
|
||||||
|
Line int
|
||||||
|
Msg string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error implements the error interface.
|
||||||
|
func (e ParseError) Error() string {
|
||||||
|
return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TextParser is used to parse the simple and flat text-based exchange format. Its
|
||||||
|
// zero value is ready to use.
|
||||||
|
type TextParser struct {
|
||||||
|
metricFamiliesByName map[string]*dto.MetricFamily
|
||||||
|
buf *bufio.Reader // Where the parsed input is read through.
|
||||||
|
err error // Most recent error.
|
||||||
|
lineCount int // Tracks the line count for error messages.
|
||||||
|
currentByte byte // The most recent byte read.
|
||||||
|
currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes.
|
||||||
|
currentMF *dto.MetricFamily
|
||||||
|
currentMetric *dto.Metric
|
||||||
|
currentLabelPair *dto.LabelPair
|
||||||
|
|
||||||
|
// The remaining member variables are only used for summaries/histograms.
|
||||||
|
currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
|
||||||
|
// Summary specific.
|
||||||
|
summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature.
|
||||||
|
currentQuantile float64
|
||||||
|
// Histogram specific.
|
||||||
|
histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature.
|
||||||
|
currentBucket float64
|
||||||
|
// These tell us if the currently processed line ends on '_count' or
|
||||||
|
// '_sum' respectively and belong to a summary/histogram, representing the sample
|
||||||
|
// count and sum of that summary/histogram.
|
||||||
|
currentIsSummaryCount, currentIsSummarySum bool
|
||||||
|
currentIsHistogramCount, currentIsHistogramSum bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
|
||||||
|
// format and creates MetricFamily proto messages. It returns the MetricFamily
|
||||||
|
// proto messages in a map where the metric names are the keys, along with any
|
||||||
|
// error encountered.
|
||||||
|
//
|
||||||
|
// If the input contains duplicate metrics (i.e. lines with the same metric name
|
||||||
|
// and exactly the same label set), the resulting MetricFamily will contain
|
||||||
|
// duplicate Metric proto messages. Similar is true for duplicate label
|
||||||
|
// names. Checks for duplicates have to be performed separately, if required.
|
||||||
|
// Also note that neither the metrics within each MetricFamily are sorted nor
|
||||||
|
// the label pairs within each Metric. Sorting is not required for the most
|
||||||
|
// frequent use of this method, which is sample ingestion in the Prometheus
|
||||||
|
// server. However, for presentation purposes, you might want to sort the
|
||||||
|
// metrics, and in some cases, you must sort the labels, e.g. for consumption by
|
||||||
|
// the metric family injection hook of the Prometheus registry.
|
||||||
|
//
|
||||||
|
// Summaries and histograms are rather special beasts. You would probably not
|
||||||
|
// use them in the simple text format anyway. This method can deal with
|
||||||
|
// summaries and histograms if they are presented in exactly the way the
|
||||||
|
// text.Create function creates them.
|
||||||
|
//
|
||||||
|
// This method must not be called concurrently. If you want to parse different
|
||||||
|
// input concurrently, instantiate a separate Parser for each goroutine.
|
||||||
|
func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) {
|
||||||
|
p.reset(in)
|
||||||
|
for nextState := p.startOfLine; nextState != nil; nextState = nextState() {
|
||||||
|
// Magic happens here...
|
||||||
|
}
|
||||||
|
// Get rid of empty metric families.
|
||||||
|
for k, mf := range p.metricFamiliesByName {
|
||||||
|
if len(mf.GetMetric()) == 0 {
|
||||||
|
delete(p.metricFamiliesByName, k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// If p.err is io.EOF now, we have run into a premature end of the input
|
||||||
|
// stream. Turn this error into something nicer and more
|
||||||
|
// meaningful. (io.EOF is often used as a signal for the legitimate end
|
||||||
|
// of an input stream.)
|
||||||
|
if p.err == io.EOF {
|
||||||
|
p.parseError("unexpected end of input stream")
|
||||||
|
}
|
||||||
|
return p.metricFamiliesByName, p.err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *TextParser) reset(in io.Reader) {
|
||||||
|
p.metricFamiliesByName = map[string]*dto.MetricFamily{}
|
||||||
|
if p.buf == nil {
|
||||||
|
p.buf = bufio.NewReader(in)
|
||||||
|
} else {
|
||||||
|
p.buf.Reset(in)
|
||||||
|
}
|
||||||
|
p.err = nil
|
||||||
|
p.lineCount = 0
|
||||||
|
if p.summaries == nil || len(p.summaries) > 0 {
|
||||||
|
p.summaries = map[uint64]*dto.Metric{}
|
||||||
|
}
|
||||||
|
if p.histograms == nil || len(p.histograms) > 0 {
|
||||||
|
p.histograms = map[uint64]*dto.Metric{}
|
||||||
|
}
|
||||||
|
p.currentQuantile = math.NaN()
|
||||||
|
p.currentBucket = math.NaN()
|
||||||
|
}
|
||||||
|
|
||||||
|
// startOfLine represents the state where the next byte read from p.buf is the
|
||||||
|
// start of a line (or whitespace leading up to it).
|
||||||
|
func (p *TextParser) startOfLine() stateFn {
|
||||||
|
p.lineCount++
|
||||||
|
if p.skipBlankTab(); p.err != nil {
|
||||||
|
// This is the only place that we expect to see io.EOF,
|
||||||
|
// which is not an error but the signal that we are done.
|
||||||
|
// Any other error that happens to align with the start of
|
||||||
|
// a line is still an error.
|
||||||
|
if p.err == io.EOF {
|
||||||
|
p.err = nil
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
switch p.currentByte {
|
||||||
|
case '#':
|
||||||
|
return p.startComment
|
||||||
|
case '\n':
|
||||||
|
return p.startOfLine // Empty line, start the next one.
|
||||||
|
}
|
||||||
|
return p.readingMetricName
|
||||||
|
}
|
||||||
|
|
||||||
|
// startComment represents the state where the next byte read from p.buf is the
|
||||||
|
// start of a comment (or whitespace leading up to it).
|
||||||
|
func (p *TextParser) startComment() stateFn {
|
||||||
|
if p.skipBlankTab(); p.err != nil {
|
||||||
|
return nil // Unexpected end of input.
|
||||||
|
}
|
||||||
|
if p.currentByte == '\n' {
|
||||||
|
return p.startOfLine
|
||||||
|
}
|
||||||
|
if p.readTokenUntilWhitespace(); p.err != nil {
|
||||||
|
return nil // Unexpected end of input.
|
||||||
|
}
|
||||||
|
// If we have hit the end of line already, there is nothing left
|
||||||
|
// to do. This is not considered a syntax error.
|
||||||
|
if p.currentByte == '\n' {
|
||||||
|
return p.startOfLine
|
||||||
|
}
|
||||||
|
keyword := p.currentToken.String()
|
||||||
|
if keyword != "HELP" && keyword != "TYPE" {
|
||||||
|
// Generic comment, ignore by fast forwarding to end of line.
|
||||||
|
for p.currentByte != '\n' {
|
||||||
|
if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
|
||||||
|
return nil // Unexpected end of input.
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return p.startOfLine
|
||||||
|
}
|
||||||
|
// There is something. Next has to be a metric name.
|
||||||
|
if p.skipBlankTab(); p.err != nil {
|
||||||
|
return nil // Unexpected end of input.
|
||||||
|
}
|
||||||
|
if p.readTokenAsMetricName(); p.err != nil {
|
||||||
|
return nil // Unexpected end of input.
|
||||||
|
}
|
||||||
|
if p.currentByte == '\n' {
|
||||||
|
// At the end of the line already.
|
||||||
|
// Again, this is not considered a syntax error.
|
||||||
|
return p.startOfLine
|
||||||
|
}
|
||||||
|
if !isBlankOrTab(p.currentByte) {
|
||||||
|
p.parseError("invalid metric name in comment")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
p.setOrCreateCurrentMF()
|
||||||
|
if p.skipBlankTab(); p.err != nil {
|
||||||
|
return nil // Unexpected end of input.
|
||||||
|
}
|
||||||
|
if p.currentByte == '\n' {
|
||||||
|
// At the end of the line already.
|
||||||
|
// Again, this is not considered a syntax error.
|
||||||
|
return p.startOfLine
|
||||||
|
}
|
||||||
|
switch keyword {
|
||||||
|
case "HELP":
|
||||||
|
return p.readingHelp
|
||||||
|
case "TYPE":
|
||||||
|
return p.readingType
|
||||||
|
}
|
||||||
|
panic(fmt.Sprintf("code error: unexpected keyword %q", keyword))
|
||||||
|
}
|
||||||
|
|
||||||
|
// readingMetricName represents the state where the last byte read (now in
|
||||||
|
// p.currentByte) is the first byte of a metric name.
|
||||||
|
func (p *TextParser) readingMetricName() stateFn {
|
||||||
|
if p.readTokenAsMetricName(); p.err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if p.currentToken.Len() == 0 {
|
||||||
|
p.parseError("invalid metric name")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
p.setOrCreateCurrentMF()
|
||||||
|
// Now is the time to fix the type if it hasn't happened yet.
|
||||||
|
if p.currentMF.Type == nil {
|
||||||
|
p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
|
||||||
|
}
|
||||||
|
p.currentMetric = &dto.Metric{}
|
||||||
|
// Do not append the newly created currentMetric to
|
||||||
|
// currentMF.Metric right now. First wait if this is a summary,
|
||||||
|
// and the metric exists already, which we can only know after
|
||||||
|
// having read all the labels.
|
||||||
|
if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
|
||||||
|
return nil // Unexpected end of input.
|
||||||
|
}
|
||||||
|
return p.readingLabels
|
||||||
|
}
|
||||||
|
|
||||||
|
// readingLabels represents the state where the last byte read (now in
|
||||||
|
// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the
|
||||||
|
// first byte of the value (otherwise).
|
||||||
|
func (p *TextParser) readingLabels() stateFn {
|
||||||
|
// Summaries/histograms are special. We have to reset the
|
||||||
|
// currentLabels map, currentQuantile and currentBucket before starting to
|
||||||
|
// read labels.
|
||||||
|
if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
|
||||||
|
p.currentLabels = map[string]string{}
|
||||||
|
p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName()
|
||||||
|
p.currentQuantile = math.NaN()
|
||||||
|
p.currentBucket = math.NaN()
|
||||||
|
}
|
||||||
|
if p.currentByte != '{' {
|
||||||
|
return p.readingValue
|
||||||
|
}
|
||||||
|
return p.startLabelName
|
||||||
|
}
|
||||||
|
|
||||||
|
// startLabelName represents the state where the next byte read from p.buf is
|
||||||
|
// the start of a label name (or whitespace leading up to it).
|
||||||
|
func (p *TextParser) startLabelName() stateFn {
|
||||||
|
if p.skipBlankTab(); p.err != nil {
|
||||||
|
return nil // Unexpected end of input.
|
||||||
|
}
|
||||||
|
if p.currentByte == '}' {
|
||||||
|
if p.skipBlankTab(); p.err != nil {
|
||||||
|
return nil // Unexpected end of input.
|
||||||
|
}
|
||||||
|
return p.readingValue
|
||||||
|
}
|
||||||
|
if p.readTokenAsLabelName(); p.err != nil {
|
||||||
|
return nil // Unexpected end of input.
|
||||||
|
}
|
||||||
|
if p.currentToken.Len() == 0 {
|
||||||
|
p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
|
||||||
|
if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
|
||||||
|
p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// Special summary/histogram treatment. Don't add 'quantile' and 'le'
|
||||||
|
// labels to 'real' labels.
|
||||||
|
if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
|
||||||
|
!(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
|
||||||
|
p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
|
||||||
|
}
|
||||||
|
if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
|
||||||
|
return nil // Unexpected end of input.
|
||||||
|
}
|
||||||
|
if p.currentByte != '=' {
|
||||||
|
p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// Check for duplicate label names.
|
||||||
|
labels := make(map[string]struct{})
|
||||||
|
for _, l := range p.currentMetric.Label {
|
||||||
|
lName := l.GetName()
|
||||||
|
if _, exists := labels[lName]; !exists {
|
||||||
|
labels[lName] = struct{}{}
|
||||||
|
} else {
|
||||||
|
p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName()))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return p.startLabelValue
|
||||||
|
}
|
||||||
|
|
||||||
|
// startLabelValue represents the state where the next byte read from p.buf is
|
||||||
|
// the start of a (quoted) label value (or whitespace leading up to it).
|
||||||
|
func (p *TextParser) startLabelValue() stateFn {
|
||||||
|
if p.skipBlankTab(); p.err != nil {
|
||||||
|
return nil // Unexpected end of input.
|
||||||
|
}
|
||||||
|
if p.currentByte != '"' {
|
||||||
|
p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if p.readTokenAsLabelValue(); p.err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if !model.LabelValue(p.currentToken.String()).IsValid() {
|
||||||
|
p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String()))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
p.currentLabelPair.Value = proto.String(p.currentToken.String())
|
||||||
|
// Special treatment of summaries:
|
||||||
|
// - Quantile labels are special, will result in dto.Quantile later.
|
||||||
|
// - Other labels have to be added to currentLabels for signature calculation.
|
||||||
|
if p.currentMF.GetType() == dto.MetricType_SUMMARY {
|
||||||
|
if p.currentLabelPair.GetName() == model.QuantileLabel {
|
||||||
|
if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
|
||||||
|
// Create a more helpful error message.
|
||||||
|
p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Similar special treatment of histograms.
|
||||||
|
if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
|
||||||
|
if p.currentLabelPair.GetName() == model.BucketLabel {
|
||||||
|
if p.currentBucket, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
|
||||||
|
// Create a more helpful error message.
|
||||||
|
p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if p.skipBlankTab(); p.err != nil {
|
||||||
|
return nil // Unexpected end of input.
|
||||||
|
}
|
||||||
|
switch p.currentByte {
|
||||||
|
case ',':
|
||||||
|
return p.startLabelName
|
||||||
|
|
||||||
|
case '}':
|
||||||
|
if p.skipBlankTab(); p.err != nil {
|
||||||
|
return nil // Unexpected end of input.
|
||||||
|
}
|
||||||
|
return p.readingValue
|
||||||
|
default:
|
||||||
|
p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// readingValue represents the state where the last byte read (now in
|
||||||
|
// p.currentByte) is the first byte of the sample value (i.e. a float).
|
||||||
|
func (p *TextParser) readingValue() stateFn {
|
||||||
|
// When we are here, we have read all the labels, so for the
|
||||||
|
// special case of a summary/histogram, we can finally find out
|
||||||
|
// if the metric already exists.
|
||||||
|
if p.currentMF.GetType() == dto.MetricType_SUMMARY {
|
||||||
|
signature := model.LabelsToSignature(p.currentLabels)
|
||||||
|
if summary := p.summaries[signature]; summary != nil {
|
||||||
|
p.currentMetric = summary
|
||||||
|
} else {
|
||||||
|
p.summaries[signature] = p.currentMetric
|
||||||
|
p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
|
||||||
|
}
|
||||||
|
} else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
|
||||||
|
signature := model.LabelsToSignature(p.currentLabels)
|
||||||
|
if histogram := p.histograms[signature]; histogram != nil {
|
||||||
|
p.currentMetric = histogram
|
||||||
|
} else {
|
||||||
|
p.histograms[signature] = p.currentMetric
|
||||||
|
p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
|
||||||
|
}
|
||||||
|
if p.readTokenUntilWhitespace(); p.err != nil {
|
||||||
|
return nil // Unexpected end of input.
|
||||||
|
}
|
||||||
|
value, err := parseFloat(p.currentToken.String())
|
||||||
|
if err != nil {
|
||||||
|
// Create a more helpful error message.
|
||||||
|
p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
switch p.currentMF.GetType() {
|
||||||
|
case dto.MetricType_COUNTER:
|
||||||
|
p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
|
||||||
|
case dto.MetricType_GAUGE:
|
||||||
|
p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
|
||||||
|
case dto.MetricType_UNTYPED:
|
||||||
|
p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
|
||||||
|
case dto.MetricType_SUMMARY:
|
||||||
|
// *sigh*
|
||||||
|
if p.currentMetric.Summary == nil {
|
||||||
|
p.currentMetric.Summary = &dto.Summary{}
|
||||||
|
}
|
||||||
|
switch {
|
||||||
|
case p.currentIsSummaryCount:
|
||||||
|
p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
|
||||||
|
case p.currentIsSummarySum:
|
||||||
|
p.currentMetric.Summary.SampleSum = proto.Float64(value)
|
||||||
|
case !math.IsNaN(p.currentQuantile):
|
||||||
|
p.currentMetric.Summary.Quantile = append(
|
||||||
|
p.currentMetric.Summary.Quantile,
|
||||||
|
&dto.Quantile{
|
||||||
|
Quantile: proto.Float64(p.currentQuantile),
|
||||||
|
Value: proto.Float64(value),
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
case dto.MetricType_HISTOGRAM:
|
||||||
|
// *sigh*
|
||||||
|
if p.currentMetric.Histogram == nil {
|
||||||
|
p.currentMetric.Histogram = &dto.Histogram{}
|
||||||
|
}
|
||||||
|
switch {
|
||||||
|
case p.currentIsHistogramCount:
|
||||||
|
p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))
|
||||||
|
case p.currentIsHistogramSum:
|
||||||
|
p.currentMetric.Histogram.SampleSum = proto.Float64(value)
|
||||||
|
case !math.IsNaN(p.currentBucket):
|
||||||
|
p.currentMetric.Histogram.Bucket = append(
|
||||||
|
p.currentMetric.Histogram.Bucket,
|
||||||
|
&dto.Bucket{
|
||||||
|
UpperBound: proto.Float64(p.currentBucket),
|
||||||
|
CumulativeCount: proto.Uint64(uint64(value)),
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
|
||||||
|
}
|
||||||
|
if p.currentByte == '\n' {
|
||||||
|
return p.startOfLine
|
||||||
|
}
|
||||||
|
return p.startTimestamp
|
||||||
|
}
|
||||||
|
|
||||||
|
// startTimestamp represents the state where the next byte read from p.buf is
|
||||||
|
// the start of the timestamp (or whitespace leading up to it).
|
||||||
|
func (p *TextParser) startTimestamp() stateFn {
|
||||||
|
if p.skipBlankTab(); p.err != nil {
|
||||||
|
return nil // Unexpected end of input.
|
||||||
|
}
|
||||||
|
if p.readTokenUntilWhitespace(); p.err != nil {
|
||||||
|
return nil // Unexpected end of input.
|
||||||
|
}
|
||||||
|
timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
// Create a more helpful error message.
|
||||||
|
p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
p.currentMetric.TimestampMs = proto.Int64(timestamp)
|
||||||
|
if p.readTokenUntilNewline(false); p.err != nil {
|
||||||
|
return nil // Unexpected end of input.
|
||||||
|
}
|
||||||
|
if p.currentToken.Len() > 0 {
|
||||||
|
p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return p.startOfLine
|
||||||
|
}
|
||||||
|
|
||||||
|
// readingHelp represents the state where the last byte read (now in
|
||||||
|
// p.currentByte) is the first byte of the docstring after 'HELP'.
|
||||||
|
func (p *TextParser) readingHelp() stateFn {
|
||||||
|
if p.currentMF.Help != nil {
|
||||||
|
p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// Rest of line is the docstring.
|
||||||
|
if p.readTokenUntilNewline(true); p.err != nil {
|
||||||
|
return nil // Unexpected end of input.
|
||||||
|
}
|
||||||
|
p.currentMF.Help = proto.String(p.currentToken.String())
|
||||||
|
return p.startOfLine
|
||||||
|
}
|
||||||
|
|
||||||
|
// readingType represents the state where the last byte read (now in
|
||||||
|
// p.currentByte) is the first byte of the type hint after 'HELP'.
|
||||||
|
func (p *TextParser) readingType() stateFn {
|
||||||
|
if p.currentMF.Type != nil {
|
||||||
|
p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// Rest of line is the type.
|
||||||
|
if p.readTokenUntilNewline(false); p.err != nil {
|
||||||
|
return nil // Unexpected end of input.
|
||||||
|
}
|
||||||
|
metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
|
||||||
|
if !ok {
|
||||||
|
p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
p.currentMF.Type = dto.MetricType(metricType).Enum()
|
||||||
|
return p.startOfLine
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseError sets p.err to a ParseError at the current line with the given
|
||||||
|
// message.
|
||||||
|
func (p *TextParser) parseError(msg string) {
|
||||||
|
p.err = ParseError{
|
||||||
|
Line: p.lineCount,
|
||||||
|
Msg: msg,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
|
||||||
|
// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
|
||||||
|
func (p *TextParser) skipBlankTab() {
|
||||||
|
for {
|
||||||
|
if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
|
||||||
|
// anything if p.currentByte is neither ' ' nor '\t'.
|
||||||
|
func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
|
||||||
|
if isBlankOrTab(p.currentByte) {
|
||||||
|
p.skipBlankTab()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
|
||||||
|
// first byte considered is the byte already read (now in p.currentByte). The
|
||||||
|
// first whitespace byte encountered is still copied into p.currentByte, but not
|
||||||
|
// into p.currentToken.
|
||||||
|
func (p *TextParser) readTokenUntilWhitespace() {
|
||||||
|
p.currentToken.Reset()
|
||||||
|
for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
|
||||||
|
p.currentToken.WriteByte(p.currentByte)
|
||||||
|
p.currentByte, p.err = p.buf.ReadByte()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
|
||||||
|
// byte considered is the byte already read (now in p.currentByte). The first
|
||||||
|
// newline byte encountered is still copied into p.currentByte, but not into
|
||||||
|
// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
|
||||||
|
// recognized: '\\' translates into '\', and '\n' into a line-feed character.
|
||||||
|
// All other escape sequences are invalid and cause an error.
|
||||||
|
func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
|
||||||
|
p.currentToken.Reset()
|
||||||
|
escaped := false
|
||||||
|
for p.err == nil {
|
||||||
|
if recognizeEscapeSequence && escaped {
|
||||||
|
switch p.currentByte {
|
||||||
|
case '\\':
|
||||||
|
p.currentToken.WriteByte(p.currentByte)
|
||||||
|
case 'n':
|
||||||
|
p.currentToken.WriteByte('\n')
|
||||||
|
default:
|
||||||
|
p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
escaped = false
|
||||||
|
} else {
|
||||||
|
switch p.currentByte {
|
||||||
|
case '\n':
|
||||||
|
return
|
||||||
|
case '\\':
|
||||||
|
escaped = true
|
||||||
|
default:
|
||||||
|
p.currentToken.WriteByte(p.currentByte)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
p.currentByte, p.err = p.buf.ReadByte()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
|
||||||
|
// The first byte considered is the byte already read (now in p.currentByte).
|
||||||
|
// The first byte not part of a metric name is still copied into p.currentByte,
|
||||||
|
// but not into p.currentToken.
|
||||||
|
func (p *TextParser) readTokenAsMetricName() {
|
||||||
|
p.currentToken.Reset()
|
||||||
|
if !isValidMetricNameStart(p.currentByte) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
p.currentToken.WriteByte(p.currentByte)
|
||||||
|
p.currentByte, p.err = p.buf.ReadByte()
|
||||||
|
if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// readTokenAsLabelName copies a label name from p.buf into p.currentToken.
|
||||||
|
// The first byte considered is the byte already read (now in p.currentByte).
|
||||||
|
// The first byte not part of a label name is still copied into p.currentByte,
|
||||||
|
// but not into p.currentToken.
|
||||||
|
func (p *TextParser) readTokenAsLabelName() {
|
||||||
|
p.currentToken.Reset()
|
||||||
|
if !isValidLabelNameStart(p.currentByte) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
p.currentToken.WriteByte(p.currentByte)
|
||||||
|
p.currentByte, p.err = p.buf.ReadByte()
|
||||||
|
if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// readTokenAsLabelValue copies a label value from p.buf into p.currentToken.
|
||||||
|
// In contrast to the other 'readTokenAs...' functions, which start with the
|
||||||
|
// last read byte in p.currentByte, this method ignores p.currentByte and starts
|
||||||
|
// with reading a new byte from p.buf. The first byte not part of a label value
|
||||||
|
// is still copied into p.currentByte, but not into p.currentToken.
|
||||||
|
func (p *TextParser) readTokenAsLabelValue() {
|
||||||
|
p.currentToken.Reset()
|
||||||
|
escaped := false
|
||||||
|
for {
|
||||||
|
if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if escaped {
|
||||||
|
switch p.currentByte {
|
||||||
|
case '"', '\\':
|
||||||
|
p.currentToken.WriteByte(p.currentByte)
|
||||||
|
case 'n':
|
||||||
|
p.currentToken.WriteByte('\n')
|
||||||
|
default:
|
||||||
|
p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
escaped = false
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
switch p.currentByte {
|
||||||
|
case '"':
|
||||||
|
return
|
||||||
|
case '\n':
|
||||||
|
p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String()))
|
||||||
|
return
|
||||||
|
case '\\':
|
||||||
|
escaped = true
|
||||||
|
default:
|
||||||
|
p.currentToken.WriteByte(p.currentByte)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *TextParser) setOrCreateCurrentMF() {
|
||||||
|
p.currentIsSummaryCount = false
|
||||||
|
p.currentIsSummarySum = false
|
||||||
|
p.currentIsHistogramCount = false
|
||||||
|
p.currentIsHistogramSum = false
|
||||||
|
name := p.currentToken.String()
|
||||||
|
if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Try out if this is a _sum or _count for a summary/histogram.
|
||||||
|
summaryName := summaryMetricName(name)
|
||||||
|
if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil {
|
||||||
|
if p.currentMF.GetType() == dto.MetricType_SUMMARY {
|
||||||
|
if isCount(name) {
|
||||||
|
p.currentIsSummaryCount = true
|
||||||
|
}
|
||||||
|
if isSum(name) {
|
||||||
|
p.currentIsSummarySum = true
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
histogramName := histogramMetricName(name)
|
||||||
|
if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil {
|
||||||
|
if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
|
||||||
|
if isCount(name) {
|
||||||
|
p.currentIsHistogramCount = true
|
||||||
|
}
|
||||||
|
if isSum(name) {
|
||||||
|
p.currentIsHistogramSum = true
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
p.currentMF = &dto.MetricFamily{Name: proto.String(name)}
|
||||||
|
p.metricFamiliesByName[name] = p.currentMF
|
||||||
|
}

func isValidLabelNameStart(b byte) bool {
	return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
}

func isValidLabelNameContinuation(b byte) bool {
	return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
}

func isValidMetricNameStart(b byte) bool {
	return isValidLabelNameStart(b) || b == ':'
}

func isValidMetricNameContinuation(b byte) bool {
	return isValidLabelNameContinuation(b) || b == ':'
}

func isBlankOrTab(b byte) bool {
	return b == ' ' || b == '\t'
}

func isCount(name string) bool {
	return len(name) > 6 && name[len(name)-6:] == "_count"
}

func isSum(name string) bool {
	return len(name) > 4 && name[len(name)-4:] == "_sum"
}

func isBucket(name string) bool {
	return len(name) > 7 && name[len(name)-7:] == "_bucket"
}

func summaryMetricName(name string) string {
	switch {
	case isCount(name):
		return name[:len(name)-6]
	case isSum(name):
		return name[:len(name)-4]
	default:
		return name
	}
}

func histogramMetricName(name string) string {
	switch {
	case isCount(name):
		return name[:len(name)-6]
	case isSum(name):
		return name[:len(name)-4]
	case isBucket(name):
		return name[:len(name)-7]
	default:
		return name
	}
}

func parseFloat(s string) (float64, error) {
	if strings.ContainsAny(s, "pP_") {
		return 0, fmt.Errorf("unsupported character in float")
	}
	return strconv.ParseFloat(s, 64)
}
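A short, hypothetical usage sketch of the parser defined in this file: it feeds a small hand-written exposition-format document into TextParser.TextToMetricFamilies and prints what comes back. The sample input text is invented for illustration.

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	input := `# HELP http_requests_total Total number of HTTP requests.
# TYPE http_requests_total counter
http_requests_total{code="200"} 1027
http_requests_total{code="500"} 3
`

	// The zero value of TextParser is ready to use; it must not be
	// shared between goroutines.
	var parser expfmt.TextParser
	families, err := parser.TextToMetricFamilies(strings.NewReader(input))
	if err != nil {
		panic(err)
	}
	for name, mf := range families {
		fmt.Printf("%s: %d sample(s), type %s\n", name, len(mf.Metric), mf.GetType())
	}
}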
67 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt generated vendored Normal file
@@ -0,0 +1,67 @@
|
||||||
|
PACKAGE
|
||||||
|
|
||||||
|
package goautoneg
|
||||||
|
import "bitbucket.org/ww/goautoneg"
|
||||||
|
|
||||||
|
HTTP Content-Type Autonegotiation.
|
||||||
|
|
||||||
|
The functions in this package implement the behaviour specified in
|
||||||
|
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
|
||||||
|
|
||||||
|
Copyright (c) 2011, Open Knowledge Foundation Ltd.
|
||||||
|
All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
|
||||||
|
Redistributions in binary form must reproduce the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer in
|
||||||
|
the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
|
||||||
|
Neither the name of the Open Knowledge Foundation Ltd. nor the
|
||||||
|
names of its contributors may be used to endorse or promote
|
||||||
|
products derived from this software without specific prior written
|
||||||
|
permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
|
||||||
|
FUNCTIONS
|
||||||
|
|
||||||
|
func Negotiate(header string, alternatives []string) (content_type string)
|
||||||
|
Negotiate the most appropriate content_type given the accept header
|
||||||
|
and a list of alternatives.
|
||||||
|
|
||||||
|
func ParseAccept(header string) (accept []Accept)
|
||||||
|
Parse an Accept Header string returning a sorted list
|
||||||
|
of clauses
|
||||||
|
|
||||||
|
|
||||||
|
TYPES
|
||||||
|
|
||||||
|
type Accept struct {
|
||||||
|
Type, SubType string
|
||||||
|
Q float32
|
||||||
|
Params map[string]string
|
||||||
|
}
|
||||||
|
Structure to represent a clause in an HTTP Accept Header
|
||||||
|
|
||||||
|
|
||||||
|
SUBDIRECTORIES
|
||||||
|
|
||||||
|
.hg
|
160 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go generated vendored Normal file
@@ -0,0 +1,160 @@
|
||||||
|
/*
|
||||||
|
Copyright (c) 2011, Open Knowledge Foundation Ltd.
|
||||||
|
All rights reserved.
|
||||||
|
|
||||||
|
HTTP Content-Type Autonegotiation.
|
||||||
|
|
||||||
|
The functions in this package implement the behaviour specified in
|
||||||
|
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
|
||||||
|
Redistributions in binary form must reproduce the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer in
|
||||||
|
the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
|
||||||
|
Neither the name of the Open Knowledge Foundation Ltd. nor the
|
||||||
|
names of its contributors may be used to endorse or promote
|
||||||
|
products derived from this software without specific prior written
|
||||||
|
permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*/
|
||||||
|
package goautoneg
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Structure to represent a clause in an HTTP Accept Header
|
||||||
|
type Accept struct {
|
||||||
|
Type, SubType string
|
||||||
|
Q float64
|
||||||
|
Params map[string]string
|
||||||
|
}
|
||||||
|
|
||||||
|
// For internal use, so that we can use the sort interface
|
||||||
|
type accept_slice []Accept
|
||||||
|
|
||||||
|
func (accept accept_slice) Len() int {
|
||||||
|
slice := []Accept(accept)
|
||||||
|
return len(slice)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (accept accept_slice) Less(i, j int) bool {
|
||||||
|
slice := []Accept(accept)
|
||||||
|
ai, aj := slice[i], slice[j]
|
||||||
|
if ai.Q > aj.Q {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if ai.Type != "*" && aj.Type == "*" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if ai.SubType != "*" && aj.SubType == "*" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (accept accept_slice) Swap(i, j int) {
|
||||||
|
slice := []Accept(accept)
|
||||||
|
slice[i], slice[j] = slice[j], slice[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse an Accept Header string returning a sorted list
|
||||||
|
// of clauses
|
||||||
|
func ParseAccept(header string) (accept []Accept) {
|
||||||
|
parts := strings.Split(header, ",")
|
||||||
|
accept = make([]Accept, 0, len(parts))
|
||||||
|
for _, part := range parts {
|
||||||
|
part := strings.Trim(part, " ")
|
||||||
|
|
||||||
|
a := Accept{}
|
||||||
|
a.Params = make(map[string]string)
|
||||||
|
a.Q = 1.0
|
||||||
|
|
||||||
|
mrp := strings.Split(part, ";")
|
||||||
|
|
||||||
|
media_range := mrp[0]
|
||||||
|
sp := strings.Split(media_range, "/")
|
||||||
|
a.Type = strings.Trim(sp[0], " ")
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case len(sp) == 1 && a.Type == "*":
|
||||||
|
a.SubType = "*"
|
||||||
|
case len(sp) == 2:
|
||||||
|
a.SubType = strings.Trim(sp[1], " ")
|
||||||
|
default:
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(mrp) == 1 {
|
||||||
|
accept = append(accept, a)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, param := range mrp[1:] {
|
||||||
|
sp := strings.SplitN(param, "=", 2)
|
||||||
|
if len(sp) != 2 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
token := strings.Trim(sp[0], " ")
|
||||||
|
if token == "q" {
|
||||||
|
a.Q, _ = strconv.ParseFloat(sp[1], 32)
|
||||||
|
} else {
|
||||||
|
a.Params[token] = strings.Trim(sp[1], " ")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
accept = append(accept, a)
|
||||||
|
}
|
||||||
|
|
||||||
|
slice := accept_slice(accept)
|
||||||
|
sort.Sort(slice)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Negotiate the most appropriate content_type given the accept header
|
||||||
|
// and a list of alternatives.
|
||||||
|
func Negotiate(header string, alternatives []string) (content_type string) {
|
||||||
|
asp := make([][]string, 0, len(alternatives))
|
||||||
|
for _, ctype := range alternatives {
|
||||||
|
asp = append(asp, strings.SplitN(ctype, "/", 2))
|
||||||
|
}
|
||||||
|
for _, clause := range ParseAccept(header) {
|
||||||
|
for i, ctsp := range asp {
|
||||||
|
if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
|
||||||
|
content_type = alternatives[i]
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if clause.Type == ctsp[0] && clause.SubType == "*" {
|
||||||
|
content_type = alternatives[i]
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if clause.Type == "*" && clause.SubType == "*" {
|
||||||
|
content_type = alternatives[i]
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
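The negotiation logic above is easiest to see with a concrete call. This sketch assumes the original, importable copy of the package at bitbucket.org/ww/goautoneg (the copy in this diff lives under an internal/ path and cannot be imported directly); the header and alternatives are invented for illustration.

package main

import (
	"fmt"

	"bitbucket.org/ww/goautoneg" // assumption: the upstream, importable copy of this package
)

func main() {
	header := "text/html;q=0.5, application/json"

	// Negotiate picks the alternative that best matches the Accept header;
	// application/json wins here because its implicit q of 1.0 outranks 0.5.
	best := goautoneg.Negotiate(header, []string{"text/html", "application/json"})
	fmt.Println(best) // application/json
}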
136 vendor/github.com/prometheus/common/model/alert.go generated vendored Normal file
@@ -0,0 +1,136 @@
|
||||||
|
// Copyright 2013 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package model
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type AlertStatus string
|
||||||
|
|
||||||
|
const (
|
||||||
|
AlertFiring AlertStatus = "firing"
|
||||||
|
AlertResolved AlertStatus = "resolved"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Alert is a generic representation of an alert in the Prometheus eco-system.
|
||||||
|
type Alert struct {
|
||||||
|
// Label value pairs for purpose of aggregation, matching, and disposition
|
||||||
|
// dispatching. This must minimally include an "alertname" label.
|
||||||
|
Labels LabelSet `json:"labels"`
|
||||||
|
|
||||||
|
// Extra key/value information which does not define alert identity.
|
||||||
|
Annotations LabelSet `json:"annotations"`
|
||||||
|
|
||||||
|
// The known time range for this alert. Both ends are optional.
|
||||||
|
StartsAt time.Time `json:"startsAt,omitempty"`
|
||||||
|
EndsAt time.Time `json:"endsAt,omitempty"`
|
||||||
|
GeneratorURL string `json:"generatorURL"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name returns the name of the alert. It is equivalent to the "alertname" label.
|
||||||
|
func (a *Alert) Name() string {
|
||||||
|
return string(a.Labels[AlertNameLabel])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fingerprint returns a unique hash for the alert. It is equivalent to
|
||||||
|
// the fingerprint of the alert's label set.
|
||||||
|
func (a *Alert) Fingerprint() Fingerprint {
|
||||||
|
return a.Labels.Fingerprint()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Alert) String() string {
|
||||||
|
s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
|
||||||
|
if a.Resolved() {
|
||||||
|
return s + "[resolved]"
|
||||||
|
}
|
||||||
|
return s + "[active]"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resolved returns true iff the activity interval ended in the past.
|
||||||
|
func (a *Alert) Resolved() bool {
|
||||||
|
return a.ResolvedAt(time.Now())
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResolvedAt returns true off the activity interval ended before
|
||||||
|
// the given timestamp.
|
||||||
|
func (a *Alert) ResolvedAt(ts time.Time) bool {
|
||||||
|
if a.EndsAt.IsZero() {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return !a.EndsAt.After(ts)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Status returns the status of the alert.
|
||||||
|
func (a *Alert) Status() AlertStatus {
|
||||||
|
if a.Resolved() {
|
||||||
|
return AlertResolved
|
||||||
|
}
|
||||||
|
return AlertFiring
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate checks whether the alert data is inconsistent.
|
||||||
|
func (a *Alert) Validate() error {
|
||||||
|
if a.StartsAt.IsZero() {
|
||||||
|
return fmt.Errorf("start time missing")
|
||||||
|
}
|
||||||
|
if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
|
||||||
|
return fmt.Errorf("start time must be before end time")
|
||||||
|
}
|
||||||
|
if err := a.Labels.Validate(); err != nil {
|
||||||
|
return fmt.Errorf("invalid label set: %s", err)
|
||||||
|
}
|
||||||
|
if len(a.Labels) == 0 {
|
||||||
|
return fmt.Errorf("at least one label pair required")
|
||||||
|
}
|
||||||
|
if err := a.Annotations.Validate(); err != nil {
|
||||||
|
return fmt.Errorf("invalid annotations: %s", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Alert is a list of alerts that can be sorted in chronological order.
|
||||||
|
type Alerts []*Alert
|
||||||
|
|
||||||
|
func (as Alerts) Len() int { return len(as) }
|
||||||
|
func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
|
||||||
|
|
||||||
|
func (as Alerts) Less(i, j int) bool {
|
||||||
|
if as[i].StartsAt.Before(as[j].StartsAt) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if as[i].EndsAt.Before(as[j].EndsAt) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return as[i].Fingerprint() < as[j].Fingerprint()
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasFiring returns true iff one of the alerts is not resolved.
|
||||||
|
func (as Alerts) HasFiring() bool {
|
||||||
|
for _, a := range as {
|
||||||
|
if !a.Resolved() {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Status returns StatusFiring iff at least one of the alerts is firing.
|
||||||
|
func (as Alerts) Status() AlertStatus {
|
||||||
|
if as.HasFiring() {
|
||||||
|
return AlertFiring
|
||||||
|
}
|
||||||
|
return AlertResolved
|
||||||
|
}
|
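To illustrate how the Alert type above is meant to be used, here is a hedged sketch that builds an alert, validates it, and reads its status; the label names and values are placeholders, not anything defined in this diff.

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	alert := &model.Alert{
		Labels: model.LabelSet{
			model.AlertNameLabel: "InstanceDown",
			"instance":           "example.org:443",
		},
		Annotations: model.LabelSet{"summary": "instance is unreachable"},
		StartsAt:    time.Now().Add(-10 * time.Minute),
	}

	if err := alert.Validate(); err != nil {
		panic(err)
	}
	// With no EndsAt set, the alert is still considered active.
	fmt.Println(alert.Name(), alert.Status()) // InstanceDown firing
}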